Compare commits

127 Commits

Author SHA1 Message Date
828106ba81 feat: добавить скрипты для инициализации тестовых данных и настройки окружения
Some checks failed
Test / cargo test (push) Failing after 58s
2026-02-09 22:39:12 +00:00
a7dd18fa1d feat: удалить устаревшие файлы и директории из проекта 2026-02-10 02:07:52 +04:00
f8cca32968 feat: изменить язык документации на русский 2026-02-10 02:05:27 +04:00
ef93237724 Add .gitignore for Python and project-specific files; implement archive roundtrip validator
Some checks failed
Test / cargo test (push) Failing after 50s
- Updated .gitignore to include common Python artifacts and project-specific files.
- Added `archive_roundtrip_validator.py` script for validating NRes and RsLi formats against real game data.
- Created README.md for the tools directory, detailing usage and supported signatures.
- Enhanced nres.md with practical nuances and empirical checks for game data.
2026-02-10 01:58:16 +04:00
58a896221f feat: обновление навигации в документации, добавление разделов для 3D моделей, текстур и эффектов 2026-02-10 01:49:09 +04:00
3f48f53bd5 feat: добавление документации по эффектам и частицам 2026-02-10 01:48:59 +04:00
2953f0c8c9 feat: добавление документации по модели ресурсов MSH/AniMesh 2026-02-10 01:47:19 +04:00
022ec608f5 feat: добавление документации по текстурам и материалам 2026-02-10 01:44:01 +04:00
54c94fddb5 Add detailed documentation for NRes and RsLi resource formats
Some checks failed
Test / cargo test (push) Failing after 41s
- Introduced a comprehensive markdown file `nres.md` detailing the structure, header, and operations of the NRes and RsLi formats.
- Updated `mkdocs.yml` to reflect the new documentation structure, consolidating NRes and RsLi under a single entry.
2026-02-10 00:30:25 +04:00
0def311fd1 feat: обновление документации по алгоритмам декомпрессии и добавление файлов .gitkeep в директории libs и tools 2026-02-05 03:28:03 +04:00
2f157d0972 feat: добавление файлов .gitkeep в директории libs и tools 2026-02-05 01:44:31 +04:00
8f57a8f0f9 feat: добавление файлов конфигурации Zig и обновление .gitignore
Some checks failed
Test / cargo test (push) Failing after 47s
2026-02-05 01:40:47 +04:00
40e7d88fd0 Add NRes format documentation and decompression algorithms
Some checks failed
Test / cargo test (push) Failing after 40s
- Created `huffman_decompression.md` detailing the Huffman decompression algorithm used in NRes, including context structure, block modes, and decoding methods.
- Created `overview.md` for the NRes format, outlining file structure, header details, file entries, and packing algorithms.
- Updated `mkdocs.yml` to include new documentation files in the navigation structure.
2026-02-05 01:32:24 +04:00
afe6b9a29b feat: remove Rust project 2026-02-05 00:37:59 +04:00
6a46fe9825 chore(deps): update actions/checkout action to v6
All checks were successful
Test / cargo test (pull_request) Successful in 1m36s
Test / cargo test (push) Successful in 1m45s
RenovateBot / renovate (push) Successful in 1m50s
2026-01-30 14:16:24 +00:00
7818a7ef3f chore: update renovate workflow to include GITHUB_COM_TOKEN
All checks were successful
Test / cargo test (push) Successful in 1m43s
RenovateBot / renovate (push) Successful in 25s
2026-01-30 18:15:52 +04:00
15f2a73e95 chore: wire RENOVATE_LOG_LEVEL
All checks were successful
RenovateBot / renovate (push) Successful in 21s
Test / cargo test (push) Successful in 1m35s
2026-01-30 04:35:32 +04:00
2890b69678 migrate renovate config to gitea
All checks were successful
RenovateBot / renovate (push) Successful in 1m51s
Test / cargo test (push) Successful in 1m34s
2026-01-30 04:27:02 +04:00
27e9d2b39c Move CI to Gitea Actions
All checks were successful
Test / cargo test (push) Successful in 1m37s
2026-01-30 04:00:58 +04:00
b283e2a8df Update dependencies and fix clippy warnings
Some checks failed
Mirror / mirror (push) Failing after 7s
Test / cargo test (push) Successful in 1m39s
2026-01-30 03:29:08 +04:00
9dcce90201 chore: update dependencies and fix clippy warnings
Some checks failed
Mirror / mirror (push) Failing after 1m45s
Test / cargo test (push) Successful in 1m33s
- refresh Cargo.lock to latest compatible crates
- simplify u32->u64 conversion in libnres
- use is_multiple_of in unpacker list validation
2026-01-19 20:52:54 +04:00
renovate[bot]
7c876faf12 Update Rust crate console to v0.16.1 (#48)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-08 13:25:23 +00:00
renovate[bot]
39c66e698e Update Rust crate log to v0.4.28 (#47)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-04 05:03:11 +00:00
renovate[bot]
abac84a008 Update Rust crate image to v0.25.8 (#46)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-03 20:21:26 +00:00
renovate[bot]
b44217d4af Update Rust crate clap to v4.5.47 (#45)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-03 05:25:25 +00:00
renovate[bot]
c268e4c205 Update all digest updates (#41)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-27 12:23:15 +04:00
renovate[bot]
8aabe74eb2 Update Rust crate thiserror to v2.0.15 (#39)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-17 10:13:52 +00:00
84f2175fd2 Merge pull request #33 from valentineus/renovate/all-digest
Update all digest updates
2025-08-13 18:16:56 +04:00
renovate[bot]
307b9c6d90 Update all digest updates 2025-08-13 13:45:03 +00:00
renovate[bot]
7de26b16d4 Update Rust crate clap to v4.5.41 (#32)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-10 19:47:12 +00:00
52f2ad43e6 Merge pull request #29 from valentineus/renovate/all-digest
Update all digest updates
2025-07-09 03:23:23 +04:00
renovate[bot]
c4dec3fe4c Update all digest updates 2025-07-08 20:30:48 +00:00
e51edcb561 Update dependencies in Cargo.lock 2025-06-14 23:02:49 +00:00
2273fd4263 Merge pull request #7 from valentineus/nres
Обновление структуры проекта
2025-06-15 02:42:55 +04:00
renovate[bot]
d4f104cf5e Update Rust crate clap to v4.5.40 (#28)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-10 13:27:38 +00:00
renovate[bot]
7f41a51f2a Update all digest updates (#27)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-28 03:58:39 +00:00
renovate[bot]
e97610a8ac Update Rust crate clap to v4.5.38 (#26)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-11 06:45:14 +00:00
renovate[bot]
ee02d922ae Update Rust crate miette to v7.6.0 (#25)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-27 14:41:46 +00:00
renovate[bot]
dbd7b6bf33 Update all digest updates (#24)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-21 18:14:47 +00:00
renovate[bot]
949c0aa087 Update all digest updates (#14)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-21 09:37:22 +00:00
renovate[bot]
4f29af53b6 Update Rust crate console to v0.15.11 (#13)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-02 05:23:27 +00:00
renovate[bot]
1d62740d59 Update Rust crate clap to v4.5.31 (#12)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-24 22:22:28 +00:00
d274602104 Merge branch 'master' into nres 2025-02-23 17:23:33 +04:00
8bc39d10b1 Updated dependencies 2025-02-23 17:22:30 +04:00
88faa6e3ea Merge branch 'master' into nres 2025-02-22 14:19:02 +04:00
renovate[bot]
66705ba4f0 Update Rust crate log to v0.4.26 (#11)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-21 10:51:06 +00:00
renovate[bot]
bb4c217ee2 Update all digest updates (#10)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-20 12:35:42 +00:00
renovate[bot]
c83822e353 Update Rust crate clap to v4.5.30 (#9)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-18 03:03:44 +00:00
renovate[bot]
130ee8df5b Update Rust crate clap to v4.5.29 (#8)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-12 02:55:33 +00:00
8d8653133b Обновление структуры проекта 2025-02-08 01:11:02 +00:00
94d2f8a512 Обновление зависимостей 2025-02-08 00:44:59 +00:00
215a093344 Updated Renovate config 2025-02-05 03:43:58 +04:00
3de1575082 Merge pull request #5 from valentineus/renovate/all-digest 2025-02-04 05:49:24 +04:00
renovate[bot]
aa8e1184bf Update Rust crate clap to v4.5.28 2025-02-04 01:47:42 +00:00
feb7ebe722 Merge pull request #4 from valentineus/renovate/all-digest
Update Rust crate miette to v7.5.0
2025-02-01 17:58:58 +04:00
renovate[bot]
becadef5ee Update Rust crate miette to v7.5.0 2025-02-01 04:26:56 +00:00
a4b36e1aea Merge pull request #3 from valentineus/renovate/all-digest
Update all digest updates
2025-01-30 04:34:02 +04:00
renovate[bot]
c7b099b596 Update all digest updates 2025-01-30 00:26:30 +00:00
48a08445e7 Added mirror 2025-01-30 04:25:11 +04:00
694de5edfa Moved Renocate config 2025-01-30 01:59:31 +04:00
0dc37e9604 Outdated CI and Renovate configurations have been removed, and a new Dependabot configuration file for dependency management has been added. 2025-01-24 20:50:13 +04:00
3d2e970225 Update Rust crate clap to v4.5.27 2025-01-21 00:01:51 +00:00
d90b9830bc Updated all dependencies 2025-01-20 20:18:27 +00:00
f91e1bda22 Update Rust crate serde_json to v1.0.137 2025-01-20 00:02:56 +00:00
e9a0fd718f Update Rust crate log to v0.4.25 2025-01-15 00:03:21 +00:00
509ce2d83d Update all digest updates 2025-01-10 23:44:03 +00:00
391756b77d Update all digest updates 2025-01-10 21:04:54 +00:00
035153c7c0 Update all digest updates 2025-01-07 21:04:58 +00:00
885a593829 Update Rust crate serde to v1.0.217 2024-12-27 21:02:46 +00:00
7c3c8cc969 Update all digest updates 2024-12-21 21:03:04 +00:00
00c62a9909 Update Rust crate thiserror to v2.0.8 2024-12-18 21:03:04 +00:00
c2899d27af Update Rust crate console to v0.15.10 2024-12-16 15:42:52 +00:00
e60fdd1958 Update Rust crate thiserror to v2.0.7 2024-12-14 21:02:34 +00:00
dd6d440ba5 Update Rust crate serde to v1.0.216 2024-12-11 21:04:41 +00:00
36a082ba18 Update all digest updates 2024-12-08 21:03:54 +00:00
09689a937c Update all digest updates 2024-12-03 21:01:39 +00:00
39f6479415 Update Rust crate miette to v7.4.0 2024-11-27 21:02:44 +00:00
01a2a47370 Update Rust crate miette to v7.3.0 2024-11-26 21:05:22 +00:00
4cd42afa37 Update Rust crate serde_json to v1.0.133 2024-11-17 21:05:34 +00:00
298aa954b9 Update Rust crate clap to v4.5.21 2024-11-13 21:01:52 +00:00
910deb6c17 Update all digest updates 2024-11-12 21:01:58 +00:00
4a22e2177e Merge pull request 'Update Rust crate thiserror to v2' (!36) from renovate/thiserror-2.x into master
Reviewed-on: #36
2024-11-11 15:10:34 +03:00
729c972573 Update Rust crate thiserror to v2 2024-11-10 21:05:05 +00:00
250d78a955 Update Rust crate thiserror to v1.0.69 2024-11-10 21:04:56 +00:00
03f2d762bb Merge pull request 'Update ghcr.io/renovatebot/renovate Docker tag to v39' (!34) from renovate/ghcr.io-renovatebot-renovate-39.x into master
Reviewed-on: #34
2024-11-06 09:43:20 +03:00
fcaa729544 Update all digest updates 2024-11-05 21:02:13 +00:00
8c2a6e2c19 Update ghcr.io/renovatebot/renovate Docker tag to v39 2024-11-04 21:02:13 +00:00
daa2efba89 Update Rust crate thiserror to v1.0.66 2024-11-01 21:03:36 +00:00
b5748505ef Update Rust crate serde to v1.0.214 2024-10-28 21:02:55 +00:00
d305b1f005 Update all digest updates 2024-10-22 21:01:55 +00:00
2cfba4891c Update Rust crate serde_json to v1.0.132 2024-10-19 21:01:55 +00:00
777d3814d3 Update Rust crate serde_json to v1.0.131 2024-10-18 23:23:57 +00:00
784ceeebdf Update Rust crate serde_json to v1.0.130 2024-10-18 21:02:00 +00:00
e3675555ea Update all digest updates 2024-10-17 21:02:30 +00:00
91104e214f Update Rust crate image to v0.25.3 2024-10-16 21:04:18 +00:00
9198b18652 Update Rust crate clap to v4.5.20 2024-10-08 21:04:51 +00:00
1ad7949828 Update Rust crate clap to v4.5.19 2024-10-01 21:03:54 +00:00
b98f01a810 Update Rust crate thiserror to v1.0.64 2024-09-24 09:34:04 +00:00
fa88050a52 Update all digest updates 2024-09-23 21:04:33 +00:00
1123c8a56e Update all digest updates 2024-09-15 21:07:25 +00:00
2eb6333552 Update Rust crate serde to v1.0.209 2024-08-24 12:51:04 +00:00
c5224e006f Update Rust crate serde_json to v1.0.127 2024-08-23 21:04:54 +00:00
79599f3cf4 Update Rust crate clap to v4.5.16 2024-08-15 23:00:19 +00:00
7acf99b9d6 Update all digest updates 2024-08-15 21:03:43 +00:00
ec542703b4 Update Rust crate serde to v1.0.207 2024-08-12 21:02:14 +00:00
ee1cdda38b Update Rust crate serde_json to v1.0.124 2024-08-11 21:42:34 +00:00
293a1de413 Update all digest updates 2024-08-11 21:04:17 +00:00
6635d4da9a Update Rust crate clap to v4.5.15 2024-08-10 21:04:17 +00:00
f549769fcf Update all digest updates 2024-08-08 21:05:42 +00:00
c0a56acc0c Update Rust crate serde_json to v1.0.122 2024-08-02 21:05:34 +00:00
a136dc5fa4 Update Rust crate clap to v4.5.13 2024-07-31 22:13:23 +00:00
1b13f2acfc Update Rust crate clap to v4.5.12 2024-07-31 21:03:54 +00:00
6c127ce028 Update Rust crate serde_json to v1.0.121 2024-07-29 21:03:12 +00:00
bc2e051741 Merge branch 'master' into renovate/ghcr.io-renovatebot-renovate-38.x 2024-07-26 17:12:49 +03:00
9abd2a4558 Update ghcr.io/renovatebot/renovate Docker tag to v38 2024-07-25 21:03:45 +00:00
f267a56fd0 Update Rust crate clap to v4.5.11 2024-07-25 21:03:42 +00:00
1d592418af Update Rust crate clap to v4.5.10 2024-07-23 21:02:15 +00:00
3448f0f930 Update Rust crate image to v0.25.2 2024-07-21 21:04:37 +00:00
039ed238a6 Added Gitea CI testing 2024-07-19 18:23:35 +04:00
b7349f9df9 Added CI check 2024-07-19 13:08:47 +00:00
12c7f0284e Added DevContainer 2024-07-19 13:08:46 +00:00
5c9a691495 Update Rust crate miette to v7 2024-07-19 12:43:23 +00:00
bf8be5c045 Update all digest updates 2024-07-19 12:41:15 +00:00
ee8a5fc02b Added Gitea 2024-07-19 16:39:08 +04:00
a990de90fe Deleted vendor folder 2024-07-19 16:37:58 +04:00
3d48cd3f81 Initial MkDocs 2024-02-06 02:26:50 +04:00
78d6eca336 Initial GitHub Actions 2024-02-06 02:20:26 +04:00
7349 changed files with 2823 additions and 2162297 deletions

View File

@@ -1,5 +0,0 @@
[source.crates-io]
replace-with = "vendored-sources"
[source.vendored-sources]
directory = "vendor"

View File

@@ -0,0 +1,9 @@
{
"image": "mcr.microsoft.com/devcontainers/rust:latest",
"customizations": {
"vscode": {
"extensions": ["rust-lang.rust-analyzer"]
}
},
"runArgs": ["--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined"]
}

View File

@@ -0,0 +1,28 @@
name: RenovateBot
on:
schedule:
- cron: "@daily"
push:
branches:
- master
jobs:
renovate:
container: ghcr.io/renovatebot/renovate:43
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
- name: Run renovate
run: |
renovate
env:
GITHUB_COM_TOKEN: ${{ secrets.RENOVATE_GITHUB_TOKEN }}
LOG_LEVEL: ${{ vars.RENOVATE_LOG_LEVEL }}
RENOVATE_CONFIG_FILE: renovate.config.cjs
RENOVATE_LOG_LEVEL: ${{ vars.RENOVATE_LOG_LEVEL }}
RENOVATE_REPOSITORIES: ${{ gitea.repository }}
RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}

13
.gitea/workflows/test.yml Normal file
View File

@@ -0,0 +1,13 @@
name: Test
on: [push, pull_request]
jobs:
test:
name: cargo test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- run: cargo check --all
- run: cargo test --all-features

14
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,14 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "devcontainers"
directory: "/"
schedule:
interval: "weekly"

219
.gitignore vendored
View File

@@ -1 +1,218 @@
/target *~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
tmp/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pdm
.pdm.toml
# PEP 582
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# Poetry local configuration file
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json

30
.renovaterc Normal file
View File

@@ -0,0 +1,30 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
":disableDependencyDashboard"
],
"assignees": [
"valentineus"
],
"labels": [
"dependencies",
"automated"
],
"packageRules": [
{
"groupName": "all digest updates",
"groupSlug": "all-digest",
"matchUpdateTypes": [
"minor",
"patch",
"pin",
"digest"
],
"matchPackageNames": [
"*"
],
"automerge": true
}
]
}

1182
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,12 +1,6 @@
[workspace] [workspace]
resolver = "2" resolver = "3"
members = [ members = ["crates/*"]
"libnres",
"nres-cli",
"packer",
"texture-decoder",
"unpacker",
]
[profile.release] [profile.release]
codegen-units = 1 codegen-units = 1

View File

@@ -1,11 +0,0 @@
# Utilities for the game "Parkan: Iron Strategy"
This repository contains utilities, tools, and libraries for the game "Parkan: Iron Strategy."
## List of projects
- [unpacker](unpacker): Text-based utility for unpacking game resources in the NRes format. Allows unpacking 100% of game resources.
- [packer](packer): Text-based utility for packing game resources in the NRes format. Allows packing 100% of game resources.
- [texture-decoder](texture-decoder): (WIP) Decoder for game textures. Decodes approximately 20% of game textures.
- [libnres](libnres): _(Deprecation)_ Library for NRes files.
- [nres-cli](nres-cli): _(Deprecation)_ Console tool for NRes files.

17
docs/index.md Normal file
View File

@@ -0,0 +1,17 @@
# Welcome to MkDocs
For full documentation visit [mkdocs.org](https://www.mkdocs.org).
## Commands
* `mkdocs new [dir-name]` - Create a new project.
* `mkdocs serve` - Start the live-reloading docs server.
* `mkdocs build` - Build the documentation site.
* `mkdocs -h` - Print help message and exit.
## Project layout
mkdocs.yml # The configuration file.
docs/
index.md # The documentation homepage.
... # Other markdown pages, images and other files.

69
docs/specs/effects.md Normal file
View File

@@ -0,0 +1,69 @@
# Эффекты и частицы
Пока что — **не байтовая спецификация**, а “карта” по тому, что видно в библиотеках. Полную документацию по эффектам/шейдерам/частицам можно будет сделать после того, как:
- найдём формат эффекта (файл/ресурс),
- найдём точку загрузки/парсинга,
- найдём точки рендера (создание буферов/вершинного формата/материалов).
---
## 1) Что видно по `Effect.dll`
- Есть экспорт `CreateFxManager(...)`, который создаёт менеджер эффектов и регистрирует его в движке.
- Внутри много логики “сообщений/команд” через виртуальные вызовы (похоже на общий компонентный интерфейс).
- Явного парсера формата эффекта (по типу “читать заголовок, читать эмиттеры…”) в найденных местах пока не идентифицировано.
---
## 2) Что видно по `Terrain.dll` (рендер‑статистика частиц)
В `Terrain.dll` есть отладочная/статистическая телеметрия:
- количество отрендеренных частиц (`Rendered particles`)
- количество батчей (`Rendered batches`)
- количество отрендеренных треугольников
Это подтверждает:
- частицы рендерятся батчами,
- они интегрированы в общий 3D‑рендер (через тот же графический слой).
---
## 3) Что важно для совместимости
Даже без точного формата эффекта, из поведения оригинала следует:
- Эффекты/частицы завязаны на общий набор рендер‑фич (фильтрация/мультитекстурность/блендинг).
- На слабом железе (и для минимализма) должны работать деградации:
- без мипмапов,
- без bilinear/trilinear,
- без multitexturing,
- возможно с 16‑бит текстурами.
---
## 4) План “докопать” до формата эффектов
1. Найти **точку создания эффекта по имени/ID**:
- поискать места, где в строки/лог пишется имя эффекта,
- найти функции, которые принимают “путь/имя” и возвращают handle.
2. Найти **точку загрузки данных**:
- чтение из NRes/RsLi ресурса,
- распаковка/декодирование.
3. Зафиксировать **структуру данных эффекта в памяти**:
- эмиттеры,
- спауны,
- lifetime,
- ключи размера/цвета,
- привязка к текстурам/материалам.
4. Найти рендер‑код:
- какой vertex format у частицы,
- как формируются квадраты/ленты (billboard/trail),
- какие state‑ы включаются.
После этого можно будет выпустить полноценный документ “FX format”.

314
docs/specs/msh.md Normal file
View File

@@ -0,0 +1,314 @@
# 3D модели (MSH / AniMesh)
Документ описывает **модельные ресурсы** старого движка по результатам анализа `AniMesh.dll` и сопутствующих библиотек.
---
## 0) Термины
- **Модель** — набор геометрии + иерархия узлов (node/bone) + дополнительные таблицы (батчи/слоты/треки).
- **Node** — узел иерархии (часть/кость). Визуально: “кусок” модели, которому можно применять transform (rigid).
- **LOD** — уровень детализации. В коде обнаружены **3 уровня LOD: 0..2** (и “текущий” LOD через `-1`).
- **Slot** — связка “(node, LOD, group) → диапазоны геометрии + bounds”.
- **Batch** — рендер‑пакет: “материал + диапазон индексов + baseVertex”.
---
## 1) Архитектура модели в движке (как это реально рисуется)
### 1.1 Рендер‑модель: rigidскининг (по узлам), без весов вершин
По коду выборка геометрии делается так:
1. Выбирается **LOD** (в объекте хранится `current_lod`, см. `sub_100124D0`).
2. Для каждого узла **node** выбирается **slot** по `(nodeIndex, group, lod)`:
- Если lod == `-1`, то берётся `current_lod`.
- Если в nodeтаблице хранится `0xFFFF`, slot отсутствует.
3. Slot задаёт **диапазон batchей** (`batch_start`, `batch_count`).
4. Рендерер получает batchдиапазон и для каждого batch делает `DrawIndexedPrimitive` (абстрактный вызов через графический интерфейс движка), используя:
- `baseVertex`
- `indexStart`
- `indexCount`
- материал (индекс материала/шейдера в batchе)
**Важно:** в “модельном” формате не видно классических skin weights (4 bone indices + 4 weights). Это очень похоже на “rigid parts”: каждый batch/часть привязан к одному узлу (или группе узлов) и рендерится с матрицей этого узла.
---
## 2) Набор ресурсов модели (что лежит внутри “файла модели”)
Ниже перечислены ресурсы, которые гарантированно встречаются в загрузчике `AniMesh`:
- **Res1** — node table (таблица узлов и LOD‑слотов).
- **Res2** — header + slot table (слоты и bounds).
- **Res3** — vertex positions (float3).
- **Res4** — packed normals (4 байта на вершину; s8‑компоненты).
- **Res5** — packed UV0 (4 байта на вершину; s16 U,V).
- **Res6** — index buffer (u16 индексы).
- **Res7** — triangle descriptors (по 16 байт на треугольник).
- **Res8** — keyframes / anim track data (используется в интерполяции).
- **Res10** — string table (имена: материалов/узлов/частей — точный маппинг зависит от вызывающей стороны).
- **Res13** — batch table (по 20 байт на batch).
- **Res19** — дополнительная таблица для анимации/маппинга (используется вместе с Res8; точная семантика пока не восстановлена).
Опциональные (встречаются условно, если ресурс присутствует):
- **Res15** — per‑vertex stream, stride 8 (семантика не подтверждена).
- **Res16** — per‑vertex stream, stride 8, при этом движок создаёт **два “под‑потока” по 4 байта** (см. ниже).
- **Res18** — per‑vertex stream, stride 4 (семантика не подтверждена).
- **Res20** — дополнительный массив + отдельное “count/meta” поле из заголовка ресурса.
---
## 3) Декодирование базовой геометрии
### 3.1 Positions (Res3)
- Структура: массив `float3`.
- Stride: `12`.
- Использование: `pos = *(float3*)(res3 + 12*vertexIndex)`.
### 3.2 UV0 (Res5) — packed s16
- Stride: `4`.
- Формат: `int16 u, int16 v`
- Нормализация (из кода): `uv = (u, v) * (1/1024)`
То есть:
- `u_float = (int16)u / 1024.0`
- `v_float = (int16)v / 1024.0`
### 3.3 Normals (Res4) — packed s8
- Stride: `4`.
- Формат (минимально подтверждено): `int8 nx, int8 ny, int8 nz, int8 nw(?)`
- Нормализация (из кода): множитель `1/128 = 0.0078125`
То есть:
- `n = (nx, ny, nz) / 128.0`
4‑й байт пока не подтверждён (встречается как паддинг/знак/индекс — нужно дальше копать).
---
## 4) Таблицы, задающие разбиение геометрии
### 4.1 Batch table (Res13), запись 20 байт
Batch используется в рендере и в обходе треугольников. Из обхода достоверно:
- `indexCount` читается как `u16` по смещению `+8`.
- `indexStart` используется как **u32 по смещению `+10`** (движок читает dword и умножает на 2 для смещения в u16индексах).
- `baseVertex` читается как `u32` по смещению `+16`.
Рекомендуемая реконструкция:
- `+0 u16 batchFlags` — используется для фильтрации (битовая маска).
- `+2 u16 materialIndex` — очень похоже на индекс материала/шейдера.
- `+4 u16 unk4`
- `+6 u16 unk6` — **возможный** `nodeIndex` (часто именно здесь держат привязку батча к кости).
- `+8 u16 indexCount` — число индексов (кратно 3 для треугольников).
- `+10 u32 indexStart` — стартовый индекс в общем index buffer (в элементах u16).
- `+14 u16 unk14` — возможно “primitive/strip mode” или ещё один флаг.
- `+16 u32 baseVertex` — смещение вершинного индекса (в вершинах).
### 4.2 Triangle descriptors (Res7), запись 16 байт
Треугольные дескрипторы используются при итерации треугольников (коллизии/выбор/тесты):
- `+0 u16 triFlags` — используется для фильтрации (битовая маска)
- Остальные поля пока не подтверждены (вероятно: доп. флаги, группа, precomputed normal, ID поверхности и т.п.)
**Важно:** индексы вершин треугольника берутся **из index buffer (Res6)** через `indexStart/indexCount` batchа. TriDesc не хранит сами индексы.
---
## 5) Slot table (Res2 + смещение 140), запись 68 байт
Slot — ключевая структура, по которой движок:
- получает bounds (AABB + sphere),
- получает диапазон batchей для рендера/обхода,
- получает стартовый индекс треугольников (triStart) в TriDesc.
В коде Slot читается как `u16`‑поля + как `float`‑поля (AABB/sphere). Подтверждённая раскладка:
### 5.1 Заголовок slot (первые 8 байт)
- `+0 u16 triStart` — индекс первого треугольника в `Res7` (TriDesc), используемый в обходе.
- `+2 u16 slotFlagsOrUnk` — пока не восстановлено (не путать с batchFlags/triFlags).
- `+4 u16 batchStart` — индекс первого batchа в `Res13`.
- `+6 u16 batchCount` — количество batchей.
### 5.2 AABB (локальные границы, 24 байта)
- `+8 float aabbMin.x`
- `+12 float aabbMin.y`
- `+16 float aabbMin.z`
- `+20 float aabbMax.x`
- `+24 float aabbMax.y`
- `+28 float aabbMax.z`
### 5.3 Bounding sphere (локальные границы, 16 байт)
- `+32 float sphereCenter.x`
- `+36 float sphereCenter.y`
- `+40 float sphereCenter.z`
- `+44 float sphereRadius`
### 5.4 Хвост (20 байт)
- `+48..+67` — не используется в найденных вызовах bounds/рендера; назначение неизвестно. Возможные кандидаты: LOD‑дистанции, доп. bounds, служебные поля экспортёра.
---
## 6) Node table (Res1), запись 19 \* u16 на узел (38 байт)
Node table — это не “матрицы узлов”, а компактная карта слотов по LOD и группам.
Движок вычисляет адрес слова так:
`wordIndex = nodeIndex * 19 + lod * 5 + group + 4`
где:
- `lod` в диапазоне `0..2` (**три уровня LOD**)
- `group` в диапазоне `0..4` (**пять групп слотов**)
- если вместо `lod` передать `-1`, движок подставит `current_lod` из инстанса.
Из этого следует структура узла:
### 6.1 Заголовок узла (первые 4 u16)
- `u16 hdr0`
- `u16 hdr1`
- `u16 hdr2`
- `u16 hdr3`
Семантика заголовка узла **пока не восстановлена** (кандидаты: parent/firstChild/nextSibling/flags).
### 6.2 SlotIndexматрица: 3 LOD \* 5 groups = 15 u16
Дальше идут 15 слов:
- для `lod=0`: `slotIndex[group0..4]`
- для `lod=1`: `slotIndex[group0..4]`
- для `lod=2`: `slotIndex[group0..4]`
`slotIndex` — это индекс в slot table (`Res2+140`), либо `0xFFFF` если слота нет.
**Группы (0..4)**: в коде чаще всего используется `group=0`. Остальные группы встречаются как параметр обхода, но назначение (например, “коллизия”, “тени”, “декали”, “альфа‑геометрия” и т.п.) пока не доказано. В документации ниже они называются просто `group`.
---
## 7) Рендер‑проход (рекомендуемая реконструкция)
Минимальный корректный порт рендера может повторять логику:
1. Определить `current_lod` (0..2) для модели (по дистанции/настройкам).
2. Для каждого node:
- взять slotIndex = node.slotIndex[current_lod][group=0]
- если `0xFFFF` — пропустить
- slot = slotTable[slotIndex]
3. Для slotа:
- для i in `0 .. slot.batchCount-1`:
- batch = batchTable[slot.batchStart + i]
- применить материал `materialIndex`
- применить transform узла (как минимум: rootTransform \* nodeTransform)
- нарисовать индексированную геометрию:
- baseVertex = batch.baseVertex
- indexStart = batch.indexStart
- indexCount = batch.indexCount
4. Для culling:
- использовать slot AABB/sphere, трансформируя их матрицей узла/инстанса.
- при неравномерном scale радиус сферы масштабируется по `max(scaleX, scaleY, scaleZ)` (так делает оригинальный код).
---
## 8) Обход треугольников (коллизия/пикинг/дебаг)
В движке есть универсальный обход:
- Идём по slotам (node, lod, group).
- Для каждого slot:
- for batch in slot.batchRange:
- получаем индексы из Res6 (indexStart/indexCount)
- triCount = (indexCount + 2) / 3
- параллельно двигаем указатель TriDesc начиная с `triStart`
- для каждого треугольника:
- читаем `triFlags` (TriDesc[0])
- фильтруем по маскам
- вызываем callback, которому доступны:
- triDesc (16 байт)
- три индекса (из index buffer)
- три позиции (из Res3 через baseVertex + индекс)
---
## 9) Опциональные vertex streams (Res15/16/18/20) — текущий статус
Эти ресурсы загружаются, но в найденных местах пока **нет однозначного декодера**. Что точно видно по загрузчику:
- **Res15**: stride 8, массив на вершину.
- кандидаты: `float2 uv1` (lightmap), либо 4×`int16` (2 UV‑пары), либо что‑то иное.
- **Res16**: stride 8, но движок создаёт два “под‑потока”:
- streamA = res16 + 0, stride 8
- streamB = res16 + 4, stride 8 Это сильно похоже на “два packedвектора по 4 байта”, например `tangent` и `bitangent` (s8×4).
- **Res18**: stride 4, массив на вершину.
- кандидаты: `D3DCOLOR` (RGBA), либо packed‑параметры освещения/окклюзии.
- **Res20**: присутствует не всегда; отдельно читается `count/meta` поле из заголовка ресурса.
- кандидаты: дополнительная таблица соответствий (vertex remap), либо ускорение для эффектов/деформаций.
---
## 10) Как “создавать” модели (экспортёр / конвертер) — практическая рекомендация
Чтобы собрать совместимый формат (минимум, достаточный для рендера и коллизии), нужно:
1. Сформировать единый массив вершин:
- positions (Res3)
- packed normals (Res4) — если хотите сохранить оригинальную упаковку
- packed uv0 (Res5)
2. Сформировать index buffer (Res6) u16.
3. Сформировать batch table (Res13):
- сгруппировать треугольники по (материал, узел/часть, режим)
- записать `baseVertex`, `indexStart`, `indexCount`
- заполнить неизвестные поля нулями (пока нет доказанной семантики).
4. Сформировать triangle descriptor table (Res7):
- на каждый треугольник 16 байт
- минимум: `triFlags=0`
- остальное — 0.
5. Сформировать slot table (Res2+140):
- для каждого (node, lod, group) задать:
- triStart (индекс начала triDesc для обхода)
- batchStart/batchCount
- AABB и bounding sphere в локальных координатах узла/части
- неиспользуемые поля хвоста = 0.
6. Сформировать node table (Res1):
- для каждого node:
- 4 заголовочных u16 (пока можно 0)
- 15 slotIndex‑ов (LOD0..2 × group0..4), `0xFFFF` где нет слота.
7. Анимацию/Res8/Res19/Res11:
- если не нужна — можно отсутствующими, но надо проверить, что загрузчик/движок допускает “статическую” модель без этих ресурсов (в оригинале много логики завязано на них).
---
## 11) Что ещё нужно восстановить, чтобы документация стала “закрывающей” на 100%
1. Точная семантика `batch.unk6` (вероятный nodeIndex) и `batch.unk4/unk14`.
2. Полная раскладка TriDesc16 (кроме triFlags).
3. Назначение `slotFlagsOrUnk`.
4. Семантика групп `group=1..4` в node‑таблице.
5. Назначение и декодирование Res15/Res16/Res18/Res20.
6. Связь строковой таблицы (Res10) с материалами/узлами (кто именно как индексирует строки).

718
docs/specs/nres.md Normal file
View File

@@ -0,0 +1,718 @@
# Форматы игровых ресурсов
## Обзор
Библиотека `Ngi32.dll` реализует два различных формата архивов ресурсов:
1. **NRes** — основной формат архива ресурсов, используемый через API `niOpenResFile` / `niCreateResFile`. Каталог файлов расположен в **конце** файла. Поддерживает создание, редактирование, добавление и удаление записей.
2. **RsLi** — формат библиотеки ресурсов, используемый через API `rsOpenLib` / `rsLoad`. Таблица записей расположена **в начале** файла (сразу после заголовка) и зашифрована XOR-шифром. Поддерживает несколько методов сжатия. Только чтение.
---
# Часть 1. Формат NRes
## 1.1. Общая структура файла
```
┌──────────────────────────┐ Смещение 0
│ Заголовок (16 байт) │
├──────────────────────────┤ Смещение 16
│ │
│ Данные ресурсов │
│ (выровнены по 8 байт) │
│ │
├──────────────────────────┤ Смещение = total_size - entry_count × 64
│ Каталог записей │
│ (entry_count × 64 байт) │
└──────────────────────────┘ Смещение = total_size
```
## 1.2. Заголовок файла (16 байт)
| Смещение | Размер | Тип | Значение | Описание |
| -------- | ------ | ------- | ------------------- | ------------------------------------ |
| 0 | 4 | char[4] | `NRes` (0x4E526573) | Магическая сигнатура (little-endian) |
| 4 | 4 | uint32 | `0x00000100` (256) | Версия формата (1.0) |
| 8 | 4 | int32 | — | Количество записей в каталоге |
| 12 | 4 | int32 | — | Полный размер файла в байтах |
**Валидация при открытии:** магическая сигнатура и версия должны совпадать точно. Поле `total_size` (смещение 12) **проверяется на равенство** с фактическим размером файла (`GetFileSize`). Если значения не совпадают — файл отклоняется.
## 1.3. Положение каталога в файле
Каталог располагается в самом конце файла. Его смещение вычисляется по формуле:
```
directory_offset = total_size - entry_count × 64
```
Данные ресурсов занимают пространство между заголовком (16 байт) и каталогом.
## 1.4. Запись каталога (64 байта)
Каждая запись каталога занимает ровно **64 байта** (0x40):
| Смещение | Размер | Тип | Описание |
| -------- | ------ | -------- | ------------------------------------------------- |
| 0 | 4 | uint32 | Тип / идентификатор ресурса |
| 4 | 4 | uint32 | Атрибут 1 (например, формат, дата, категория) |
| 8 | 4 | uint32 | Атрибут 2 (например, подтип, метка времени) |
| 12 | 4 | uint32 | Размер данных ресурса в байтах |
| 16 | 4 | uint32 | Атрибут 3 (дополнительный параметр) |
| 20 | 36 | char[36] | Имя файла (null-terminated, макс. 35 символов) |
| 56 | 4 | uint32 | Смещение данных от начала файла |
| 60 | 4 | uint32 | Индекс сортировки (для двоичного поиска по имени) |
### Поле «Имя файла» (смещение 20, 36 байт)
- Максимальная длина имени: **35 символов** + 1 байт null-терминатор.
- При записи поле сначала обнуляется (`memset(0, 36 байт)`), затем копируется имя (`strncpy`, макс. 35 символов).
- Поиск по имени выполняется **без учёта регистра** (`_strcmpi`).
### Поле «Индекс сортировки» (смещение 60)
Используется для **двоичного поиска по имени**. Содержит индекс оригинальной записи, отсортированной в алфавитном порядке (регистронезависимо). Индекс строится при сохранении файла функцией `sub_10013260` с помощью **пузырьковой сортировки** по именам.
**Алгоритм поиска** (`sub_10011E60`): классический двоичный поиск по отсортированному массиву индексов. Возвращает оригинальный индекс записи или `-1` при отсутствии.
### Поле «Смещение данных» (смещение 56)
Абсолютное смещение от начала файла. Данные читаются из mapped view: `pointer = mapped_base + data_offset`.
## 1.5. Выравнивание данных
При добавлении ресурса его данные записываются последовательно, после чего выполняется **выравнивание по 8-байтной границе**:
```c
padding = ((data_size + 7) & ~7) - data_size;
// Если padding > 0, записываются нулевые байты
```
Таким образом, каждый блок данных начинается с адреса, кратного 8.
При изменении размера данных ресурса выполняется сдвиг всех последующих данных и обновление смещений всех затронутых записей каталога.
## 1.6. Создание файла (API `niCreateResFile`)
При создании нового файла:
1. Если файл уже существует и содержит корректный NRes-архив, существующий каталог считывается с конца файла, а файл усекается до начала каталога.
2. Если файл пуст или не является NRes-архивом, создаётся новый с пустым каталогом. Поля `entry_count = 0`, `total_size = 16`.
При закрытии файла (`sub_100122D0`):
1. Заголовок переписывается в начало файла (16 байт).
2. Вычисляется `total_size = data_end_offset + entry_count × 64`.
3. Индексы сортировки пересчитываются.
4. Каталог записей записывается в конец файла.
## 1.7. Режимы сортировки каталога
Функция `sub_10012560` поддерживает 12 режимов сортировки (0–11):
| Режим | Порядок сортировки |
| ----- | --------------------------------- |
| 0 | Без сортировки (сброс) |
| 1 | По атрибуту 1 (смещение 4) |
| 2 | По атрибуту 2 (смещение 8) |
| 3 | По (атрибут 1, атрибут 2) |
| 4 | По типу ресурса (смещение 0) |
| 5 | По (тип, атрибут 1) |
| 6 | По (тип, атрибут 1) — идентичен 5 |
| 7 | По (тип, атрибут 1, атрибут 2) |
| 8 | По имени (регистронезависимо) |
| 9 | По (тип, имя) |
| 10 | По (атрибут 1, имя) |
| 11 | По (атрибут 2, имя) |
## 1.8. Операция `niOpenResFileEx` — флаги открытия
Второй параметр — битовые флаги:
| Бит | Маска | Описание |
| --- | ----- | ----------------------------------------------------------------------------------- |
| 0 | 0x01 | Sequential scan hint (`FILE_FLAG_SEQUENTIAL_SCAN` вместо `FILE_FLAG_RANDOM_ACCESS`) |
| 1 | 0x02 | Открыть для записи (read-write). Без флага — только чтение |
| 2 | 0x04 | Пометить файл как «кэшируемый» (не выгружать при refcount=0) |
| 3 | 0x08 | Raw-режим: не проверять заголовок NRes, трактовать весь файл как единый ресурс |
## 1.9. Виртуальное касание страниц
Функция `sub_100197D0` выполняет «касание» страниц памяти для принудительной загрузки из memory-mapped файла. Она обходит адресное пространство с шагом 4096 байт (размер страницы), начиная с 0x10000 (64 КБ):
```
for (result = 0x10000; result < size; result += 4096);
```
Вызывается при чтении данных ресурса с флагом `a3 != 0` для предзагрузки данных в оперативную память.
---
# Часть 2. Формат RsLi
## 2.1. Общая структура файла
```
┌───────────────────────────────┐ Смещение 0
│ Заголовок файла (32 байта) │
├───────────────────────────────┤ Смещение 32
│ Таблица записей (зашифрована)│
│ (entry_count × 32 байт) │
├───────────────────────────────┤ Смещение 32 + entry_count × 32
│ │
│ Данные ресурсов │
│ │
├───────────────────────────────┤
│ [Опциональный трейлер — 6 б] │
└───────────────────────────────┘
```
## 2.2. Заголовок файла (32 байта)
| Смещение | Размер | Тип | Значение | Описание |
| -------- | ------ | ------- | ----------------- | --------------------------------------------- |
| 0 | 2 | char[2] | `NL` (0x4C4E) | Магическая сигнатура |
| 2 | 1 | uint8 | `0x00` | Зарезервировано (должно быть 0) |
| 3 | 1 | uint8 | `0x01` | Версия формата |
| 4 | 2 | int16 | — | Количество записей (sign-extended при чтении) |
| 6 | 8 | — | — | Зарезервировано / не используется |
| 14 | 2 | uint16 | `0xABBA` или иное | Флаг предсортировки (см. ниже) |
| 16 | 4 | — | — | Зарезервировано |
| 20 | 4 | uint32 | — | **Начальное состояние XOR-шифра** (seed) |
| 24 | 8 | — | — | Зарезервировано |
### Флаг предсортировки (смещение 14)
- Если `*(uint16*)(header + 14) == 0xABBA` — движок **не строит** таблицу индексов в памяти. Значения `entry[i].sort_to_original` используются **как есть** (и для двоичного поиска, и как XORключ для данных).
- Если значение **отлично от 0xABBA** — после загрузки выполняется **пузырьковая сортировка** имён и строится перестановка `sort_to_original[]`, которая затем **записывается в `entry[i].sort_to_original`**, перетирая значения из файла. Именно эта перестановка далее используется и для поиска, и как XORключ (младшие 16 бит).
## 2.3. XOR-шифр таблицы записей
Таблица записей начинается со смещения 32 и зашифрована поточным XOR-шифром. Ключ инициализируется из DWORD по смещению 20 заголовка.
### Начальное состояние
```
seed = *(uint32*)(header + 20)
lo = seed & 0xFF // Младший байт
hi = (seed >> 8) & 0xFF // Второй байт
```
### Алгоритм дешифровки (побайтовый)
Для каждого зашифрованного байта `encrypted[i]`, начиная с `i = 0`:
```
step 1: lo = hi ^ ((lo << 1) & 0xFF) // Сдвиг lo влево на 1, XOR с hi
step 2: decrypted[i] = lo ^ encrypted[i] // Расшифровка байта
step 3: hi = lo ^ ((hi >> 1) & 0xFF) // Сдвиг hi вправо на 1, XOR с lo
```
**Пример реализации:**
```python
def decrypt_rs_entries(encrypted_data: bytes, seed: int) -> bytes:
lo = seed & 0xFF
hi = (seed >> 8) & 0xFF
result = bytearray(len(encrypted_data))
for i in range(len(encrypted_data)):
lo = (hi ^ ((lo << 1) & 0xFF)) & 0xFF
result[i] = lo ^ encrypted_data[i]
hi = (lo ^ ((hi >> 1) & 0xFF)) & 0xFF
return bytes(result)
```
Этот же алгоритм используется для шифрования данных ресурсов с методом XOR (флаги 0x20, 0x60, 0xA0), но с другим начальным ключом из записи.
## 2.4. Запись таблицы (32 байта, на диске, до дешифровки)
После дешифровки каждая запись имеет следующую структуру:
| Смещение | Размер | Тип | Описание |
| -------- | ------ | -------- | -------------------------------------------------------------- |
| 0 | 12 | char[12] | Имя ресурса (ASCII, обычно uppercase; строка читается до `\0`) |
| 12 | 4 | — | Зарезервировано (движком игнорируется) |
| 16 | 2 | int16 | **Флаги** (метод сжатия и атрибуты) |
| 18 | 2 | int16 | **`sort_to_original[i]` / XORключ** (см. ниже) |
| 20 | 4 | uint32 | **Размер распакованных данных** (`unpacked_size`) |
| 24 | 4 | uint32 | Смещение данных от начала файла (`data_offset`) |
| 28 | 4 | uint32 | Размер упакованных данных в байтах (`packed_size`) |
### Имена ресурсов
- Поле `name[12]` копируется побайтно. Внутренне движок всегда имеет `\0` сразу после этих 12 байт (зарезервированные 4 байта в памяти принудительно обнуляются), поэтому имя **может быть длиной до 12 символов** даже без `\0` внутри `name[12]`.
- На практике имена обычно **uppercase ASCII**. `rsFind` приводит запрос к верхнему регистру (`_strupr`) и сравнивает побайтно.
- `rsFind` копирует имя запроса `strncpy(..., 16)` и принудительно ставит `\0` в `Destination[15]`, поэтому запрос длиннее 15 символов будет усечён.
### Поле `sort_to_original[i]` (смещение 18)
Это **не “свойство записи”**, а элемент таблицы индексов, по которой `rsFind` делает двоичный поиск:
- Таблица реализована “внутри записей”: значение берётся как `entry[i].sort_to_original` (где `i` — позиция двоичного поиска), а реальная запись для сравнения берётся как `entry[ sort_to_original[i] ]`.
- Тем же значением (младшие 16 бит) инициализируется XORшифр данных для методов, где он используется (0x20/0x60/0xA0). Поэтому при упаковке/шифровании данных ключ должен совпадать с итоговым `sort_to_original[i]` (см. флаг 0xABBA в разделе 2.2).
Поиск выполняется **двоичным поиском** по этой таблице, с фолбэком на **линейный поиск** если двоичный не нашёл (поведение `rsFind`).
## 2.5. Поле флагов (смещение 16 записи)
Биты поля флагов кодируют метод сжатия и дополнительные атрибуты:
```
Биты [8:5] (маска 0x1E0): Метод сжатия/шифрования
Бит [6] (маска 0x040): Флаг realloc (буфер декомпрессии может быть больше)
```
### Методы сжатия (биты 8–5, маска 0x1E0)
| Значение | Hex | Описание |
| -------- | ----- | --------------------------------------- |
| 0x000 | 0x00 | Без сжатия (копирование) |
| 0x020 | 0x20 | Только XOR-шифр |
| 0x040 | 0x40 | LZSS (простой вариант) |
| 0x060 | 0x60 | XOR-шифр + LZSS (простой вариант) |
| 0x080 | 0x80 | LZSS с адаптивным кодированием Хаффмана |
| 0x0A0 | 0xA0 | XOR-шифр + LZSS с Хаффманом |
| 0x100 | 0x100 | Deflate (аналог zlib/RFC 1951) |
Примечание: `rsGetPackMethod()` возвращает `flags & 0x1C0` (без бита 0x20). Поэтому:
- для 0x20 вернётся 0x00,
- для 0x60 вернётся 0x40,
- для 0xA0 вернётся 0x80.
### Бит 0x40 (выделение +0x12 и последующее `realloc`)
Бит 0x40 проверяется отдельно (`flags & 0x40`). Если он установлен, выходной буфер выделяется с запасом `+0x12` (18 байт), а после распаковки вызывается `realloc` для усечения до точного `unpacked_size`.
Важно: этот же бит входит в код методов 0x40/0x60, поэтому для них поведение “+0x12 и shrink” включено автоматически.
## 2.6. Размеры данных
В каждой записи на диске хранятся оба значения:
- `unpacked_size` (смещение 20) — размер распакованных данных.
- `packed_size` (смещение 28) — размер упакованных данных (байт во входном потоке для выбранного метода).
Для метода 0x00 (без сжатия) обычно `packed_size == unpacked_size`.
`rsGetInfo` возвращает именно `unpacked_size` (то, сколько байт выдаст `rsLoad`).
Практический нюанс для метода `0x100` (Deflate): в реальных игровых данных встречается запись, где `packed_size` указывает на диапазон до `EOF + 1`. Поток успешно декодируется и без последнего байта; это похоже на lookahead-поведение декодера.
## 2.7. Опциональный трейлер медиа (6 байт)
При открытии с флагом `a2 & 2`:
| Смещение от конца | Размер | Тип | Описание |
| ----------------- | ------ | ------- | ----------------------- |
| 6 | 2 | char[2] | Сигнатура `AO` (0x4F41) |
| 4 | 4 | uint32 | Смещение медиа-оверлея |
Если трейлер присутствует, все смещения данных в записях корректируются: `effective_offset = entry_offset + media_overlay_offset`.
---
# Часть 3. Алгоритмы сжатия (формат RsLi)
## 3.1. XOR-шифр данных (метод 0x20)
Алгоритм идентичен XORшифру таблицы записей (раздел 2.3), но начальный ключ берётся из `entry[i].sort_to_original` (смещение 18 записи, младшие 16 бит).
Важно про размер входа:
- В ветке **0x20** движок XORит ровно `unpacked_size` байт (и ожидает, что поток данных имеет ту же длину; на практике `packed_size == unpacked_size`).
- В ветках **0x60/0xA0** XOR применяется к **упакованному** потоку длиной `packed_size` перед декомпрессией.
### Инициализация
```
key16 = (uint16)entry.sort_to_original // int16 на диске по смещению 18
lo = key16 & 0xFF
hi = (key16 >> 8) & 0xFF
```
### Дешифровка (псевдокод)
```
for i in range(N): # N = unpacked_size (для 0x20) или packed_size (для 0x60/0xA0)
lo = (hi ^ ((lo << 1) & 0xFF)) & 0xFF
out[i] = in[i] ^ lo
hi = (lo ^ ((hi >> 1) & 0xFF)) & 0xFF
```
## 3.2. LZSS — простой вариант (метод 0x40)
Классический алгоритм LZSS (Lempel-Ziv-Storer-Szymanski) с кольцевым буфером.
### Параметры
| Параметр | Значение |
| ----------------------------- | ------------------ |
| Размер кольцевого буфера | 4096 байт (0x1000) |
| Начальная позиция записи | 4078 (0xFEE) |
| Начальное заполнение | 0x20 (пробел) |
| Минимальная длина совпадения | 3 |
| Максимальная длина совпадения | 18 (4 бита + 3) |
### Алгоритм декомпрессии
```
Инициализация:
ring_buffer[0..4095] = 0x20 (заполнить пробелами)
ring_pos = 4078
flags_byte = 0
flags_bits_remaining = 0
Цикл (пока не заполнен выходной буфер И не исчерпан входной):
1. Если flags_bits_remaining == 0:
- Прочитать 1 байт из входного потока → flags_byte
- flags_bits_remaining = 8
Декодировать как:
- Старший бит устанавливается в 0x7F (маркер)
- Оставшиеся 7 бит — флаги текущей группы
Реально в коде: control_word = (flags_byte) | (0x7F << 8)
Каждый бит проверяется сдвигом вправо.
2. Проверить младший бит control_word:
Если бит = 1 (литерал):
- Прочитать 1 байт из входного потока → byte
- ring_buffer[ring_pos] = byte
- ring_pos = (ring_pos + 1) & 0xFFF
- Записать byte в выходной буфер
Если бит = 0 (ссылка):
- Прочитать 2 байта: low_byte, high_byte
- offset = low_byte | ((high_byte & 0xF0) << 4) // 12 бит
- length = (high_byte & 0x0F) + 3 // 4 бита + 3
- Скопировать length байт из ring_buffer[offset...]:
для j от 0 до length-1:
byte = ring_buffer[(offset + j) & 0xFFF]
ring_buffer[ring_pos] = byte
ring_pos = (ring_pos + 1) & 0xFFF
записать byte в выходной буфер
3. Сдвинуть control_word вправо на 1 бит
4. flags_bits_remaining -= 1
```
### Подробная раскладка пары ссылки (2 байта)
```
Байт 0 (low): OOOOOOOO (биты [7:0] смещения)
Байт 1 (high): OOOOLLLL O = биты [11:8] смещения, L = длина − 3
offset = low | ((high & 0xF0) << 4) // Диапазон: 0–4095
length = (high & 0x0F) + 3 // Диапазон: 3–18
```
## 3.3. LZSS с адаптивным кодированием Хаффмана (метод 0x80)
Расширенный вариант LZSS, где литералы и длины совпадений кодируются с помощью адаптивного дерева Хаффмана.
### Параметры
| Параметр | Значение |
| -------------------------------- | ------------------------------ |
| Размер кольцевого буфера | 4096 байт |
| Начальная позиция записи | **4036** (0xFC4) |
| Начальное заполнение | 0x20 (пробел) |
| Количество листовых узлов дерева | 314 |
| Символы литералов | 0–255 (байты) |
| Символы длин | 256–313 (длина = символ − 253) |
| Начальная длина | 3 (при символе 256) |
| Максимальная длина | 60 (при символе 313) |
### Дерево Хаффмана
Дерево строится как **адаптивное** (dynamic, self-adjusting):
- **627 узлов**: 314 листовых + 313 внутренних.
- Все листья изначально имеют **вес 1**.
- Корень дерева — узел с индексом 0 (в массиве `parent`).
- После декодирования каждого символа дерево **обновляется** (функция `sub_1001B0AE`): вес узла инкрементируется, и при нарушении порядка узлы **переставляются** для поддержания свойства.
- При достижении суммарного веса **0x8000 (32768)** — все веса **делятся на 2** (с округлением вверх) и дерево полностью перестраивается.
### Кодирование позиции
Позиция в кольцевом буфере кодируется с помощью **d-кода** (таблица дистанций):
- 8 бит позиции ищутся в таблице `d_code[256]`, определяя базовое значение и количество дополнительных битов.
- Из потока считываются дополнительные биты, которые объединяются с базовым значением.
- Финальная позиция: `pos = (ring_pos − 1 − decoded_position) & 0xFFF`
**Таблицы инициализации** (d-коды):
```
Таблица базовых значений — byte_100371D0[6]:
{ 0x01, 0x03, 0x08, 0x0C, 0x18, 0x10 }
Таблица дополнительных битов — byte_100371D6[6]:
{ 0x20, 0x30, 0x40, 0x30, 0x30, 0x10 }
```
### Алгоритм декомпрессии (высокоуровневый)
```
Инициализация:
ring_buffer[0..4095] = 0x20
ring_pos = 4036
Инициализировать дерево Хаффмана (314 листьев, все веса = 1)
Инициализировать таблицы d-кодов
Цикл:
1. Декодировать символ из потока по дереву Хаффмана:
- Начать с корня
- Читать биты, спускаться по дереву (0 = левый, 1 = правый)
- Пока не достигнут лист → символ = лист 627
2. Обновить дерево Хаффмана для декодированного символа
3. Если символ < 256 (литерал):
- ring_buffer[ring_pos] = символ
- ring_pos = (ring_pos + 1) & 0xFFF
- Записать символ в выходной буфер
4. Если символ >= 256 (ссылка):
- length = символ − 253
- Декодировать позицию через d-код:
a) Прочитать 8 бит из потока
b) Найти d-код и дополнительные биты по таблице
c) Прочитать дополнительные биты
d) position = (ring_pos − 1 − full_position) & 0xFFF
- Скопировать length байт из ring_buffer[position...]
5. Если выходной буфер заполнен → завершить
```
## 3.4. XOR + LZSS (методы 0x60 и 0xA0)
Комбинированный метод: сначала XOR-дешифровка, затем LZSS-декомпрессия.
### Алгоритм
1. Выделить временный буфер размером `compressed_size` (поле из записи, смещение 28).
2. Дешифровать сжатые данные XOR-шифром (раздел 3.1) с ключом из записи во временный буфер.
3. Применить LZSS-декомпрессию (простую или с Хаффманом, в зависимости от конкретного метода) из временного буфера в выходной.
4. Освободить временный буфер.
- **0x60** — XOR + простой LZSS (раздел 3.2)
- **0xA0** — XOR + LZSS с Хаффманом (раздел 3.3)
### Начальное состояние XOR для данных
При комбинированном методе seed берётся из поля по смещению 20 записи (4-байтный). Однако ключ обрабатывается как 16-битный: `lo = seed & 0xFF`, `hi = (seed >> 8) & 0xFF`.
## 3.5. Deflate (метод 0x100)
Полноценная реализация алгоритма **Deflate** (RFC 1951) с блочной структурой.
### Общая структура
Данные состоят из последовательности блоков. Каждый блок начинается с:
- **1 бит** — `is_final`: признак последнего блока
- **2 бита** — `block_type`: тип блока
### Типы блоков
| block_type | Описание | Функция |
| ---------- | --------------------------- | ---------------- |
| 0 | Без сжатия (stored) | `sub_1001A750` |
| 1 | Фиксированные коды Хаффмана | `sub_1001A8C0` |
| 2 | Динамические коды Хаффмана | `sub_1001AA30` |
| 3 | Зарезервировано (ошибка) | Возвращает код 2 |
### Блок типа 0 (stored)
1. Отбросить оставшиеся биты до границы байта (выравнивание).
2. Прочитать 16 бит — `LEN` (длина блока).
3. Прочитать 16 бит — `NLEN` (дополнение длины, `NLEN == ~LEN & 0xFFFF`).
4. Проверить: `LEN == (uint16)(~NLEN)`. При несовпадении — ошибка.
5. Скопировать `LEN` байт из входного потока в выходной.
Декомпрессор использует внутренний буфер размером **32768 байт** (0x8000). При заполнении — промежуточная запись результата.
### Блок типа 1 (фиксированные коды)
Стандартные коды Deflate:
- Литералы/длины: 288 кодов
- 0143: 8-битные коды
- 144255: 9-битные коды
- 256279: 7-битные коды
- 280287: 8-битные коды
- Дистанции: 30 кодов, все 5-битные
Используются предопределённые таблицы длин и дистанций (`unk_100370AC`, `unk_1003712C` и соответствующие экстра-биты).
### Блок типа 2 (динамические коды)
1. Прочитать 5 бит → `HLIT` (количество литералов/длин − 257). Диапазон: 257–286.
2. Прочитать 5 бит → `HDIST` (количество дистанций − 1). Диапазон: 1–30.
3. Прочитать 4 бита → `HCLEN` (количество кодов длин − 4). Диапазон: 4–19.
4. Прочитать `HCLEN` × 3 бит — длины кодов для алфавита длин.
5. Построить дерево Хаффмана для алфавита длин (19 символов).
6. С помощью этого дерева декодировать длины кодов для литералов/длин и дистанций.
7. Построить два дерева Хаффмана: для литералов/длин и для дистанций.
8. Декодировать данные.
**Порядок кодов длин** (стандартный Deflate):
```
{ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }
```
Хранится в `dword_10037060`.
### Валидации
- `HLIT + 257 <= 286` (max 0x11E)
- `HDIST + 1 <= 30` (max 0x1E)
- При нарушении — возвращается ошибка 1.
## 3.6. Метод 0x00 (без сжатия)
Данные копируются «как есть» напрямую из файла. Вызывается через указатель на функцию `dword_1003A1B8` (фактически `memcpy` или аналог).
---
# Часть 4. Внутренние структуры в памяти
## 4.1. Внутренняя структура NRes-архива (opened, 0x68 байт = 104)
```c
struct NResArchive { // Размер: 0x68 (104 байта)
void* vtable; // +0: Указатель на таблицу виртуальных методов
int32_t entry_count; // +4: Количество записей
void* mapped_base; // +8: Базовый адрес mapped view
void* directory_ptr; // +12: Указатель на каталог записей в памяти
char* filename; // +16: Путь к файлу (_strdup)
int32_t ref_count; // +20: Счётчик ссылок
uint32_t last_release_time; // +24: timeGetTime() при последнем Release
// +28..+91: Для raw-режима — встроенная запись (единственный File entry)
NResArchive* next; // +92: Следующий архив в связном списке
uint8_t is_writable; // +100: Файл открыт для записи
uint8_t is_cacheable; // +101: Не выгружать при refcount = 0
};
```
## 4.2. Внутренняя структура RsLi-архива (56 + 64 × N байт)
```c
struct RsLibHeader { // 56 байт (14 DWORD)
uint32_t magic; // +0: 'RsLi' (0x694C7352)
int32_t entry_count; // +4: Количество записей
uint32_t media_offset; // +8: Смещение медиа-оверлея
uint32_t reserved_0C; // +12: 0
HANDLE file_handle_2; // +16: -1 (дополнительный хэндл)
uint32_t reserved_14; // +20: 0
uint32_t reserved_18; // +24: —
uint32_t reserved_1C; // +28: 0
HANDLE mapping_handle_2; // +32: -1
uint32_t reserved_24; // +36: 0
uint32_t flag_28; // +40: (flags >> 7) & 1
HANDLE file_handle; // +44: Хэндл файла
HANDLE mapping_handle; // +48: Хэндл файлового маппинга
void* mapped_view; // +52: Указатель на mapped view
};
// Далее следуют entry_count записей по 64 байта каждая
```
### Внутренняя запись RsLi (64 байта)
```c
struct RsLibEntry { // 64 байта (16 DWORD)
char name[16]; // +0: Имя (12 из файла + 4 нуля)
int32_t flags; // +16: Флаги (sign-extended из int16)
int32_t sort_index; // +20: sort_to_original[i] (таблица индексов / XORключ)
uint32_t uncompressed_size; // +24: Размер несжатых данных (из поля 20 записи)
void* data_ptr; // +28: Указатель на данные в mapped view
uint32_t compressed_size; // +32: Размер сжатых данных (из поля 28 записи)
uint32_t reserved_24; // +36: 0
uint32_t reserved_28; // +40: 0
uint32_t reserved_2C; // +44: 0
void* loaded_data; // +48: Указатель на декомпрессированные данные
// +52..+63: дополнительные поля
};
```
---
# Часть 5. Экспортируемые API-функции
## 5.1. NRes API
| Функция | Описание |
| ------------------------------ | ------------------------------------------------------------------------- |
| `niOpenResFile(path)` | Открыть NRes-архив (только чтение), эквивалент `niOpenResFileEx(path, 0)` |
| `niOpenResFileEx(path, flags)` | Открыть NRes-архив с флагами |
| `niOpenResInMem(ptr, size)` | Открыть NRes-архив из памяти |
| `niCreateResFile(path)` | Создать/открыть NRes-архив для записи |
## 5.2. RsLi API
| Функция | Описание |
| ------------------------------- | -------------------------------------------------------- |
| `rsOpenLib(path, flags)` | Открыть RsLi-библиотеку |
| `rsCloseLib(lib)` | Закрыть библиотеку |
| `rsLibNum(lib)` | Получить количество записей |
| `rsFind(lib, name)`             | Найти запись по имени (→ индекс или −1)                  |
| `rsLoad(lib, index)` | Загрузить и декомпрессировать ресурс |
| `rsLoadFast(lib, index, flags)` | Быстрая загрузка (без декомпрессии если возможно) |
| `rsLoadPacked(lib, index)` | Загрузить в «упакованном» виде (отложенная декомпрессия) |
| `rsLoadByName(lib, name)` | `rsFind` + `rsLoad` |
| `rsGetInfo(lib, index, out)` | Получить имя и размер ресурса |
| `rsGetPackMethod(lib, index)` | Получить метод сжатия (`flags & 0x1C0`) |
| `ngiUnpack(packed)` | Декомпрессировать ранее загруженный упакованный ресурс |
| `ngiAlloc(size)` | Выделить память (с обработкой ошибок) |
| `ngiFree(ptr)` | Освободить память |
| `ngiGetMemSize(ptr)` | Получить размер выделенного блока |
---
# Часть 6. Контрольные заметки для реализации
## 6.1. Кодировки и регистр
- **NRes**: имена хранятся **как есть** (case-insensitive при поиске через `_strcmpi`).
- **RsLi**: имена хранятся в **верхнем регистре**. Перед поиском запрос приводится к верхнему регистру (`_strupr`). Сравнение — через `strcmp` (case-sensitive для уже uppercase строк).
## 6.2. Порядок байт
Все значения хранятся в **little-endian** порядке (платформа x86/Win32).
## 6.3. Выравнивание
- **NRes**: данные каждого ресурса выровнены по границе **8 байт** (0-padding между файлами).
- **RsLi**: выравнивание данных не описано в коде (данные идут подряд).
## 6.4. Размер записей на диске
- **NRes**: каталог — **64 байта** на запись, расположен в конце файла.
- **RsLi**: таблица — **32 байта** на запись (зашифрованная), расположена в начале файла (сразу после 32-байтного заголовка).
## 6.5. Кэширование и memory mapping
Оба формата используют Windows Memory-Mapped Files (`CreateFileMapping` + `MapViewOfFile`). NRes-архивы организованы в глобальный **связный список** (`dword_1003A66C`) со счётчиком ссылок и таймером неактивности (10 секунд = 0x2710 мс). При refcount == 0 и истечении таймера архив автоматически выгружается (если не установлен флаг `is_cacheable`).
## 6.6. Размер seed XOR
- **Заголовок RsLi**: seed — **4 байта** (DWORD) по смещению 20, но используются только младшие 2 байта (`lo = byte[0]`, `hi = byte[1]`).
- **Запись RsLi**: sort_to_original[i] — **2 байта** (int16) по смещению 18 записи.
- **Данные при комбинированном XOR+LZSS**: seed — **4 байта** (DWORD) из поля по смещению 20 записи, но опять используются только 2 байта.
## 6.7. Эмпирическая проверка на данных игры
- Найдено архивов по сигнатуре: **122** (`NRes`: 120, `RsLi`: 2).
- Выполнен полный roundtrip `unpack -> pack -> byte-compare`: **122/122** архивов совпали побайтно.
- Для `RsLi` в проверенном наборе встретились методы: `0x040` и `0x100`.
Подтверждённые нюансы:
- Для LZSS (метод `0x040`) рабочая раскладка нибблов в ссылке: `OOOO LLLL`, а не `LLLL OOOO`.
- Для Deflate (метод `0x100`) возможен случай `packed_size == фактический_конец + 1` на последней записи файла.

90
docs/specs/textures.md Normal file
View File

@@ -0,0 +1,90 @@
# Текстуры и материалы
На текущем этапе в дизассемблированных библиотеках **не найден полный декодер формата текстурного файла** (нет явных парсеров DDS/TGA/BMP и т.п.). Поэтому документ пока фиксирует:
- что можно достоверно вывести по рендер‑конфигу,
- что видно по структурам модели (materialIndex),
- какие места требуют дальнейшего анализа.
---
## 1) Материал в модели
В batch table модели (см. документацию по MSH/AniMesh) есть поле, очень похожее на:
- `materialIndex: u16` (batch + 2)
Это индекс, по которому рендерер выбирает:
- текстуру(ы),
- параметры (blend, alpha test, двухтекстурность и т.п.),
- “шейдер/пайплайн” (в терминах оригинального рендера — набор stateов).
**Где лежит таблица материалов** (внутри модели или глобально) — требует подтверждения:
- вероятный кандидат — отдельный ресурс/таблица, на которую `materialIndex` ссылается.
- строковая таблица `Res10` может хранить имена материалов/текстур, но маппинг не доказан.
---
## 2) Переключатели рендера, влияющие на текстуры (из Ngi32.dll)
В `Ngi32.dll` есть набор runtimeнастроек (похоже, читаются из системных настроек/INI/registry), которые сильно влияют на текстурный пайплайн:
- `DisableMipmap`
- `DisableBilinear`
- `DisableTrilinear`
- `DisableMultiTexturing`
- `Disable32bitTextures` / `Force16bitTextures`
- `ForceSoftware`
- `ForceNoFiltering`
- `ForceHWTnL`
- `ForceNoHWTnL`
Практический вывод для порта:
- движок может работать **без мипмапов**, **без фильтрации**, и даже **без multitexturing**.
---
## 3) “Две текстуры” и дополнительные UVпотоки
В загрузчике модели присутствуют дополнительные pervertex ресурсы:
- Res15 (stride 8) — кандидат на UV1 (lightmap/second layer)
- Res16 (stride 8, split в 2×4) — кандидат на tangent/bitangent (normal mapping)
- Res18 (stride 4) — кандидат на vertex color / AO
Если материал реально поддерживает:
- вторую текстуру (detail map, lightmap),
- нормалмапы,
то где‑то должен быть код:
- который выбирает эти потоки как входные атрибуты вершинного шейдера/пайплайна,
- который активирует multitexturing.
Сейчас в найденных фрагментах это ещё **не подтверждено**, но структура данных “просится” именно туда.
---
## 4) Что нужно найти дальше (чтобы написать полноценную спецификацию материалов/текстур)
1. Место, где `materialIndex` разворачивается в набор render states:
- alpha blending / alpha test
- zwrite/ztest
- culling
- 1pass vs 2pass (multitexturing)
2. Формат записи “material record”:
- какие поля
- ссылки на текстуры (ID, имя, индекс в таблице)
3. Формат “texture asset”:
- где хранится (внутри NRes или отдельным файлом)
- компрессия/палитра/мипы
4. Привязка строковой таблицы `Res10` к материалам:
- это имена материалов?
- это имена текстур?
- или это имена узлов/анимаций?
До подтверждения этих пунктов разумнее держать документацию как “архитектурную карту”, а не как точный байтовый формат.

View File

@@ -1,16 +0,0 @@
[package]
name = "libnres"
version = "0.1.4"
description = "Library for NRes files"
authors = ["Valentin Popov <valentin@popov.link>"]
homepage = "https://git.popov.link/valentineus/fparkan"
repository = "https://git.popov.link/valentineus/fparkan.git"
license = "GPL-2.0"
edition = "2021"
keywords = ["gamedev", "library", "nres"]
[dependencies]
byteorder = "1.4"
log = "0.4"
miette = "5.6"
thiserror = "1.0"

View File

@@ -1,25 +0,0 @@
# Library for NRes files (Deprecated)
Library for viewing and retrieving game resources of the game **"Parkan: Iron Strategy"**.
All versions of the game are supported: Demo, IS, IS: Part 1, IS: Part 2.
Supports files with `lib`, `trf`, `rlb` extensions.
The files `gamefont.rlb` and `sprites.lib` are not supported.
These files have an unknown signature.
## Example
Example of extracting game resources:
```rust
fn main() {
let file = std::fs::File::open("./voices.lib").unwrap();
// Extracting the list of files
let list = libnres::reader::get_list(&file).unwrap();
for element in list {
// Extracting the contents of the file
let data = libnres::reader::get_file(&file, &element).unwrap();
}
}
```

View File

@@ -1,33 +0,0 @@
use crate::error::ConverterError;
/// Losslessly widens a `u32` into a `u64`.
pub fn u32_to_u64(value: u32) -> Result<u64, ConverterError> {
    u64::try_from(value).map_err(ConverterError::Infallible)
}
/// Converts a `u32` into a `usize`, failing on 16-bit targets where it may not fit.
pub fn u32_to_usize(value: u32) -> Result<usize, ConverterError> {
    usize::try_from(value).map_err(ConverterError::TryFromIntError)
}
/// Narrows a `u64` into a `u32`, failing when the value exceeds `u32::MAX`.
pub fn u64_to_u32(value: u64) -> Result<u32, ConverterError> {
    u32::try_from(value).map_err(ConverterError::TryFromIntError)
}
/// Narrows a `usize` into a `u32`, failing when the value exceeds `u32::MAX`.
pub fn usize_to_u32(value: usize) -> Result<u32, ConverterError> {
    u32::try_from(value).map_err(ConverterError::TryFromIntError)
}

View File

@@ -1,45 +0,0 @@
extern crate miette;
extern crate thiserror;
use miette::Diagnostic;
use thiserror::Error;
/// Errors produced by the integer-width conversion helpers in `converter`.
#[derive(Error, Diagnostic, Debug)]
pub enum ConverterError {
    /// Wraps `std::convert::Infallible` (the error type of `u64::try_from(u32)`).
    /// This variant can never actually be constructed; it exists so all
    /// converter functions share one uniform error enum.
    #[error("error converting an value")]
    #[diagnostic(code(libnres::infallible))]
    Infallible(#[from] std::convert::Infallible),
    /// An integer value did not fit into the requested target width.
    #[error("error converting an value")]
    #[diagnostic(code(libnres::try_from_int_error))]
    TryFromIntError(#[from] std::num::TryFromIntError),
}
/// Errors raised while reading and validating an NRes container.
#[derive(Error, Diagnostic, Debug)]
pub enum ReaderError {
    /// An integer-width conversion failed (propagated from `converter`).
    #[error(transparent)]
    #[diagnostic(code(libnres::convert_error))]
    ConvertValue(#[from] ConverterError),
    /// The 16-byte header does not carry the expected NRes type constants.
    #[error("incorrect header format")]
    #[diagnostic(code(libnres::list_type_error))]
    IncorrectHeader,
    /// The size recorded in the header disagrees with the actual file size.
    #[error("incorrect file size (expected {expected:?} bytes, received {received:?} bytes)")]
    #[diagnostic(code(libnres::file_size_error))]
    IncorrectSizeFile { expected: u32, received: u32 },
    /// The directory byte length is not a multiple of the 64-byte record size.
    #[error(
        "incorrect size of the file list (not a multiple of {expected:?}, received {received:?})"
    )]
    #[diagnostic(code(libnres::list_size_error))]
    IncorrectSizeList { expected: u32, received: u32 },
    /// An underlying I/O operation failed.
    #[error("resource file reading error")]
    #[diagnostic(code(libnres::io_error))]
    ReadFile(#[from] std::io::Error),
    /// The file is smaller than the fixed 16-byte header.
    #[error("file is too small (must be at least {expected:?} bytes, received {received:?} byte)")]
    #[diagnostic(code(libnres::file_size_error))]
    SmallFile { expected: u32, received: u32 },
}

View File

@@ -1,24 +0,0 @@
/// First constant value of the NRes file ("NRes" characters in numeric)
pub const FILE_TYPE_1: u32 = 1936020046;
/// Second constant value of the NRes file
pub const FILE_TYPE_2: u32 = 256;
/// Size of the element item (in bytes)
pub const LIST_ELEMENT_SIZE: u32 = 64;
/// Minimum allowed file size (in bytes)
pub const MINIMUM_FILE_SIZE: u32 = 16;
// Process-wide debug flag, toggled via `set_debug` and read via `get_debug`.
// Relaxed ordering is enough: it is a standalone flag with no data it guards.
static DEBUG: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
mod converter;
mod error;
pub mod reader;
/// Get debug status value
pub fn get_debug() -> bool {
    DEBUG.load(std::sync::atomic::Ordering::Relaxed)
}
/// Change debug status value
pub fn set_debug(value: bool) {
    DEBUG.store(value, std::sync::atomic::Ordering::Relaxed)
}

View File

@@ -1,227 +0,0 @@
use std::io::{Read, Seek};
use byteorder::ByteOrder;
use crate::error::ReaderError;
use crate::{converter, FILE_TYPE_1, FILE_TYPE_2, LIST_ELEMENT_SIZE, MINIMUM_FILE_SIZE};
/// One directory record of an NRes archive (64 bytes on disk; see
/// `get_list_element` for the exact byte layout).
#[derive(Debug)]
pub struct ListElement {
    /// Unknown parameter
    // Record bytes 4..8; meaning not yet reverse-engineered.
    _unknown0: i32,
    /// Unknown parameter
    // Record bytes 8..12; meaning not yet reverse-engineered.
    _unknown1: i32,
    /// Unknown parameter
    // Record bytes 16..20; meaning not yet reverse-engineered.
    _unknown2: i32,
    /// File extension
    // Record bytes 0..4, NUL-trimmed.
    pub extension: String,
    /// Identifier or sequence number
    // Record bytes 60..64.
    pub index: u32,
    /// File name
    // Record bytes 20..56, NUL-trimmed.
    pub name: String,
    /// Position in the file
    // Record bytes 56..60: absolute byte offset of the payload.
    pub position: u32,
    /// File size (in bytes)
    // Record bytes 12..16.
    pub size: u32,
}
impl ListElement {
    /// Returns the full file name in `name.extension` form.
    pub fn get_filename(&self) -> String {
        let mut filename = String::with_capacity(self.name.len() + self.extension.len() + 1);
        filename.push_str(&self.name);
        filename.push('.');
        filename.push_str(&self.extension);
        filename
    }
}
/// The fixed 16-byte NRes header (see `get_file_header` for byte offsets).
#[derive(Debug)]
pub struct FileHeader {
    /// File size
    // Header bytes 12..16: total archive size the header claims.
    size: u32,
    /// Number of files
    // Header bytes 8..12.
    total: u32,
    /// First constant value
    // Header bytes 0..4; must equal FILE_TYPE_1 ("NRes").
    type1: u32,
    /// Second constant value
    // Header bytes 4..8; must equal FILE_TYPE_2.
    type2: u32,
}
/// Reads the payload of a single packed file.
///
/// The container is validated (size and header) before the element's
/// byte range is extracted.
pub fn get_file(file: &std::fs::File, element: &ListElement) -> Result<Vec<u8>, ReaderError> {
    let size = get_file_size(file)?;
    check_file_size(size)?;
    let header = get_file_header(file)?;
    check_file_header(&header, size)?;
    get_element_data(file, element)
}
/// Reads the archive directory and returns one `ListElement` per packed file.
pub fn get_list(file: &std::fs::File) -> Result<Vec<ListElement>, ReaderError> {
    let size = get_file_size(file)?;
    check_file_size(size)?;
    let header = get_file_header(file)?;
    check_file_header(&header, size)?;
    let mut elements = Vec::new();
    get_file_list(file, &header, &mut elements)?;
    Ok(elements)
}
/// Validates the header signature and the size it claims against the
/// actual file size.
fn check_file_header(header: &FileHeader, size: u32) -> Result<(), ReaderError> {
    let signature_ok = header.type1 == FILE_TYPE_1 && header.type2 == FILE_TYPE_2;
    if !signature_ok {
        return Err(ReaderError::IncorrectHeader);
    }
    match header.size == size {
        true => Ok(()),
        false => Err(ReaderError::IncorrectSizeFile {
            expected: size,
            received: header.size,
        }),
    }
}
/// Rejects files too small to even contain the fixed 16-byte header.
fn check_file_size(size: u32) -> Result<(), ReaderError> {
    match size >= MINIMUM_FILE_SIZE {
        true => Ok(()),
        false => Err(ReaderError::SmallFile {
            expected: MINIMUM_FILE_SIZE,
            received: size,
        }),
    }
}
/// Seeks to the element's recorded position and reads exactly `size` bytes.
fn get_element_data(file: &std::fs::File, element: &ListElement) -> Result<Vec<u8>, ReaderError> {
    let offset = converter::u32_to_u64(element.position)?;
    let length = converter::u32_to_usize(element.size)?;
    let mut reader = std::io::BufReader::new(file);
    // I/O failures convert into ReaderError::ReadFile via `#[from]`.
    reader.seek(std::io::SeekFrom::Start(offset))?;
    let mut payload = vec![0u8; length];
    reader.read_exact(&mut payload)?;
    Ok(payload)
}
/// Computes the `[from, to)` slice bounds of directory record `index`
/// inside the directory buffer (each record is LIST_ELEMENT_SIZE bytes).
fn get_element_position(index: u32) -> Result<(usize, usize), ReaderError> {
    let start = index * LIST_ELEMENT_SIZE;
    let from = converter::u32_to_usize(start)?;
    let to = converter::u32_to_usize(start + LIST_ELEMENT_SIZE)?;
    Ok((from, to))
}
/// Reads and decodes the fixed 16-byte header at the start of the file.
fn get_file_header(file: &std::fs::File) -> Result<FileHeader, ReaderError> {
    let mut reader = std::io::BufReader::new(file);
    reader.seek(std::io::SeekFrom::Start(0))?;
    let mut raw = vec![0u8; MINIMUM_FILE_SIZE as usize];
    reader.read_exact(&mut raw)?;
    // Header layout (little-endian): 0..4 type1, 4..8 type2,
    // 8..12 entry count, 12..16 total size.
    Ok(FileHeader {
        type1: byteorder::LittleEndian::read_u32(&raw[0..4]),
        type2: byteorder::LittleEndian::read_u32(&raw[4..8]),
        total: byteorder::LittleEndian::read_u32(&raw[8..12]),
        size: byteorder::LittleEndian::read_u32(&raw[12..16]),
    })
}
/// Reads the directory block at the end of the archive and decodes each
/// 64-byte record into `list`.
fn get_file_list(
    file: &std::fs::File,
    header: &FileHeader,
    list: &mut Vec<ListElement>,
) -> Result<(), ReaderError> {
    let (directory_offset, directory_len) = get_list_position(header)?;
    let mut reader = std::io::BufReader::new(file);
    reader.seek(std::io::SeekFrom::Start(directory_offset))?;
    let mut directory = vec![0u8; directory_len];
    reader.read_exact(&mut directory)?;
    // The directory must decompose into whole 64-byte records.
    let directory_size = converter::usize_to_u32(directory.len())?;
    if directory_size % LIST_ELEMENT_SIZE != 0 {
        return Err(ReaderError::IncorrectSizeList {
            expected: LIST_ELEMENT_SIZE,
            received: directory_size,
        });
    }
    let count = directory_size / LIST_ELEMENT_SIZE;
    for index in 0..count {
        let (from, to) = get_element_position(index)?;
        list.push(get_list_element(&directory[from..to])?);
    }
    Ok(())
}
/// Queries the file's size from its metadata, as a `u32`.
fn get_file_size(file: &std::fs::File) -> Result<u32, ReaderError> {
    let metadata = file.metadata()?;
    Ok(converter::u64_to_u32(metadata.len())?)
}
/// Decodes one 64-byte directory record (fields listed in on-disk order).
fn get_list_element(buffer: &[u8]) -> Result<ListElement, ReaderError> {
    // 0..4: extension, NUL-padded ASCII.
    let extension = String::from_utf8_lossy(&buffer[0..4])
        .trim_matches(char::from(0))
        .to_string();
    // 4..12 and 16..20: three values with unknown meaning.
    let unknown0 = byteorder::LittleEndian::read_i32(&buffer[4..8]);
    let unknown1 = byteorder::LittleEndian::read_i32(&buffer[8..12]);
    // 12..16: payload size in bytes.
    let size = byteorder::LittleEndian::read_u32(&buffer[12..16]);
    let unknown2 = byteorder::LittleEndian::read_i32(&buffer[16..20]);
    // 20..56: file name, NUL-padded.
    let name = String::from_utf8_lossy(&buffer[20..56])
        .trim_matches(char::from(0))
        .to_string();
    // 56..60: absolute payload offset; 60..64: identifier / sequence number.
    let position = byteorder::LittleEndian::read_u32(&buffer[56..60]);
    let index = byteorder::LittleEndian::read_u32(&buffer[60..64]);
    Ok(ListElement {
        _unknown0: unknown0,
        _unknown1: unknown1,
        _unknown2: unknown2,
        extension,
        index,
        name,
        position,
        size,
    })
}
fn get_list_position(header: &FileHeader) -> Result<(u64, usize), ReaderError> {
let position = converter::u32_to_u64(header.size - (header.total * LIST_ELEMENT_SIZE))?;
let size = converter::u32_to_usize(header.total * LIST_ELEMENT_SIZE)?;
Ok((position, size))
}

35
mkdocs.yml Normal file
View File

@@ -0,0 +1,35 @@
# Project information
site_name: FParkan
site_url: https://fparkan.popov.link/
site_author: Valentin Popov
site_description: >-
Utilities and tools for the game “Parkan: Iron Strategy”.
# Repository
repo_name: valentineus/fparkan
repo_url: https://github.com/valentineus/fparkan
# Copyright
copyright: Copyright &copy; 2023 &mdash; 2024 Valentin Popov
# Configuration
theme:
name: material
language: ru
palette:
scheme: slate
# Navigation
nav:
- Home: index.md
- Specs:
- NRes / RsLi: specs/nres.md
- 3D модели: specs/msh.md
- Текстуры и материалы: specs/textures.md
- Эффекты и частицы: specs/effects.md
# Additional configuration
extra:
social:
- icon: fontawesome/brands/github
link: https://github.com/valentineus/fparkan

View File

@@ -1,20 +0,0 @@
[package]
name = "nres-cli"
version = "0.2.3"
description = "Console tool for NRes files"
authors = ["Valentin Popov <valentin@popov.link>"]
homepage = "https://git.popov.link/valentineus/fparkan"
repository = "https://git.popov.link/valentineus/fparkan.git"
license = "GPL-2.0"
edition = "2021"
keywords = ["cli", "gamedev", "nres"]
[dependencies]
byteorder = "1.4"
clap = { version = "4.2", features = ["derive"] }
console = "0.15"
dialoguer = { version = "0.10", features = ["completion"] }
indicatif = "0.17"
libnres = { version = "0.1", path = "../libnres" }
miette = { version = "5.6", features = ["fancy"] }
tempdir = "0.3"

View File

@@ -1,6 +0,0 @@
# Console tool for NRes files (Deprecated)
## Commands
- `extract` - Extract game resources from a "NRes" file.
- `ls` - Get a list of files in a "NRes" file.

View File

@@ -1,198 +0,0 @@
extern crate core;
extern crate libnres;
use std::io::Write;
use clap::{Parser, Subcommand};
use miette::{IntoDiagnostic, Result};
/// Top-level CLI definition; clap derives the argument parser from this struct.
#[derive(Parser, Debug)]
#[command(name = "NRes CLI")]
#[command(about, author, version, long_about = None)]
struct Cli {
    // The selected subcommand; see `Commands` for the available actions.
    #[command(subcommand)]
    command: Commands,
}
// Subcommands of the CLI. NOTE: the `///` doc comments below double as the
// help text clap prints at runtime, so their wording is deliberately left
// byte-identical (including the "can be extract" phrasing).
#[derive(Subcommand, Debug)]
enum Commands {
    /// Check if the "NRes" file can be extract
    // Extracts everything into a temp dir and discards it; success == readable.
    Check {
        /// "NRes" file
        file: String,
    },
    /// Print debugging information on the "NRes" file
    #[command(arg_required_else_help = true)]
    Debug {
        /// "NRes" file
        file: String,
        /// Filter results by file name
        #[arg(long)]
        name: Option<String>,
    },
    /// Extract files or a file from the "NRes" file
    #[command(arg_required_else_help = true)]
    Extract {
        /// "NRes" file
        file: String,
        /// Overwrite files
        #[arg(short, long, default_value_t = false, value_name = "TRUE|FALSE")]
        force: bool,
        /// Outbound directory
        #[arg(short, long, value_name = "DIR")]
        out: String,
    },
    /// Print a list of files in the "NRes" file
    #[command(arg_required_else_help = true)]
    Ls {
        /// "NRes" file
        file: String,
    },
}
/// Entry point: parses CLI arguments and dispatches to the subcommand handlers.
pub fn main() -> Result<()> {
    let stdout = console::Term::stdout();
    let cli = Cli::parse();
    match cli.command {
        Commands::Check { file } => command_check(stdout, file)?,
        Commands::Debug { file, name } => command_debug(stdout, file, name)?,
        Commands::Extract { file, force, out } => command_extract(stdout, file, out, force)?,
        Commands::Ls { file } => command_ls(stdout, file)?,
    }
    Ok(())
}
/// Extracts every entry into a throwaway temp directory; reaching the end
/// without an error means the archive is fully readable.
fn command_check(_stdout: console::Term, file: String) -> Result<()> {
    let archive = std::fs::File::open(file).into_diagnostic()?;
    let entries = libnres::reader::get_list(&archive).into_diagnostic()?;
    let scratch = tempdir::TempDir::new("nres").into_diagnostic()?;
    let progress = indicatif::ProgressBar::new(entries.len() as u64);
    progress.set_style(get_bar_style()?);
    for entry in entries {
        progress.set_message(entry.get_filename());
        let target = scratch.path().join(entry.get_filename());
        let mut output = std::fs::File::create(target).into_diagnostic()?;
        let payload = libnres::reader::get_file(&archive, &entry).into_diagnostic()?;
        output.write_all(&payload).into_diagnostic()?;
        progress.inc(1);
    }
    progress.finish();
    Ok(())
}
/// Prints per-entry debug information and aggregate statistics for a
/// "NRes" file.
///
/// Totals are computed over the whole archive before the optional `name`
/// filter is applied, so filtering only affects the printed entries.
fn command_debug(stdout: console::Term, file: String, name: Option<String>) -> Result<()> {
    let file = std::fs::File::open(file).into_diagnostic()?;
    let mut list = libnres::reader::get_list(&file).into_diagnostic()?;
    let mut total_files_size: u32 = 0;
    let mut total_files_gap: u32 = 0;
    let mut total_files: u32 = 0;
    for (index, item) in list.iter().enumerate() {
        total_files_size += item.size;
        total_files += 1;
        // Gap = unused bytes between the previous entry's end and this
        // entry's start. BUG FIX: the original used `index > 1`, which
        // silently skipped the gap between entries 0 and 1.
        // NOTE(review): assumes entries are ordered by ascending position;
        // otherwise the subtraction underflows — confirm for all game files.
        let mut gap = 0;
        if index > 0 {
            let previous_item = &list[index - 1];
            gap = item.position - (previous_item.position + previous_item.size);
        }
        total_files_gap += gap;
    }
    if let Some(name) = name {
        list.retain(|item| item.name.contains(&name));
    };
    for (index, item) in list.iter().enumerate() {
        // Same gap computation for display (same `index > 1` fix as above).
        // When a name filter is active the "previous" entry is the previous
        // *filtered* entry, so a printed gap may span several original entries.
        let mut gap = 0;
        if index > 0 {
            let previous_item = &list[index - 1];
            gap = item.position - (previous_item.position + previous_item.size);
        }
        let text = format!("Index: {};\nGap: {};\nItem: {:#?};\n", index, gap, item);
        stdout.write_line(&text).into_diagnostic()?;
    }
    let text = format!(
        "Total files: {};\nTotal files gap: {} (bytes);\nTotal files size: {} (bytes);",
        total_files, total_files_gap, total_files_size
    );
    stdout.write_line(&text).into_diagnostic()?;
    Ok(())
}
/// Extracts all entries into `out`, prompting before overwriting an
/// existing file unless `force` is set.
fn command_extract(_stdout: console::Term, file: String, out: String, force: bool) -> Result<()> {
    let archive = std::fs::File::open(file).into_diagnostic()?;
    let entries = libnres::reader::get_list(&archive).into_diagnostic()?;
    let progress = indicatif::ProgressBar::new(entries.len() as u64);
    progress.set_style(get_bar_style()?);
    for entry in entries {
        progress.set_message(entry.get_filename());
        let path = format!("{}/{}", out, entry.get_filename());
        // Ask before clobbering an existing file unless --force was given.
        if !force && is_exist_file(&path) {
            let message = format!("File \"{}\" exists. Overwrite it?", path);
            let confirmed = dialoguer::Confirm::new()
                .with_prompt(message)
                .interact()
                .into_diagnostic()?;
            if !confirmed {
                continue;
            }
        }
        let mut output = std::fs::File::create(path).into_diagnostic()?;
        let payload = libnres::reader::get_file(&archive, &entry).into_diagnostic()?;
        output.write_all(&payload).into_diagnostic()?;
        progress.inc(1);
    }
    progress.finish();
    Ok(())
}
/// Prints the bare entry names of the archive, one per line.
fn command_ls(stdout: console::Term, file: String) -> Result<()> {
    let archive = std::fs::File::open(file).into_diagnostic()?;
    for entry in libnres::reader::get_list(&archive).into_diagnostic()? {
        stdout.write_line(&entry.name).into_diagnostic()?;
    }
    Ok(())
}
/// Builds the shared progress-bar style: "[=====>--]    3/     10 name".
fn get_bar_style() -> Result<indicatif::ProgressStyle> {
    let template = "[{bar:32}] {pos:>7}/{len:7} {msg}";
    let style = indicatif::ProgressStyle::with_template(template).into_diagnostic()?;
    Ok(style.progress_chars("=>-"))
}
/// Returns `true` when `path` points to an existing filesystem entry.
///
/// Fix: takes the idiomatic `&str` instead of `&String`; existing call
/// sites (`is_exist_file(&path)` with `path: String`) still compile via
/// deref coercion.
fn is_exist_file(path: &str) -> bool {
    std::path::Path::new(path).exists()
}

View File

@@ -1,9 +0,0 @@
[package]
name = "packer"
version = "0.1.0"
edition = "2021"
[dependencies]
byteorder = "1.4.3"
serde = { version = "1.0.160", features = ["derive"] }
serde_json = "1.0.96"

View File

@@ -1,27 +0,0 @@
# NRes Game Resource Packer
At the moment, this is a demonstration of the NRes game resource packing algorithm in action.
It packs 100% of the NRes game resources for the game "Parkan: Iron Strategy".
The hash sums of the resulting files match the original game files.
__Attention!__
This is a test version of the utility. It overwrites the specified final file without asking.
## Building
To build the tools, you need to run the following command in the root directory:
```bash
cargo build --release
```
## Running
You can run the utility with the following command:
```bash
./target/release/packer /path/to/unpack /path/to/file.ex
```
- `/path/to/unpack`: This is the directory with the resources unpacked by the [unpacker](../unpacker) utility.
- `/path/to/file.ex`: This is the final file that will be created.

View File

@@ -1,175 +0,0 @@
use std::env;
use std::{
fs::{self, File},
io::{BufReader, Read},
};
use byteorder::{ByteOrder, LittleEndian};
use serde::{Deserialize, Serialize};
/// One record of the unpacker's `index.json`, deserialized with serde.
/// Position and size are intentionally absent: they are recomputed during
/// packing from the payload files themselves.
#[derive(Serialize, Deserialize, Debug)]
pub struct ImportListElement {
    /// File extension written into the 4-byte type field of the record.
    pub extension: String,
    /// Identifier / sequence number; also the payload file's name suffix.
    pub index: u32,
    /// File name (also the payload file's name stem on disk).
    pub name: String,
    /// Unknown field #1 (record bytes 4..8), carried through verbatim.
    pub unknown0: u32,
    /// Unknown field #2 (record bytes 8..12), carried through verbatim.
    pub unknown1: u32,
    /// Unknown field #3 (record bytes 16..20), carried through verbatim.
    pub unknown2: u32,
}
/// A fully resolved directory record, ready to be serialized into the
/// 64-byte on-disk form (see the serialization sequence in `pack`).
#[derive(Debug)]
pub struct ListElement {
    /// File extension (record bytes 0..4, NUL-padded).
    pub extension: String,
    /// Identifier / sequence number (record bytes 60..64).
    pub index: u32,
    /// File name (record bytes 20..56, NUL-padded).
    pub name: String,
    /// Absolute payload offset inside the archive (record bytes 56..60).
    pub position: u32,
    /// Payload size in bytes (record bytes 12..16).
    pub size: u32,
    /// Unknown field #1 (record bytes 4..8).
    pub unknown0: u32,
    /// Unknown field #2 (record bytes 8..12).
    pub unknown1: u32,
    /// Unknown field #3 (record bytes 16..20).
    pub unknown2: u32,
}
/// Entry point: `packer <input-dir> <output-file>`.
///
/// Fix: the original indexed `args[1]`/`args[2]` unconditionally and
/// panicked when arguments were missing; now exits with a usage message.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 3 {
        eprintln!("Usage: packer <input-dir> <output-file>");
        std::process::exit(1);
    }
    let input = &args[1];
    let output = &args[2];
    pack(String::from(input), String::from(output));
}
/// Repacks resources unpacked by the `unpacker` tool into a single NRes file.
///
/// `input` is a directory containing `index.json` plus payload files named
/// `<name>.<index>`; `output` is the archive to (over)write. Resulting layout:
/// 16-byte header, 8-byte-aligned payload blocks, then a 64-byte-per-entry
/// directory. Any I/O or JSON failure aborts via `unwrap` (demo-quality tool).
fn pack(input: String, output: String) {
    // Load the index file.
    let index_file = format!("{}/{}", input, "index.json");
    let data = fs::read_to_string(index_file).unwrap();
    let list: Vec<ImportListElement> = serde_json::from_str(&data).unwrap();
    // Shared buffer for the concatenated payloads.
    let mut content_buffer: Vec<u8> = Vec::new();
    let mut list_buffer: Vec<u8> = Vec::new();
    // Total number of files.
    let total_files: u32 = list.len() as u32;
    for (index, item) in list.iter().enumerate() {
        // Open the payload file.
        let path = format!("{}/{}.{}", input, item.name, item.index);
        let file = File::open(path).unwrap();
        let metadata = file.metadata().unwrap();
        // Read the payload into a buffer.
        let mut reader = BufReader::new(file);
        let mut file_buffer: Vec<u8> = Vec::new();
        reader.read_to_end(&mut file_buffer).unwrap();
        // Pad to an 8-byte boundary (no padding before the first payload).
        if index != 0 {
            while content_buffer.len() % 8 != 0 {
                content_buffer.push(0);
            }
        }
        // Absolute position of this payload (+16 skips the archive header).
        let position = content_buffer.len() + 16;
        // Append the payload.
        content_buffer.extend(file_buffer);
        // Build the directory element.
        let element = ListElement {
            extension: item.extension.to_string(),
            index: item.index,
            name: item.name.to_string(),
            position: position as u32,
            size: metadata.len() as u32,
            unknown0: item.unknown0,
            unknown1: item.unknown1,
            unknown2: item.unknown2,
        };
        // Serialize the element into its 64-byte on-disk form (little-endian).
        let mut element_buffer: Vec<u8> = Vec::new();
        // Bytes 0..4: file type / extension, NUL-padded.
        let mut extension_buffer: [u8; 4] = [0; 4];
        let mut file_extension_buffer = element.extension.into_bytes();
        file_extension_buffer.resize(4, 0);
        extension_buffer.copy_from_slice(&file_extension_buffer);
        element_buffer.extend(extension_buffer);
        // Bytes 4..8: unknown value #1.
        let mut unknown0_buffer: [u8; 4] = [0; 4];
        LittleEndian::write_u32(&mut unknown0_buffer, element.unknown0);
        element_buffer.extend(unknown0_buffer);
        // Bytes 8..12: unknown value #2.
        let mut unknown1_buffer: [u8; 4] = [0; 4];
        LittleEndian::write_u32(&mut unknown1_buffer, element.unknown1);
        element_buffer.extend(unknown1_buffer);
        // Bytes 12..16: payload size.
        let mut file_size_buffer: [u8; 4] = [0; 4];
        LittleEndian::write_u32(&mut file_size_buffer, element.size);
        element_buffer.extend(file_size_buffer);
        // Bytes 16..20: unknown value #3.
        let mut unknown2_buffer: [u8; 4] = [0; 4];
        LittleEndian::write_u32(&mut unknown2_buffer, element.unknown2);
        element_buffer.extend(unknown2_buffer);
        // Bytes 20..56: file name, NUL-padded.
        let mut name_buffer: [u8; 36] = [0; 36];
        let mut file_name_buffer = element.name.into_bytes();
        file_name_buffer.resize(36, 0);
        name_buffer.copy_from_slice(&file_name_buffer);
        element_buffer.extend(name_buffer);
        // Bytes 56..60: payload position.
        let mut position_buffer: [u8; 4] = [0; 4];
        LittleEndian::write_u32(&mut position_buffer, element.position);
        element_buffer.extend(position_buffer);
        // Bytes 60..64: file index.
        let mut index_buffer: [u8; 4] = [0; 4];
        LittleEndian::write_u32(&mut index_buffer, element.index);
        element_buffer.extend(index_buffer);
        // Append the serialized record to the directory buffer.
        list_buffer.extend(element_buffer);
    }
    // Final padding before the directory block.
    while content_buffer.len() % 8 != 0 {
        content_buffer.push(0);
    }
    let mut header_buffer: Vec<u8> = Vec::new();
    // Header bytes 0..4: first type constant ("NRes" as a number).
    let mut header_type_1 = [0; 4];
    LittleEndian::write_u32(&mut header_type_1, 1936020046_u32);
    header_buffer.extend(header_type_1);
    // Header bytes 4..8: second type constant.
    let mut header_type_2 = [0; 4];
    LittleEndian::write_u32(&mut header_type_2, 256_u32);
    header_buffer.extend(header_type_2);
    // Header bytes 8..12: number of files.
    let mut header_total_files = [0; 4];
    LittleEndian::write_u32(&mut header_total_files, total_files);
    header_buffer.extend(header_total_files);
    // Header bytes 12..16: total archive size (header + payloads + directory).
    let mut header_total_size = [0; 4];
    let total_size: u32 = ((content_buffer.len() + 16) as u32) + (total_files * 64);
    LittleEndian::write_u32(&mut header_total_size, total_size);
    header_buffer.extend(header_total_size);
    let mut result_buffer: Vec<u8> = Vec::new();
    result_buffer.extend(header_buffer);
    result_buffer.extend(content_buffer);
    result_buffer.extend(list_buffer);
    fs::write(output, result_buffer).unwrap();
}

6
renovate.config.cjs Normal file
View File

@@ -0,0 +1,6 @@
// Renovate bot configuration for the self-hosted Gitea instance.
module.exports = {
  endpoint: "https://code.popov.link",
  gitAuthor: "renovate[bot] <renovatebot@noreply.localhost>",
  // Skip repositories where Renovate is disabled without cloning them.
  optimizeForDisabled: true,
  platform: "gitea",
};

1
requirements.txt Normal file
View File

@@ -0,0 +1 @@
mkdocs-material

2
testdata/nres/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*
!.gitignore

2
testdata/rsli/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*
!.gitignore

View File

@@ -1,8 +0,0 @@
[package]
name = "texture-decoder"
version = "0.1.0"
edition = "2021"
[dependencies]
byteorder = "1.4.3"
image = "0.24.7"

View File

@@ -1,13 +0,0 @@
# Декодировщик текстур
Сборка:
```bash
cargo build --release
```
Запуск:
```bash
./target/release/texture-decoder ./out/AIM_02.0 ./out/AIM_02.0.png
```

View File

@@ -1,41 +0,0 @@
use std::io::Read;
use byteorder::ReadBytesExt;
use image::Rgba;
/// Decodes a raw RGBA game texture into a standard image file (e.g. PNG).
///
/// Header layout read here: bytes 4..8 = width (LE u32), bytes 8..12 =
/// height (LE u32); pixel data follows the metadata block.
/// Fix: malformed input previously panicked via `expect`/`unwrap`; all
/// failures are now reported as `std::io::Error`, and a redundant second
/// `.to_vec()` clone of the pixel data was removed.
fn decode_texture(file_path: &str, output_path: &str) -> Result<(), std::io::Error> {
    // Read the whole texture file into memory.
    let mut file = std::fs::File::open(file_path)?;
    let mut buffer: Vec<u8> = Vec::new();
    file.read_to_end(&mut buffer)?;
    // Reject files too small to contain the metadata block the code skips.
    if buffer.len() < 20 {
        return Err(std::io::Error::new(
            std::io::ErrorKind::UnexpectedEof,
            "texture file too small for header",
        ));
    }
    // Decode the metadata.
    let mut cursor = std::io::Cursor::new(&buffer[4..]);
    let img_width = cursor.read_u32::<byteorder::LittleEndian>()?;
    let img_height = cursor.read_u32::<byteorder::LittleEndian>()?;
    // Skip the remaining metadata bytes.
    // NOTE(review): `cursor` is positioned relative to `buffer[4..]`, but the
    // slice below indexes the full buffer — pixel data is taken from absolute
    // offset 20, not 24. Preserved as-is; confirm against the on-disk format.
    cursor.set_position(20);
    // Extract the image data.
    let image_data = buffer[cursor.position() as usize..].to_vec();
    let img =
        image::ImageBuffer::<Rgba<u8>, _>::from_raw(img_width, img_height, image_data)
            .ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "image dimensions do not match pixel data length",
                )
            })?;
    // Save the image, mapping the image-crate error into io::Error.
    img.save(output_path)
        .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))?;
    Ok(())
}
/// Entry point: `texture-decoder <input> <output.png>`.
///
/// Fix: the original indexed `args[1]`/`args[2]` unconditionally and
/// panicked when arguments were missing; now exits with a usage message.
fn main() {
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 3 {
        eprintln!("Usage: texture-decoder <input> <output.png>");
        std::process::exit(1);
    }
    let input = &args[1];
    let output = &args[2];
    if let Err(err) = decode_texture(input, output) {
        eprintln!("Error: {}", err)
    }
}

107
tools/README.md Normal file
View File

@@ -0,0 +1,107 @@
# Инструменты в каталоге `tools`
## `archive_roundtrip_validator.py`
Скрипт предназначен для **валидации документации по форматам NRes и RsLi на реальных данных игры**.
Что делает утилита:
- находит архивы по сигнатуре заголовка (а не по расширению файла);
- распаковывает архивы в структуру `manifest.json + entries/*`;
- собирает архивы обратно из `manifest.json`;
- выполняет проверку `unpack -> repack -> byte-compare`;
- формирует отчёт о расхождениях со спецификацией.
Скрипт не изменяет оригинальные файлы игры. Рабочие файлы создаются только в указанном `--workdir` (или во временной папке).
## Поддерживаемые сигнатуры
- `NRes` (`4E 52 65 73`)
- `RsLi` в файловом формате библиотеки: `NL 00 01`
## Основные команды
Сканирование архива по сигнатурам:
```bash
python3 tools/archive_roundtrip_validator.py scan --input tmp/gamedata
```
Распаковка/упаковка одного NRes:
```bash
python3 tools/archive_roundtrip_validator.py nres-unpack \
--archive tmp/gamedata/sounds.lib \
--output tmp/work/nres_sounds
python3 tools/archive_roundtrip_validator.py nres-pack \
--manifest tmp/work/nres_sounds/manifest.json \
--output tmp/work/sounds.repacked.lib
```
Распаковка/упаковка одного RsLi:
```bash
python3 tools/archive_roundtrip_validator.py rsli-unpack \
--archive tmp/gamedata/sprites.lib \
--output tmp/work/rsli_sprites
python3 tools/archive_roundtrip_validator.py rsli-pack \
--manifest tmp/work/rsli_sprites/manifest.json \
--output tmp/work/sprites.repacked.lib
```
Полная валидация документации на всём наборе данных:
```bash
python3 tools/archive_roundtrip_validator.py validate \
--input tmp/gamedata \
--workdir tmp/validation_work \
--report tmp/validation_report.json \
--fail-on-diff
```
## Формат распаковки
Для каждого архива создаются:
- `manifest.json` — все поля заголовка, записи, индексы, смещения, контрольные суммы;
- `entries/*.bin` — payload-файлы.
Имена файлов в `entries` включают индекс записи, поэтому коллизии одинаковых имён внутри архива обрабатываются корректно.
## `init_testdata.py`
Скрипт инициализирует тестовые данные по сигнатурам архивов из спецификации:
- `NRes` (`4E 52 65 73`);
- `RsLi` (`NL 00 01`).
Что делает утилита:
- рекурсивно сканирует все файлы в `--input`;
- копирует найденные `NRes` в `--output/nres/`;
- копирует найденные `RsLi` в `--output/rsli/`;
- сохраняет относительный путь исходного файла внутри целевого каталога;
- создаёт целевые каталоги автоматически, если их нет.
Базовый запуск:
```bash
python3 tools/init_testdata.py --input tmp/gamedata --output testdata
```
Если целевой файл уже существует, скрипт спрашивает подтверждение перезаписи (`yes/no/all/quit`).
Для перезаписи без вопросов используйте `--force`:
```bash
python3 tools/init_testdata.py --input tmp/gamedata --output testdata --force
```
Проверки надёжности:
- `--input` должен существовать и быть каталогом;
- если `--output` указывает на существующий файл, скрипт завершится с ошибкой;
- если `--output` расположен внутри `--input`, каталог вывода исключается из сканирования;
- если `stdin` неинтерактивный и требуется перезапись, нужно явно указать `--force`.

View File

@@ -0,0 +1,944 @@
#!/usr/bin/env python3
"""
Roundtrip tools for NRes and RsLi archives.
The script can:
1) scan archives by header signature (ignores file extensions),
2) unpack / pack NRes archives,
3) unpack / pack RsLi archives,
4) validate docs assumptions by full roundtrip and byte-to-byte comparison.
"""
from __future__ import annotations
import argparse
import hashlib
import json
import re
import shutil
import struct
import tempfile
import zlib
from pathlib import Path
from typing import Any
MAGIC_NRES = b"NRes"
MAGIC_RSLI = b"NL\x00\x01"
class ArchiveFormatError(RuntimeError):
    """Raised when a file violates the documented NRes/RsLi format."""

    pass
def sha256_hex(data: bytes) -> str:
    """Return the SHA-256 digest of *data* as a lowercase hex string."""
    digest = hashlib.sha256()
    digest.update(data)
    return digest.hexdigest()
def safe_component(value: str, fallback: str = "item", max_len: int = 80) -> str:
    """Sanitize *value* into a filesystem-safe path component.

    Runs of characters outside ``[A-Za-z0-9._-]`` collapse into single
    underscores, leading/trailing ``.``/``_``/``-`` are stripped, and the
    result is truncated to *max_len*.  *fallback* is used when nothing
    survives sanitization.
    """
    sanitized = re.sub(r"[^A-Za-z0-9._-]+", "_", value).strip("._-")
    return (sanitized or fallback)[:max_len]
def first_diff(a: bytes, b: bytes) -> tuple[int | None, str | None]:
    """Locate the first difference between two byte strings.

    Returns ``(None, None)`` when equal; otherwise the differing index and a
    short description — either a byte mismatch (``"ab!=cd"`` hex) or, when one
    input is a prefix of the other, a length mismatch.
    """
    if a == b:
        return None, None
    for idx, (byte_a, byte_b) in enumerate(zip(a, b)):
        if byte_a != byte_b:
            return idx, f"{byte_a:02x}!={byte_b:02x}"
    return min(len(a), len(b)), f"len {len(a)}!={len(b)}"
def load_json(path: Path) -> dict[str, Any]:
    """Read and parse the UTF-8 JSON file at *path*."""
    return json.loads(path.read_text(encoding="utf-8"))
def dump_json(path: Path, payload: dict[str, Any]) -> None:
    """Write *payload* as pretty-printed UTF-8 JSON (with a trailing newline),
    creating parent directories as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    text = json.dumps(payload, indent=2, ensure_ascii=False)
    path.write_text(text + "\n", encoding="utf-8")
def xor_stream(data: bytes, key16: int) -> bytes:
    """Apply the RsLi 16-bit rolling XOR keystream to *data*.

    The two key bytes evolve independently of the payload, so the transform
    is its own inverse: applying the same key twice restores the input.
    """
    low = key16 & 0xFF
    high = (key16 >> 8) & 0xFF
    result = bytearray()
    for value in data:
        low = (high ^ ((low << 1) & 0xFF)) & 0xFF
        result.append(value ^ low)
        high = (low ^ ((high >> 1) & 0xFF)) & 0xFF
    return bytes(result)
def lzss_decompress_simple(data: bytes, expected_size: int) -> bytes:
    """Decompress an LZSS-packed RsLi payload.

    Classic LZSS with a 4 KiB ring buffer pre-filled with spaces (0x20) and a
    write position starting at 0xFEE.  Each control byte supplies 8 flags
    (consumed LSB first): flag=1 copies one literal byte, flag=0 reads a
    2-byte back-reference into the ring buffer.  Raises ArchiveFormatError
    when the output does not reach exactly *expected_size* bytes.
    """
    ring = bytearray([0x20] * 0x1000)
    ring_pos = 0xFEE
    out = bytearray()
    in_pos = 0
    control = 0
    bits_left = 0
    while len(out) < expected_size and in_pos < len(data):
        if bits_left == 0:
            # Fetch the next control byte; its 8 bits select literal vs reference.
            control = data[in_pos]
            in_pos += 1
            bits_left = 8
        if control & 1:
            # Literal: copy one input byte to the output and the ring buffer.
            if in_pos >= len(data):
                break
            byte = data[in_pos]
            in_pos += 1
            out.append(byte)
            ring[ring_pos] = byte
            ring_pos = (ring_pos + 1) & 0x0FFF
        else:
            # Back-reference: two bytes encode a 12-bit ring offset and a length.
            if in_pos + 1 >= len(data):
                break
            low = data[in_pos]
            high = data[in_pos + 1]
            in_pos += 2
            # Real files indicate nibble layout opposite to common LZSS variant:
            # high nibble extends offset, low nibble stores (length - 3).
            offset = low | ((high & 0xF0) << 4)
            length = (high & 0x0F) + 3
            for step in range(length):
                byte = ring[(offset + step) & 0x0FFF]
                out.append(byte)
                ring[ring_pos] = byte
                ring_pos = (ring_pos + 1) & 0x0FFF
            if len(out) >= expected_size:
                break
        control >>= 1
        bits_left -= 1
    if len(out) != expected_size:
        raise ArchiveFormatError(
            f"LZSS size mismatch: expected {expected_size}, got {len(out)}"
        )
    return bytes(out)
def decode_rsli_payload(
    packed: bytes, method: int, sort_to_original: int, unpacked_size: int
) -> bytes:
    """Decode one RsLi entry payload according to its *method* field.

    Supported methods: 0x000 stored, 0x020 XOR, 0x040 LZSS, 0x060 XOR+LZSS,
    0x100 deflate (raw, with a zlib-wrapped fallback).  The XOR key is the low
    16 bits of *sort_to_original*.  Raises ArchiveFormatError on an unknown
    method or when the decoded length disagrees with *unpacked_size*.
    """
    key16 = sort_to_original & 0xFFFF

    def inflate(blob: bytes) -> bytes:
        # Try raw deflate first; fall back to the zlib-wrapped container.
        try:
            return zlib.decompress(blob, -15)
        except zlib.error:
            return zlib.decompress(blob)

    if method == 0x000:
        result = packed
    elif method == 0x020:
        if len(packed) < unpacked_size:
            raise ArchiveFormatError(
                f"method 0x20 packed too short: {len(packed)} < {unpacked_size}"
            )
        result = xor_stream(packed[:unpacked_size], key16)
    elif method == 0x040:
        result = lzss_decompress_simple(packed, unpacked_size)
    elif method == 0x060:
        result = lzss_decompress_simple(xor_stream(packed, key16), unpacked_size)
    elif method == 0x100:
        result = inflate(packed)
    else:
        raise ArchiveFormatError(f"unsupported RsLi method: 0x{method:03X}")
    if len(result) != unpacked_size:
        raise ArchiveFormatError(
            f"unpacked_size mismatch: expected {unpacked_size}, got {len(result)}"
        )
    return result
def detect_archive_type(path: Path) -> str | None:
    """Sniff the archive type of *path* from its first 4 bytes.

    Returns ``"nres"``, ``"rsli"``, or ``None`` when the magic is unknown or
    the file cannot be read.
    """
    try:
        with path.open("rb") as handle:
            magic = handle.read(4)
    except OSError:
        return None
    if magic == MAGIC_NRES:
        return "nres"
    return "rsli" if magic == MAGIC_RSLI else None
def scan_archives(root: Path) -> list[dict[str, Any]]:
    """Recursively find NRes/RsLi archives under *root* by header signature.

    Extensions are ignored; detection is purely magic-byte based.  Returns one
    record per archive (path, path relative to *root*, type, size), ordered by
    path.
    """
    records: list[dict[str, Any]] = []
    for candidate in sorted(root.rglob("*")):
        if not candidate.is_file():
            continue
        kind = detect_archive_type(candidate)
        if not kind:
            continue
        records.append(
            {
                "path": str(candidate),
                "relative_path": str(candidate.relative_to(root)),
                "type": kind,
                "size": candidate.stat().st_size,
            }
        )
    return records
def parse_nres(data: bytes, source: str = "<memory>") -> dict[str, Any]:
    """Parse an in-memory NRes archive and validate it against the spec.

    Hard format violations (bad magic, truncated file/directory, impossible
    directory offset) raise ArchiveFormatError; soft deviations from the
    documented spec are collected into the returned ``issues`` list instead.
    Returns a dict with ``header``, ``entries``, and ``issues``.
    """
    if len(data) < 16:
        raise ArchiveFormatError(f"{source}: NRes too short ({len(data)} bytes)")
    # Fixed 16-byte header: magic, version, entry count, total size (all LE).
    magic, version, entry_count, total_size = struct.unpack_from("<4sIII", data, 0)
    if magic != MAGIC_NRES:
        raise ArchiveFormatError(f"{source}: invalid NRes magic")
    issues: list[str] = []
    if total_size != len(data):
        issues.append(
            f"header.total_size={total_size} != actual_size={len(data)} (spec 1.2)"
        )
    if version != 0x100:
        issues.append(f"version=0x{version:08X} != 0x00000100 (spec 1.2)")
    # The directory of 64-byte records sits at the end of the archive.
    directory_offset = total_size - entry_count * 64
    if directory_offset < 16 or directory_offset > len(data):
        raise ArchiveFormatError(
            f"{source}: invalid directory offset {directory_offset} for entry_count={entry_count}"
        )
    if directory_offset + entry_count * 64 != len(data):
        issues.append(
            "directory_offset + entry_count*64 != file_size (spec 1.3)"
        )
    entries: list[dict[str, Any]] = []
    for index in range(entry_count):
        offset = directory_offset + index * 64
        if offset + 64 > len(data):
            raise ArchiveFormatError(f"{source}: truncated directory entry {index}")
        # 64-byte record: type, three attributes + size, 36-byte name,
        # data offset, sort index (all little-endian).
        (
            type_id,
            attr1,
            attr2,
            size,
            attr3,
            name_raw,
            data_offset,
            sort_index,
        ) = struct.unpack_from("<IIIII36sII", data, offset)
        name_bytes = name_raw.split(b"\x00", 1)[0]
        name = name_bytes.decode("latin1", errors="replace")
        entries.append(
            {
                "index": index,
                "type_id": type_id,
                "attr1": attr1,
                "attr2": attr2,
                "size": size,
                "attr3": attr3,
                "name": name,
                "name_bytes_hex": name_bytes.hex(),
                "name_raw_hex": name_raw.hex(),
                "data_offset": data_offset,
                "sort_index": sort_index,
            }
        )
    # Spec checks.
    # The sort_index column should reproduce a case-insensitive name ordering.
    expected_sort = sorted(
        range(entry_count),
        key=lambda idx: bytes.fromhex(entries[idx]["name_bytes_hex"]).lower(),
    )
    current_sort = [item["sort_index"] for item in entries]
    if current_sort != expected_sort:
        issues.append(
            "sort_index table does not match case-insensitive name order (spec 1.4)"
        )
    # Sorted by (index, offset, size): the overlap/padding checks below
    # therefore assume payloads are laid out in entry-index order.
    data_regions = sorted(
        (
            item["index"],
            item["data_offset"],
            item["size"],
        )
        for item in entries
    )
    for idx, data_offset, size in data_regions:
        if data_offset % 8 != 0:
            issues.append(f"entry {idx}: data_offset={data_offset} not aligned to 8 (spec 1.5)")
        if data_offset < 16 or data_offset + size > directory_offset:
            issues.append(
                f"entry {idx}: data range [{data_offset}, {data_offset + size}) out of data area (spec 1.3)"
            )
    for i in range(len(data_regions) - 1):
        _, start, size = data_regions[i]
        _, next_start, _ = data_regions[i + 1]
        if start + size > next_start:
            issues.append(
                f"entry overlap at data_offset={start}, next={next_start}"
            )
        # Bytes between consecutive payloads must be zero padding.
        padding = data[start + size : next_start]
        if any(padding):
            issues.append(
                f"non-zero padding after data block at offset={start + size} (spec 1.5)"
            )
    return {
        "format": "NRes",
        "header": {
            "magic": "NRes",
            "version": version,
            "entry_count": entry_count,
            "total_size": total_size,
            "directory_offset": directory_offset,
        },
        "entries": entries,
        "issues": issues,
    }
def build_nres_name_field(entry: dict[str, Any]) -> bytes:
    """Build the fixed 36-byte, NUL-padded name field of an NRes directory row.

    Prefers the exact original bytes (``name_bytes_hex``) when present so a
    repack stays byte-identical; otherwise latin-1-encodes the textual
    ``name``. The payload is capped at 35 bytes, which guarantees at least
    one trailing NUL terminator.
    """
    if "name_bytes_hex" in entry:
        raw = bytes.fromhex(entry["name_bytes_hex"])
    else:
        raw = entry.get("name", "").encode("latin1", errors="replace")
    capped = raw[:35]
    return capped.ljust(36, b"\x00")
def unpack_nres_file(archive_path: Path, out_dir: Path, source_root: Path | None = None) -> dict[str, Any]:
    """Extract every entry of an NRes archive into ``out_dir`` and write a manifest.

    Payloads land in ``out_dir/entries/``; ``manifest.json`` records the
    header, per-entry metadata, and SHA-256 digests so the archive can later
    be repacked byte-identically by pack_nres_manifest.

    Raises ArchiveFormatError when an entry's data range falls outside the
    file.
    """
    raw = archive_path.read_bytes()
    parsed = parse_nres(raw, source=str(archive_path))

    entries_dir = out_dir / "entries"
    out_dir.mkdir(parents=True, exist_ok=True)
    entries_dir.mkdir(parents=True, exist_ok=True)

    rel = str(archive_path.relative_to(source_root)) if source_root else str(archive_path)
    manifest: dict[str, Any] = {
        "format": "NRes",
        "source_path": str(archive_path),
        "source_relative_path": rel,
        "header": parsed["header"],
        "entries": [],
        "issues": parsed["issues"],
        "source_sha256": sha256_hex(raw),
    }

    for entry in parsed["entries"]:
        start = entry["data_offset"]
        stop = start + entry["size"]
        if start < 0 or stop > len(raw):
            raise ArchiveFormatError(
                f"{archive_path}: entry {entry['index']} data range outside file"
            )
        payload = raw[start:stop]
        base = safe_component(entry["name"], fallback=f"entry_{entry['index']:05d}")
        # Encode type/attr words into the file name for quick eyeballing.
        file_name = (
            f"{entry['index']:05d}__{base}"
            f"__t{entry['type_id']:08X}_a1{entry['attr1']:08X}_a2{entry['attr2']:08X}.bin"
        )
        (entries_dir / file_name).write_bytes(payload)
        record = dict(entry)
        record["data_file"] = f"entries/{file_name}"
        record["sha256"] = sha256_hex(payload)
        manifest["entries"].append(record)

    dump_json(out_dir / "manifest.json", manifest)
    return manifest
def pack_nres_manifest(manifest_path: Path, out_file: Path) -> bytes:
    """Rebuild an NRes archive from a manifest produced by unpack_nres_file.

    Payloads are written in manifest order, each zero-padded to an 8-byte
    boundary (spec 1.5); the 64-byte directory records follow; the 16-byte
    header is patched in last because it stores the final total size.

    Returns the packed archive bytes (also written to ``out_file``).

    Raises ArchiveFormatError when the manifest is not an NRes manifest.
    """
    manifest = load_json(manifest_path)
    if manifest.get("format") != "NRes":
        raise ArchiveFormatError(f"{manifest_path}: not an NRes manifest")
    entries = manifest["entries"]
    count = len(entries)
    version = int(manifest.get("header", {}).get("version", 0x100))

    # Reserve the 16-byte header; it is filled via struct.pack_into at the end.
    out = bytearray(b"\x00" * 16)
    data_offsets: list[int] = []
    data_sizes: list[int] = []
    for entry in entries:
        payload_path = manifest_path.parent / entry["data_file"]
        payload = payload_path.read_bytes()
        offset = len(out)
        out.extend(payload)
        # Zero-pad each payload to an 8-byte boundary (spec 1.5).
        padding = (-len(out)) % 8
        if padding:
            out.extend(b"\x00" * padding)
        data_offsets.append(offset)
        data_sizes.append(len(payload))
    # Directory records start here; parsers derive this position as
    # total_size - count * 64, so it is not stored explicitly.

    def sort_name(idx: int) -> bytes:
        # Use the same byte source as build_nres_name_field so the emitted
        # sort_index always matches the names actually written (spec 1.4),
        # even for hand-written manifests that lack name_bytes_hex. The old
        # fallback of b"" disagreed with the written name field.
        entry = entries[idx]
        if "name_bytes_hex" in entry:
            return bytes.fromhex(entry["name_bytes_hex"])
        return entry.get("name", "").encode("latin1", errors="replace")

    # Case-insensitive name order, as checked by parse_nres (spec 1.4).
    expected_sort = sorted(range(count), key=lambda idx: sort_name(idx).lower())
    for index, entry in enumerate(entries):
        name_field = build_nres_name_field(entry)
        out.extend(
            struct.pack(
                "<IIIII36sII",
                int(entry["type_id"]),
                int(entry["attr1"]),
                int(entry["attr2"]),
                data_sizes[index],
                int(entry["attr3"]),
                name_field,
                data_offsets[index],
                expected_sort[index],
            )
        )
    total_size = len(out)
    struct.pack_into("<4sIII", out, 0, MAGIC_NRES, version, count, total_size)
    out_file.parent.mkdir(parents=True, exist_ok=True)
    out_file.write_bytes(out)
    return bytes(out)
def parse_rsli(data: bytes, source: str = "<memory>") -> dict[str, Any]:
    """Parse an RsLi archive image into a dict of header, entries, and issues.

    Layout: a 32-byte header, an XOR-scrambled table of 32-byte entry rows,
    payload data, and an optional 6-byte "AO" overlay trailer at the end of
    the file.

    Structurally unusable input raises ArchiveFormatError; soft spec
    violations are collected into the returned ``issues`` list instead.
    """
    if len(data) < 32:
        raise ArchiveFormatError(f"{source}: RsLi too short ({len(data)} bytes)")
    if data[:4] != MAGIC_RSLI:
        raise ArchiveFormatError(f"{source}: invalid RsLi magic")
    issues: list[str] = []
    # NOTE(review): bytes 2 and 3 are part of MAGIC_RSLI (b"NL\x00\x01"), so
    # after the magic check above the two issue branches below can never
    # trigger — confirm whether they are intentionally kept as documentation.
    reserved_zero = data[2]
    version = data[3]
    entry_count = struct.unpack_from("<h", data, 4)[0]
    presorted_flag = struct.unpack_from("<H", data, 14)[0]
    seed = struct.unpack_from("<I", data, 20)[0]
    if reserved_zero != 0:
        issues.append(f"header[2]={reserved_zero} != 0 (spec 2.2)")
    if version != 1:
        issues.append(f"version={version} != 1 (spec 2.2)")
    if entry_count < 0:
        raise ArchiveFormatError(f"{source}: negative entry_count={entry_count}")
    # The entry table follows the header directly: 32 bytes per entry.
    table_offset = 32
    table_size = entry_count * 32
    if table_offset + table_size > len(data):
        raise ArchiveFormatError(
            f"{source}: encrypted table out of file bounds ({table_offset}+{table_size}>{len(data)})"
        )
    table_encrypted = data[table_offset : table_offset + table_size]
    # The table is XOR-scrambled with a stream keyed by the low 16 bits of seed.
    table_plain = xor_stream(table_encrypted, seed & 0xFFFF)
    # Optional trailer: "AO" + u32 at EOF carries an overlay offset that is
    # added to every entry's data_offset.
    trailer: dict[str, Any] = {"present": False}
    overlay_offset = 0
    if len(data) >= 6 and data[-6:-4] == b"AO":
        overlay_offset = struct.unpack_from("<I", data, len(data) - 4)[0]
        trailer = {
            "present": True,
            "signature": "AO",
            "overlay_offset": overlay_offset,
            "raw_hex": data[-6:].hex(),
        }
    entries: list[dict[str, Any]] = []
    sort_values: list[int] = []
    for index in range(entry_count):
        # Decrypted row layout: 12-byte name, 4 reserved bytes, i16 flags,
        # i16 sort_to_original, then u32 unpacked_size/data_offset/packed_size.
        row = table_plain[index * 32 : (index + 1) * 32]
        name_raw = row[0:12]
        reserved4 = row[12:16]
        flags_signed, sort_to_original = struct.unpack_from("<hh", row, 16)
        unpacked_size, data_offset, packed_size = struct.unpack_from("<III", row, 20)
        # Compression method is the bit field selected by mask 0x1E0.
        method = flags_signed & 0x1E0
        name = name_raw.split(b"\x00", 1)[0].decode("latin1", errors="replace")
        effective_offset = data_offset + overlay_offset
        entries.append(
            {
                "index": index,
                "name": name,
                "name_raw_hex": name_raw.hex(),
                "reserved_raw_hex": reserved4.hex(),
                "flags_signed": flags_signed,
                "flags_u16": flags_signed & 0xFFFF,
                "method": method,
                "sort_to_original": sort_to_original,
                "unpacked_size": unpacked_size,
                "data_offset": data_offset,
                "effective_data_offset": effective_offset,
                "packed_size": packed_size,
            }
        )
        sort_values.append(sort_to_original)
        if effective_offset < 0:
            issues.append(f"entry {index}: negative effective_data_offset={effective_offset}")
        elif effective_offset + packed_size > len(data):
            end = effective_offset + packed_size
            # Deflate entries (method 0x100) in real game data may declare one
            # byte past EOF; report that case distinctly rather than as a hard
            # range violation.
            if method == 0x100 and end == len(data) + 1:
                issues.append(
                    f"entry {index}: deflate packed_size reaches EOF+1 ({end}); "
                    "observed in game data, likely decoder lookahead byte"
                )
            else:
                issues.append(
                    f"entry {index}: packed range [{effective_offset}, {end}) out of file"
                )
    # 0xABBA marks a presorted archive: sort_to_original must then be a
    # permutation of [0..N-1].
    if presorted_flag == 0xABBA:
        if sorted(sort_values) != list(range(entry_count)):
            issues.append(
                "presorted flag is 0xABBA but sort_to_original is not a permutation [0..N-1] (spec 2.2/2.4)"
            )
    return {
        "format": "RsLi",
        "header_raw_hex": data[:32].hex(),
        "header": {
            "magic": "NL\\x00\\x01",
            "entry_count": entry_count,
            "seed": seed,
            "presorted_flag": presorted_flag,
        },
        "entries": entries,
        "issues": issues,
        "trailer": trailer,
    }
def unpack_rsli_file(archive_path: Path, out_dir: Path, source_root: Path | None = None) -> dict[str, Any]:
    """Extract an RsLi archive into ``out_dir`` and write a manifest.json.

    Every entry's raw (still compressed) payload is always saved; decoding to
    the unpacked form is attempted per entry and failures are recorded as
    issues rather than aborting, so partially understood archives still yield
    a manifest usable for byte-identical repacking.
    """
    data = archive_path.read_bytes()
    parsed = parse_rsli(data, source=str(archive_path))
    out_dir.mkdir(parents=True, exist_ok=True)
    entries_dir = out_dir / "entries"
    entries_dir.mkdir(parents=True, exist_ok=True)
    manifest: dict[str, Any] = {
        "format": "RsLi",
        "source_path": str(archive_path),
        "source_relative_path": str(archive_path.relative_to(source_root)) if source_root else str(archive_path),
        "source_size": len(data),
        "header_raw_hex": parsed["header_raw_hex"],
        "header": parsed["header"],
        "entries": [],
        "issues": list(parsed["issues"]),
        "trailer": parsed["trailer"],
        "source_sha256": sha256_hex(data),
    }
    for entry in parsed["entries"]:
        # Slice the packed payload; the offset already includes any AO overlay.
        begin = int(entry["effective_data_offset"])
        end = begin + int(entry["packed_size"])
        packed = data[begin:end]
        base = safe_component(entry["name"], fallback=f"entry_{entry['index']:05d}")
        packed_name = f"{entry['index']:05d}__{base}__packed.bin"
        (entries_dir / packed_name).write_bytes(packed)
        manifest_entry = dict(entry)
        manifest_entry["packed_file"] = f"entries/{packed_name}"
        manifest_entry["packed_file_size"] = len(packed)
        manifest_entry["packed_sha256"] = sha256_hex(packed)
        try:
            # Best effort: decode with the entry's declared method; an
            # unsupported method becomes an issue, not a fatal error.
            unpacked = decode_rsli_payload(
                packed=packed,
                method=int(entry["method"]),
                sort_to_original=int(entry["sort_to_original"]),
                unpacked_size=int(entry["unpacked_size"]),
            )
            unpacked_name = f"{entry['index']:05d}__{base}__unpacked.bin"
            (entries_dir / unpacked_name).write_bytes(unpacked)
            manifest_entry["unpacked_file"] = f"entries/{unpacked_name}"
            manifest_entry["unpacked_sha256"] = sha256_hex(unpacked)
        except ArchiveFormatError as exc:
            manifest_entry["unpack_error"] = str(exc)
            manifest["issues"].append(
                f"entry {entry['index']}: cannot decode method 0x{entry['method']:03X}: {exc}"
            )
        manifest["entries"].append(manifest_entry)
    dump_json(out_dir / "manifest.json", manifest)
    return manifest
def _pack_i16(value: int) -> int:
if not (-32768 <= int(value) <= 32767):
raise ArchiveFormatError(f"int16 overflow: {value}")
return int(value)
def pack_rsli_manifest(manifest_path: Path, out_file: Path) -> bytes:
    """Rebuild an RsLi archive byte-identically from an unpack manifest.

    The original 32-byte header is restored from ``header_raw_hex`` (with the
    magic, entry count, and seed re-patched), the entry table is re-encrypted
    with the seed, and packed payloads are written back at their original
    offsets. Overlapping payload ranges are accepted only when their bytes
    agree, which preserves archives that share/deduplicate data.

    Returns the packed archive bytes (also written to ``out_file``).
    """
    manifest = load_json(manifest_path)
    if manifest.get("format") != "RsLi":
        raise ArchiveFormatError(f"{manifest_path}: not an RsLi manifest")
    entries = manifest["entries"]
    count = len(entries)
    header_raw = bytes.fromhex(manifest["header_raw_hex"])
    if len(header_raw) != 32:
        raise ArchiveFormatError(f"{manifest_path}: header_raw_hex must be 32 bytes")
    header = bytearray(header_raw)
    # Re-patch the fields the parser reads; the rest stays as captured.
    header[:4] = MAGIC_RSLI
    struct.pack_into("<h", header, 4, count)
    seed = int(manifest["header"]["seed"])
    struct.pack_into("<I", header, 20, seed)
    rows = bytearray()
    packed_chunks: list[tuple[dict[str, Any], bytes]] = []
    for entry in entries:
        packed_path = manifest_path.parent / entry["packed_file"]
        packed = packed_path.read_bytes()
        # The payload file may be shorter than the declared packed size
        # (e.g. the EOF+1 deflate quirk), but never longer.
        declared_size = int(entry["packed_size"])
        if len(packed) > declared_size:
            raise ArchiveFormatError(
                f"{packed_path}: packed size {len(packed)} > manifest packed_size {declared_size}"
            )
        data_offset = int(entry["data_offset"])
        packed_chunks.append((entry, packed))
        # Rebuild the 32-byte plaintext table row for this entry.
        row = bytearray(32)
        name_raw = bytes.fromhex(entry["name_raw_hex"])
        reserved_raw = bytes.fromhex(entry["reserved_raw_hex"])
        if len(name_raw) != 12 or len(reserved_raw) != 4:
            raise ArchiveFormatError(
                f"entry {entry['index']}: invalid name/reserved raw length"
            )
        row[0:12] = name_raw
        row[12:16] = reserved_raw
        struct.pack_into(
            "<hhIII",
            row,
            16,
            _pack_i16(int(entry["flags_signed"])),
            _pack_i16(int(entry["sort_to_original"])),
            int(entry["unpacked_size"]),
            data_offset,
            declared_size,
        )
        rows.extend(row)
    # Re-encrypt the table with the same XOR stream parse_rsli uses to decrypt.
    encrypted_table = xor_stream(bytes(rows), seed & 0xFFFF)
    trailer = manifest.get("trailer", {})
    trailer_raw = b""
    if trailer.get("present"):
        raw_hex = trailer.get("raw_hex", "")
        trailer_raw = bytes.fromhex(raw_hex)
        if len(trailer_raw) != 6:
            raise ArchiveFormatError("trailer raw length must be 6 bytes")
    # Size of the file body before the optional trailer; when source_size is
    # absent, fall back to the maximum extent reached by the payloads.
    source_size = manifest.get("source_size")
    table_end = 32 + count * 32
    if source_size is not None:
        pre_trailer_size = int(source_size) - len(trailer_raw)
        if pre_trailer_size < table_end:
            raise ArchiveFormatError(
                f"invalid source_size={source_size}: smaller than header+table"
            )
    else:
        pre_trailer_size = table_end
        for entry, packed in packed_chunks:
            pre_trailer_size = max(
                pre_trailer_size, int(entry["data_offset"]) + len(packed)
            )
    out = bytearray(pre_trailer_size)
    out[0:32] = header
    out[32:table_end] = encrypted_table
    # Track which output bytes are already written so overlapping entries are
    # only accepted when they carry identical data.
    occupied = bytearray(pre_trailer_size)
    occupied[0:table_end] = b"\x01" * table_end
    for entry, packed in packed_chunks:
        base_offset = int(entry["data_offset"])
        for index, byte in enumerate(packed):
            pos = base_offset + index
            if pos >= pre_trailer_size:
                raise ArchiveFormatError(
                    f"entry {entry['index']}: data write at {pos} beyond output size {pre_trailer_size}"
                )
            if occupied[pos] and out[pos] != byte:
                raise ArchiveFormatError(
                    f"entry {entry['index']}: overlapping packed data conflict at offset {pos}"
                )
            out[pos] = byte
            occupied[pos] = 1
    out.extend(trailer_raw)
    if source_size is not None and len(out) != int(source_size):
        raise ArchiveFormatError(
            f"packed size {len(out)} != source_size {source_size} from manifest"
        )
    out_file.parent.mkdir(parents=True, exist_ok=True)
    out_file.write_bytes(out)
    return bytes(out)
def cmd_scan(args: argparse.Namespace) -> int:
    """CLI handler: scan a directory tree and list the detected archives."""
    root = Path(args.input).resolve()
    found = scan_archives(root)
    if args.json:
        print(json.dumps(found, ensure_ascii=False, indent=2))
        return 0
    print(f"Found {len(found)} archive(s) in {root}")
    for record in found:
        print(f"{record['type']:4} {record['size']:10d} {record['relative_path']}")
    return 0
def cmd_nres_unpack(args: argparse.Namespace) -> int:
    """CLI handler: unpack one NRes archive into a directory."""
    source = Path(args.archive).resolve()
    target = Path(args.output).resolve()
    result = unpack_nres_file(source, target)
    print(f"NRes unpacked: {source}")
    print(f"Manifest: {target / 'manifest.json'}")
    print(f"Entries : {len(result['entries'])}")
    issues = result["issues"]
    if issues:
        print("Issues:")
        for issue in issues:
            print(f"- {issue}")
    return 0
def cmd_nres_pack(args: argparse.Namespace) -> int:
    """CLI handler: rebuild an NRes archive from an unpack manifest."""
    manifest_file = Path(args.manifest).resolve()
    target = Path(args.output).resolve()
    blob = pack_nres_manifest(manifest_file, target)
    print(f"NRes packed: {target} ({len(blob)} bytes, sha256={sha256_hex(blob)})")
    return 0
def cmd_rsli_unpack(args: argparse.Namespace) -> int:
    """CLI handler: unpack one RsLi archive into a directory."""
    source = Path(args.archive).resolve()
    target = Path(args.output).resolve()
    result = unpack_rsli_file(source, target)
    print(f"RsLi unpacked: {source}")
    print(f"Manifest: {target / 'manifest.json'}")
    print(f"Entries : {len(result['entries'])}")
    issues = result["issues"]
    if issues:
        print("Issues:")
        for issue in issues:
            print(f"- {issue}")
    return 0
def cmd_rsli_pack(args: argparse.Namespace) -> int:
    """CLI handler: rebuild an RsLi archive from an unpack manifest."""
    manifest_file = Path(args.manifest).resolve()
    target = Path(args.output).resolve()
    blob = pack_rsli_manifest(manifest_file, target)
    print(f"RsLi packed: {target} ({len(blob)} bytes, sha256={sha256_hex(blob)})")
    return 0
def cmd_validate(args: argparse.Namespace) -> int:
    """CLI handler: unpack and repack every archive under --input, compare bytes.

    For each detected archive the roundtrip result (match flag, hashes, first
    differing offset, spec issues) is collected into a report; a JSON copy is
    written when --report is given. An auto-created temporary workdir is
    always removed; an explicit --workdir only when --cleanup is set.
    """
    input_root = Path(args.input).resolve()
    archives = scan_archives(input_root)
    temp_created = False
    if args.workdir:
        workdir = Path(args.workdir).resolve()
        workdir.mkdir(parents=True, exist_ok=True)
    else:
        workdir = Path(tempfile.mkdtemp(prefix="nres-rsli-validate-"))
        temp_created = True
    report: dict[str, Any] = {
        "input_root": str(input_root),
        "workdir": str(workdir),
        "archives_total": len(archives),
        "results": [],
        "summary": {},
    }
    failures = 0
    try:
        for idx, item in enumerate(archives):
            rel = item["relative_path"]
            archive_path = input_root / rel
            # Per-archive marker keeps workdir paths collision-free.
            marker = f"{idx:04d}_{safe_component(rel, fallback='archive')}"
            unpack_dir = workdir / "unpacked" / marker
            repacked_file = workdir / "repacked" / f"{marker}.bin"
            try:
                if item["type"] == "nres":
                    manifest = unpack_nres_file(archive_path, unpack_dir, source_root=input_root)
                    repacked = pack_nres_manifest(unpack_dir / "manifest.json", repacked_file)
                elif item["type"] == "rsli":
                    manifest = unpack_rsli_file(archive_path, unpack_dir, source_root=input_root)
                    repacked = pack_rsli_manifest(unpack_dir / "manifest.json", repacked_file)
                else:
                    continue
                original = archive_path.read_bytes()
                match = original == repacked
                diff_offset, diff_desc = first_diff(original, repacked)
                issues = list(manifest.get("issues", []))
                result = {
                    "relative_path": rel,
                    "type": item["type"],
                    "size_original": len(original),
                    "size_repacked": len(repacked),
                    "sha256_original": sha256_hex(original),
                    "sha256_repacked": sha256_hex(repacked),
                    "match": match,
                    "first_diff_offset": diff_offset,
                    "first_diff": diff_desc,
                    "issues": issues,
                    "entries": len(manifest.get("entries", [])),
                    "error": None,
                }
            except Exception as exc:  # pylint: disable=broad-except
                # Any processing error is reported per archive (match=False)
                # instead of aborting the whole validation run.
                result = {
                    "relative_path": rel,
                    "type": item["type"],
                    "size_original": item["size"],
                    "size_repacked": None,
                    "sha256_original": None,
                    "sha256_repacked": None,
                    "match": False,
                    "first_diff_offset": None,
                    "first_diff": None,
                    "issues": [f"processing error: {exc}"],
                    "entries": None,
                    "error": str(exc),
                }
            report["results"].append(result)
            # NOTE(review): a byte mismatch always increments failures, so the
            # process exits non-zero even without --fail-on-diff; the later
            # --fail-on-diff check appears redundant — confirm intent.
            if not result["match"]:
                failures += 1
            if result["issues"] and args.fail_on_issues:
                failures += 1
        matches = sum(1 for row in report["results"] if row["match"])
        mismatches = len(report["results"]) - matches
        nres_count = sum(1 for row in report["results"] if row["type"] == "nres")
        rsli_count = sum(1 for row in report["results"] if row["type"] == "rsli")
        issues_total = sum(len(row["issues"]) for row in report["results"])
        report["summary"] = {
            "nres_count": nres_count,
            "rsli_count": rsli_count,
            "matches": matches,
            "mismatches": mismatches,
            "issues_total": issues_total,
        }
        if args.report:
            dump_json(Path(args.report).resolve(), report)
        print(f"Input root : {input_root}")
        print(f"Work dir : {workdir}")
        print(f"NRes archives : {nres_count}")
        print(f"RsLi archives : {rsli_count}")
        print(f"Roundtrip match: {matches}/{len(report['results'])}")
        print(f"Doc issues : {issues_total}")
        if mismatches:
            print("\nMismatches:")
            for row in report["results"]:
                if row["match"]:
                    continue
                print(
                    f"- {row['relative_path']} [{row['type']}] "
                    f"diff@{row['first_diff_offset']}: {row['first_diff']}"
                )
        if issues_total:
            print("\nIssues:")
            for row in report["results"]:
                if not row["issues"]:
                    continue
                print(f"- {row['relative_path']} [{row['type']}]")
                for issue in row["issues"]:
                    print(f" * {issue}")
    finally:
        # Auto-created temp dirs are always removed; explicit workdirs only
        # when --cleanup was requested.
        if temp_created or args.cleanup:
            shutil.rmtree(workdir, ignore_errors=True)
    if failures > 0:
        return 1
    if report["summary"].get("mismatches", 0) > 0 and args.fail_on_diff:
        return 1
    return 0
def build_parser() -> argparse.ArgumentParser:
    """Assemble the CLI: scan, nres-unpack/pack, rsli-unpack/pack, validate."""
    root = argparse.ArgumentParser(
        description="NRes/RsLi tools: scan, unpack, repack, and roundtrip validation."
    )
    commands = root.add_subparsers(dest="command", required=True)

    scan_cmd = commands.add_parser("scan", help="Scan files by header signatures.")
    scan_cmd.add_argument("--input", required=True, help="Root directory to scan.")
    scan_cmd.add_argument("--json", action="store_true", help="Print JSON output.")
    scan_cmd.set_defaults(func=cmd_scan)

    nres_unpack_cmd = commands.add_parser("nres-unpack", help="Unpack a single NRes archive.")
    nres_unpack_cmd.add_argument("--archive", required=True, help="Path to NRes file.")
    nres_unpack_cmd.add_argument("--output", required=True, help="Output directory.")
    nres_unpack_cmd.set_defaults(func=cmd_nres_unpack)

    nres_pack_cmd = commands.add_parser("nres-pack", help="Pack NRes archive from manifest.")
    nres_pack_cmd.add_argument("--manifest", required=True, help="Path to manifest.json.")
    nres_pack_cmd.add_argument("--output", required=True, help="Output file path.")
    nres_pack_cmd.set_defaults(func=cmd_nres_pack)

    rsli_unpack_cmd = commands.add_parser("rsli-unpack", help="Unpack a single RsLi archive.")
    rsli_unpack_cmd.add_argument("--archive", required=True, help="Path to RsLi file.")
    rsli_unpack_cmd.add_argument("--output", required=True, help="Output directory.")
    rsli_unpack_cmd.set_defaults(func=cmd_rsli_unpack)

    rsli_pack_cmd = commands.add_parser("rsli-pack", help="Pack RsLi archive from manifest.")
    rsli_pack_cmd.add_argument("--manifest", required=True, help="Path to manifest.json.")
    rsli_pack_cmd.add_argument("--output", required=True, help="Output file path.")
    rsli_pack_cmd.set_defaults(func=cmd_rsli_pack)

    validate_cmd = commands.add_parser(
        "validate",
        help="Scan all archives and run unpack->repack->byte-compare validation.",
    )
    validate_cmd.add_argument("--input", required=True, help="Root with game data files.")
    validate_cmd.add_argument(
        "--workdir",
        help="Working directory for temporary unpack/repack files. "
        "If omitted, a temporary directory is used and removed automatically.",
    )
    validate_cmd.add_argument("--report", help="Optional JSON report output path.")
    validate_cmd.add_argument(
        "--fail-on-diff",
        action="store_true",
        help="Return non-zero exit code if any byte mismatch exists.",
    )
    validate_cmd.add_argument(
        "--fail-on-issues",
        action="store_true",
        help="Return non-zero exit code if any spec issue was detected.",
    )
    validate_cmd.add_argument(
        "--cleanup",
        action="store_true",
        help="Remove --workdir after completion.",
    )
    validate_cmd.set_defaults(func=cmd_validate)
    return root
def main() -> int:
    """Program entry point: parse CLI arguments and dispatch to the subcommand."""
    arguments = build_parser().parse_args()
    return int(arguments.func(arguments))
if __name__ == "__main__":
raise SystemExit(main())

204
tools/init_testdata.py Normal file
View File

@@ -0,0 +1,204 @@
#!/usr/bin/env python3
"""
Initialize test data folders by archive signatures.
The script scans all files in --input and copies matching archives into:
--output/nres/<relative path>
--output/rsli/<relative path>
"""
from __future__ import annotations
import argparse
import shutil
import sys
from pathlib import Path
MAGIC_NRES = b"NRes"
MAGIC_RSLI = b"NL\x00\x01"
def is_relative_to(path: Path, base: Path) -> bool:
    """Return True when ``path`` is ``base`` itself or located below it.

    Backport of ``Path.is_relative_to`` built on ``relative_to``, which
    raises ValueError for paths outside ``base``.
    """
    try:
        path.relative_to(base)
        return True
    except ValueError:
        return False
def detect_archive_type(path: Path) -> str | None:
    """Classify a file by its 4-byte magic: "nres", "rsli", or None.

    Unreadable files are reported to stderr and treated as non-archives.
    """
    try:
        with path.open("rb") as handle:
            signature = handle.read(4)
    except OSError as exc:
        print(f"[warn] cannot read {path}: {exc}", file=sys.stderr)
        return None
    known = {MAGIC_NRES: "nres", MAGIC_RSLI: "rsli"}
    return known.get(signature)
def scan_archives(input_root: Path, excluded_root: Path | None) -> list[tuple[Path, str]]:
    """Recursively collect (path, type) pairs for NRes/RsLi files under ``input_root``.

    Files below ``excluded_root`` (when given) are skipped so an output tree
    nested inside the input tree is never re-scanned. Results are in sorted
    path order.
    """
    matches: list[tuple[Path, str]] = []
    for candidate in sorted(input_root.rglob("*")):
        if not candidate.is_file():
            continue
        if excluded_root and is_relative_to(candidate.resolve(), excluded_root):
            continue
        kind = detect_archive_type(candidate)
        if kind:
            matches.append((candidate, kind))
    return matches
def confirm_overwrite(path: Path) -> str:
    """Interactively ask whether to overwrite ``path``.

    Returns one of "yes", "no", "all", "quit". EOF on stdin aborts ("quit");
    an empty answer defaults to "no". Re-prompts on unrecognized input.
    """
    prompt = (
        f"File exists: {path}\n"
        "Overwrite? [y]es / [n]o / [a]ll / [q]uit (default: n): "
    )
    replies = {
        "": "no",
        "n": "no",
        "no": "no",
        "y": "yes",
        "yes": "yes",
        "a": "all",
        "all": "all",
        "q": "quit",
        "quit": "quit",
    }
    while True:
        try:
            answer = input(prompt).strip().lower()
        except EOFError:
            return "quit"
        if answer in replies:
            return replies[answer]
        print("Please answer with y, n, a, or q.")
def copy_archives(
    archives: list[tuple[Path, str]],
    input_root: Path,
    output_root: Path,
    force: bool,
) -> int:
    """Copy detected archives into ``output_root/<type>/<relative path>``.

    Existing destination files trigger an interactive overwrite prompt unless
    ``force`` is set or the user previously answered "all". Returns a process
    exit code: 0 on success, 2 on errors, 130 when the user quits the prompt.
    """
    copied = 0
    skipped = 0
    overwritten = 0
    # Once True, every later collision is overwritten without prompting.
    overwrite_all = force
    type_counts = {"nres": 0, "rsli": 0}
    for _, archive_type in archives:
        type_counts[archive_type] += 1
    print(
        f"Found archives: total={len(archives)}, "
        f"nres={type_counts['nres']}, rsli={type_counts['rsli']}"
    )
    for source, archive_type in archives:
        rel_path = source.relative_to(input_root)
        destination = output_root / archive_type / rel_path
        destination.parent.mkdir(parents=True, exist_ok=True)
        if destination.exists():
            if destination.is_dir():
                print(
                    f"[error] destination is a directory, expected file: {destination}",
                    file=sys.stderr,
                )
                return 2
            if not overwrite_all:
                # Refuse to prompt when stdin cannot answer (CI, piped runs).
                if not sys.stdin.isatty():
                    print(
                        "[error] destination file exists but stdin is not interactive. "
                        "Use --force to overwrite without prompts.",
                        file=sys.stderr,
                    )
                    return 2
                decision = confirm_overwrite(destination)
                if decision == "quit":
                    print("Aborted by user.")
                    return 130
                if decision == "no":
                    skipped += 1
                    continue
                if decision == "all":
                    overwrite_all = True
            overwritten += 1
        try:
            # copy2 preserves file metadata (timestamps) along with content.
            shutil.copy2(source, destination)
        except OSError as exc:
            print(f"[error] failed to copy {source} -> {destination}: {exc}", file=sys.stderr)
            return 2
        copied += 1
    print(
        f"Done: copied={copied}, overwritten={overwritten}, skipped={skipped}, "
        f"output={output_root}"
    )
    return 0
def build_parser() -> argparse.ArgumentParser:
    """Create the CLI parser for the test-data initialization script."""
    parser = argparse.ArgumentParser(
        description="Initialize test data by scanning NRes/RsLi signatures."
    )
    parser.add_argument("--input", required=True, help="Input directory to scan recursively.")
    parser.add_argument(
        "--output",
        required=True,
        help="Output root directory (archives go to nres/ and rsli/ subdirs).",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Overwrite destination files without confirmation prompts.",
    )
    return parser
def main() -> int:
    """Entry point: validate CLI paths, scan for archives, copy into the output tree.

    Exit codes: 0 on success, 2 for invalid input/output paths (copy errors
    propagate the same code from copy_archives), 130 on user abort.
    """
    args = build_parser().parse_args()
    input_root = Path(args.input)
    if not input_root.exists():
        print(f"[error] input directory does not exist: {input_root}", file=sys.stderr)
        return 2
    if not input_root.is_dir():
        print(f"[error] input path is not a directory: {input_root}", file=sys.stderr)
        return 2
    output_root = Path(args.output)
    if output_root.exists() and not output_root.is_dir():
        print(f"[error] output path exists and is not a directory: {output_root}", file=sys.stderr)
        return 2
    input_resolved = input_root.resolve()
    output_resolved = output_root.resolve()
    if input_resolved == output_resolved:
        print("[error] input and output directories must be different.", file=sys.stderr)
        return 2
    # If the output tree lives inside the input tree, exclude it from the scan
    # so previously copied archives are not picked up a second time.
    excluded_root: Path | None = None
    if is_relative_to(output_resolved, input_resolved):
        excluded_root = output_resolved
        print(f"Notice: output is inside input, skipping scan under: {excluded_root}")
    archives = scan_archives(input_root, excluded_root)
    output_root.mkdir(parents=True, exist_ok=True)
    return copy_archives(archives, input_root, output_root, force=args.force)
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,9 +0,0 @@
[package]
name = "unpacker"
version = "0.1.1"
edition = "2021"
[dependencies]
byteorder = "1.4.3"
serde = { version = "1.0.160", features = ["derive"] }
serde_json = "1.0.96"

View File

@@ -1,41 +0,0 @@
# NRes Game Resource Unpacker
At the moment, this is a demonstration of the NRes game resource unpacking algorithm in action.
It unpacks 100% of the NRes game resources for the game "Parkan: Iron Strategy".
The unpacked resources can be packed again using the [packer](../packer) utility and replace the original game files.
__Attention!__
This is a test version of the utility.
It overwrites existing files without asking.
## Building
To build the tools, you need to run the following command in the root directory:
```bash
cargo build --release
```
## Running
You can run the utility with the following command:
```bash
./target/release/unpacker /path/to/file.ex /path/to/output
```
- `/path/to/file.ex`: This is the file containing the game resources that will be unpacked.
- `/path/to/output`: This is the directory where the unpacked files will be placed.
## How it Works
The structure describing the packed game resources is not fully understood yet.
Therefore, the utility saves unpacked files in the format `file_name.file_index` because some files have the same name.
Additionally, an `index.json` file is created, which is important for re-packing the files.
This file lists all the fields that game resources have in their packed form.
It is essential to preserve the file index for the game to function correctly, as the game engine looks for the necessary files by index.
Files can be replaced and packed back using the [packer](../packer).
The newly obtained game resource files are correctly processed by the game engine.
For example, sounds and 3D models of warbots' weapons were successfully replaced.

View File

@@ -1,124 +0,0 @@
use std::env;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use byteorder::{ByteOrder, LittleEndian};
use serde::{Deserialize, Serialize};
/// 16-byte NRes archive header, read little-endian from the start of the file.
#[derive(Serialize, Deserialize, Debug)]
pub struct FileHeader {
    /// Total archive size in bytes (header bytes 12..16); checked against file length.
    pub size: u32,
    /// Number of 64-byte directory entries (header bytes 8..12).
    pub total: u32,
    /// First magic word (bytes 0..4); expected 1936020046, i.e. "NRes" as LE u32.
    pub type1: u32,
    /// Second magic word (bytes 4..8); expected 256 (0x100).
    pub type2: u32,
}
/// One 64-byte directory record describing a packed resource;
/// serialized to index.json so the archive can be repacked later.
#[derive(Serialize, Deserialize, Debug)]
pub struct ListElement {
    /// NUL-trimmed text from record bytes 0..4.
    pub extension: String,
    /// Value from bytes 60..64; appended to the output file name to
    /// disambiguate duplicate names.
    pub index: u32,
    /// NUL-trimmed name from bytes 20..56.
    pub name: String,
    /// Payload offset in the archive (bytes 56..60); recomputed on repack,
    /// so not serialized.
    #[serde(skip_serializing)]
    pub position: u32,
    /// Payload size in bytes (bytes 12..16); not serialized for the same reason.
    #[serde(skip_serializing)]
    pub size: u32,
    /// Unexplored field at bytes 4..8; preserved verbatim for repacking.
    pub unknown0: u32,
    /// Unexplored field at bytes 8..12; preserved verbatim for repacking.
    pub unknown1: u32,
    /// Unexplored field at bytes 16..20; preserved verbatim for repacking.
    pub unknown2: u32,
}
/// CLI entry point: `unpacker <input NRes file> <output directory>`.
fn main() {
    let args: Vec<String> = env::args().collect();
    // Validate arity up front instead of panicking with an index-out-of-bounds
    // when arguments are missing.
    if args.len() < 3 {
        eprintln!("usage: unpacker <input NRes file> <output directory>");
        std::process::exit(2);
    }
    let input = &args[1];
    let output = &args[2];
    unpack(String::from(input), String::from(output));
}
/// Unpack the NRes archive at `input` into the `output` directory.
///
/// Reads the 16-byte header, validates magic and total size, parses the
/// 64-byte directory records stored at the end of the file, extracts every
/// payload as `<name>.<index>`, and writes the directory metadata to
/// `index.json` for later repacking.
///
/// Panics on I/O errors, on a non-NRes file, or on a size mismatch.
fn unpack(input: String, output: String) {
    let file = File::open(input).unwrap();
    let metadata = file.metadata().unwrap();
    let mut reader = BufReader::new(file);
    let mut list: Vec<ListElement> = Vec::new();
    // Read the 16-byte file header.
    let mut header_buffer = [0u8; 16];
    reader.seek(SeekFrom::Start(0)).unwrap();
    reader.read_exact(&mut header_buffer).unwrap();
    let file_header = FileHeader {
        size: LittleEndian::read_u32(&header_buffer[12..16]),
        total: LittleEndian::read_u32(&header_buffer[8..12]),
        type1: LittleEndian::read_u32(&header_buffer[0..4]),
        type2: LittleEndian::read_u32(&header_buffer[4..8]),
    };
    // 1936020046 is "NRes" read as a little-endian u32; 256 is version 0x100.
    if file_header.type1 != 1936020046 || file_header.type2 != 256 {
        panic!("this isn't NRes file");
    }
    if metadata.len() != file_header.size as u64 {
        panic!("incorrect size")
    }
    // Read the directory: `total` records of 64 bytes located at the end of the file.
    let list_files_start_position = file_header.size - (file_header.total * 64);
    let list_files_size = file_header.total * 64;
    let mut list_buffer = vec![0u8; list_files_size as usize];
    reader
        .seek(SeekFrom::Start(list_files_start_position as u64))
        .unwrap();
    reader.read_exact(&mut list_buffer).unwrap();
    if list_buffer.len() % 64 != 0 {
        panic!("invalid files list")
    }
    // Decode each 64-byte record into a ListElement.
    for i in 0..(list_buffer.len() / 64) {
        let from = i * 64;
        let to = (i * 64) + 64;
        let chunk: &[u8] = &list_buffer[from..to];
        let element_list = ListElement {
            extension: String::from_utf8_lossy(&chunk[0..4])
                .trim_matches(char::from(0))
                .to_string(),
            index: LittleEndian::read_u32(&chunk[60..64]),
            name: String::from_utf8_lossy(&chunk[20..56])
                .trim_matches(char::from(0))
                .to_string(),
            position: LittleEndian::read_u32(&chunk[56..60]),
            size: LittleEndian::read_u32(&chunk[12..16]),
            unknown0: LittleEndian::read_u32(&chunk[4..8]),
            unknown1: LittleEndian::read_u32(&chunk[8..12]),
            unknown2: LittleEndian::read_u32(&chunk[16..20]),
        };
        list.push(element_list)
    }
    // Extract each payload into the output directory as `<name>.<index>`
    // (the index keeps files with duplicate names apart).
    for element in &list {
        let path = format!("{}/{}.{}", output, element.name, element.index);
        let mut file = File::create(path).unwrap();
        let mut file_buffer = vec![0u8; element.size as usize];
        reader
            .seek(SeekFrom::Start(element.position as u64))
            .unwrap();
        reader.read_exact(&mut file_buffer).unwrap();
        file.write_all(&file_buffer).unwrap();
        file_buffer.clear();
    }
    // Dump the directory metadata to index.json (required for repacking).
    let path = format!("{}/{}", output, "index.json");
    let file = File::create(path).unwrap();
    let mut writer = BufWriter::new(file);
    serde_json::to_writer_pretty(&mut writer, &list).unwrap();
    writer.flush().unwrap();
}

View File

@@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"ef9fa958318e442f1da7d204494cefec75c144aa6d5d5c93b0a5d6fcdf4ef6c6","Cargo.lock":"20b23c454fc3127f08a1bcd2864bbf029793759e6411fba24d44d8f4b7831ad0","Cargo.toml":"d0f15fde73d42bdf00e93f960dff908447225bede9364cb1659e44740a536c04","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e99d88d232bf57d70f0fb87f6b496d44b6653f99f8a63d250a54c61ea4bcde40","README.md":"76d28502bd2e83f6a9e3576bd45e9a7fe5308448c4b5384b0d249515b5f67a5c","bench.plot.r":"6a5d7a4d36ed6b3d9919be703a479bef47698bf947818b483ff03951df2d4e01","benchmark.sh":"b35f89b1ca2c1dc0476cdd07f0284b72d41920d1c7b6054072f50ffba296d78d","coverage.sh":"4677e81922d08a82e83068a911717a247c66af12e559f37b78b6be3337ac9f07","examples/addr2line.rs":"3c5eb5a6726634df6cf53e4d67ee9f90c9ac09838303947f45c3bea1e84548b5","rustfmt.toml":"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b","src/builtin_split_dwarf_loader.rs":"dc6979de81b35f82e97275e6be27ec61f3c4225ea10574a9e031813e00185174","src/function.rs":"68f047e0c78afe18ad165db255c8254ee74c35cd6df0cc07e400252981f661ed","src/lazy.rs":"0bf23f7098f1902f181e43c2ffa82a3f86df2c0dbcb9bc0ebce6a0168dd8b060","src/lib.rs":"9d6531f71fd138d31cc7596db9ab234198d0895a21ea9cb116434c19ec78b660","tests/correctness.rs":"4081f8019535305e3aa254c6a4e1436272dd873f9717c687ca0e66ea8d5871ed","tests/output_equivalence.rs":"b2cd7c59fa55808a2e66e9fe7f160d846867e3ecefe22c22a818f822c3c41f23","tests/parse.rs":"c2f7362e4679c1b4803b12ec6e8dca6da96aed7273fd210a857524a4182c30e7"},"package":"8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"}

View File

@@ -1,336 +0,0 @@
# `addr2line` Change Log
--------------------------------------------------------------------------------
## 0.21.0 (2023/08/12)
### Breaking changes
* Updated `gimli`, `object`, and `fallible-iterator` dependencies.
### Changed
* The minimum supported rust version is 1.65.0.
* Store boxed slices instead of `Vec` objects in `Context`.
[#278](https://github.com/gimli-rs/addr2line/pull/278)
--------------------------------------------------------------------------------
## 0.20.0 (2023/04/15)
### Breaking changes
* The minimum supported rust version is 1.58.0.
* Changed `Context::find_frames` to return `LookupResult`.
Use `LookupResult::skip_all_loads` to obtain the result without loading split DWARF.
[#260](https://github.com/gimli-rs/addr2line/pull/260)
* Replaced `Context::find_dwarf_unit` with `Context::find_dwarf_and_unit`.
[#260](https://github.com/gimli-rs/addr2line/pull/260)
* Updated `object` dependency.
### Changed
* Fix handling of file index 0 for DWARF 5.
[#264](https://github.com/gimli-rs/addr2line/pull/264)
### Added
* Added types and methods to support loading split DWARF:
`LookupResult`, `SplitDwarfLoad`, `SplitDwarfLoader`, `Context::preload_units`.
[#260](https://github.com/gimli-rs/addr2line/pull/260)
[#262](https://github.com/gimli-rs/addr2line/pull/262)
[#263](https://github.com/gimli-rs/addr2line/pull/263)
--------------------------------------------------------------------------------
## 0.19.0 (2022/11/24)
### Breaking changes
* Updated `gimli` and `object` dependencies.
--------------------------------------------------------------------------------
## 0.18.0 (2022/07/16)
### Breaking changes
* Updated `object` dependency.
### Changed
* Fixed handling of relative path for `DW_AT_comp_dir`.
[#239](https://github.com/gimli-rs/addr2line/pull/239)
* Fixed handling of `DW_FORM_addrx` for DWARF 5 support.
[#243](https://github.com/gimli-rs/addr2line/pull/243)
* Fixed handling of units that are missing range information.
[#249](https://github.com/gimli-rs/addr2line/pull/249)
--------------------------------------------------------------------------------
## 0.17.0 (2021/10/24)
### Breaking changes
* Updated `gimli` and `object` dependencies.
### Changed
* Use `skip_attributes` to improve performance.
[#236](https://github.com/gimli-rs/addr2line/pull/236)
--------------------------------------------------------------------------------
## 0.16.0 (2021/07/26)
### Breaking changes
* Updated `gimli` and `object` dependencies.
--------------------------------------------------------------------------------
## 0.15.2 (2021/06/04)
### Fixed
* Allow `Context` to be `Send`.
[#219](https://github.com/gimli-rs/addr2line/pull/219)
--------------------------------------------------------------------------------
## 0.15.1 (2021/05/02)
### Fixed
* Don't ignore aranges with address 0.
[#217](https://github.com/gimli-rs/addr2line/pull/217)
--------------------------------------------------------------------------------
## 0.15.0 (2021/05/02)
### Breaking changes
* Updated `gimli` and `object` dependencies.
[#215](https://github.com/gimli-rs/addr2line/pull/215)
* Added `debug_aranges` parameter to `Context::from_sections`.
[#200](https://github.com/gimli-rs/addr2line/pull/200)
### Added
* Added `.debug_aranges` support.
[#200](https://github.com/gimli-rs/addr2line/pull/200)
* Added supplementary object file support.
[#208](https://github.com/gimli-rs/addr2line/pull/208)
### Fixed
* Fixed handling of Windows paths in locations.
[#209](https://github.com/gimli-rs/addr2line/pull/209)
* examples/addr2line: Flush stdout after each response.
[#210](https://github.com/gimli-rs/addr2line/pull/210)
* examples/addr2line: Avoid copying every section.
[#213](https://github.com/gimli-rs/addr2line/pull/213)
--------------------------------------------------------------------------------
## 0.14.1 (2020/12/31)
### Fixed
* Fix location lookup for skeleton units.
[#201](https://github.com/gimli-rs/addr2line/pull/201)
### Added
* Added `Context::find_location_range`.
[#196](https://github.com/gimli-rs/addr2line/pull/196)
[#199](https://github.com/gimli-rs/addr2line/pull/199)
--------------------------------------------------------------------------------
## 0.14.0 (2020/10/27)
### Breaking changes
* Updated `gimli` and `object` dependencies.
### Fixed
* Handle units that only have line information.
[#188](https://github.com/gimli-rs/addr2line/pull/188)
* Handle DWARF units with version <= 4 and no `DW_AT_name`.
[#191](https://github.com/gimli-rs/addr2line/pull/191)
* Fix handling of `DW_FORM_ref_addr`.
[#193](https://github.com/gimli-rs/addr2line/pull/193)
--------------------------------------------------------------------------------
## 0.13.0 (2020/07/07)
### Breaking changes
* Updated `gimli` and `object` dependencies.
* Added `rustc-dep-of-std` feature.
[#166](https://github.com/gimli-rs/addr2line/pull/166)
### Changed
* Improve performance by parsing function contents lazily.
[#178](https://github.com/gimli-rs/addr2line/pull/178)
* Don't skip `.debug_info` and `.debug_line` entries with a zero address.
[#182](https://github.com/gimli-rs/addr2line/pull/182)
--------------------------------------------------------------------------------
## 0.12.2 (2020/06/21)
### Fixed
* Avoid linear search for `DW_FORM_ref_addr`.
[#175](https://github.com/gimli-rs/addr2line/pull/175)
--------------------------------------------------------------------------------
## 0.12.1 (2020/05/19)
### Fixed
* Handle units with overlapping address ranges.
[#163](https://github.com/gimli-rs/addr2line/pull/163)
* Don't assert for functions with overlapping address ranges.
[#168](https://github.com/gimli-rs/addr2line/pull/168)
--------------------------------------------------------------------------------
## 0.12.0 (2020/05/12)
### Breaking changes
* Updated `gimli` and `object` dependencies.
* Added more optional features: `smallvec` and `fallible-iterator`.
[#160](https://github.com/gimli-rs/addr2line/pull/160)
### Added
* Added `Context::dwarf` and `Context::find_dwarf_unit`.
[#159](https://github.com/gimli-rs/addr2line/pull/159)
### Changed
* Removed `lazycell` dependency.
[#160](https://github.com/gimli-rs/addr2line/pull/160)
--------------------------------------------------------------------------------
## 0.11.0 (2020/01/11)
### Breaking changes
* Updated `gimli` and `object` dependencies.
* [#130](https://github.com/gimli-rs/addr2line/pull/130)
Changed `Location::file` from `Option<String>` to `Option<&str>`.
This required adding lifetime parameters to `Location` and other structs that
contain it.
* [#152](https://github.com/gimli-rs/addr2line/pull/152)
Changed `Location::line` and `Location::column` from `Option<u64>` to `Option<u32>`.
* [#156](https://github.com/gimli-rs/addr2line/pull/156)
Deleted `alloc` feature, and fixed `no-std` builds with stable rust.
Removed default `Reader` parameter for `Context`, and added `ObjectContext` instead.
### Added
* [#134](https://github.com/gimli-rs/addr2line/pull/134)
Added `Context::from_dwarf`.
### Changed
* [#133](https://github.com/gimli-rs/addr2line/pull/133)
Fixed handling of units that can't be parsed.
* [#155](https://github.com/gimli-rs/addr2line/pull/155)
Fixed `addr2line` output to match binutils.
* [#130](https://github.com/gimli-rs/addr2line/pull/130)
Improved `.debug_line` parsing performance.
* [#148](https://github.com/gimli-rs/addr2line/pull/148)
[#150](https://github.com/gimli-rs/addr2line/pull/150)
[#151](https://github.com/gimli-rs/addr2line/pull/151)
[#152](https://github.com/gimli-rs/addr2line/pull/152)
Improved `.debug_info` parsing performance.
* [#137](https://github.com/gimli-rs/addr2line/pull/137)
[#138](https://github.com/gimli-rs/addr2line/pull/138)
[#139](https://github.com/gimli-rs/addr2line/pull/139)
[#140](https://github.com/gimli-rs/addr2line/pull/140)
[#146](https://github.com/gimli-rs/addr2line/pull/146)
Improved benchmarks.
--------------------------------------------------------------------------------
## 0.10.0 (2019/07/07)
### Breaking changes
* [#127](https://github.com/gimli-rs/addr2line/pull/127)
Update `gimli`.
--------------------------------------------------------------------------------
## 0.9.0 (2019/05/02)
### Breaking changes
* [#121](https://github.com/gimli-rs/addr2line/pull/121)
Update `gimli`, `object`, and `fallible-iterator` dependencies.
### Added
* [#121](https://github.com/gimli-rs/addr2line/pull/121)
Reexport `gimli`, `object`, and `fallible-iterator`.
--------------------------------------------------------------------------------
## 0.8.0 (2019/02/06)
### Breaking changes
* [#107](https://github.com/gimli-rs/addr2line/pull/107)
Update `object` dependency to 0.11. This is part of the public API.
### Added
* [#101](https://github.com/gimli-rs/addr2line/pull/101)
Add `object` feature (enabled by default). Disable this feature to remove
the `object` dependency and `Context::new` API.
* [#102](https://github.com/gimli-rs/addr2line/pull/102)
Add `std` (enabled by default) and `alloc` features.
### Changed
* [#108](https://github.com/gimli-rs/addr2line/issues/108)
`demangle` no longer outputs the hash for rust symbols.
* [#109](https://github.com/gimli-rs/addr2line/issues/109)
Set default `R` for `Context<R>`.

704
vendor/addr2line/Cargo.lock generated vendored
View File

@@ -1,704 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
dependencies = [
"gimli 0.27.2",
]
[[package]]
name = "addr2line"
version = "0.21.0"
dependencies = [
"backtrace",
"clap",
"compiler_builtins",
"cpp_demangle",
"fallible-iterator",
"findshlibs",
"gimli 0.28.0",
"libtest-mimic",
"memmap2",
"object 0.32.0",
"rustc-demangle",
"rustc-std-workspace-alloc",
"rustc-std-workspace-core",
"smallvec",
"typed-arena",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "anstream"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is-terminal",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
[[package]]
name = "anstyle-parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
"windows-sys",
]
[[package]]
name = "anstyle-wincon"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
dependencies = [
"anstyle",
"windows-sys",
]
[[package]]
name = "backtrace"
version = "0.3.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca"
dependencies = [
"addr2line 0.19.0",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object 0.30.3",
"rustc-demangle",
]
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "cc"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "4.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
dependencies = [
"clap_builder",
"clap_derive",
"once_cell",
]
[[package]]
name = "clap_builder"
version = "4.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
"terminal_size",
]
[[package]]
name = "clap_derive"
version = "4.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.15",
]
[[package]]
name = "clap_lex"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "compiler_builtins"
version = "0.1.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "571298a3cce7e2afbd3d61abb91a18667d5ab25993ec577a88ee8ac45f00cc3a"
[[package]]
name = "cpp_demangle"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6"
dependencies = [
"cfg-if",
]
[[package]]
name = "crc32fast"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
"cfg-if",
]
[[package]]
name = "errno"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f"
dependencies = [
"errno-dragonfly",
"libc",
"windows-sys",
]
[[package]]
name = "errno-dragonfly"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "fallible-iterator"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
[[package]]
name = "findshlibs"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
dependencies = [
"cc",
"lazy_static",
"libc",
"winapi",
]
[[package]]
name = "flate2"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
dependencies = [
"crc32fast",
"miniz_oxide",
]
[[package]]
name = "gimli"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
[[package]]
name = "gimli"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
dependencies = [
"compiler_builtins",
"fallible-iterator",
"rustc-std-workspace-alloc",
"rustc-std-workspace-core",
"stable_deref_trait",
]
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
dependencies = [
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
[[package]]
name = "io-lifetimes"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
"hermit-abi 0.3.2",
"libc",
"windows-sys",
]
[[package]]
name = "is-terminal"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.2",
"rustix 0.38.8",
"windows-sys",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
[[package]]
name = "libtest-mimic"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7"
dependencies = [
"clap",
"termcolor",
"threadpool",
]
[[package]]
name = "linux-raw-sys"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "linux-raw-sys"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503"
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memmap2"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327"
dependencies = [
"libc",
]
[[package]]
name = "miniz_oxide"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
dependencies = [
"adler",
]
[[package]]
name = "num_cpus"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
dependencies = [
"hermit-abi 0.2.6",
"libc",
]
[[package]]
name = "object"
version = "0.30.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
dependencies = [
"memchr",
]
[[package]]
name = "object"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
dependencies = [
"flate2",
"memchr",
"ruzstd",
]
[[package]]
name = "once_cell"
version = "1.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
[[package]]
name = "proc-macro2"
version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rustc-demangle"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b"
[[package]]
name = "rustc-std-workspace-alloc"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff66d57013a5686e1917ed6a025d54dd591fcda71a41fe07edf4d16726aefa86"
[[package]]
name = "rustc-std-workspace-core"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c"
[[package]]
name = "rustix"
version = "0.37.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
dependencies = [
"bitflags 1.3.2",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys 0.3.8",
"windows-sys",
]
[[package]]
name = "rustix"
version = "0.38.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f"
dependencies = [
"bitflags 2.4.0",
"errno",
"libc",
"linux-raw-sys 0.4.5",
"windows-sys",
]
[[package]]
name = "ruzstd"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3ffab8f9715a0d455df4bbb9d21e91135aab3cd3ca187af0cd0c3c3f868fdc"
dependencies = [
"byteorder",
"thiserror-core",
"twox-hash",
]
[[package]]
name = "smallvec"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "termcolor"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
dependencies = [
"winapi-util",
]
[[package]]
name = "terminal_size"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
dependencies = [
"rustix 0.37.23",
"windows-sys",
]
[[package]]
name = "thiserror-core"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d97345f6437bb2004cd58819d8a9ef8e36cdd7661c2abc4bbde0a7c40d9f497"
dependencies = [
"thiserror-core-impl",
]
[[package]]
name = "thiserror-core-impl"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "threadpool"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
dependencies = [
"num_cpus",
]
[[package]]
name = "twox-hash"
version = "1.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
dependencies = [
"cfg-if",
"static_assertions",
]
[[package]]
name = "typed-arena"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a"
[[package]]
name = "unicode-ident"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.48.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
[[package]]
name = "windows_i686_gnu"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
[[package]]
name = "windows_i686_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"

View File

@@ -1,147 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.65"
name = "addr2line"
version = "0.21.0"
exclude = [
"/benches/*",
"/fixtures/*",
".github",
]
description = "A cross-platform symbolication library written in Rust, using `gimli`"
documentation = "https://docs.rs/addr2line"
readme = "./README.md"
keywords = [
"DWARF",
"debug",
"elf",
"symbolicate",
"atos",
]
categories = ["development-tools::debugging"]
license = "Apache-2.0 OR MIT"
repository = "https://github.com/gimli-rs/addr2line"
[profile.bench]
codegen-units = 1
debug = true
[profile.release]
debug = true
[[example]]
name = "addr2line"
required-features = ["default"]
[[test]]
name = "output_equivalence"
harness = false
required-features = ["default"]
[[test]]
name = "correctness"
required-features = ["default"]
[[test]]
name = "parse"
required-features = ["std-object"]
[dependencies.alloc]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-alloc"
[dependencies.compiler_builtins]
version = "0.1.2"
optional = true
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dependencies.cpp_demangle]
version = "0.4"
features = ["alloc"]
optional = true
default-features = false
[dependencies.fallible-iterator]
version = "0.3.0"
optional = true
default-features = false
[dependencies.gimli]
version = "0.28.0"
features = ["read"]
default-features = false
[dependencies.memmap2]
version = "0.5.5"
optional = true
[dependencies.object]
version = "0.32.0"
features = ["read"]
optional = true
default-features = false
[dependencies.rustc-demangle]
version = "0.1"
optional = true
[dependencies.smallvec]
version = "1"
optional = true
default-features = false
[dev-dependencies.backtrace]
version = "0.3.13"
[dev-dependencies.clap]
version = "4.3.21"
features = ["wrap_help"]
[dev-dependencies.findshlibs]
version = "0.10"
[dev-dependencies.libtest-mimic]
version = "0.6.1"
[dev-dependencies.typed-arena]
version = "2"
[features]
default = [
"rustc-demangle",
"cpp_demangle",
"std-object",
"fallible-iterator",
"smallvec",
"memmap2",
]
rustc-dep-of-std = [
"core",
"alloc",
"compiler_builtins",
"gimli/rustc-dep-of-std",
]
std = ["gimli/std"]
std-object = [
"std",
"object",
"object/std",
"object/compression",
"gimli/endian-reader",
]

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,25 +0,0 @@
Copyright (c) 2016-2018 The gimli Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@@ -1,48 +0,0 @@
# addr2line
[![](https://img.shields.io/crates/v/addr2line.svg)](https://crates.io/crates/addr2line)
[![](https://img.shields.io/docsrs/addr2line.svg)](https://docs.rs/addr2line)
[![Coverage Status](https://coveralls.io/repos/github/gimli-rs/addr2line/badge.svg?branch=master)](https://coveralls.io/github/gimli-rs/addr2line?branch=master)
A cross-platform library for retrieving per-address debug information
from files with DWARF debug information.
`addr2line` uses [`gimli`](https://github.com/gimli-rs/gimli) to parse
the debug information, and exposes an interface for finding
the source file, line number, and wrapping function for instruction
addresses within the target program. These lookups can either be
performed programmatically through `Context::find_location` and
`Context::find_frames`, or via the included example binary,
`addr2line` (named and modelled after the equivalent utility from
[GNU binutils](https://sourceware.org/binutils/docs/binutils/addr2line.html)).
# Quickstart
- Add the [`addr2line` crate](https://crates.io/crates/addr2line) to your `Cargo.toml`
- Load the file and parse it with [`addr2line::object::read::File::parse`](https://docs.rs/object/*/object/read/struct.File.html#method.parse)
- Pass the parsed file to [`addr2line::Context::new` ](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.new)
- Use [`addr2line::Context::find_location`](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.find_location)
or [`addr2line::Context::find_frames`](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.find_frames)
to look up debug information for an address
# Performance
`addr2line` optimizes for speed over memory by caching parsed information.
The DWARF information is parsed lazily where possible.
The library aims to perform similarly to equivalent existing tools such
as `addr2line` from binutils, `eu-addr2line` from elfutils, and
`llvm-symbolize` from the llvm project, and in the past some benchmarking
was done that indicates a comparable performance.
## License
Licensed under either of
* Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or https://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View File

@@ -1,23 +0,0 @@
v <- read.table(file("stdin"))
t <- data.frame(prog=v[,1], funcs=(v[,2]=="func"), time=v[,3], mem=v[,4], stringsAsFactors=FALSE)
t$prog <- as.character(t$prog)
t$prog[t$prog == "master"] <- "gimli-rs/addr2line"
t$funcs[t$funcs == TRUE] <- "With functions"
t$funcs[t$funcs == FALSE] <- "File/line only"
t$mem = t$mem / 1024.0
library(ggplot2)
p <- ggplot(data=t, aes(x=prog, y=time, fill=prog))
p <- p + geom_bar(stat = "identity")
p <- p + facet_wrap(~ funcs)
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
p <- p + ylab("time (s)") + ggtitle("addr2line runtime")
ggsave('time.png',plot=p,width=10,height=6)
p <- ggplot(data=t, aes(x=prog, y=mem, fill=prog))
p <- p + geom_bar(stat = "identity")
p <- p + facet_wrap(~ funcs)
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
p <- p + ylab("memory (kB)") + ggtitle("addr2line memory usage")
ggsave('memory.png',plot=p,width=10,height=6)

View File

@@ -1,112 +0,0 @@
#!/bin/bash
if [[ $# -le 1 ]]; then
echo "Usage: $0 <executable> [<addresses>] REFS..."
exit 1
fi
target="$1"
shift
addresses=""
if [[ -e "$1" ]]; then
addresses="$1"
shift
fi
# path to "us"
# readlink -f, but more portable:
dirname=$(perl -e 'use Cwd "abs_path";print abs_path(shift)' "$(dirname "$0")")
# https://stackoverflow.com/a/2358432/472927
{
# compile all refs
pushd "$dirname" > /dev/null
# if the user has some local changes, preserve them
nstashed=$(git stash list | wc -l)
echo "==> Stashing any local modifications"
git stash --keep-index > /dev/null
popstash() {
# https://stackoverflow.com/q/24520791/472927
if [[ "$(git stash list | wc -l)" -ne "$nstashed" ]]; then
echo "==> Restoring stashed state"
git stash pop > /dev/null
fi
}
# if the user has added stuff to the index, abort
if ! git diff-index --quiet HEAD --; then
echo "Refusing to overwrite outstanding git changes"
popstash
exit 2
fi
current=$(git symbolic-ref --short HEAD)
for ref in "$@"; do
echo "==> Compiling $ref"
git checkout -q "$ref"
commit=$(git rev-parse HEAD)
fn="target/release/addr2line-$commit"
if [[ ! -e "$fn" ]]; then
cargo build --release --example addr2line
cp target/release/examples/addr2line "$fn"
fi
if [[ "$ref" != "$commit" ]]; then
ln -sfn "addr2line-$commit" target/release/addr2line-"$ref"
fi
done
git checkout -q "$current"
popstash
popd > /dev/null
# get us some addresses to look up
if [[ -z "$addresses" ]]; then
echo "==> Looking for benchmarking addresses (this may take a while)"
addresses=$(mktemp tmp.XXXXXXXXXX)
objdump -C -x --disassemble -l "$target" \
| grep -P '0[048]:' \
| awk '{print $1}' \
| sed 's/:$//' \
> "$addresses"
echo " -> Addresses stored in $addresses; you should re-use it next time"
fi
run() {
func="$1"
name="$2"
cmd="$3"
args="$4"
printf "%s\t%s\t" "$name" "$func"
if [[ "$cmd" =~ llvm-symbolizer ]]; then
/usr/bin/time -f '%e\t%M' "$cmd" $args -obj="$target" < "$addresses" 2>&1 >/dev/null
else
/usr/bin/time -f '%e\t%M' "$cmd" $args -e "$target" < "$addresses" 2>&1 >/dev/null
fi
}
# run without functions
log1=$(mktemp tmp.XXXXXXXXXX)
echo "==> Benchmarking"
run nofunc binutils addr2line >> "$log1"
#run nofunc elfutils eu-addr2line >> "$log1"
run nofunc llvm-sym llvm-symbolizer -functions=none >> "$log1"
for ref in "$@"; do
run nofunc "$ref" "$dirname/target/release/addr2line-$ref" >> "$log1"
done
cat "$log1" | column -t
# run with functions
log2=$(mktemp tmp.XXXXXXXXXX)
echo "==> Benchmarking with -f"
run func binutils addr2line "-f -i" >> "$log2"
#run func elfutils eu-addr2line "-f -i" >> "$log2"
run func llvm-sym llvm-symbolizer "-functions=linkage -demangle=0" >> "$log2"
for ref in "$@"; do
run func "$ref" "$dirname/target/release/addr2line-$ref" "-f -i" >> "$log2"
done
cat "$log2" | column -t
cat "$log2" >> "$log1"; rm "$log2"
echo "==> Plotting"
Rscript --no-readline --no-restore --no-save "$dirname/bench.plot.r" < "$log1"
echo "==> Cleaning up"
rm "$log1"
exit 0
}

View File

@@ -1,5 +0,0 @@
#!/bin/sh
# Run tarpaulin and pycobertura to generate coverage.html.
cargo tarpaulin --skip-clean --out Xml
pycobertura show --format html --output coverage.html cobertura.xml

View File

@@ -1,317 +0,0 @@
use std::borrow::Cow;
use std::fs::File;
use std::io::{BufRead, Lines, StdinLock, Write};
use std::path::{Path, PathBuf};
use clap::{Arg, ArgAction, Command};
use fallible_iterator::FallibleIterator;
use object::{Object, ObjectSection, SymbolMap, SymbolMapName};
use typed_arena::Arena;
use addr2line::{Context, Location};
fn parse_uint_from_hex_string(string: &str) -> Option<u64> {
if string.len() > 2 && string.starts_with("0x") {
u64::from_str_radix(&string[2..], 16).ok()
} else {
u64::from_str_radix(string, 16).ok()
}
}
enum Addrs<'a> {
Args(clap::parser::ValuesRef<'a, String>),
Stdin(Lines<StdinLock<'a>>),
}
impl<'a> Iterator for Addrs<'a> {
type Item = Option<u64>;
fn next(&mut self) -> Option<Option<u64>> {
let text = match *self {
Addrs::Args(ref mut vals) => vals.next().map(Cow::from),
Addrs::Stdin(ref mut lines) => lines.next().map(Result::unwrap).map(Cow::from),
};
text.as_ref()
.map(Cow::as_ref)
.map(parse_uint_from_hex_string)
}
}
fn print_loc(loc: Option<&Location<'_>>, basenames: bool, llvm: bool) {
if let Some(loc) = loc {
if let Some(ref file) = loc.file.as_ref() {
let path = if basenames {
Path::new(Path::new(file).file_name().unwrap())
} else {
Path::new(file)
};
print!("{}:", path.display());
} else {
print!("??:");
}
if llvm {
print!("{}:{}", loc.line.unwrap_or(0), loc.column.unwrap_or(0));
} else if let Some(line) = loc.line {
print!("{}", line);
} else {
print!("?");
}
println!();
} else if llvm {
println!("??:0:0");
} else {
println!("??:0");
}
}
fn print_function(name: Option<&str>, language: Option<gimli::DwLang>, demangle: bool) {
if let Some(name) = name {
if demangle {
print!("{}", addr2line::demangle_auto(Cow::from(name), language));
} else {
print!("{}", name);
}
} else {
print!("??");
}
}
fn load_file_section<'input, 'arena, Endian: gimli::Endianity>(
id: gimli::SectionId,
file: &object::File<'input>,
endian: Endian,
arena_data: &'arena Arena<Cow<'input, [u8]>>,
) -> Result<gimli::EndianSlice<'arena, Endian>, ()> {
// TODO: Unify with dwarfdump.rs in gimli.
let name = id.name();
match file.section_by_name(name) {
Some(section) => match section.uncompressed_data().unwrap() {
Cow::Borrowed(b) => Ok(gimli::EndianSlice::new(b, endian)),
Cow::Owned(b) => Ok(gimli::EndianSlice::new(arena_data.alloc(b.into()), endian)),
},
None => Ok(gimli::EndianSlice::new(&[][..], endian)),
}
}
fn find_name_from_symbols<'a>(
symbols: &'a SymbolMap<SymbolMapName<'_>>,
probe: u64,
) -> Option<&'a str> {
symbols.get(probe).map(|x| x.name())
}
struct Options<'a> {
do_functions: bool,
do_inlines: bool,
pretty: bool,
print_addrs: bool,
basenames: bool,
demangle: bool,
llvm: bool,
exe: &'a PathBuf,
sup: Option<&'a PathBuf>,
}
fn main() {
let matches = Command::new("addr2line")
.version(env!("CARGO_PKG_VERSION"))
.about("A fast addr2line Rust port")
.max_term_width(100)
.args(&[
Arg::new("exe")
.short('e')
.long("exe")
.value_name("filename")
.value_parser(clap::value_parser!(PathBuf))
.help(
"Specify the name of the executable for which addresses should be translated.",
)
.required(true),
Arg::new("sup")
.long("sup")
.value_name("filename")
.value_parser(clap::value_parser!(PathBuf))
.help("Path to supplementary object file."),
Arg::new("functions")
.short('f')
.long("functions")
.action(ArgAction::SetTrue)
.help("Display function names as well as file and line number information."),
Arg::new("pretty").short('p').long("pretty-print")
.action(ArgAction::SetTrue)
.help(
"Make the output more human friendly: each location are printed on one line.",
),
Arg::new("inlines").short('i').long("inlines")
.action(ArgAction::SetTrue)
.help(
"If the address belongs to a function that was inlined, the source information for \
all enclosing scopes back to the first non-inlined function will also be printed.",
),
Arg::new("addresses").short('a').long("addresses")
.action(ArgAction::SetTrue)
.help(
"Display the address before the function name, file and line number information.",
),
Arg::new("basenames")
.short('s')
.long("basenames")
.action(ArgAction::SetTrue)
.help("Display only the base of each file name."),
Arg::new("demangle").short('C').long("demangle")
.action(ArgAction::SetTrue)
.help(
"Demangle function names. \
Specifying a specific demangling style (like GNU addr2line) is not supported. \
(TODO)"
),
Arg::new("llvm")
.long("llvm")
.action(ArgAction::SetTrue)
.help("Display output in the same format as llvm-symbolizer."),
Arg::new("addrs")
.action(ArgAction::Append)
.help("Addresses to use instead of reading from stdin."),
])
.get_matches();
let arena_data = Arena::new();
let opts = Options {
do_functions: matches.get_flag("functions"),
do_inlines: matches.get_flag("inlines"),
pretty: matches.get_flag("pretty"),
print_addrs: matches.get_flag("addresses"),
basenames: matches.get_flag("basenames"),
demangle: matches.get_flag("demangle"),
llvm: matches.get_flag("llvm"),
exe: matches.get_one::<PathBuf>("exe").unwrap(),
sup: matches.get_one::<PathBuf>("sup"),
};
let file = File::open(opts.exe).unwrap();
let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
let object = &object::File::parse(&*map).unwrap();
let endian = if object.is_little_endian() {
gimli::RunTimeEndian::Little
} else {
gimli::RunTimeEndian::Big
};
let mut load_section = |id: gimli::SectionId| -> Result<_, _> {
load_file_section(id, object, endian, &arena_data)
};
let sup_map;
let sup_object = if let Some(sup_path) = opts.sup {
let sup_file = File::open(sup_path).unwrap();
sup_map = unsafe { memmap2::Mmap::map(&sup_file).unwrap() };
Some(object::File::parse(&*sup_map).unwrap())
} else {
None
};
let symbols = object.symbol_map();
let mut dwarf = gimli::Dwarf::load(&mut load_section).unwrap();
if let Some(ref sup_object) = sup_object {
let mut load_sup_section = |id: gimli::SectionId| -> Result<_, _> {
load_file_section(id, sup_object, endian, &arena_data)
};
dwarf.load_sup(&mut load_sup_section).unwrap();
}
let mut split_dwarf_loader = addr2line::builtin_split_dwarf_loader::SplitDwarfLoader::new(
|data, endian| {
gimli::EndianSlice::new(arena_data.alloc(Cow::Owned(data.into_owned())), endian)
},
Some(opts.exe.clone()),
);
let ctx = Context::from_dwarf(dwarf).unwrap();
let stdin = std::io::stdin();
let addrs = matches
.get_many::<String>("addrs")
.map(Addrs::Args)
.unwrap_or_else(|| Addrs::Stdin(stdin.lock().lines()));
for probe in addrs {
if opts.print_addrs {
let addr = probe.unwrap_or(0);
if opts.llvm {
print!("0x{:x}", addr);
} else {
print!("0x{:016x}", addr);
}
if opts.pretty {
print!(": ");
} else {
println!();
}
}
if opts.do_functions || opts.do_inlines {
let mut printed_anything = false;
if let Some(probe) = probe {
let frames = ctx.find_frames(probe);
let frames = split_dwarf_loader.run(frames).unwrap();
let mut frames = frames.enumerate();
while let Some((i, frame)) = frames.next().unwrap() {
if opts.pretty && i != 0 {
print!(" (inlined by) ");
}
if opts.do_functions {
if let Some(func) = frame.function {
print_function(
func.raw_name().ok().as_ref().map(AsRef::as_ref),
func.language,
opts.demangle,
);
} else {
let name = find_name_from_symbols(&symbols, probe);
print_function(name, None, opts.demangle);
}
if opts.pretty {
print!(" at ");
} else {
println!();
}
}
print_loc(frame.location.as_ref(), opts.basenames, opts.llvm);
printed_anything = true;
if !opts.do_inlines {
break;
}
}
}
if !printed_anything {
if opts.do_functions {
let name = probe.and_then(|probe| find_name_from_symbols(&symbols, probe));
print_function(name, None, opts.demangle);
if opts.pretty {
print!(" at ");
} else {
println!();
}
}
print_loc(None, opts.basenames, opts.llvm);
}
} else {
let loc = probe.and_then(|probe| ctx.find_location(probe).unwrap());
print_loc(loc.as_ref(), opts.basenames, opts.llvm);
}
if opts.llvm {
println!();
}
std::io::stdout().flush().unwrap();
}
}

View File

@@ -1 +0,0 @@

View File

@@ -1,164 +0,0 @@
use alloc::borrow::Cow;
use alloc::sync::Arc;
use std::fs::File;
use std::path::PathBuf;
use object::Object;
use crate::{LookupContinuation, LookupResult};
#[cfg(unix)]
fn convert_path<R: gimli::Reader<Endian = gimli::RunTimeEndian>>(
r: &R,
) -> Result<PathBuf, gimli::Error> {
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
let bytes = r.to_slice()?;
let s = OsStr::from_bytes(&bytes);
Ok(PathBuf::from(s))
}
#[cfg(not(unix))]
fn convert_path<R: gimli::Reader<Endian = gimli::RunTimeEndian>>(
r: &R,
) -> Result<PathBuf, gimli::Error> {
let bytes = r.to_slice()?;
let s = std::str::from_utf8(&bytes).map_err(|_| gimli::Error::BadUtf8)?;
Ok(PathBuf::from(s))
}
fn load_section<'data: 'file, 'file, O, R, F>(
id: gimli::SectionId,
file: &'file O,
endian: R::Endian,
loader: &mut F,
) -> Result<R, gimli::Error>
where
O: object::Object<'data, 'file>,
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
F: FnMut(Cow<'data, [u8]>, R::Endian) -> R,
{
use object::ObjectSection;
let data = id
.dwo_name()
.and_then(|dwo_name| {
file.section_by_name(dwo_name)
.and_then(|section| section.uncompressed_data().ok())
})
.unwrap_or(Cow::Borrowed(&[]));
Ok(loader(data, endian))
}
/// A simple builtin split DWARF loader.
pub struct SplitDwarfLoader<R, F>
where
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
F: FnMut(Cow<'_, [u8]>, R::Endian) -> R,
{
loader: F,
dwarf_package: Option<gimli::DwarfPackage<R>>,
}
impl<R, F> SplitDwarfLoader<R, F>
where
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
F: FnMut(Cow<'_, [u8]>, R::Endian) -> R,
{
fn load_dwarf_package(loader: &mut F, path: Option<PathBuf>) -> Option<gimli::DwarfPackage<R>> {
let mut path = path.map(Ok).unwrap_or_else(std::env::current_exe).ok()?;
let dwp_extension = path
.extension()
.map(|previous_extension| {
let mut previous_extension = previous_extension.to_os_string();
previous_extension.push(".dwp");
previous_extension
})
.unwrap_or_else(|| "dwp".into());
path.set_extension(dwp_extension);
let file = File::open(&path).ok()?;
let map = unsafe { memmap2::Mmap::map(&file).ok()? };
let dwp = object::File::parse(&*map).ok()?;
let endian = if dwp.is_little_endian() {
gimli::RunTimeEndian::Little
} else {
gimli::RunTimeEndian::Big
};
let empty = loader(Cow::Borrowed(&[]), endian);
gimli::DwarfPackage::load(
|section_id| load_section(section_id, &dwp, endian, loader),
empty,
)
.ok()
}
/// Create a new split DWARF loader.
pub fn new(mut loader: F, path: Option<PathBuf>) -> SplitDwarfLoader<R, F> {
let dwarf_package = SplitDwarfLoader::load_dwarf_package(&mut loader, path);
SplitDwarfLoader {
loader,
dwarf_package,
}
}
/// Run the provided `LookupResult` to completion, loading any necessary
/// split DWARF along the way.
pub fn run<L>(&mut self, mut l: LookupResult<L>) -> L::Output
where
L: LookupContinuation<Buf = R>,
{
loop {
let (load, continuation) = match l {
LookupResult::Output(output) => break output,
LookupResult::Load { load, continuation } => (load, continuation),
};
let mut r: Option<Arc<gimli::Dwarf<_>>> = None;
if let Some(dwp) = self.dwarf_package.as_ref() {
if let Ok(Some(cu)) = dwp.find_cu(load.dwo_id, &load.parent) {
r = Some(Arc::new(cu));
}
}
if r.is_none() {
let mut path = PathBuf::new();
if let Some(p) = load.comp_dir.as_ref() {
if let Ok(p) = convert_path(p) {
path.push(p);
}
}
if let Some(p) = load.path.as_ref() {
if let Ok(p) = convert_path(p) {
path.push(p);
}
}
if let Ok(file) = File::open(&path) {
if let Ok(map) = unsafe { memmap2::Mmap::map(&file) } {
if let Ok(file) = object::File::parse(&*map) {
let endian = if file.is_little_endian() {
gimli::RunTimeEndian::Little
} else {
gimli::RunTimeEndian::Big
};
r = gimli::Dwarf::load(|id| {
load_section(id, &file, endian, &mut self.loader)
})
.ok()
.map(|mut dwo_dwarf| {
dwo_dwarf.make_dwo(&load.parent);
Arc::new(dwo_dwarf)
});
}
}
}
}
l = continuation.resume(r);
}
}
}

View File

@@ -1,555 +0,0 @@
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::iter;
use crate::lazy::LazyCell;
use crate::maybe_small;
use crate::{Context, DebugFile, Error, RangeAttributes};
pub(crate) struct Functions<R: gimli::Reader> {
/// List of all `DW_TAG_subprogram` details in the unit.
pub(crate) functions: Box<
[(
gimli::UnitOffset<R::Offset>,
LazyCell<Result<Function<R>, Error>>,
)],
>,
/// List of `DW_TAG_subprogram` address ranges in the unit.
pub(crate) addresses: Box<[FunctionAddress]>,
}
/// A single address range for a function.
///
/// It is possible for a function to have multiple address ranges; this
/// is handled by having multiple `FunctionAddress` entries with the same
/// `function` field.
pub(crate) struct FunctionAddress {
range: gimli::Range,
/// An index into `Functions::functions`.
pub(crate) function: usize,
}
pub(crate) struct Function<R: gimli::Reader> {
pub(crate) dw_die_offset: gimli::UnitOffset<R::Offset>,
pub(crate) name: Option<R>,
/// List of all `DW_TAG_inlined_subroutine` details in this function.
inlined_functions: Box<[InlinedFunction<R>]>,
/// List of `DW_TAG_inlined_subroutine` address ranges in this function.
inlined_addresses: Box<[InlinedFunctionAddress]>,
}
pub(crate) struct InlinedFunctionAddress {
range: gimli::Range,
call_depth: usize,
/// An index into `Function::inlined_functions`.
function: usize,
}
pub(crate) struct InlinedFunction<R: gimli::Reader> {
pub(crate) dw_die_offset: gimli::UnitOffset<R::Offset>,
pub(crate) name: Option<R>,
pub(crate) call_file: Option<u64>,
pub(crate) call_line: u32,
pub(crate) call_column: u32,
}
impl<R: gimli::Reader> Functions<R> {
    /// Scan the unit for `DW_TAG_subprogram` DIEs and record their address
    /// ranges.
    ///
    /// Only offsets and ranges are collected here; full details for each
    /// function are parsed lazily via `Function::parse`.
    pub(crate) fn parse(
        unit: &gimli::Unit<R>,
        sections: &gimli::Dwarf<R>,
    ) -> Result<Functions<R>, Error> {
        let mut functions = Vec::new();
        let mut addresses = Vec::new();
        let mut entries = unit.entries_raw(None)?;
        while !entries.is_empty() {
            let dw_die_offset = entries.next_offset();
            if let Some(abbrev) = entries.read_abbreviation()? {
                if abbrev.tag() == gimli::DW_TAG_subprogram {
                    // Collect the attributes that describe this subprogram's
                    // address span: low/high PC, a size, or a range list.
                    let mut ranges = RangeAttributes::default();
                    for spec in abbrev.attributes() {
                        match entries.read_attribute(*spec) {
                            Ok(ref attr) => {
                                match attr.name() {
                                    gimli::DW_AT_low_pc => match attr.value() {
                                        gimli::AttributeValue::Addr(val) => {
                                            ranges.low_pc = Some(val)
                                        }
                                        gimli::AttributeValue::DebugAddrIndex(index) => {
                                            ranges.low_pc = Some(sections.address(unit, index)?);
                                        }
                                        _ => {}
                                    },
                                    gimli::DW_AT_high_pc => match attr.value() {
                                        gimli::AttributeValue::Addr(val) => {
                                            ranges.high_pc = Some(val)
                                        }
                                        gimli::AttributeValue::DebugAddrIndex(index) => {
                                            ranges.high_pc = Some(sections.address(unit, index)?);
                                        }
                                        // `DW_AT_high_pc` may be encoded as an
                                        // offset from `low_pc` rather than an address.
                                        gimli::AttributeValue::Udata(val) => {
                                            ranges.size = Some(val)
                                        }
                                        _ => {}
                                    },
                                    gimli::DW_AT_ranges => {
                                        ranges.ranges_offset =
                                            sections.attr_ranges_offset(unit, attr.value())?;
                                    }
                                    _ => {}
                                };
                            }
                            Err(e) => return Err(e),
                        }
                    }
                    let function_index = functions.len();
                    // Record the function only if it has at least one address
                    // range (`for_each_range` reports whether the callback ran).
                    if ranges.for_each_range(sections, unit, |range| {
                        addresses.push(FunctionAddress {
                            range,
                            function: function_index,
                        });
                    })? {
                        functions.push((dw_die_offset, LazyCell::new()));
                    }
                } else {
                    // Not a subprogram: skip its attributes and keep walking.
                    entries.skip_attributes(abbrev.attributes())?;
                }
            }
        }

        // The binary search requires the addresses to be sorted.
        //
        // It also requires them to be non-overlapping. In practice, overlapping
        // function ranges are unlikely, so we don't try to handle that yet.
        //
        // It's possible for multiple functions to have the same address range if the
        // compiler can detect and remove functions with identical code. In that case
        // we'll nondeterministically return one of them.
        addresses.sort_by_key(|x| x.range.begin);

        Ok(Functions {
            functions: functions.into_boxed_slice(),
            addresses: addresses.into_boxed_slice(),
        })
    }

    /// Binary-search `addresses` for the range containing `probe`.
    ///
    /// Returns an index into `self.addresses`, or `None` if no recorded
    /// function range contains the address.
    pub(crate) fn find_address(&self, probe: u64) -> Option<usize> {
        self.addresses
            .binary_search_by(|address| {
                // Ranges are half-open: begin <= probe < end.
                if probe < address.range.begin {
                    Ordering::Greater
                } else if probe >= address.range.end {
                    Ordering::Less
                } else {
                    Ordering::Equal
                }
            })
            .ok()
    }

    /// Eagerly parse full details (including inlined subroutines) for every
    /// function in the unit, caching each result in its `LazyCell`.
    pub(crate) fn parse_inlined_functions(
        &self,
        file: DebugFile,
        unit: &gimli::Unit<R>,
        ctx: &Context<R>,
        sections: &gimli::Dwarf<R>,
    ) -> Result<(), Error> {
        for function in &*self.functions {
            function
                .1
                .borrow_with(|| Function::parse(function.0, file, unit, ctx, sections))
                .as_ref()
                .map_err(Error::clone)?;
        }
        Ok(())
    }
}
impl<R: gimli::Reader> Function<R> {
    /// Fully parse the `DW_TAG_subprogram` DIE at `dw_die_offset`: its name
    /// and its tree of inlined subroutines.
    pub(crate) fn parse(
        dw_die_offset: gimli::UnitOffset<R::Offset>,
        file: DebugFile,
        unit: &gimli::Unit<R>,
        ctx: &Context<R>,
        sections: &gimli::Dwarf<R>,
    ) -> Result<Self, Error> {
        let mut entries = unit.entries_raw(Some(dw_die_offset))?;
        let depth = entries.next_depth();
        // The offset came from `Functions::parse`, so the DIE must exist
        // and be a subprogram.
        let abbrev = entries.read_abbreviation()?.unwrap();
        debug_assert_eq!(abbrev.tag(), gimli::DW_TAG_subprogram);

        // Name preference: a linkage name always overwrites; a plain
        // `DW_AT_name` or a name inherited via a reference is only used
        // when nothing better has been found yet.
        let mut name = None;
        for spec in abbrev.attributes() {
            match entries.read_attribute(*spec) {
                Ok(ref attr) => {
                    match attr.name() {
                        gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => {
                            if let Ok(val) = sections.attr_string(unit, attr.value()) {
                                name = Some(val);
                            }
                        }
                        gimli::DW_AT_name => {
                            if name.is_none() {
                                name = sections.attr_string(unit, attr.value()).ok();
                            }
                        }
                        gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
                            if name.is_none() {
                                // Follow the reference chain; 16 bounds the recursion.
                                name = name_attr(attr.value(), file, unit, ctx, sections, 16)?;
                            }
                        }
                        _ => {}
                    };
                }
                Err(e) => return Err(e),
            }
        }

        let mut inlined_functions = Vec::new();
        let mut inlined_addresses = Vec::new();
        Function::parse_children(
            &mut entries,
            depth,
            file,
            unit,
            ctx,
            sections,
            &mut inlined_functions,
            &mut inlined_addresses,
            0,
        )?;

        // Sort ranges in "breadth-first traversal order", i.e. first by call_depth
        // and then by range.begin. This allows finding the range containing an
        // address at a certain depth using binary search.
        // Note: Using DFS order, i.e. ordering by range.begin first and then by
        // call_depth, would not work! Consider the two examples
        // "[0..10 at depth 0], [0..2 at depth 1], [6..8 at depth 1]" and
        // "[0..5 at depth 0], [0..2 at depth 1], [5..10 at depth 0], [6..8 at depth 1]".
        // In this example, if you want to look up address 7 at depth 0, and you
        // encounter [0..2 at depth 1], are you before or after the target range?
        // You don't know.
        inlined_addresses.sort_by(|r1, r2| {
            if r1.call_depth < r2.call_depth {
                Ordering::Less
            } else if r1.call_depth > r2.call_depth {
                Ordering::Greater
            } else if r1.range.begin < r2.range.begin {
                Ordering::Less
            } else if r1.range.begin > r2.range.begin {
                Ordering::Greater
            } else {
                Ordering::Equal
            }
        });

        Ok(Function {
            dw_die_offset,
            name,
            inlined_functions: inlined_functions.into_boxed_slice(),
            inlined_addresses: inlined_addresses.into_boxed_slice(),
        })
    }

    /// Walk the children of the current DIE (at nesting `depth`), collecting
    /// `DW_TAG_inlined_subroutine` entries recursively.
    ///
    /// `inlined_depth` is the current inline call depth (0 for direct
    /// children of the subprogram). Nested subprograms are skipped entirely:
    /// their inlined subroutines belong to them, not to us.
    fn parse_children(
        entries: &mut gimli::EntriesRaw<'_, '_, R>,
        depth: isize,
        file: DebugFile,
        unit: &gimli::Unit<R>,
        ctx: &Context<R>,
        sections: &gimli::Dwarf<R>,
        inlined_functions: &mut Vec<InlinedFunction<R>>,
        inlined_addresses: &mut Vec<InlinedFunctionAddress>,
        inlined_depth: usize,
    ) -> Result<(), Error> {
        loop {
            let dw_die_offset = entries.next_offset();
            let next_depth = entries.next_depth();
            if next_depth <= depth {
                // Back at (or above) our level: no more children.
                return Ok(());
            }
            if let Some(abbrev) = entries.read_abbreviation()? {
                match abbrev.tag() {
                    gimli::DW_TAG_subprogram => {
                        // A nested subprogram; skip its whole subtree.
                        Function::skip(entries, abbrev, next_depth)?;
                    }
                    gimli::DW_TAG_inlined_subroutine => {
                        InlinedFunction::parse(
                            dw_die_offset,
                            entries,
                            abbrev,
                            next_depth,
                            file,
                            unit,
                            ctx,
                            sections,
                            inlined_functions,
                            inlined_addresses,
                            inlined_depth,
                        )?;
                    }
                    _ => {
                        // Other tags (e.g. lexical blocks) may still contain
                        // inlined subroutines among their children, so only
                        // skip the attributes, not the subtree.
                        entries.skip_attributes(abbrev.attributes())?;
                    }
                }
            }
        }
    }

    /// Skip the current DIE's attributes and its entire subtree.
    fn skip(
        entries: &mut gimli::EntriesRaw<'_, '_, R>,
        abbrev: &gimli::Abbreviation,
        depth: isize,
    ) -> Result<(), Error> {
        // TODO: use DW_AT_sibling
        entries.skip_attributes(abbrev.attributes())?;
        while entries.next_depth() > depth {
            if let Some(abbrev) = entries.read_abbreviation()? {
                entries.skip_attributes(abbrev.attributes())?;
            }
        }
        Ok(())
    }

    /// Build the list of inlined functions that contain `probe`.
    ///
    /// Walks depth-by-depth: at each call depth, binary-search the
    /// breadth-first-sorted `inlined_addresses` for a range containing
    /// `probe`, stopping at the first depth with no match. The returned
    /// iterator yields the innermost inlined function first.
    pub(crate) fn find_inlined_functions(
        &self,
        probe: u64,
    ) -> iter::Rev<maybe_small::IntoIter<&InlinedFunction<R>>> {
        // `inlined_functions` is ordered from outside to inside.
        let mut inlined_functions = maybe_small::Vec::new();
        let mut inlined_addresses = &self.inlined_addresses[..];
        loop {
            let current_depth = inlined_functions.len();
            // Look up (probe, current_depth) in inline_ranges.
            // `inlined_addresses` is sorted in "breadth-first traversal order", i.e.
            // by `call_depth` first, and then by `range.begin`. See the comment at
            // the sort call for more information about why.
            let search = inlined_addresses.binary_search_by(|range| {
                if range.call_depth > current_depth {
                    Ordering::Greater
                } else if range.call_depth < current_depth {
                    Ordering::Less
                } else if range.range.begin > probe {
                    Ordering::Greater
                } else if range.range.end <= probe {
                    Ordering::Less
                } else {
                    Ordering::Equal
                }
            });
            if let Ok(index) = search {
                let function_index = inlined_addresses[index].function;
                inlined_functions.push(&self.inlined_functions[function_index]);
                // Deeper matches must come after this one in breadth-first
                // order, so narrow the search window.
                inlined_addresses = &inlined_addresses[index + 1..];
            } else {
                break;
            }
        }
        inlined_functions.into_iter().rev()
    }
}
impl<R: gimli::Reader> InlinedFunction<R> {
    /// Parse one `DW_TAG_inlined_subroutine` DIE (whose abbreviation has
    /// already been read), record its details and address ranges, and then
    /// recurse into its children at `inlined_depth + 1`.
    fn parse(
        dw_die_offset: gimli::UnitOffset<R::Offset>,
        entries: &mut gimli::EntriesRaw<'_, '_, R>,
        abbrev: &gimli::Abbreviation,
        depth: isize,
        file: DebugFile,
        unit: &gimli::Unit<R>,
        ctx: &Context<R>,
        sections: &gimli::Dwarf<R>,
        inlined_functions: &mut Vec<InlinedFunction<R>>,
        inlined_addresses: &mut Vec<InlinedFunctionAddress>,
        inlined_depth: usize,
    ) -> Result<(), Error> {
        let mut ranges = RangeAttributes::default();
        let mut name = None;
        let mut call_file = None;
        let mut call_line = 0;
        let mut call_column = 0;
        for spec in abbrev.attributes() {
            match entries.read_attribute(*spec) {
                Ok(ref attr) => match attr.name() {
                    gimli::DW_AT_low_pc => match attr.value() {
                        gimli::AttributeValue::Addr(val) => ranges.low_pc = Some(val),
                        gimli::AttributeValue::DebugAddrIndex(index) => {
                            ranges.low_pc = Some(sections.address(unit, index)?);
                        }
                        _ => {}
                    },
                    gimli::DW_AT_high_pc => match attr.value() {
                        gimli::AttributeValue::Addr(val) => ranges.high_pc = Some(val),
                        gimli::AttributeValue::DebugAddrIndex(index) => {
                            ranges.high_pc = Some(sections.address(unit, index)?);
                        }
                        // May be a size relative to `low_pc` rather than an address.
                        gimli::AttributeValue::Udata(val) => ranges.size = Some(val),
                        _ => {}
                    },
                    gimli::DW_AT_ranges => {
                        ranges.ranges_offset = sections.attr_ranges_offset(unit, attr.value())?;
                    }
                    gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => {
                        if let Ok(val) = sections.attr_string(unit, attr.value()) {
                            name = Some(val);
                        }
                    }
                    gimli::DW_AT_name => {
                        if name.is_none() {
                            name = sections.attr_string(unit, attr.value()).ok();
                        }
                    }
                    gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
                        if name.is_none() {
                            // Follow the reference chain; 16 bounds the recursion.
                            name = name_attr(attr.value(), file, unit, ctx, sections, 16)?;
                        }
                    }
                    gimli::DW_AT_call_file => {
                        // There is a spec issue [1] with how DW_AT_call_file is specified in DWARF 5.
                        // Before, a file index of 0 would indicate no source file, however in
                        // DWARF 5 this could be a valid index into the file table.
                        //
                        // Implementations such as LLVM generates a file index of 0 when DWARF 5 is
                        // used.
                        //
                        // Thus, if we see a version of 5 or later, treat a file index of 0 as such.
                        // [1]: http://wiki.dwarfstd.org/index.php?title=DWARF5_Line_Table_File_Numbers
                        if let gimli::AttributeValue::FileIndex(fi) = attr.value() {
                            if fi > 0 || unit.header.version() >= 5 {
                                call_file = Some(fi);
                            }
                        }
                    }
                    gimli::DW_AT_call_line => {
                        call_line = attr.udata_value().unwrap_or(0) as u32;
                    }
                    gimli::DW_AT_call_column => {
                        call_column = attr.udata_value().unwrap_or(0) as u32;
                    }
                    _ => {}
                },
                Err(e) => return Err(e),
            }
        }

        // Push the function before walking its ranges so that
        // `function_index` is valid inside the callback.
        let function_index = inlined_functions.len();
        inlined_functions.push(InlinedFunction {
            dw_die_offset,
            name,
            call_file,
            call_line,
            call_column,
        });

        ranges.for_each_range(sections, unit, |range| {
            inlined_addresses.push(InlinedFunctionAddress {
                range,
                call_depth: inlined_depth,
                function: function_index,
            });
        })?;

        // Children of this DIE are one inline level deeper.
        Function::parse_children(
            entries,
            depth,
            file,
            unit,
            ctx,
            sections,
            inlined_functions,
            inlined_addresses,
            inlined_depth + 1,
        )
    }
}
/// Resolve a name by following a reference attribute
/// (`DW_AT_abstract_origin` or `DW_AT_specification`) into this unit,
/// another unit of the same file, or the supplementary object file.
///
/// `recursion_limit` bounds how many chained references are followed
/// before giving up with `Ok(None)`.
fn name_attr<R>(
    attr: gimli::AttributeValue<R>,
    file: DebugFile,
    unit: &gimli::Unit<R>,
    ctx: &Context<R>,
    sections: &gimli::Dwarf<R>,
    recursion_limit: usize,
) -> Result<Option<R>, Error>
where
    R: gimli::Reader,
{
    // Reference chains should be short; bail out rather than loop forever.
    if recursion_limit == 0 {
        return Ok(None);
    }

    match attr {
        // Reference within the same unit.
        gimli::AttributeValue::UnitRef(offset) => {
            name_entry(file, unit, offset, ctx, sections, recursion_limit)
        }
        // Reference into another unit of the same file.
        gimli::AttributeValue::DebugInfoRef(dr) => {
            let (ref_unit, ref_offset) = ctx.find_unit(dr, file)?;
            name_entry(file, ref_unit, ref_offset, ctx, sections, recursion_limit)
        }
        // Reference into the supplementary object file, if one is loaded.
        gimli::AttributeValue::DebugInfoRefSup(dr) => match sections.sup.as_ref() {
            Some(sup_sections) => {
                let sup_file = DebugFile::Supplementary;
                let (sup_unit, sup_offset) = ctx.find_unit(dr, sup_file)?;
                name_entry(sup_file, sup_unit, sup_offset, ctx, sup_sections, recursion_limit)
            }
            None => Ok(None),
        },
        _ => Ok(None),
    }
}
fn name_entry<R>(
file: DebugFile,
unit: &gimli::Unit<R>,
offset: gimli::UnitOffset<R::Offset>,
ctx: &Context<R>,
sections: &gimli::Dwarf<R>,
recursion_limit: usize,
) -> Result<Option<R>, Error>
where
R: gimli::Reader,
{
let mut entries = unit.entries_raw(Some(offset))?;
let abbrev = if let Some(abbrev) = entries.read_abbreviation()? {
abbrev
} else {
return Err(gimli::Error::NoEntryAtGivenOffset);
};
let mut name = None;
let mut next = None;
for spec in abbrev.attributes() {
match entries.read_attribute(*spec) {
Ok(ref attr) => match attr.name() {
gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => {
if let Ok(val) = sections.attr_string(unit, attr.value()) {
return Ok(Some(val));
}
}
gimli::DW_AT_name => {
if let Ok(val) = sections.attr_string(unit, attr.value()) {
name = Some(val);
}
}
gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
next = Some(attr.value());
}
_ => {}
},
Err(e) => return Err(e),
}
}
if name.is_some() {
return Ok(name);
}
if let Some(next) = next {
return name_attr(next, file, unit, ctx, sections, recursion_limit - 1);
}
Ok(None)
}

View File

@@ -1,31 +0,0 @@
use core::cell::UnsafeCell;
/// A cell that lazily initializes its contents on first access.
///
/// Unlike a plain once-cell, `borrow_with` tolerates *recursive*
/// initialization: if the closure re-enters `borrow_with`, the first value
/// stored wins (via `Option::get_or_insert`) and later ones are discarded.
pub struct LazyCell<T> {
    // `None` until first initialized by `borrow_with`.
    contents: UnsafeCell<Option<T>>,
}
impl<T> LazyCell<T> {
    /// Create an empty (uninitialized) cell.
    pub fn new() -> LazyCell<T> {
        LazyCell {
            contents: UnsafeCell::new(None),
        }
    }

    /// Return the contents if already initialized, without running any
    /// initializer.
    pub fn borrow(&self) -> Option<&T> {
        // SAFETY: only shared references to the contents are handed out, and
        // mutation happens only in `borrow_with` before any shared reference
        // to the stored value exists. NOTE(review): soundness here relies on
        // no `&mut` being created while a `&T` is live — see `borrow_with`.
        unsafe { &*self.contents.get() }.as_ref()
    }

    /// Return the contents, initializing them with `closure` if the cell is
    /// still empty.
    pub fn borrow_with(&self, closure: impl FnOnce() -> T) -> &T {
        // First check if we're already initialized...
        let ptr = self.contents.get();
        if let Some(val) = unsafe { &*ptr } {
            return val;
        }
        // Note that while we're executing `closure` our `borrow_with` may
        // be called recursively. This means we need to check again after
        // the closure has executed. For that we use the `get_or_insert`
        // method which will only perform mutation if we aren't already
        // `Some`.
        let val = closure();
        // SAFETY: NOTE(review): `get_or_insert` takes `&mut Option<T>`, which
        // is only sound if no reference into the cell is live at this point;
        // recursive initialization makes this subtle — verify against callers.
        unsafe { (*ptr).get_or_insert(val) }
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,126 +0,0 @@
use addr2line::Context;
use fallible_iterator::FallibleIterator;
use findshlibs::{IterationControl, SharedLibrary, TargetSharedLibrary};
use object::Object;
use std::borrow::Cow;
use std::fs::File;
use std::sync::Arc;
/// Locate and memory-map the debug info for the currently running test
/// executable.
///
/// On Mach-O targets the DWARF data may live in a separate `.dSYM` bundle
/// next to the binary, matched by UUID; otherwise the executable's own
/// mapping is returned.
fn find_debuginfo() -> memmap2::Mmap {
    let path = std::env::current_exe().unwrap();
    let file = File::open(&path).unwrap();
    let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
    let file = &object::File::parse(&*map).unwrap();
    if let Ok(uuid) = file.mach_uuid() {
        // Scan sibling `.dSYM` bundles for a DWARF file carrying the same
        // UUID as our executable.
        for candidate in path.parent().unwrap().read_dir().unwrap() {
            let path = candidate.unwrap().path();
            if !path.to_str().unwrap().ends_with(".dSYM") {
                continue;
            }
            for candidate in path.join("Contents/Resources/DWARF").read_dir().unwrap() {
                let path = candidate.unwrap().path();
                let file = File::open(&path).unwrap();
                let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
                let file = &object::File::parse(&*map).unwrap();
                if file.mach_uuid().unwrap() == uuid {
                    return map;
                }
            }
        }
    }
    return map;
}
/// End-to-end check: symbolize known function addresses in the running
/// test binary and verify the resolved names.
#[test]
fn correctness() {
    let map = find_debuginfo();
    let file = &object::File::parse(&*map).unwrap();
    let module_base = file.relative_address_base();

    let endian = if file.is_little_endian() {
        gimli::RunTimeEndian::Little
    } else {
        gimli::RunTimeEndian::Big
    };

    // Load one DWARF section from the object file (empty slice if absent).
    fn load_section<'data: 'file, 'file, O, Endian>(
        id: gimli::SectionId,
        file: &'file O,
        endian: Endian,
    ) -> Result<gimli::EndianArcSlice<Endian>, gimli::Error>
    where
        O: object::Object<'data, 'file>,
        Endian: gimli::Endianity,
    {
        use object::ObjectSection;
        let data = file
            .section_by_name(id.name())
            .and_then(|section| section.uncompressed_data().ok())
            .unwrap_or(Cow::Borrowed(&[]));
        Ok(gimli::EndianArcSlice::new(Arc::from(&*data), endian))
    }

    let dwarf = gimli::Dwarf::load(|id| load_section(id, file, endian)).unwrap();
    let ctx = Context::from_dwarf(dwarf).unwrap();
    let mut split_dwarf_loader = addr2line::builtin_split_dwarf_loader::SplitDwarfLoader::new(
        |data, endian| gimli::EndianArcSlice::new(Arc::from(&*data), endian),
        None,
    );

    // Compute the load bias so runtime addresses can be translated back to
    // file-relative ones.
    let mut bias = None;
    TargetSharedLibrary::each(|lib| {
        bias = Some((lib.virtual_memory_bias().0 as u64).wrapping_sub(module_base));
        IterationControl::Break
    });

    #[allow(unused_mut)]
    let mut test = |sym: u64, expected_prefix: &str| {
        let ip = sym.wrapping_sub(bias.unwrap());

        let frames = ctx.find_frames(ip);
        let frames = split_dwarf_loader.run(frames).unwrap();
        // The last frame is expected to be the subprogram itself (any
        // earlier frames would be inlined callees).
        let frame = frames.last().unwrap().unwrap();
        let name = frame.function.as_ref().unwrap().demangle().unwrap();
        // Old rust versions generate DWARF with wrong linkage name,
        // so only check the start.
        if !name.starts_with(expected_prefix) {
            panic!("incorrect name '{}', expected {:?}", name, expected_prefix);
        }
    };

    test(test_function as u64, "correctness::test_function");
    test(
        small::test_function as u64,
        "correctness::small::test_function",
    );
    test(auxiliary::foo as u64, "auxiliary::foo");
}
mod small {
    /// Symbol anchor in a nested module; its address is looked up by the
    /// `correctness` test, so it must stay a distinct out-of-line function.
    pub fn test_function() {
        println!("y");
    }
}
/// Symbol anchor whose address is looked up by the `correctness` test.
fn test_function() {
    println!("x");
}
/// Looking up addresses near zero must terminate and produce a bounded
/// number of frames rather than hanging or exploding.
#[test]
fn zero_function() {
    let map = find_debuginfo();
    let file = &object::File::parse(&*map).unwrap();
    let ctx = Context::new(file).unwrap();
    for probe in 0..10 {
        assert!(
            ctx.find_frames(probe)
                .skip_all_loads()
                .unwrap()
                .count()
                .unwrap()
                < 10
        );
    }
}

View File

@@ -1,135 +0,0 @@
use std::env;
use std::ffi::OsStr;
use std::path::Path;
use std::process::Command;
use backtrace::Backtrace;
use findshlibs::{IterationControl, SharedLibrary, TargetSharedLibrary};
use libtest_mimic::{Arguments, Failed, Trial};
/// Capture the first few return addresses of a small call chain, rebased so
/// they match file-relative addresses in the on-disk executable.
#[inline(never)]
fn make_trace() -> Vec<String> {
    // The inline attributes are deliberate: they shape the stack the test
    // compares against (baz is expected to be inlined into bar; foo and bar
    // stay out-of-line).
    fn foo() -> Backtrace {
        bar()
    }
    #[inline(never)]
    fn bar() -> Backtrace {
        baz()
    }
    #[inline(always)]
    fn baz() -> Backtrace {
        Backtrace::new_unresolved()
    }

    // Determine the load bias of the main module so captured instruction
    // pointers can be rebased to file-relative addresses.
    let mut base_addr = None;
    TargetSharedLibrary::each(|lib| {
        base_addr = Some(lib.virtual_memory_bias().0 as isize);
        IterationControl::Break
    });
    let addrfix = -base_addr.unwrap();

    let trace = foo();
    trace
        .frames()
        .iter()
        .take(5)
        .map(|x| format!("{:p}", (x.ip() as *const u8).wrapping_offset(addrfix)))
        .collect()
}
/// Run an addr2line-style binary on one address and return its stdout.
///
/// Panics if the child fails to run or exits unsuccessfully.
fn run_cmd<P: AsRef<OsStr>>(exe: P, me: &Path, flags: Option<&str>, trace: &str) -> String {
    let mut command = Command::new(exe);
    command
        .env("LC_ALL", "C") // GNU addr2line is localized, we aren't
        .env("RUST_BACKTRACE", "1"); // if a child crashes, we want to know why
    if let Some(extra) = flags {
        command.arg(extra);
    }
    let output = command.arg("--exe").arg(me).arg(trace).output().unwrap();
    assert!(output.status.success());
    String::from_utf8(output.stdout).unwrap()
}
/// Compare our addr2line example binary's output against system GNU
/// addr2line for the same addresses and flag combination.
fn run_test(flags: Option<&str>) -> Result<(), Failed> {
    let me = env::current_exe().unwrap();
    // Locate the compiled `addr2line` example relative to the test binary
    // (cargo places tests in `deps/` and examples in `examples/`).
    let mut exe = me.clone();
    assert!(exe.pop());
    if exe.file_name().unwrap().to_str().unwrap() == "deps" {
        assert!(exe.pop());
    }
    exe.push("examples");
    exe.push("addr2line");
    assert!(exe.is_file());

    let trace = make_trace();

    // HACK: GNU addr2line has a bug where looking up multiple addresses can cause the second
    // lookup to fail. Workaround by doing one address at a time.
    for addr in &trace {
        let theirs = run_cmd("addr2line", &me, flags, addr);
        let ours = run_cmd(&exe, &me, flags, addr);

        // HACK: GNU addr2line does not tidy up paths properly, causing double slashes to be printed.
        // We consider our behavior to be correct, so we fix their output to match ours.
        let theirs = theirs.replace("//", "/");

        assert!(
            theirs == ours,
            "Output not equivalent:
$ addr2line {0} --exe {1} {2}
{4}
$ {3} {0} --exe {1} {2}
{5}
",
            flags.unwrap_or(""),
            me.display(),
            trace.join(" "),
            exe.display(),
            theirs,
            ours
        );
    }
    Ok(())
}
/// Single-letter output flags of the addr2line example; every non-empty
/// subset is exercised as one combined `-...` argument by `make_tests`.
static FLAGS: &str = "aipsf";
/// Build one trial per subset of `FLAGS` (including the empty, flag-less
/// subset), each invoking `run_test` with the combined `-...` parameter.
fn make_tests() -> Vec<Trial> {
    let total = 1usize << FLAGS.len();
    let mut trials = Vec::with_capacity(total);
    for bits in 0..total {
        // Encode the subset selected by `bits` as a single `-xyz` argument;
        // the empty subset runs without any flag at all.
        let param = if bits == 0 {
            None
        } else {
            let mut arg = String::new();
            arg.push('-');
            for (i, flag) in FLAGS.chars().enumerate() {
                if (bits & (1 << i)) != 0 {
                    arg.push(flag);
                }
            }
            Some(arg)
        };
        let name = format!("addr2line {}", param.as_ref().map_or("", String::as_str));
        trials.push(Trial::test(name, move || {
            run_test(param.as_ref().map(String::as_str))
        }));
    }
    trials
}
/// Entry point: these comparison tests require GNU addr2line and only run
/// on Linux; elsewhere the suite exits immediately.
fn main() {
    if cfg!(target_os = "linux") {
        let args = Arguments::from_args();
        libtest_mimic::run(&args, make_tests()).exit();
    }
}

View File

@@ -1,114 +0,0 @@
use std::borrow::Cow;
use std::env;
use std::fs::File;
use std::path::{self, PathBuf};
use object::Object;
/// Resolve the path of the release-build test fixture.
///
/// An explicit `ADDR2LINE_FIXTURE_PATH` override always wins; otherwise the
/// fixture is looked up under the crate root (when `CARGO_MANIFEST_DIR` is
/// set) at `fixtures/addr2line-release`.
fn release_fixture_path() -> PathBuf {
    if let Ok(p) = env::var("ADDR2LINE_FIXTURE_PATH") {
        return p.into();
    }
    let base = env::var("CARGO_MANIFEST_DIR").map_or_else(|_| PathBuf::new(), PathBuf::from);
    base.join("fixtures").join("addr2line-release")
}
/// Memory-map `target` and run `f` on the parsed object file.
fn with_file<F: FnOnce(&object::File<'_>)>(target: &path::Path, f: F) {
    let file = File::open(target).unwrap();
    // NOTE(review): the mapping assumes the fixture file is not modified
    // while mapped; tests only read it.
    let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
    let file = object::File::parse(&*map).unwrap();
    f(&file)
}
/// Load every DWARF section of `object` as a borrowed byte slice (an empty
/// slice when a section is missing).
fn dwarf_load<'a>(object: &object::File<'a>) -> gimli::Dwarf<Cow<'a, [u8]>> {
    let load_section = |id: gimli::SectionId| -> Result<Cow<'a, [u8]>, gimli::Error> {
        use object::ObjectSection;
        let data = object
            .section_by_name(id.name())
            .and_then(|section| section.data().ok())
            .unwrap_or(&[][..]);
        Ok(Cow::Borrowed(data))
    };
    gimli::Dwarf::load(&load_section).unwrap()
}
/// Re-borrow an owned `Dwarf<Cow<[u8]>>` as a zero-copy
/// `Dwarf<EndianSlice>` for parsing.
///
/// Hard-codes little-endian; the release fixture is expected to match.
fn dwarf_borrow<'a>(
    dwarf: &'a gimli::Dwarf<Cow<'_, [u8]>>,
) -> gimli::Dwarf<gimli::EndianSlice<'a, gimli::LittleEndian>> {
    let borrow_section: &dyn for<'b> Fn(
        &'b Cow<'_, [u8]>,
    ) -> gimli::EndianSlice<'b, gimli::LittleEndian> =
        &|section| gimli::EndianSlice::new(section, gimli::LittleEndian);
    dwarf.borrow(&borrow_section)
}
/// Building a context over the release fixture succeeds (Arc-slice reader).
#[test]
fn parse_base_rc() {
    let target = release_fixture_path();
    with_file(&target, |file| {
        addr2line::ObjectContext::new(file).unwrap();
    });
}
/// Building a context over the release fixture succeeds (borrowed-slice reader).
#[test]
fn parse_base_slice() {
    let target = release_fixture_path();
    with_file(&target, |file| {
        let dwarf = dwarf_load(file);
        let dwarf = dwarf_borrow(&dwarf);
        addr2line::Context::from_dwarf(dwarf).unwrap();
    });
}
/// Parsing all line tables of the fixture succeeds (Arc-slice reader).
#[test]
fn parse_lines_rc() {
    let target = release_fixture_path();
    with_file(&target, |file| {
        let context = addr2line::ObjectContext::new(file).unwrap();
        context.parse_lines().unwrap();
    });
}
/// Parsing all line tables of the fixture succeeds (borrowed-slice reader).
#[test]
fn parse_lines_slice() {
    let target = release_fixture_path();
    with_file(&target, |file| {
        let dwarf = dwarf_load(file);
        let dwarf = dwarf_borrow(&dwarf);
        let context = addr2line::Context::from_dwarf(dwarf).unwrap();
        context.parse_lines().unwrap();
    });
}
/// Parsing all function data of the fixture succeeds (Arc-slice reader).
#[test]
fn parse_functions_rc() {
    let target = release_fixture_path();
    with_file(&target, |file| {
        let context = addr2line::ObjectContext::new(file).unwrap();
        context.parse_functions().unwrap();
    });
}
/// Parsing all function data of the fixture succeeds (borrowed-slice reader).
#[test]
fn parse_functions_slice() {
    let target = release_fixture_path();
    with_file(&target, |file| {
        let dwarf = dwarf_load(file);
        let dwarf = dwarf_borrow(&dwarf);
        let context = addr2line::Context::from_dwarf(dwarf).unwrap();
        context.parse_functions().unwrap();
    });
}

View File

@@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"737088e45fdf27fe2cfedce163332d8ce08c58fd86ca287de2de34c0fbaf63e7","Cargo.toml":"f410869f0f1a5697f65a8a77be03da7aeecc0be26e7cf3a1feb1acaa4f518770","LICENSE-0BSD":"861399f8c21c042b110517e76dc6b63a2b334276c8cf17412fc3c8908ca8dc17","LICENSE-APACHE":"8ada45cd9f843acf64e4722ae262c622a2b3b3007c7310ef36ac1061a30f6adb","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"308c50cdb42b9573743068158339570b45ca3f895015ca3b87ba983edb0a21e6","RELEASE_PROCESS.md":"a86cd10fc70f167f8d00e9e4ce0c6b4ebdfa1865058390dffd1e0ad4d3e68d9d","benches/bench.rs":"c07ce370e3680c602e415f8d1ec4e543ea2163ab22a09b6b82d93e8a30adca82","src/algo.rs":"b664b131f724a809591394a10b9023f40ab5963e32a83fa3163c2668e59c8b66","src/lib.rs":"b55ba9c629b30360d08168b2ca0c96275432856a539737a105a6d6ae6bf7e88f"},"package":"f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"}

View File

@@ -1,63 +0,0 @@
# Changelog
## Unreleased
No changes.
## [1.0.2 - 2021-02-26](https://github.com/jonas-schievink/adler/releases/tag/v1.0.2)
- Fix doctest on big-endian systems ([#9]).
[#9]: https://github.com/jonas-schievink/adler/pull/9
## [1.0.1 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.1)
### Fixes
- Fix documentation on docs.rs.
## [1.0.0 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.0)
### Fixes
- Fix `cargo test --no-default-features` ([#5]).
### Improvements
- Extended and clarified documentation.
- Added more rustdoc examples.
- Extended CI to test the crate with `--no-default-features`.
### Breaking Changes
- `adler32_reader` now takes its generic argument by value instead of as a `&mut`.
- Renamed `adler32_reader` to `adler32`.
## [0.2.3 - 2020-07-11](https://github.com/jonas-schievink/adler/releases/tag/v0.2.3)
- Process 4 bytes at a time, improving performance by up to 50% ([#2]).
## [0.2.2 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.2)
- Bump MSRV to 1.31.0.
## [0.2.1 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.1)
- Add a few `#[inline]` annotations to small functions.
- Fix CI badge.
- Allow integration into libstd.
## [0.2.0 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.0)
- Support `#![no_std]` when using `default-features = false`.
- Improve performance by around 7x.
- Support Rust 1.8.0.
- Improve API naming.
## [0.1.0 - 2020-06-26](https://github.com/jonas-schievink/adler/releases/tag/v0.1.0)
Initial release.
[#2]: https://github.com/jonas-schievink/adler/pull/2
[#5]: https://github.com/jonas-schievink/adler/pull/5

View File

@@ -1,64 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "adler"
version = "1.0.2"
authors = ["Jonas Schievink <jonasschievink@gmail.com>"]
description = "A simple clean-room implementation of the Adler-32 checksum"
documentation = "https://docs.rs/adler/"
readme = "README.md"
keywords = ["checksum", "integrity", "hash", "adler32", "zlib"]
categories = ["algorithms"]
license = "0BSD OR MIT OR Apache-2.0"
repository = "https://github.com/jonas-schievink/adler.git"
[package.metadata.docs.rs]
rustdoc-args = ["--cfg=docsrs"]
[package.metadata.release]
no-dev-version = true
pre-release-commit-message = "Release {{version}}"
tag-message = "{{version}}"
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
replace = "## Unreleased\n\nNo changes.\n\n## [{{version}} - {{date}}](https://github.com/jonas-schievink/adler/releases/tag/v{{version}})\n"
search = "## Unreleased\n"
[[package.metadata.release.pre-release-replacements]]
file = "README.md"
replace = "adler = \"{{version}}\""
search = "adler = \"[a-z0-9\\\\.-]+\""
[[package.metadata.release.pre-release-replacements]]
file = "src/lib.rs"
replace = "https://docs.rs/adler/{{version}}"
search = "https://docs.rs/adler/[a-z0-9\\.-]+"
[[bench]]
name = "bench"
harness = false
[dependencies.compiler_builtins]
version = "0.1.2"
optional = true
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dev-dependencies.criterion]
version = "0.3.2"
[features]
default = ["std"]
rustc-dep-of-std = ["core", "compiler_builtins"]
std = []

View File

@@ -1,12 +0,0 @@
Copyright (C) Jonas Schievink <jonasschievink@gmail.com>
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/LICENSE-2.0
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@@ -1,39 +0,0 @@
# Adler-32 checksums for Rust
[![crates.io](https://img.shields.io/crates/v/adler.svg)](https://crates.io/crates/adler)
[![docs.rs](https://docs.rs/adler/badge.svg)](https://docs.rs/adler/)
![CI](https://github.com/jonas-schievink/adler/workflows/CI/badge.svg)
This crate provides a simple implementation of the Adler-32 checksum, used in
the zlib compression format.
Please refer to the [changelog](CHANGELOG.md) to see what changed in the last
releases.
## Features
- Permissively licensed (0BSD) clean-room implementation.
- Zero dependencies.
- Zero `unsafe`.
- Decent performance (3-4 GB/s).
- Supports `#![no_std]` (with `default-features = false`).
## Usage
Add an entry to your `Cargo.toml`:
```toml
[dependencies]
adler = "1.0.2"
```
Check the [API Documentation](https://docs.rs/adler/) for how to use the
crate's functionality.
## Rust version support
Currently, this crate supports all Rust versions starting at Rust 1.31.0.
Bumping the Minimum Supported Rust Version (MSRV) is *not* considered a breaking
change, but will not be done without good reasons. The latest 3 stable Rust
versions will always be supported no matter what.

View File

@@ -1,13 +0,0 @@
# What to do to publish a new release
1. Ensure all notable changes are in the changelog under "Unreleased".
2. Execute `cargo release <level>` to bump version(s), tag and publish
everything. External subcommand, must be installed with `cargo install
cargo-release`.
`<level>` can be one of `major|minor|patch`. If this is the first release
(`0.1.0`), use `minor`, since the version starts out as `0.0.0`.
3. Go to the GitHub releases, edit the just-pushed tag. Copy the release notes
from the changelog.

View File

@@ -1,109 +0,0 @@
extern crate adler;
extern crate criterion;
use adler::{adler32_slice, Adler32};
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
fn simple(c: &mut Criterion) {
{
const SIZE: usize = 100;
let mut group = c.benchmark_group("simple-100b");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-100", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-100", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
{
const SIZE: usize = 1024;
let mut group = c.benchmark_group("simple-1k");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-1k", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-1k", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
{
const SIZE: usize = 1024 * 1024;
let mut group = c.benchmark_group("simple-1m");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-1m", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-1m", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
}
/// Benchmarks incremental checksumming of a 16 MiB buffer, feeding it to
/// `Adler32` in chunks of various sizes.
fn chunked(c: &mut Criterion) {
    const SIZE: usize = 16 * 1024 * 1024;
    let data = vec![0xAB; SIZE];

    let mut group = c.benchmark_group("chunked-16m");
    group.throughput(Throughput::Bytes(SIZE as u64));

    // (benchmark id, chunk length in bytes) — 5552 is the algorithm's natural
    // deferred-modulo block size; the rest are round power-of-two sizes.
    let cases: [(&str, usize); 4] = [
        ("5552", 5552),
        ("8k", 8 * 1024),
        ("64k", 64 * 1024),
        ("1m", 1024 * 1024),
    ];

    for &(id, chunk_len) in cases.iter() {
        group.bench_function(id, |bencher| {
            bencher.iter(|| {
                let mut hasher = Adler32::new();
                for chunk in data.chunks(chunk_len) {
                    hasher.write_slice(chunk);
                }
                hasher.checksum()
            });
        });
    }
}
// Register both benchmark functions and generate the benchmark harness `main`.
criterion_group!(benches, simple, chunked);
criterion_main!(benches);

View File

@@ -1,146 +0,0 @@
use crate::Adler32;
use std::ops::{AddAssign, MulAssign, RemAssign};
impl Adler32 {
    /// Updates the checksum state (`self.a`, `self.b`) with `bytes`.
    ///
    /// On entry and exit, `a` and `b` are in canonical range `0..MOD`; inside this
    /// function the modulo reductions are deferred for speed (see below).
    pub(crate) fn compute(&mut self, bytes: &[u8]) {
        // The basic algorithm is, for every byte:
        //   a = (a + byte) % MOD
        //   b = (b + a) % MOD
        // where MOD = 65521.
        //
        // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
        // - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
        // - We use 32-bit arithmetic in this function.
        // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
        //   operation.
        //
        // According to Wikipedia, b is calculated as follows for non-incremental checksumming:
        //   b = n×D1 + (n-1)×D2 + (n-2)×D3 + ... + Dn + n*1 (mod 65521)
        // where n is the number of bytes and Di is the i-th byte. We need to change this to account
        // for the previous values of a and b, as well as treat every input byte as being 255 (the
        // worst case for growth):
        //   b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
        // Or in other words:
        //   b_inc = n*65520 + n(n+1)/2*255
        // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
        //   2^32-65521 = n*65520 + n(n+1)/2*255
        // Solving for n gives n = 5552.18..., so 5552.
        //
        // On top of the optimization outlined above, the algorithm can also be parallelized with a
        // bit more work:
        //
        // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
        //
        // If we fix some value k<N and rewrite indices 1, ..., N as
        //
        //   1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
        //
        // then we can express a and b in terms of sums of smaller sequences kb and ka:
        //
        //   ka(j) := D1_j + D2_j + ... + D(N/k)_j   where j <= k
        //   kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j   where j <= k
        //
        //   a = ka(1) + ka(2) + ... + ka(k) + 1
        //   b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
        //
        // We use this insight to unroll the main loop and process k=4 bytes at a time.
        // The resulting code is highly amenable to SIMD acceleration, although the immediate
        // speedups stem from increased pipeline parallelism rather than auto-vectorization.
        //
        // This technique is described in depth [here](https://software.intel.com/content/www/us/\
        // en/develop/articles/fast-computation-of-fletcher-checksums.html).
        const MOD: u32 = 65521;
        // 4 interleaved lanes; each lane can absorb 5552 bytes before a `% MOD` is required.
        const CHUNK_SIZE: usize = 5552 * 4;

        let mut a = u32::from(self.a);
        let mut b = u32::from(self.b);
        let mut a_vec = U32X4([0; 4]);
        let mut b_vec = a_vec;

        // Split off the tail that does not fill a whole 4-byte group; it is handled serially below.
        let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);

        // iterate over 4 bytes at a time
        let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
        let remainder_chunk = chunk_iter.remainder();
        for chunk in chunk_iter {
            for byte_vec in chunk.chunks_exact(4) {
                let val = U32X4::from(byte_vec);
                a_vec += val;
                b_vec += a_vec;
            }
            // The carried-in `a` contributes to `b` once per processed byte (see formula above).
            b += CHUNK_SIZE as u32 * a;
            a_vec %= MOD;
            b_vec %= MOD;
            b %= MOD;
        }
        // special-case the final chunk because it may be shorter than the rest
        for byte_vec in remainder_chunk.chunks_exact(4) {
            let val = U32X4::from(byte_vec);
            a_vec += val;
            b_vec += a_vec;
        }
        b += remainder_chunk.len() as u32 * a;
        a_vec %= MOD;
        b_vec %= MOD;
        b %= MOD;

        // Combine the sub-sum results into the main sum (the k = 4 formula above);
        // `MOD - x` is added instead of subtracting `x` to keep intermediates non-negative.
        b_vec *= 4;
        b_vec.0[1] += MOD - a_vec.0[1];
        b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
        b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
        for &av in a_vec.0.iter() {
            a += av;
        }
        for &bv in b_vec.0.iter() {
            b += bv;
        }

        // iterate over the remaining few bytes in serial
        for &byte in remainder.iter() {
            a += u32::from(byte);
            b += a;
        }

        // Reduce back into canonical range 0..MOD before storing the state.
        self.a = (a % MOD) as u16;
        self.b = (b % MOD) as u16;
    }
}
/// Four independent 32-bit accumulator lanes, used to process 4 input bytes per step.
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);

impl U32X4 {
    /// Widens the first four bytes of `bytes` into the four lanes.
    ///
    /// Panics if `bytes` holds fewer than 4 elements; any extra elements are ignored.
    fn from(bytes: &[u8]) -> Self {
        let mut lanes = [0u32; 4];
        for i in 0..4 {
            lanes[i] = u32::from(bytes[i]);
        }
        U32X4(lanes)
    }
}

// Lane-wise addition.
impl AddAssign<Self> for U32X4 {
    fn add_assign(&mut self, other: Self) {
        for i in 0..4 {
            self.0[i] += other.0[i];
        }
    }
}

// Lane-wise remainder by a scalar.
impl RemAssign<u32> for U32X4 {
    fn rem_assign(&mut self, quotient: u32) {
        for lane in &mut self.0 {
            *lane %= quotient;
        }
    }
}

// Lane-wise multiplication by a scalar.
impl MulAssign<u32> for U32X4 {
    fn mul_assign(&mut self, rhs: u32) {
        for lane in &mut self.0 {
            *lane *= rhs;
        }
    }
}

View File

@@ -1,287 +0,0 @@
//! Adler-32 checksum implementation.
//!
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Zero `unsafe`.
//! - Decent performance (3-4 GB/s).
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler/1.0.2")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
mod algo;
use std::hash::Hasher;
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash function, they are not necessarily a
/// good one).
///
/// # Examples
///
/// Basic, piecewise checksum calculation:
///
/// ```
/// use adler::Adler32;
///
/// let mut adler = Adler32::new();
///
/// adler.write_slice(&[0, 1, 2]);
/// adler.write_slice(&[3, 4, 5]);
///
/// assert_eq!(adler.checksum(), 0x00290010);
/// ```
///
/// Using `Hash` to process structures:
///
/// ```
/// use std::hash::Hash;
/// use adler::Adler32;
///
/// #[derive(Hash)]
/// struct Data {
/// byte: u8,
/// word: u16,
/// big: u64,
/// }
///
/// let mut adler = Adler32::new();
///
/// let data = Data { byte: 0x1F, word: 0xABCD, big: !0 };
/// data.hash(&mut adler);
///
/// // hash value depends on architecture endianness
/// if cfg!(target_endian = "little") {
/// assert_eq!(adler.checksum(), 0x33410990);
/// }
/// if cfg!(target_endian = "big") {
/// assert_eq!(adler.checksum(), 0x331F0990);
/// }
///
/// ```
///
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct Adler32 {
    // Low 16 bits of the checksum; starts at 1 and accumulates input bytes mod 65521.
    a: u16,
    // High 16 bits of the checksum; accumulates the running `a` values mod 65521.
    b: u16,
}
impl Adler32 {
    /// Creates a new Adler-32 instance with default state.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
    ///
    /// This allows resuming checksum calculation without having to keep the `Adler32` instance
    /// around: store the `u32` checksum instead and rebuild the state from it later.
    ///
    /// # Example
    ///
    /// ```
    /// # use adler::Adler32;
    /// let parts = [
    ///     "rust",
    ///     "acean",
    /// ];
    /// let whole = adler::adler32_slice(b"rustacean");
    ///
    /// let mut sum = Adler32::new();
    /// sum.write_slice(parts[0].as_bytes());
    /// let partial = sum.checksum();
    ///
    /// // ...later
    ///
    /// let mut sum = Adler32::from_checksum(partial);
    /// sum.write_slice(parts[1].as_bytes());
    /// assert_eq!(sum.checksum(), whole);
    /// ```
    #[inline]
    pub fn from_checksum(sum: u32) -> Self {
        // The low half of the checksum holds `a`, the high half holds `b`.
        Adler32 {
            a: (sum & 0xFFFF) as u16,
            b: (sum >> 16) as u16,
        }
    }

    /// Returns the calculated checksum at this point in time.
    #[inline]
    pub fn checksum(&self) -> u32 {
        // Pack the two halves back together: `b` in the high word, `a` in the low word.
        u32::from(self.a) | (u32::from(self.b) << 16)
    }

    /// Adds `bytes` to the checksum calculation.
    ///
    /// If efficiency matters, this should be called with byte slices that contain at least a few
    /// thousand bytes.
    pub fn write_slice(&mut self, bytes: &[u8]) {
        self.compute(bytes);
    }
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
// `Hasher` support lets any `Hash` type be checksummed directly.
impl Hasher for Adler32 {
    #[inline]
    fn finish(&self) -> u64 {
        // Zero-extend the 32-bit checksum to the 64 bits `Hasher` requires.
        self.checksum().into()
    }

    fn write(&mut self, bytes: &[u8]) {
        self.write_slice(bytes);
    }
}
/// Calculates the Adler-32 checksum of a byte slice.
///
/// This is a convenience function around the [`Adler32`] type.
///
/// [`Adler32`]: struct.Adler32.html
pub fn adler32_slice(data: &[u8]) -> u32 {
    // One-shot use of the incremental hasher: feed everything, read the result.
    let mut hasher = Adler32::new();
    hasher.write_slice(data);
    hasher.checksum()
}
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF (or until it reports an
/// error).
///
/// If you only have a `Read` implementor, you can wrap it in `std::io::BufReader` before calling
/// this function.
///
/// # Errors
///
/// Any error returned by the reader is bubbled up by this function.
///
/// # Examples
///
/// ```no_run
/// # fn run() -> Result<(), Box<dyn std::error::Error>> {
/// use adler::adler32;
///
/// use std::fs::File;
/// use std::io::BufReader;
///
/// let file = File::open("input.txt")?;
/// let mut file = BufReader::new(file);
///
/// adler32(&mut file)?;
/// # Ok(()) }
/// # fn main() { run().unwrap() }
/// ```
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32<R: BufRead>(mut reader: R) -> io::Result<u32> {
    let mut hasher = Adler32::new();
    loop {
        // Borrow the reader's internal buffer, hash it, then mark it as consumed.
        // The inner scope ends the borrow before `consume` needs `&mut reader`.
        let consumed = {
            let available = reader.fill_buf()?;
            if available.is_empty() {
                // An empty buffer from `fill_buf` signals EOF.
                break;
            }
            hasher.write_slice(available);
            available.len()
        };
        reader.consume(consumed);
    }
    Ok(hasher.checksum())
}
#[cfg(test)]
mod tests {
    use super::*;

    // All-zero input: `a` stays at its initial 1, `b` grows by 1 per byte (mod 65521).
    #[test]
    fn zeroes() {
        assert_eq!(adler32_slice(&[]), 1);
        assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
        assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
        assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
        assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
        assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
    }

    // All-0xFF input exercises the fastest-growing case for the deferred modulo logic.
    #[test]
    fn ones() {
        assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
        assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
    }

    #[test]
    fn mixed() {
        assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
        assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
        assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
    }

    /// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
    #[test]
    fn wiki() {
        assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
    }

    // Incremental hashing must match one-shot hashing, both when continuing with the
    // same instance and when resuming from a stored checksum.
    #[test]
    fn resume() {
        let mut adler = Adler32::new();
        adler.write_slice(&[0xff; 1024]);
        let partial = adler.checksum();
        assert_eq!(partial, 0x79a6fc2e); // from above
        adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
        assert_eq!(adler.checksum(), 0x8e88ef11); // from above

        // Make sure that we can resume computing from the partial checksum via `from_checksum`.
        let mut adler = Adler32::from_checksum(partial);
        adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
        assert_eq!(adler.checksum(), 0x8e88ef11); // from above
    }

    #[cfg(feature = "std")]
    #[test]
    fn bufread() {
        use std::io::BufReader;
        // Compare the `BufRead`-based API against known slice checksums.
        fn test(data: &[u8], checksum: u32) {
            // `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
            let mut buf = BufReader::new(data);
            let real_sum = adler32(&mut buf).unwrap();
            assert_eq!(checksum, real_sum);
        }
        test(&[], 1);
        test(&[0; 1024], 0x04000001);
        test(&[0; 1024 * 1024], 0x00f00001);
        test(&[0xA5; 1024 * 1024], 0xd5009ab1);
    }
}

View File

@@ -1 +0,0 @@
{"files":{"Cargo.lock":"e89078a9d7e89f125bea210c74fd30ef1167c208b9b240baa3fe76ec1170f6ec","Cargo.toml":"38deb1bfcca1eaef87c409274c63f9b25df94f6faaebc74061fa7ef1e4f078f1","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"6efb0476a1cc085077ed49357026d8c173bf33017278ef440f222fb9cbcb66e6","README.md":"b230c2257d0c7a49b9bd97f2fa73abedcdc055757b5cedd2b0eb1a7a448ff461","benches/stream.rs":"7e666c4f4b79ddb5237361ed25264a966ee241192fbb2c1baea3006e3e0326b4","benches/strip.rs":"9603bd5ca1ae4661c2ccab50315dbfdec0c661ac2624262172bbd8f5d0bd87c9","benches/wincon.rs":"680e86933c008b242a3286c5149c33d3c086426eb99fe134b6e79f7578f96663","examples/dump-stream.rs":"54b2bce2409fc1a1f00dbdcab7abbbb6cde447fa20b5c829d1b17ce2e15eefd1","examples/query-stream.rs":"16f38843083174fbefa974a5aa38a5f3ffa51bd6e6db3dc1d91164462219399e","src/adapter/mod.rs":"baf4237ea0b18df63609e49d93572ca27c2202a4cbec0220adb5a7e815c7d8ed","src/adapter/strip.rs":"010972f96708c56da9bced98287f134ce43a4f6459c22c1697abdc4fd6f82d00","src/adapter/wincon.rs":"07d75878ca9edcef4f473a5ff6113b40aab681dcbcd1ae9de1ec895332f7cc2a","src/auto.rs":"71c249ab6b0af64c3946817ea9f1719d4b789128c244611a05075b1e13413007","src/buffer.rs":"83e7088b50dd3e2941c06a417d9eef75fda45311a2912ba94f480ec98d6f0183","src/fmt.rs":"cc11b005c4559843bd908a57958a13c8d0922fae6aff5261f3583c90e60da73c","src/lib.rs":"649b86b187835e0e33baaaf2242c5f331b7dff133fae8fc419c52b7add797c57","src/macros.rs":"a26ababe32a39732d0aade9674f6e5e267bd26c6ea06603ff9e61e80681195e0","src/stream.rs":"cbe8f61fba4c3c60934339c8bda5d1ff43320f57cdc4ed409aa173945a941b3d","src/strip.rs":"56e6516283b6c0dfa72a8e0e6679da8424295f50a3e56c44281e76de6aa0344b","src/wincon.rs":"fe5aff7bfd80b14c9a6b07143079d59b81831293ad766b845e46fad2e1459c9a"},"package":"d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6"}

1094
vendor/anstream/Cargo.lock generated vendored

File diff suppressed because it is too large Load Diff

View File

@@ -1,144 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.70.0"
name = "anstream"
version = "0.6.5"
include = [
"build.rs",
"src/**/*",
"Cargo.toml",
"Cargo.lock",
"LICENSE*",
"README.md",
"benches/**/*",
"examples/**/*",
]
description = "A simple cross platform library for writing colored text to a terminal."
homepage = "https://github.com/rust-cli/anstyle"
readme = "README.md"
keywords = [
"ansi",
"terminal",
"color",
"strip",
"wincon",
]
categories = ["command-line-interface"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-cli/anstyle.git"
[package.metadata.docs.rs]
cargo-args = [
"-Zunstable-options",
"-Zrustdoc-scrape-examples",
]
rustdoc-args = [
"--cfg",
"docsrs",
]
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
min = 1
replace = "{{version}}"
search = "Unreleased"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = "...{{tag_name}}"
search = '\.\.\.HEAD'
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
min = 1
replace = "{{date}}"
search = "ReleaseDate"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = """
<!-- next-header -->
## [Unreleased] - ReleaseDate
"""
search = "<!-- next-header -->"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = """
<!-- next-url -->
[Unreleased]: https://github.com/rust-cli/anstyle/compare/{{tag_name}}...HEAD"""
search = "<!-- next-url -->"
[[bench]]
name = "strip"
harness = false
[[bench]]
name = "wincon"
harness = false
[[bench]]
name = "stream"
harness = false
[dependencies.anstyle]
version = "1.0.0"
[dependencies.anstyle-parse]
version = "0.2.0"
[dependencies.anstyle-query]
version = "1.0.0"
optional = true
[dependencies.colorchoice]
version = "1.0.0"
optional = true
[dependencies.utf8parse]
version = "0.2.1"
[dev-dependencies.criterion]
version = "0.5.1"
[dev-dependencies.lexopt]
version = "0.3.0"
[dev-dependencies.owo-colors]
version = "3.5.0"
[dev-dependencies.proptest]
version = "1.4.0"
[dev-dependencies.strip-ansi-escapes]
version = "0.2.0"
[features]
auto = [
"dep:anstyle-query",
"dep:colorchoice",
]
default = [
"auto",
"wincon",
]
test = []
wincon = ["dep:anstyle-wincon"]
[target."cfg(windows)".dependencies.anstyle-wincon]
version = "3.0.1"
optional = true

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,19 +0,0 @@
Copyright (c) Individual contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,34 +0,0 @@
# anstream
> A simple cross platform library for writing colored text to a terminal.
*A portmanteau of "ansi stream"*
[![Documentation](https://img.shields.io/badge/docs-master-blue.svg)][Documentation]
![License](https://img.shields.io/crates/l/anstream.svg)
[![Crates Status](https://img.shields.io/crates/v/anstream.svg)](https://crates.io/crates/anstream)
Specialized `stdout` and `stderr` that accept ANSI escape codes and adapt them
based on the terminal's capabilities.
`anstream::adapter::strip_str` may also be of interest on its own for low
overhead stripping of ANSI escape codes.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
[Crates.io]: https://crates.io/crates/anstream
[Documentation]: https://docs.rs/anstream

View File

@@ -1,81 +0,0 @@
use std::io::Write as _;
use criterion::{black_box, Criterion};
/// Benchmark each stream wrapper against a plain `Vec` write ("nop" baseline)
/// over several captured terminal transcripts.
fn stream(c: &mut Criterion) {
    for (name, content) in [
        ("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
        ("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
        ("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
        (
            // Minimal sample that still forces parser state transitions
            // (OSC title, SGR reset, DCS sequence).
            "state_changes",
            &b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
        ),
    ] {
        let mut group = c.benchmark_group(name);
        // Baseline: raw Vec write with no ANSI processing at all.
        group.bench_function("nop", |b| {
            b.iter(|| {
                let buffer = Vec::with_capacity(content.len());
                let mut stream = buffer;
                stream.write_all(content).unwrap();
                black_box(stream)
            })
        });
        group.bench_function("StripStream", |b| {
            b.iter(|| {
                let buffer = Vec::with_capacity(content.len());
                let mut stream = anstream::StripStream::new(buffer);
                stream.write_all(content).unwrap();
                black_box(stream)
            })
        });
        // Only measurable on Windows builds with the wincon feature.
        #[cfg(all(windows, feature = "wincon"))]
        group.bench_function("WinconStream", |b| {
            b.iter(|| {
                let buffer = Vec::with_capacity(content.len());
                let mut stream = anstream::WinconStream::new(buffer);
                stream.write_all(content).unwrap();
                black_box(stream)
            })
        });
        group.bench_function("AutoStream::always_ansi", |b| {
            b.iter(|| {
                let buffer = Vec::with_capacity(content.len());
                let mut stream = anstream::AutoStream::always_ansi(buffer);
                stream.write_all(content).unwrap();
                black_box(stream)
            })
        });
        group.bench_function("AutoStream::always", |b| {
            b.iter(|| {
                let buffer = Vec::with_capacity(content.len());
                let mut stream = anstream::AutoStream::always(buffer);
                stream.write_all(content).unwrap();
                black_box(stream)
            })
        });
        group.bench_function("AutoStream::never", |b| {
            b.iter(|| {
                let buffer = Vec::with_capacity(content.len());
                let mut stream = anstream::AutoStream::never(buffer);
                stream.write_all(content).unwrap();
                black_box(stream)
            })
        });
    }
}
criterion::criterion_group!(benches, stream);
criterion::criterion_main!(benches);

View File

@@ -1,102 +0,0 @@
use criterion::{black_box, Criterion};
/// Minimal `Perform` impl that keeps printable characters plus ASCII
/// whitespace — the reference behavior the strip implementations are
/// benchmarked against.
#[derive(Default)]
struct Strip(String);
impl Strip {
    fn with_capacity(capacity: usize) -> Self {
        Self(String::with_capacity(capacity))
    }
}
impl anstyle_parse::Perform for Strip {
    fn print(&mut self, c: char) {
        self.0.push(c);
    }
    fn execute(&mut self, byte: u8) {
        // Keep only whitespace control bytes (\t, \n, \r, ...).
        if byte.is_ascii_whitespace() {
            self.0.push(byte as char);
        }
    }
}
/// Benchmark the ANSI-stripping strategies over captured terminal
/// transcripts, first cross-checking each against the full-parser model.
fn strip(c: &mut Criterion) {
    for (name, content) in [
        ("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
        ("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
        ("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
        (
            "state_changes",
            &b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
        ),
    ] {
        // Make sure the comparison is fair
        if let Ok(content) = std::str::from_utf8(content) {
            let mut stripped = Strip::with_capacity(content.len());
            let mut parser = anstyle_parse::Parser::<anstyle_parse::DefaultCharAccumulator>::new();
            for byte in content.as_bytes() {
                parser.advance(&mut stripped, *byte);
            }
            // Both strip implementations must agree with the model before we
            // bother timing them.
            assert_eq!(
                stripped.0,
                anstream::adapter::strip_str(content).to_string()
            );
            assert_eq!(
                stripped.0,
                String::from_utf8(anstream::adapter::strip_bytes(content.as_bytes()).into_vec())
                    .unwrap()
            );
        }
        let mut group = c.benchmark_group(name);
        // Baseline: drive the full VT parser with the model `Perform`.
        group.bench_function("advance_strip", |b| {
            b.iter(|| {
                let mut stripped = Strip::with_capacity(content.len());
                let mut parser =
                    anstyle_parse::Parser::<anstyle_parse::DefaultCharAccumulator>::new();
                for byte in content {
                    parser.advance(&mut stripped, *byte);
                }
                black_box(stripped.0)
            })
        });
        // Third-party crate for comparison.
        group.bench_function("strip_ansi_escapes", |b| {
            b.iter(|| {
                let stripped = strip_ansi_escapes::strip(content);
                black_box(stripped)
            })
        });
        // str-based strategies only apply when the input is valid UTF-8.
        if let Ok(content) = std::str::from_utf8(content) {
            group.bench_function("strip_str", |b| {
                b.iter(|| {
                    let stripped = anstream::adapter::strip_str(content).to_string();
                    black_box(stripped)
                })
            });
            group.bench_function("StripStr", |b| {
                b.iter(|| {
                    let mut stripped = String::with_capacity(content.len());
                    let mut state = anstream::adapter::StripStr::new();
                    for printable in state.strip_next(content) {
                        stripped.push_str(printable);
                    }
                    black_box(stripped)
                })
            });
        }
        group.bench_function("strip_bytes", |b| {
            b.iter(|| {
                let stripped = anstream::adapter::strip_bytes(content).into_vec();
                black_box(stripped)
            })
        });
    }
}
criterion::criterion_group!(benches, strip);
criterion::criterion_main!(benches);

View File

@@ -1,26 +0,0 @@
use criterion::{black_box, Criterion};
/// Benchmark `WinconBytes` extraction over several captured terminal
/// transcripts plus a minimal state-change sample.
fn wincon(c: &mut Criterion) {
    let samples: [(&str, &[u8]); 4] = [
        ("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
        ("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
        ("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
        (
            "state_changes",
            &b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
        ),
    ];
    for (name, content) in samples {
        let mut group = c.benchmark_group(name);
        group.bench_function("wincon_bytes", |b| {
            b.iter(|| {
                // Fresh extractor per iteration so parser state never leaks
                // between measurements.
                let mut extractor = anstream::adapter::WinconBytes::new();
                let segments = extractor.extract_next(content).collect::<Vec<_>>();
                black_box(segments)
            })
        });
    }
}
criterion::criterion_group!(benches, wincon);
criterion::criterion_main!(benches);

View File

@@ -1,128 +0,0 @@
use std::io::Write;
/// Render the 256-color palette to stdout: the 16 base colors, the 6x6x6
/// color cube, and the 24-step grayscale ramp, styled per the CLI flags.
fn main() -> Result<(), lexopt::Error> {
    let args = Args::parse()?;
    let stdout = anstream::stdout();
    let mut stdout = stdout.lock();

    // 0..16: the classic ANSI colors, broken onto two rows of 8.
    for fixed in 0..16 {
        let style = style(fixed, args.layer, args.effects);
        let _ = print_number(&mut stdout, fixed, style);
        if fixed == 7 || fixed == 15 {
            let _ = writeln!(&mut stdout);
        }
    }

    // 16..232: the 6x6x6 color cube (index = r*36 + g*6 + b + 16).
    for r in 0..6 {
        let _ = writeln!(stdout);
        for g in 0..6 {
            for b in 0..6 {
                let fixed = r * 36 + g * 6 + b + 16;
                let style = style(fixed, args.layer, args.effects);
                let _ = print_number(&mut stdout, fixed, style);
            }
            let _ = writeln!(stdout);
        }
    }

    // 232..256: grayscale ramp, 8 entries per row.
    for c in 0..24 {
        if 0 == c % 8 {
            let _ = writeln!(stdout);
        }
        let fixed = 232 + c;
        let style = style(fixed, args.layer, args.effects);
        let _ = print_number(&mut stdout, fixed, style);
    }
    Ok(())
}
/// Build the style for one palette swatch: palette color `fixed` applied to
/// the requested layer, combined with any extra effects.
fn style(fixed: u8, layer: Layer, effects: anstyle::Effects) -> anstyle::Style {
    let color = anstyle::Ansi256Color(fixed).into();
    let base = anstyle::Style::new();
    let colored = match layer {
        Layer::Fg => base.fg_color(Some(color)),
        Layer::Bg => base.bg_color(Some(color)),
        Layer::Underline => base.underline_color(Some(color)),
    };
    colored | effects
}
/// Write one right-aligned palette index wrapped in its style's enable/reset
/// escape sequences.
fn print_number(stdout: &mut impl Write, fixed: u8, style: anstyle::Style) -> std::io::Result<()> {
    let styled = style.render();
    let reset = anstyle::Reset.render();
    write!(stdout, "{}{:>4}{}", styled, fixed, reset)
}
/// Parsed command-line options for the palette dump.
#[derive(Default)]
struct Args {
    // Extra text effects (bold, italic, ...) applied to every swatch.
    effects: anstyle::Effects,
    // Which style layer receives the palette color.
    layer: Layer,
}
/// Which layer of the style the palette color is applied to.
#[derive(Copy, Clone, Default)]
enum Layer {
    #[default]
    Fg,
    Bg,
    Underline,
}
impl Args {
    /// Parse process arguments: `--layer <fg|bg|underline>` selects where the
    /// color goes; repeated `--effect <name>` flags accumulate. Any other
    /// argument is an error.
    fn parse() -> Result<Self, lexopt::Error> {
        use lexopt::prelude::*;

        let mut res = Args::default();

        let mut args = lexopt::Parser::from_env();
        while let Some(arg) = args.next()? {
            match arg {
                Long("layer") => {
                    res.layer = args.value()?.parse_with(|s| match s {
                        "fg" => Ok(Layer::Fg),
                        "bg" => Ok(Layer::Bg),
                        "underline" => Ok(Layer::Underline),
                        _ => Err("expected values fg, bg, underline"),
                    })?;
                }
                Long("effect") => {
                    // Accepted effect names paired with their anstyle flags.
                    const EFFECTS: [(&str, anstyle::Effects); 12] = [
                        ("bold", anstyle::Effects::BOLD),
                        ("dimmed", anstyle::Effects::DIMMED),
                        ("italic", anstyle::Effects::ITALIC),
                        ("underline", anstyle::Effects::UNDERLINE),
                        ("double_underline", anstyle::Effects::DOUBLE_UNDERLINE),
                        ("curly_underline", anstyle::Effects::CURLY_UNDERLINE),
                        ("dotted_underline", anstyle::Effects::DOTTED_UNDERLINE),
                        ("dashed_underline", anstyle::Effects::DASHED_UNDERLINE),
                        ("blink", anstyle::Effects::BLINK),
                        ("invert", anstyle::Effects::INVERT),
                        ("hidden", anstyle::Effects::HIDDEN),
                        ("strikethrough", anstyle::Effects::STRIKETHROUGH),
                    ];
                    let effect = args.value()?.parse_with(|s| {
                        EFFECTS
                            .into_iter()
                            .find(|(name, _)| *name == s)
                            .map(|(_, effect)| effect)
                            .ok_or_else(|| {
                                // List every valid name in the error message.
                                format!(
                                    "expected one of {}",
                                    EFFECTS
                                        .into_iter()
                                        .map(|(n, _)| n)
                                        .collect::<Vec<_>>()
                                        .join(", ")
                                )
                            })
                    })?;
                    res.effects = res.effects.insert(effect);
                }
                _ => return Err(arg.unexpected()),
            }
        }
        Ok(res)
    }
}

View File

@@ -1,20 +0,0 @@
/// Report the color choice `AutoStream` detects for stdout and stderr, both
/// via the cheap pre-construction query and via a constructed stream.
fn main() {
    let stdout_choice = anstream::AutoStream::choice(&std::io::stdout());
    let stdout_current = anstream::AutoStream::auto(std::io::stdout()).current_choice();
    println!("stdout:");
    println!(" choice: {:?}", stdout_choice);
    println!(" choice: {:?}", stdout_current);

    let stderr_choice = anstream::AutoStream::choice(&std::io::stderr());
    let stderr_current = anstream::AutoStream::auto(std::io::stderr()).current_choice();
    println!("stderr:");
    println!(" choice: {:?}", stderr_choice);
    println!(" choice: {:?}", stderr_current);
}

View File

@@ -1,15 +0,0 @@
//! Gracefully degrade styled output
mod strip;
mod wincon;
pub use strip::strip_bytes;
pub use strip::strip_str;
pub use strip::StripBytes;
pub use strip::StripBytesIter;
pub use strip::StripStr;
pub use strip::StripStrIter;
pub use strip::StrippedBytes;
pub use strip::StrippedStr;
pub use wincon::WinconBytes;
pub use wincon::WinconBytesIter;

View File

@@ -1,513 +0,0 @@
use anstyle_parse::state::state_change;
use anstyle_parse::state::Action;
use anstyle_parse::state::State;
/// Strip ANSI escapes from a `&str`, returning the printable content
///
/// This can be used to take output from a program that includes escape sequences and write it
/// somewhere that does not easily support them, such as a log file.
///
/// For non-contiguous data, see [`StripStr`].
///
/// # Example
///
/// ```rust
/// use std::io::Write as _;
///
/// let styled_text = "\x1b[32mfoo\x1b[m bar";
/// let plain_str = anstream::adapter::strip_str(&styled_text).to_string();
/// assert_eq!(plain_str, "foo bar");
/// ```
#[inline]
pub fn strip_str(data: &str) -> StrippedStr<'_> {
    StrippedStr::new(data)
}
/// See [`strip_str`]
///
/// Iterator over the printable `&str` runs of the input; also implements
/// [`std::fmt::Display`] so the stripped text can be written without an
/// intermediate collection.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct StrippedStr<'s> {
    // Remaining unprocessed input (valid UTF-8: it came from a `&str`).
    bytes: &'s [u8],
    // VT parser state carried between `next()` calls.
    state: State,
}
impl<'s> StrippedStr<'s> {
    #[inline]
    fn new(data: &'s str) -> Self {
        Self {
            bytes: data.as_bytes(),
            state: State::Ground,
        }
    }
    /// Create a [`String`] of the printable content
    #[inline]
    #[allow(clippy::inherent_to_string_shadow_display)] // Single-allocation implementation
    pub fn to_string(&self) -> String {
        use std::fmt::Write as _;
        let mut stripped = String::with_capacity(self.bytes.len());
        let _ = write!(&mut stripped, "{}", self);
        stripped
    }
}
impl<'s> std::fmt::Display for StrippedStr<'s> {
    /// **Note:** this does *not* exhaust the [`Iterator`]
    #[inline]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Iterate over a copy so `self` is left untouched.
        let iter = Self {
            bytes: self.bytes,
            state: self.state,
        };
        for printable in iter {
            printable.fmt(f)?;
        }
        Ok(())
    }
}
impl<'s> Iterator for StrippedStr<'s> {
    type Item = &'s str;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        next_str(&mut self.bytes, &mut self.state)
    }
}
/// Incrementally strip non-contiguous data
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct StripStr {
    // Parser state preserved across `strip_next` calls.
    state: State,
}
impl StripStr {
    /// Initial state
    pub fn new() -> Self {
        Default::default()
    }
    /// Strip the next segment of data
    pub fn strip_next<'s>(&'s mut self, data: &'s str) -> StripStrIter<'s> {
        StripStrIter {
            bytes: data.as_bytes(),
            state: &mut self.state,
        }
    }
}
/// See [`StripStr`]
#[derive(Debug, PartialEq, Eq)]
pub struct StripStrIter<'s> {
    bytes: &'s [u8],
    // Borrowed from the owning `StripStr` so state survives this segment.
    state: &'s mut State,
}
impl<'s> Iterator for StripStrIter<'s> {
    type Item = &'s str;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        next_str(&mut self.bytes, self.state)
    }
}
/// Advance `bytes` past the next run of non-printable input (updating the VT
/// parser `state` along the way), then split off and return the following run
/// of printable text, or `None` if nothing printable remains.
#[inline]
fn next_str<'s>(bytes: &mut &'s [u8], state: &mut State) -> Option<&'s str> {
    // Scan forward until a byte classified as printable under the evolving
    // parser state. Note the closure also runs `state_change` on the printable
    // byte it stops at.
    let offset = bytes.iter().copied().position(|b| {
        let (next_state, action) = state_change(*state, b);
        // `Anywhere` is a pseudo-state; keep the previous concrete state.
        if next_state != State::Anywhere {
            *state = next_state;
        }
        is_printable_str(action, b)
    });
    let (_, next) = bytes.split_at(offset.unwrap_or(bytes.len()));
    *bytes = next;
    // The stop byte was already fed through `state_change` above but is
    // re-scanned from `Ground` below, so reset to avoid double-advancing.
    // NOTE(review): this also resets to `Ground` when the segment ends
    // mid-escape-sequence — confirm intended cross-segment behavior.
    *state = State::Ground;
    // Take the maximal printable run starting from the ground state.
    let offset = bytes.iter().copied().position(|b| {
        let (_next_state, action) = state_change(State::Ground, b);
        !is_printable_str(action, b)
    });
    let (printable, next) = bytes.split_at(offset.unwrap_or(bytes.len()));
    *bytes = next;
    if printable.is_empty() {
        None
    } else {
        let printable = unsafe {
            from_utf8_unchecked(
                printable,
                "`bytes` was validated as UTF-8, the parser preserves UTF-8 continuations",
            )
        };
        Some(printable)
    }
}
/// Convert bytes the caller guarantees to be valid UTF-8 into a `&str`.
///
/// Debug builds re-validate the claim (panicking with `safety_justification`
/// on violation) so parser bugs surface quickly; release builds skip the
/// check entirely.
#[inline]
unsafe fn from_utf8_unchecked<'b>(bytes: &'b [u8], safety_justification: &'static str) -> &'b str {
    if !cfg!(debug_assertions) {
        // SAFETY: upheld by the caller, as stated in `safety_justification`.
        std::str::from_utf8_unchecked(bytes)
    } else {
        std::str::from_utf8(bytes).expect(safety_justification)
    }
}
/// Classify a byte as part of the printable stream of a known-valid `&str`.
///
/// UTF-8 continuation bytes are accepted unconditionally: since the input is
/// valid UTF-8, the only thing to do with a continuation is to print it.
#[inline]
fn is_printable_str(action: Action, byte: u8) -> bool {
    // VT320 considered 0x7f to be `Print`able but we expect to be working in
    // UTF-8 systems and not ISO Latin-1, making it DEL and non-printable.
    const DEL: u8 = 0x7f;

    if is_utf8_continuation(byte) {
        return true;
    }
    (action == Action::Print && byte != DEL)
        || action == Action::BeginUtf8
        || (action == Action::Execute && byte.is_ascii_whitespace())
}
/// `true` for UTF-8 continuation bytes, i.e. bytes of the form `0b10xx_xxxx`.
#[inline]
fn is_utf8_continuation(b: u8) -> bool {
    // Top two bits `10` mark a continuation byte (0x80..=0xBF).
    b & 0b1100_0000 == 0b1000_0000
}
/// Strip ANSI escapes from bytes, returning the printable content
///
/// This can be used to take output from a program that includes escape sequences and write it
/// somewhere that does not easily support them, such as a log file.
///
/// # Example
///
/// ```rust
/// use std::io::Write as _;
///
/// let styled_text = "\x1b[32mfoo\x1b[m bar";
/// let plain_str = anstream::adapter::strip_bytes(styled_text.as_bytes()).into_vec();
/// assert_eq!(plain_str.as_slice(), &b"foo bar"[..]);
/// ```
#[inline]
pub fn strip_bytes(data: &[u8]) -> StrippedBytes<'_> {
    StrippedBytes::new(data)
}
/// See [`strip_bytes`]
///
/// Iterator over the printable byte runs of the input.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct StrippedBytes<'s> {
    // Remaining unprocessed input.
    bytes: &'s [u8],
    // VT parser state carried between `next()` calls.
    state: State,
    // Tracks in-flight multi-byte sequences; unlike the `&str` path, raw
    // bytes cannot be assumed to be valid UTF-8.
    utf8parser: Utf8Parser,
}
impl<'s> StrippedBytes<'s> {
    /// See [`strip_bytes`]
    #[inline]
    pub fn new(bytes: &'s [u8]) -> Self {
        Self {
            bytes,
            state: State::Ground,
            utf8parser: Default::default(),
        }
    }
    /// Strip the next slice of bytes
    ///
    /// Used when the content is in several non-contiguous slices
    ///
    /// # Panic
    ///
    /// May panic if it is not exhausted / empty
    #[inline]
    pub fn extend(&mut self, bytes: &'s [u8]) {
        debug_assert!(
            self.is_empty(),
            "current bytes must be processed to ensure we end at the right state"
        );
        self.bytes = bytes;
    }
    /// Report the bytes has been exhausted
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.bytes.is_empty()
    }
    /// Create a [`Vec`] of the printable content
    #[inline]
    pub fn into_vec(self) -> Vec<u8> {
        let mut stripped = Vec::with_capacity(self.bytes.len());
        for printable in self {
            stripped.extend(printable);
        }
        stripped
    }
}
impl<'s> Iterator for StrippedBytes<'s> {
    type Item = &'s [u8];
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        next_bytes(&mut self.bytes, &mut self.state, &mut self.utf8parser)
    }
}
/// Incrementally strip non-contiguous data
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct StripBytes {
    // Parser and UTF-8 state preserved across `strip_next` calls.
    state: State,
    utf8parser: Utf8Parser,
}
impl StripBytes {
    /// Initial state
    pub fn new() -> Self {
        Default::default()
    }
    /// Strip the next segment of data
    pub fn strip_next<'s>(&'s mut self, bytes: &'s [u8]) -> StripBytesIter<'s> {
        StripBytesIter {
            bytes,
            state: &mut self.state,
            utf8parser: &mut self.utf8parser,
        }
    }
}
/// See [`StripBytes`]
#[derive(Debug, PartialEq, Eq)]
pub struct StripBytesIter<'s> {
    bytes: &'s [u8],
    // Borrowed from the owning `StripBytes` so state survives this segment.
    state: &'s mut State,
    utf8parser: &'s mut Utf8Parser,
}
impl<'s> Iterator for StripBytesIter<'s> {
    type Item = &'s [u8];
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        next_bytes(&mut self.bytes, self.state, self.utf8parser)
    }
}
/// Byte-level analogue of `next_str`: skip non-printable input, then return
/// the next printable run. Multi-byte sequences are tracked explicitly via
/// `utf8parser` since raw bytes cannot be assumed to be valid UTF-8.
#[inline]
fn next_bytes<'s>(
    bytes: &mut &'s [u8],
    state: &mut State,
    utf8parser: &mut Utf8Parser,
) -> Option<&'s [u8]> {
    // Skip until a printable byte. If a previous segment left us mid-codepoint
    // (`State::Utf8`), stop immediately so the continuation bytes are handled
    // by the printable scan below.
    let offset = bytes.iter().copied().position(|b| {
        if *state == State::Utf8 {
            true
        } else {
            let (next_state, action) = state_change(*state, b);
            // `Anywhere` is a pseudo-state; keep the previous concrete state.
            if next_state != State::Anywhere {
                *state = next_state;
            }
            is_printable_bytes(action, b)
        }
    });
    let (_, next) = bytes.split_at(offset.unwrap_or(bytes.len()));
    *bytes = next;
    // Collect the maximal printable run, feeding multi-byte sequences through
    // the UTF-8 parser so their continuation bytes are retained.
    let offset = bytes.iter().copied().position(|b| {
        if *state == State::Utf8 {
            // Inside a codepoint: consume until the parser reports completion.
            if utf8parser.add(b) {
                *state = State::Ground;
            }
            false
        } else {
            let (next_state, action) = state_change(State::Ground, b);
            if next_state != State::Anywhere {
                *state = next_state;
            }
            if *state == State::Utf8 {
                // Lead byte of a multi-byte sequence: start tracking it.
                utf8parser.add(b);
                false
            } else {
                !is_printable_bytes(action, b)
            }
        }
    });
    let (printable, next) = bytes.split_at(offset.unwrap_or(bytes.len()));
    *bytes = next;
    if printable.is_empty() {
        None
    } else {
        Some(printable)
    }
}
/// Thin wrapper around [`utf8parse::Parser`] that reports when the current
/// UTF-8 sequence has terminated.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct Utf8Parser {
    utf8_parser: utf8parse::Parser,
}

impl Utf8Parser {
    /// Feed one byte; returns `true` once the sequence ends, whether it
    /// completed as a codepoint or turned out to be invalid.
    fn add(&mut self, byte: u8) -> bool {
        let mut done = false;
        self.utf8_parser
            .advance(&mut VtUtf8Receiver(&mut done), byte);
        done
    }
}

/// Receiver that flips its flag on either terminal event.
struct VtUtf8Receiver<'a>(&'a mut bool);

impl<'a> utf8parse::Receiver for VtUtf8Receiver<'a> {
    fn codepoint(&mut self, _: char) {
        *self.0 = true;
    }

    fn invalid_sequence(&mut self) {
        *self.0 = true;
    }
}
/// Byte-level printability test.
///
/// Unlike [`is_printable_str`], UTF-8 continuation bytes are *not*
/// blanket-accepted here: raw bytes may be malformed, so continuations need
/// more context and are handled separately in `next_bytes`.
#[inline]
fn is_printable_bytes(action: Action, byte: u8) -> bool {
    // VT320 considered 0x7f to be `Print`able but we expect to be working in
    // UTF-8 systems and not ISO Latin-1, making it DEL and non-printable.
    const DEL: u8 = 0x7f;

    match action {
        Action::Print => byte != DEL,
        Action::BeginUtf8 => true,
        Action::Execute => byte.is_ascii_whitespace(),
        _ => false,
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use proptest::prelude::*;

    /// Model based off full parser
    fn parser_strip(bytes: &[u8]) -> String {
        // Reference `Perform` impl: keep printable chars plus ASCII whitespace.
        #[derive(Default)]
        struct Strip(String);
        impl Strip {
            fn with_capacity(capacity: usize) -> Self {
                Self(String::with_capacity(capacity))
            }
        }
        impl anstyle_parse::Perform for Strip {
            fn print(&mut self, c: char) {
                self.0.push(c);
            }
            fn execute(&mut self, byte: u8) {
                if byte.is_ascii_whitespace() {
                    self.0.push(byte as char);
                }
            }
        }

        let mut stripped = Strip::with_capacity(bytes.len());
        let mut parser = anstyle_parse::Parser::<anstyle_parse::DefaultCharAccumulator>::new();
        for byte in bytes {
            parser.advance(&mut stripped, *byte);
        }
        stripped.0
    }

    /// Model verifying incremental parsing
    fn strip_char(mut s: &str) -> String {
        // Feed the stripper one char per segment to exercise boundaries.
        let mut result = String::new();
        let mut state = StripStr::new();
        while !s.is_empty() {
            let mut indices = s.char_indices();
            indices.next(); // current
            let offset = indices.next().map(|(i, _)| i).unwrap_or_else(|| s.len());
            let (current, remainder) = s.split_at(offset);
            for printable in state.strip_next(current) {
                result.push_str(printable);
            }
            s = remainder;
        }
        result
    }

    /// Model verifying incremental parsing
    fn strip_byte(s: &[u8]) -> Vec<u8> {
        // Feed the stripper one byte per segment.
        let mut result = Vec::new();
        let mut state = StripBytes::default();
        for start in 0..s.len() {
            let current = &s[start..=start];
            for printable in state.strip_next(current) {
                result.extend(printable);
            }
        }
        result
    }

    #[test]
    fn test_strip_bytes_multibyte() {
        // A four-byte UTF-8 sequence (U+1134B) must survive stripping intact.
        let bytes = [240, 145, 141, 139];
        let expected = parser_strip(&bytes);
        let actual = String::from_utf8(strip_bytes(&bytes).into_vec()).unwrap();
        assert_eq!(expected, actual);
    }

    #[test]
    fn test_strip_byte_multibyte() {
        let bytes = [240, 145, 141, 139];
        let expected = parser_strip(&bytes);
        let actual = String::from_utf8(strip_byte(&bytes).to_vec()).unwrap();
        assert_eq!(expected, actual);
    }

    #[test]
    fn test_strip_str_del() {
        // DEL (0x7f) is treated as non-printable.
        let input = std::str::from_utf8(&[0x7f]).unwrap();
        let expected = "";
        let actual = strip_str(input).to_string();
        assert_eq!(expected, actual);
    }

    #[test]
    fn test_strip_byte_del() {
        let bytes = [0x7f];
        let expected = "";
        let actual = String::from_utf8(strip_byte(&bytes).to_vec()).unwrap();
        assert_eq!(expected, actual);
    }

    proptest! {
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn strip_str_no_escapes(s in "\\PC*") {
            let expected = parser_strip(s.as_bytes());
            let actual = strip_str(&s).to_string();
            assert_eq!(expected, actual);
        }

        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn strip_char_no_escapes(s in "\\PC*") {
            let expected = parser_strip(s.as_bytes());
            let actual = strip_char(&s);
            assert_eq!(expected, actual);
        }

        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn strip_bytes_no_escapes(s in "\\PC*") {
            dbg!(&s);
            dbg!(s.as_bytes());
            let expected = parser_strip(s.as_bytes());
            let actual = String::from_utf8(strip_bytes(s.as_bytes()).into_vec()).unwrap();
            assert_eq!(expected, actual);
        }

        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn strip_byte_no_escapes(s in "\\PC*") {
            dbg!(&s);
            dbg!(s.as_bytes());
            let expected = parser_strip(s.as_bytes());
            let actual = String::from_utf8(strip_byte(s.as_bytes()).to_vec()).unwrap();
            assert_eq!(expected, actual);
        }
    }
}

View File

@@ -1,320 +0,0 @@
/// Incrementally convert to wincon calls for non-contiguous data
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct WinconBytes {
    // VT escape-sequence parser, persisted across `extract_next` calls.
    parser: anstyle_parse::Parser,
    // Accumulates printable text and the style in effect.
    capture: WinconCapture,
}
impl WinconBytes {
    /// Initial state
    pub fn new() -> Self {
        Default::default()
    }
    /// Strip the next segment of data
    pub fn extract_next<'s>(&'s mut self, bytes: &'s [u8]) -> WinconBytesIter<'s> {
        self.capture.reset();
        // Worst case every input byte is printable; reserve once up front.
        self.capture.printable.reserve(bytes.len());
        WinconBytesIter {
            bytes,
            parser: &mut self.parser,
            capture: &mut self.capture,
        }
    }
}
/// See [`WinconBytes`]
///
/// Yields `(style, text)` pairs for each styled run of the input.
#[derive(Debug, PartialEq, Eq)]
pub struct WinconBytesIter<'s> {
    bytes: &'s [u8],
    parser: &'s mut anstyle_parse::Parser,
    capture: &'s mut WinconCapture,
}
impl<'s> Iterator for WinconBytesIter<'s> {
    type Item = (anstyle::Style, String);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        next_bytes(&mut self.bytes, self.parser, self.capture)
    }
}
/// Pump `bytes` through the VT `parser` until `capture` has latched a
/// completed `(style, text)` pair or the input runs out; returns `None` once
/// no printable text remains.
#[inline]
fn next_bytes(
    bytes: &mut &[u8],
    parser: &mut anstyle_parse::Parser,
    capture: &mut WinconCapture,
) -> Option<(anstyle::Style, String)> {
    capture.reset();
    // Feed one byte at a time; `capture.ready` is set by `csi_dispatch` when
    // the style changes while printable text is pending.
    while capture.ready.is_none() {
        let byte = if let Some((byte, remainder)) = (*bytes).split_first() {
            *bytes = remainder;
            *byte
        } else {
            break;
        };
        parser.advance(capture, byte);
    }
    if capture.printable.is_empty() {
        return None;
    }
    // If no style change forced a flush, emit the pending text in the current
    // style; `take` clears the buffer for the next segment.
    let style = capture.ready.unwrap_or(capture.style);
    Some((style, std::mem::take(&mut capture.printable)))
}
/// Accumulates printable text alongside the SGR style currently in effect.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
struct WinconCapture {
    // Style currently in effect for newly captured text.
    style: anstyle::Style,
    // Text captured since the last emitted segment.
    printable: String,
    // Latched when a completed (style, text) pair is ready to be emitted.
    ready: Option<anstyle::Style>,
}
impl WinconCapture {
    fn reset(&mut self) {
        // Only the latch is cleared: `style` deliberately persists across
        // segments, and `printable` is drained via `std::mem::take` on emit.
        self.ready = None;
    }
}
impl anstyle_parse::Perform for WinconCapture {
    /// Draw a character to the screen and update states.
    fn print(&mut self, c: char) {
        self.printable.push(c);
    }
    /// Execute a C0 or C1 control function.
    fn execute(&mut self, byte: u8) {
        // Only whitespace controls (\t, \n, \r, ...) are kept.
        if byte.is_ascii_whitespace() {
            self.printable.push(byte as char);
        }
    }
    /// Interpret SGR (`CSI ... m`) sequences, folding them into `self.style`.
    ///
    /// If the style changes while printable text is pending, the previous
    /// style is latched into `self.ready` so the iterator can emit the
    /// completed `(style, text)` pair.
    fn csi_dispatch(
        &mut self,
        params: &anstyle_parse::Params,
        _intermediates: &[u8],
        ignore: bool,
        action: u8,
    ) {
        if ignore {
            return;
        }
        // Only SGR ("select graphic rendition") sequences are handled.
        if action != b'm' {
            return;
        }
        let mut style = self.style;
        // param/value differences are dependent on the escape code
        let mut state = State::Normal;
        // Pending red/green components while collecting a 24-bit color.
        let mut r = None;
        let mut g = None;
        // Whether the extended color being collected targets the background.
        let mut is_bg = false;
        for param in params {
            for value in param {
                // `break` leaves the inner value loop and moves on to the
                // next parameter.
                match (state, *value) {
                    (State::Normal, 0) => {
                        style = anstyle::Style::default();
                        break;
                    }
                    (State::Normal, 1) => {
                        style = style.bold();
                        break;
                    }
                    (State::Normal, 4) => {
                        style = style.underline();
                        break;
                    }
                    // 30-37: standard foreground colors.
                    (State::Normal, 30..=37) => {
                        let color = to_ansi_color(value - 30).unwrap();
                        style = style.fg_color(Some(color.into()));
                        break;
                    }
                    // 38: extended (256-color / RGB) foreground follows.
                    (State::Normal, 38) => {
                        is_bg = false;
                        state = State::PrepareCustomColor;
                    }
                    (State::Normal, 39) => {
                        style = style.fg_color(None);
                        break;
                    }
                    // 40-47: standard background colors.
                    (State::Normal, 40..=47) => {
                        let color = to_ansi_color(value - 40).unwrap();
                        style = style.bg_color(Some(color.into()));
                        break;
                    }
                    // 48: extended (256-color / RGB) background follows.
                    (State::Normal, 48) => {
                        is_bg = true;
                        state = State::PrepareCustomColor;
                    }
                    (State::Normal, 49) => {
                        style = style.bg_color(None);
                        break;
                    }
                    // 90-97 / 100-107: bright foreground / background.
                    (State::Normal, 90..=97) => {
                        let color = to_ansi_color(value - 90).unwrap().bright(true);
                        style = style.fg_color(Some(color.into()));
                        break;
                    }
                    (State::Normal, 100..=107) => {
                        let color = to_ansi_color(value - 100).unwrap().bright(true);
                        style = style.bg_color(Some(color.into()));
                        break;
                    }
                    // 38;5;n / 48;5;n: a 256-color palette index follows.
                    (State::PrepareCustomColor, 5) => {
                        state = State::Ansi256;
                    }
                    // 38;2;r;g;b / 48;2;r;g;b: a 24-bit color follows.
                    (State::PrepareCustomColor, 2) => {
                        state = State::Rgb;
                        r = None;
                        g = None;
                    }
                    (State::Ansi256, n) => {
                        let color = anstyle::Ansi256Color(n as u8);
                        if is_bg {
                            style = style.bg_color(Some(color.into()));
                        } else {
                            style = style.fg_color(Some(color.into()));
                        }
                        break;
                    }
                    // Collect r, then g, then apply the color on b.
                    (State::Rgb, b) => match (r, g) {
                        (None, _) => {
                            r = Some(b);
                        }
                        (Some(_), None) => {
                            g = Some(b);
                        }
                        (Some(r), Some(g)) => {
                            let color = anstyle::RgbColor(r as u8, g as u8, b as u8);
                            if is_bg {
                                style = style.bg_color(Some(color.into()));
                            } else {
                                style = style.fg_color(Some(color.into()));
                            }
                            break;
                        }
                    },
                    // Unsupported SGR code: skip the rest of this parameter.
                    _ => {
                        break;
                    }
                }
            }
        }
        // Flush rule: if the style changed while text was captured under the
        // old style, latch the old style so `next_bytes` emits it first.
        if style != self.style && !self.printable.is_empty() {
            self.ready = Some(self.style);
        }
        self.style = style;
    }
}
/// Sub-state while walking SGR parameters (plain codes vs. the multi-value
/// `38`/`48` extended-color forms).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum State {
    Normal,
    // Saw 38/48; expecting `5` (palette) or `2` (RGB) next.
    PrepareCustomColor,
    // Saw `38;5` / `48;5`; the next value is a palette index.
    Ansi256,
    // Saw `38;2` / `48;2`; the next values are r, g, b.
    Rgb,
}
fn to_ansi_color(digit: u16) -> Option<anstyle::AnsiColor> {
match digit {
0 => Some(anstyle::AnsiColor::Black),
1 => Some(anstyle::AnsiColor::Red),
2 => Some(anstyle::AnsiColor::Green),
3 => Some(anstyle::AnsiColor::Yellow),
4 => Some(anstyle::AnsiColor::Blue),
5 => Some(anstyle::AnsiColor::Magenta),
6 => Some(anstyle::AnsiColor::Cyan),
7 => Some(anstyle::AnsiColor::White),
_ => None,
}
}
#[cfg(test)]
mod test {
    use super::*;
    use owo_colors::OwoColorize as _;
    use proptest::prelude::*;

    /// Run `input` through `WinconBytes` and compare the emitted
    /// `(style, text)` pairs against `expected`.
    #[track_caller]
    fn verify(input: &str, expected: Vec<(anstyle::Style, &str)>) {
        let expected = expected
            .into_iter()
            .map(|(style, value)| (style, value.to_owned()))
            .collect::<Vec<_>>();
        let mut state = WinconBytes::new();
        let actual = state.extract_next(input.as_bytes()).collect::<Vec<_>>();
        assert_eq!(expected, actual, "{input:?}");
    }

    // Styled run at the start of the input.
    #[test]
    fn start() {
        let input = format!("{} world!", "Hello".green().on_red());
        let expected = vec![
            (
                anstyle::AnsiColor::Green.on(anstyle::AnsiColor::Red),
                "Hello",
            ),
            (anstyle::Style::default(), " world!"),
        ];
        verify(&input, expected);
    }

    // Styled run surrounded by plain text.
    #[test]
    fn middle() {
        let input = format!("Hello {}!", "world".green().on_red());
        let expected = vec![
            (anstyle::Style::default(), "Hello "),
            (
                anstyle::AnsiColor::Green.on(anstyle::AnsiColor::Red),
                "world",
            ),
            (anstyle::Style::default(), "!"),
        ];
        verify(&input, expected);
    }

    // Styled run at the end of the input.
    #[test]
    fn end() {
        let input = format!("Hello {}", "world!".green().on_red());
        let expected = vec![
            (anstyle::Style::default(), "Hello "),
            (
                anstyle::AnsiColor::Green.on(anstyle::AnsiColor::Red),
                "world!",
            ),
        ];
        verify(&input, expected);
    }

    #[test]
    fn ansi256_colors() {
        // termcolor only supports "brights" via these
        let input = format!(
            "Hello {}!",
            "world".color(owo_colors::XtermColors::UserBrightYellow)
        );
        let expected = vec![
            (anstyle::Style::default(), "Hello "),
            (anstyle::Ansi256Color(11).on_default(), "world"),
            (anstyle::Style::default(), "!"),
        ];
        verify(&input, expected);
    }

    proptest! {
        // Escape-free input must come back as a single default-styled run.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn wincon_no_escapes(s in "\\PC*") {
            let expected = if s.is_empty() {
                vec![]
            } else {
                vec![(anstyle::Style::default(), s.clone())]
            };
            let mut state = WinconBytes::new();
            let actual = state.extract_next(s.as_bytes()).collect::<Vec<_>>();
            assert_eq!(expected, actual);
        }
    }
}

View File

@@ -1,263 +0,0 @@
use crate::stream::AsLockedWrite;
use crate::stream::RawStream;
#[cfg(feature = "auto")]
use crate::ColorChoice;
use crate::StripStream;
#[cfg(all(windows, feature = "wincon"))]
use crate::WinconStream;
/// [`std::io::Write`] that adapts ANSI escape codes to the underlying `Write`s capabilities
#[derive(Debug)]
pub struct AutoStream<S: RawStream> {
    /// Backend chosen at construction: pass-through, stripping, or (Windows)
    /// console-API output.
    inner: StreamInner<S>,
}
/// The backend selected for an [`AutoStream`], fixed at construction time.
#[derive(Debug)]
enum StreamInner<S: RawStream> {
    /// Forward bytes untouched; ANSI escapes reach the stream as-is.
    PassThrough(S),
    /// Remove escape sequences so only printable data reaches the stream.
    Strip(StripStream<S>),
    /// Route styled output through [`WinconStream`] (Windows console handling).
    #[cfg(all(windows, feature = "wincon"))]
    Wincon(WinconStream<S>),
}
impl<S> AutoStream<S>
where
    S: RawStream,
{
    /// Runtime control over styling behavior
    #[cfg(feature = "auto")]
    #[inline]
    pub fn new(raw: S, choice: ColorChoice) -> Self {
        match choice {
            ColorChoice::Auto => Self::auto(raw),
            ColorChoice::AlwaysAnsi => Self::always_ansi(raw),
            ColorChoice::Always => Self::always(raw),
            ColorChoice::Never => Self::never(raw),
        }
    }
    /// Auto-adapt for the stream's capabilities
    #[cfg(feature = "auto")]
    #[inline]
    pub fn auto(raw: S) -> Self {
        let choice = Self::choice(&raw);
        // `choice()` resolves `Auto` to a concrete choice, so the `new` call
        // below cannot recurse back into `auto`.
        debug_assert_ne!(choice, ColorChoice::Auto);
        Self::new(raw, choice)
    }
    /// Report the desired choice for the given stream
    #[cfg(feature = "auto")]
    pub fn choice(raw: &S) -> ColorChoice {
        choice(raw)
    }
    /// Force ANSI escape codes to be passed through as-is, no matter what the inner `Write`
    /// supports.
    #[inline]
    pub fn always_ansi(raw: S) -> Self {
        #[cfg(feature = "auto")]
        {
            if raw.is_terminal() {
                // Best-effort: ask Windows to interpret ANSI sequences; the
                // result is ignored because the caller explicitly asked for ANSI.
                let _ = anstyle_query::windows::enable_ansi_colors();
            }
        }
        Self::always_ansi_(raw)
    }
    #[inline]
    fn always_ansi_(raw: S) -> Self {
        let inner = StreamInner::PassThrough(raw);
        AutoStream { inner }
    }
    /// Force color, no matter what the inner `Write` supports.
    #[inline]
    pub fn always(raw: S) -> Self {
        if cfg!(windows) {
            // Fall back to the wincon backend only when ANSI cannot be enabled
            // on the console and the terminal doesn't already understand ANSI.
            #[cfg(feature = "auto")]
            let use_wincon = raw.is_terminal()
                && !anstyle_query::windows::enable_ansi_colors().unwrap_or(true)
                && !anstyle_query::term_supports_ansi_color();
            #[cfg(not(feature = "auto"))]
            let use_wincon = true;
            if use_wincon {
                // `wincon` hands the stream back when that backend isn't compiled in.
                Self::wincon(raw).unwrap_or_else(|raw| Self::always_ansi_(raw))
            } else {
                Self::always_ansi_(raw)
            }
        } else {
            Self::always_ansi(raw)
        }
    }
    /// Only pass printable data to the inner `Write`.
    #[inline]
    pub fn never(raw: S) -> Self {
        let inner = StreamInner::Strip(StripStream::new(raw));
        AutoStream { inner }
    }
    #[inline]
    fn wincon(raw: S) -> Result<Self, S> {
        #[cfg(all(windows, feature = "wincon"))]
        {
            Ok(Self {
                inner: StreamInner::Wincon(WinconStream::new(raw)),
            })
        }
        #[cfg(not(all(windows, feature = "wincon")))]
        {
            // Backend not available in this build; return the stream unchanged.
            Err(raw)
        }
    }
    /// Get the wrapped [`RawStream`]
    #[inline]
    pub fn into_inner(self) -> S {
        match self.inner {
            StreamInner::PassThrough(w) => w,
            StreamInner::Strip(w) => w.into_inner(),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => w.into_inner(),
        }
    }
    /// Report whether the underlying stream is connected to a terminal.
    #[inline]
    pub fn is_terminal(&self) -> bool {
        match &self.inner {
            StreamInner::PassThrough(w) => w.is_terminal(),
            StreamInner::Strip(w) => w.is_terminal(),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(_) => true, // its only ever a terminal
        }
    }
    /// Prefer [`AutoStream::choice`]
    ///
    /// This doesn't report what is requested but what is currently active.
    #[inline]
    #[cfg(feature = "auto")]
    pub fn current_choice(&self) -> ColorChoice {
        match &self.inner {
            StreamInner::PassThrough(_) => ColorChoice::AlwaysAnsi,
            StreamInner::Strip(_) => ColorChoice::Never,
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(_) => ColorChoice::Always,
        }
    }
}
/// Resolve the globally-configured [`ColorChoice`] for `raw` to a concrete
/// choice (never returns `Auto`).
#[cfg(feature = "auto")]
fn choice(raw: &dyn RawStream) -> ColorChoice {
    let choice = ColorChoice::global();
    match choice {
        ColorChoice::Auto => {
            let clicolor = anstyle_query::clicolor();
            let clicolor_enabled = clicolor.unwrap_or(false);
            let clicolor_disabled = !clicolor.unwrap_or(true);
            // NOTE: `&&` binds tighter than `||`, so `clicolor_force()` on the
            // last line overrides the entire terminal/NO_COLOR check — forcing
            // color even when not writing to a terminal. This precedence is
            // intentional; do not "fix" it with parentheses.
            if raw.is_terminal()
                && !anstyle_query::no_color()
                && !clicolor_disabled
                && (anstyle_query::term_supports_color()
                    || clicolor_enabled
                    || anstyle_query::is_ci())
                || anstyle_query::clicolor_force()
            {
                ColorChoice::Always
            } else {
                ColorChoice::Never
            }
        }
        // Explicit choices pass through unchanged.
        ColorChoice::AlwaysAnsi | ColorChoice::Always | ColorChoice::Never => choice,
    }
}
impl AutoStream<std::io::Stdout> {
    /// Get exclusive access to the `AutoStream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    #[inline]
    pub fn lock(self) -> AutoStream<std::io::StdoutLock<'static>> {
        // Locking preserves whichever backend was chosen at construction.
        let inner = match self.inner {
            StreamInner::PassThrough(w) => StreamInner::PassThrough(w.lock()),
            StreamInner::Strip(w) => StreamInner::Strip(w.lock()),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => StreamInner::Wincon(w.lock()),
        };
        AutoStream { inner }
    }
}
impl AutoStream<std::io::Stderr> {
    /// Get exclusive access to the `AutoStream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    #[inline]
    pub fn lock(self) -> AutoStream<std::io::StderrLock<'static>> {
        // Locking preserves whichever backend was chosen at construction.
        let inner = match self.inner {
            StreamInner::PassThrough(w) => StreamInner::PassThrough(w.lock()),
            StreamInner::Strip(w) => StreamInner::Strip(w.lock()),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => StreamInner::Wincon(w.lock()),
        };
        AutoStream { inner }
    }
}
impl<S> std::io::Write for AutoStream<S>
where
    S: RawStream + AsLockedWrite,
{
    // Must forward all calls to ensure locking happens appropriately
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        match &mut self.inner {
            // Pass-through locks here; the other backends lock inside their
            // own `Write` impls.
            StreamInner::PassThrough(w) => w.as_locked_write().write(buf),
            StreamInner::Strip(w) => w.write(buf),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => w.write(buf),
        }
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
        match &mut self.inner {
            StreamInner::PassThrough(w) => w.as_locked_write().write_vectored(bufs),
            StreamInner::Strip(w) => w.write_vectored(bufs),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => w.write_vectored(bufs),
        }
    }
    // is_write_vectored: nightly only
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        match &mut self.inner {
            StreamInner::PassThrough(w) => w.as_locked_write().flush(),
            StreamInner::Strip(w) => w.flush(),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => w.flush(),
        }
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        match &mut self.inner {
            StreamInner::PassThrough(w) => w.as_locked_write().write_all(buf),
            StreamInner::Strip(w) => w.write_all(buf),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => w.write_all(buf),
        }
    }
    // write_all_vectored: nightly only
    #[inline]
    fn write_fmt(&mut self, args: std::fmt::Arguments<'_>) -> std::io::Result<()> {
        match &mut self.inner {
            StreamInner::PassThrough(w) => w.as_locked_write().write_fmt(args),
            StreamInner::Strip(w) => w.write_fmt(args),
            #[cfg(all(windows, feature = "wincon"))]
            StreamInner::Wincon(w) => w.write_fmt(args),
        }
    }
}

View File

@@ -1,68 +0,0 @@
#![allow(deprecated)]
/// In-memory [`RawStream`][crate::stream::RawStream]
///
/// Deprecated: a plain `Vec<u8>` implements the same stream traits; prefer it.
#[derive(Clone, Default, Debug, PartialEq, Eq)]
#[deprecated(since = "0.6.2", note = "Use Vec")]
#[doc(hidden)]
pub struct Buffer(Vec<u8>);
impl Buffer {
    /// Create an empty buffer.
    #[inline]
    pub fn new() -> Self {
        Self(Vec::new())
    }
    /// Create an empty buffer pre-sized to hold `capacity` bytes.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self(Vec::with_capacity(capacity))
    }
    /// View the accumulated bytes.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_slice()
    }
}
impl AsRef<[u8]> for Buffer {
    /// Borrow the buffered bytes as a slice.
    #[inline]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
impl std::io::Write for Buffer {
    /// Append `buf` to the in-memory buffer; never fails.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.0.extend_from_slice(buf);
        Ok(buf.len())
    }
    /// No-op: an in-memory buffer has nothing to flush.
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
#[cfg(all(windows, feature = "wincon"))]
impl anstyle_wincon::WinconStream for Buffer {
    /// Delegate colored writes to the inner `Vec<u8>`'s implementation.
    fn write_colored(
        &mut self,
        fg: Option<anstyle::AnsiColor>,
        bg: Option<anstyle::AnsiColor>,
        data: &[u8],
    ) -> std::io::Result<usize> {
        self.0.write_colored(fg, bg, data)
    }
}
#[cfg(all(windows, feature = "wincon"))]
impl anstyle_wincon::WinconStream for &'_ mut Buffer {
    /// Forward through the mutable reference to `Buffer`'s implementation.
    fn write_colored(
        &mut self,
        fg: Option<anstyle::AnsiColor>,
        bg: Option<anstyle::AnsiColor>,
        data: &[u8],
    ) -> std::io::Result<usize> {
        (**self).write_colored(fg, bg, data)
    }
}

View File

@@ -1,54 +0,0 @@
/// A shim which allows a [`std::io::Write`] to be implemented in terms of a [`std::fmt::Write`]
///
/// This saves off I/O errors instead of discarding them
pub(crate) struct Adapter<W>
where
    W: FnMut(&[u8]) -> std::io::Result<()>,
{
    // Sink receiving the raw bytes of each formatted fragment.
    writer: W,
    // First I/O error reported by `writer`, if any; inspected after formatting.
    error: std::io::Result<()>,
}
impl<W> Adapter<W>
where
    W: FnMut(&[u8]) -> std::io::Result<()>,
{
    /// Wrap `writer`, starting with no recorded I/O error.
    pub(crate) fn new(writer: W) -> Self {
        Adapter {
            writer,
            error: Ok(()),
        }
    }
    /// Format `fmt` into the writer, surfacing the writer's own I/O error
    /// when one occurred rather than an opaque formatting failure.
    pub(crate) fn write_fmt(mut self, fmt: std::fmt::Arguments<'_>) -> std::io::Result<()> {
        match std::fmt::write(&mut self, fmt) {
            Ok(()) => Ok(()),
            Err(..) => {
                // check if the error came from the underlying `Write` or not
                if self.error.is_err() {
                    self.error
                } else {
                    // Failure originated in a `Display`/`Debug` impl, not I/O.
                    Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        "formatter error",
                    ))
                }
            }
        }
    }
}
impl<W> std::fmt::Write for Adapter<W>
where
    W: FnMut(&[u8]) -> std::io::Result<()>,
{
    /// Forward `s` to the byte-oriented writer, stashing any I/O error so the
    /// caller can distinguish it from a pure formatting failure.
    fn write_str(&mut self, s: &str) -> std::fmt::Result {
        (self.writer)(s.as_bytes()).map_err(|e| {
            self.error = Err(e);
            std::fmt::Error
        })
    }
}

View File

@@ -1,79 +0,0 @@
//! **Auto-adapting [`stdout`] / [`stderr`] streams**
//!
//! *A portmanteau of "ansi stream"*
//!
//! [`AutoStream`] always accepts [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code),
//! adapting to the user's terminal's capabilities.
//!
//! Benefits
//! - Allows the caller to not be concerned with the terminal's capabilities
//! - Semver safe way of passing styled text between crates as ANSI escape codes offer more
//! compatibility than most crate APIs.
//!
//! Available styling crates:
//! - [anstyle](https://docs.rs/anstyle) for minimal runtime styling, designed to go in public APIs
//! (once it hits 1.0)
//! - [owo-colors](https://docs.rs/owo-colors) for feature-rich runtime styling
//! - [color-print](https://docs.rs/color-print) for feature-rich compile-time styling
//!
//! # Example
//!
//! ```
//! # #[cfg(feature = "auto")] {
//! use anstream::println;
//! use owo_colors::OwoColorize as _;
//!
//! // Foreground colors
//! println!("My number is {:#x}!", 10.green());
//! // Background colors
//! println!("My number is not {}!", 4.on_red());
//! # }
//! ```
//!
//! And this will correctly handle piping to a file, etc
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
pub mod adapter;
pub mod stream;
mod buffer;
#[macro_use]
mod macros;
mod auto;
mod fmt;
mod strip;
#[cfg(all(windows, feature = "wincon"))]
mod wincon;
pub use auto::AutoStream;
pub use strip::StripStream;
#[cfg(all(windows, feature = "wincon"))]
pub use wincon::WinconStream;
#[allow(deprecated)]
pub use buffer::Buffer;
/// Create an ANSI escape code compatible stdout
///
/// **Note:** Call [`AutoStream::lock`] in loops to avoid the performance hit of acquiring/releasing
/// from the implicit locking in each [`std::io::Write`] call
#[cfg(feature = "auto")]
pub fn stdout() -> AutoStream<std::io::Stdout> {
    AutoStream::auto(std::io::stdout())
}
/// Create an ANSI escape code compatible stderr
///
/// **Note:** Call [`AutoStream::lock`] in loops to avoid the performance hit of acquiring/releasing
/// from the implicit locking in each [`std::io::Write`] call
#[cfg(feature = "auto")]
pub fn stderr() -> AutoStream<std::io::Stderr> {
    AutoStream::auto(std::io::stderr())
}
/// Selection for overriding color output
#[cfg(feature = "auto")]
pub use colorchoice::ColorChoice;

View File

@@ -1,389 +0,0 @@
/// Prints to [`stdout`][crate::stdout].
///
/// Equivalent to the [`println!`] macro except that a newline is not printed at
/// the end of the message.
///
/// Note that stdout is frequently line-buffered by default so it may be
/// necessary to use [`std::io::Write::flush()`] to ensure the output is emitted
/// immediately.
///
/// **NOTE:** The `print!` macro will lock the standard output on each call. If you call
/// `print!` within a hot loop, this behavior may be the bottleneck of the loop.
/// To avoid this, lock stdout with [`AutoStream::lock`][crate::AutoStream::lock]:
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
///
/// let mut lock = anstream::stdout().lock();
/// write!(lock, "hello world").unwrap();
/// # }
/// ```
///
/// Use `print!` only for the primary output of your program. Use
/// [`eprint!`] instead to print error and progress messages.
///
/// **NOTE:** Not all `print!` calls will be captured in tests like [`std::print!`]
/// - Capturing will automatically be activated in test binaries
/// - Otherwise, only when the `test` feature is enabled
///
/// # Panics
///
/// Panics if writing to `stdout` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stdout can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
/// use anstream::print;
/// use anstream::stdout;
///
/// print!("this ");
/// print!("will ");
/// print!("be ");
/// print!("on ");
/// print!("the ");
/// print!("same ");
/// print!("line ");
///
/// stdout().flush().unwrap();
///
/// print!("this string has a newline, why not choose println! instead?\n");
///
/// stdout().flush().unwrap();
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => {{
        if cfg!(any(feature = "test", test)) {
            // Test path: render through an in-memory `AutoStream` (using the
            // color choice stdout would get), then emit via `std::print!` so
            // the test harness can capture the output.
            use std::io::Write as _;
            let stdio = std::io::stdout();
            let choice = $crate::AutoStream::choice(&stdio);
            let buffer = Vec::new();
            let mut stream = $crate::AutoStream::new(buffer, choice);
            // Ignore errors rather than panic
            let _ = ::std::write!(&mut stream, $($arg)*);
            let buffer = stream.into_inner();
            // Should be UTF-8 but not wanting to panic
            let buffer = String::from_utf8_lossy(&buffer);
            ::std::print!("{}", buffer)
        } else {
            use std::io::Write as _;
            let mut stream = $crate::stdout();
            // Mirror `std::print!`: panic on write failure, except broken pipe.
            match ::std::write!(&mut stream, $($arg)*) {
                Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                    ::std::panic!("failed printing to stdout: {e}");
                }
                Err(_) | Ok(_) => {}
            }
        }
    }};
}
/// Prints to [`stdout`][crate::stdout], with a newline.
///
/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
///
/// This macro uses the same syntax as [`format!`], but writes to the standard output instead.
/// See [`std::fmt`] for more information.
///
/// **NOTE:** The `println!` macro will lock the standard output on each call. If you call
/// `println!` within a hot loop, this behavior may be the bottleneck of the loop.
/// To avoid this, lock stdout with [`AutoStream::lock`][crate::AutoStream::lock]:
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
///
/// let mut lock = anstream::stdout().lock();
/// writeln!(lock, "hello world").unwrap();
/// # }
/// ```
///
/// Use `println!` only for the primary output of your program. Use
/// [`eprintln!`] instead to print error and progress messages.
///
/// **NOTE:** Not all `println!` calls will be captured in tests like [`std::println!`]
/// - Capturing will automatically be activated in test binaries
/// - Otherwise, only when the `test` feature is enabled
///
/// # Panics
///
/// Panics if writing to `stdout` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stdout can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::println;
///
/// println!(); // prints just a newline
/// println!("hello there!");
/// println!("format {} arguments", "some");
/// let local_variable = "some";
/// println!("format {local_variable} arguments");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! println {
    () => {
        $crate::print!("\n")
    };
    ($($arg:tt)*) => {{
        if cfg!(any(feature = "test", test)) {
            // Test path: render through an in-memory `AutoStream` (using the
            // color choice stdout would get), then emit via `std::println!` so
            // the test harness can capture the output.
            use std::io::Write as _;
            let stdio = std::io::stdout();
            let choice = $crate::AutoStream::choice(&stdio);
            let buffer = Vec::new();
            let mut stream = $crate::AutoStream::new(buffer, choice);
            // Ignore errors rather than panic
            let _ = ::std::write!(&mut stream, $($arg)*);
            let buffer = stream.into_inner();
            // Should be UTF-8 but not wanting to panic
            let buffer = String::from_utf8_lossy(&buffer);
            ::std::println!("{}", buffer)
        } else {
            use std::io::Write as _;
            let mut stream = $crate::stdout();
            // Mirror `std::println!`: panic on write failure, except broken pipe.
            match ::std::writeln!(&mut stream, $($arg)*) {
                Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                    ::std::panic!("failed printing to stdout: {e}");
                }
                Err(_) | Ok(_) => {}
            }
        }
    }};
}
/// Prints to [`stderr`][crate::stderr].
///
/// Equivalent to the [`print!`] macro, except that output goes to
/// `stderr` instead of `stdout`. See [`print!`] for
/// example usage.
///
/// Use `eprint!` only for error and progress messages. Use `print!`
/// instead for the primary output of your program.
///
/// **NOTE:** Not all `eprint!` calls will be captured in tests like [`std::eprint!`]
/// - Capturing will automatically be activated in test binaries
/// - Otherwise, only when the `test` feature is enabled
///
/// # Panics
///
/// Panics if writing to `stderr` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stderr can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::eprint;
///
/// eprint!("Error: Could not complete task");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! eprint {
    ($($arg:tt)*) => {{
        if cfg!(any(feature = "test", test)) {
            // Test path: render through an in-memory `AutoStream` (using the
            // color choice stderr would get), then emit via `std::eprint!` so
            // the test harness can capture the output.
            use std::io::Write as _;
            let stdio = std::io::stderr();
            let choice = $crate::AutoStream::choice(&stdio);
            let buffer = Vec::new();
            let mut stream = $crate::AutoStream::new(buffer, choice);
            // Ignore errors rather than panic
            let _ = ::std::write!(&mut stream, $($arg)*);
            let buffer = stream.into_inner();
            // Should be UTF-8 but not wanting to panic
            let buffer = String::from_utf8_lossy(&buffer);
            ::std::eprint!("{}", buffer)
        } else {
            use std::io::Write as _;
            let mut stream = $crate::stderr();
            match ::std::write!(&mut stream, $($arg)*) {
                Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                    // BUG FIX: this macro writes to stderr; the panic message
                    // previously claimed "stdout". Report the correct stream.
                    ::std::panic!("failed printing to stderr: {e}");
                }
                Err(_) | Ok(_) => {}
            }
        }
    }};
}
/// Prints to [`stderr`][crate::stderr], with a newline.
///
/// Equivalent to the [`println!`] macro, except that output goes to
/// `stderr` instead of `stdout`. See [`println!`] for
/// example usage.
///
/// Use `eprintln!` only for error and progress messages. Use `println!`
/// instead for the primary output of your program.
///
/// **NOTE:** Not all `eprintln!` calls will be captured in tests like [`std::eprintln!`]
/// - Capturing will automatically be activated in test binaries
/// - Otherwise, only when the `test` feature is enabled
///
/// # Panics
///
/// Panics if writing to `stderr` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stderr can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::eprintln;
///
/// eprintln!("Error: Could not complete task");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! eprintln {
    () => {
        $crate::eprint!("\n")
    };
    ($($arg:tt)*) => {{
        if cfg!(any(feature = "test", test)) {
            // Test path: render through an in-memory `AutoStream` (using the
            // color choice stderr would get), then emit via `std::eprintln!`
            // so the test harness can capture the output.
            use std::io::Write as _;
            let stdio = std::io::stderr();
            let choice = $crate::AutoStream::choice(&stdio);
            let buffer = Vec::new();
            let mut stream = $crate::AutoStream::new(buffer, choice);
            // Ignore errors rather than panic
            let _ = ::std::write!(&mut stream, $($arg)*);
            let buffer = stream.into_inner();
            // Should be UTF-8 but not wanting to panic
            let buffer = String::from_utf8_lossy(&buffer);
            ::std::eprintln!("{}", buffer)
        } else {
            use std::io::Write as _;
            let mut stream = $crate::stderr();
            match ::std::writeln!(&mut stream, $($arg)*) {
                Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                    // BUG FIX: this macro writes to stderr; the panic message
                    // previously claimed "stdout". Report the correct stream.
                    ::std::panic!("failed printing to stderr: {e}");
                }
                Err(_) | Ok(_) => {}
            }
        }
    }};
}
/// Panics the current thread.
///
/// This allows a program to terminate immediately and provide feedback
/// to the caller of the program.
///
/// This macro is the perfect way to assert conditions in example code and in
/// tests. `panic!` is closely tied with the `unwrap` method of both
/// [`Option`][ounwrap] and [`Result`][runwrap] enums. Both implementations call
/// `panic!` when they are set to [`None`] or [`Err`] variants.
///
/// When using `panic!()` you can specify a string payload, that is built using
/// the [`format!`] syntax. That payload is used when injecting the panic into
/// the calling Rust thread, causing the thread to panic entirely.
///
/// The behavior of the default `std` hook, i.e. the code that runs directly
/// after the panic is invoked, is to print the message payload to
/// `stderr` along with the file/line/column information of the `panic!()`
/// call. You can override the panic hook using [`std::panic::set_hook()`].
/// Inside the hook a panic can be accessed as a `&dyn Any + Send`,
/// which contains either a `&str` or `String` for regular `panic!()` invocations.
/// To panic with a value of any other type, [`panic_any`] can be used.
///
/// See also the macro [`compile_error!`], for raising errors during compilation.
///
/// # When to use `panic!` vs `Result`
///
/// The Rust language provides two complementary systems for constructing /
/// representing, reporting, propagating, reacting to, and discarding errors. These
/// responsibilities are collectively known as "error handling." `panic!` and
/// `Result` are similar in that they are each the primary interface of their
/// respective error handling systems; however, the meaning these interfaces attach
/// to their errors and the responsibilities they fulfill within their respective
/// error handling systems differ.
///
/// The `panic!` macro is used to construct errors that represent a bug that has
/// been detected in your program. With `panic!` you provide a message that
/// describes the bug and the language then constructs an error with that message,
/// reports it, and propagates it for you.
///
/// `Result` on the other hand is used to wrap other types that represent either
/// the successful result of some computation, `Ok(T)`, or error types that
/// represent an anticipated runtime failure mode of that computation, `Err(E)`.
/// `Result` is used alongside user defined types which represent the various
/// anticipated runtime failure modes that the associated computation could
/// encounter. `Result` must be propagated manually, often with the the help of the
/// `?` operator and `Try` trait, and they must be reported manually, often with
/// the help of the `Error` trait.
///
/// For more detailed information about error handling check out the [book] or the
/// [`std::result`] module docs.
///
/// [ounwrap]: Option::unwrap
/// [runwrap]: Result::unwrap
/// [`std::panic::set_hook()`]: ../std/panic/fn.set_hook.html
/// [`panic_any`]: ../std/panic/fn.panic_any.html
/// [`Box`]: ../std/boxed/struct.Box.html
/// [`Any`]: crate::any::Any
/// [`format!`]: ../std/macro.format.html
/// [book]: ../book/ch09-00-error-handling.html
/// [`std::result`]: ../std/result/index.html
///
/// # Current implementation
///
/// If the main thread panics it will terminate all your threads and end your
/// program with code `101`.
///
/// # Examples
///
/// ```should_panic
/// # #![allow(unreachable_code)]
/// use anstream::panic;
/// panic!();
/// panic!("this is a terrible mistake!");
/// panic!("this is a {} {message}", "fancy", message = "message");
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! panic {
    () => {
        ::std::panic!()
    };
    ($($arg:tt)*) => {{
        // Render the message through an `AutoStream` targeting stderr's
        // capabilities so any styling is adapted before `std::panic!` sees it.
        use std::io::Write as _;
        let panic_stream = std::io::stderr();
        let choice = $crate::AutoStream::choice(&panic_stream);
        let buffer = Vec::new();
        let mut stream = $crate::AutoStream::new(buffer, choice);
        // Ignore errors rather than panic
        let _ = ::std::write!(&mut stream, $($arg)*);
        let buffer = stream.into_inner();
        // Should be UTF-8 but not wanting to panic
        let buffer = String::from_utf8_lossy(&buffer).into_owned();
        ::std::panic!("{}", buffer)
    }};
}

View File

@@ -1,261 +0,0 @@
//! Higher-level traits to describe writeable streams
/// Required functionality for underlying [`std::io::Write`] for adaptation
#[cfg(not(all(windows, feature = "wincon")))]
pub trait RawStream: std::io::Write + IsTerminal + private::Sealed {}
/// Required functionality for underlying [`std::io::Write`] for adaptation
///
/// With the `wincon` feature on Windows, streams must additionally implement
/// [`anstyle_wincon::WinconStream`].
#[cfg(all(windows, feature = "wincon"))]
pub trait RawStream:
    std::io::Write + IsTerminal + anstyle_wincon::WinconStream + private::Sealed
{
}
// Marker impls: the closed set of writers that may back an adapted stream.
impl RawStream for std::io::Stdout {}
impl RawStream for std::io::StdoutLock<'_> {}
impl RawStream for &'_ mut std::io::StdoutLock<'_> {}
impl RawStream for std::io::Stderr {}
impl RawStream for std::io::StderrLock<'_> {}
impl RawStream for &'_ mut std::io::StderrLock<'_> {}
impl RawStream for Box<dyn std::io::Write> {}
impl RawStream for &'_ mut Box<dyn std::io::Write> {}
impl RawStream for Vec<u8> {}
impl RawStream for &'_ mut Vec<u8> {}
impl RawStream for std::fs::File {}
impl RawStream for &'_ mut std::fs::File {}
// Kept for backwards compatibility with the deprecated `Buffer` type.
#[allow(deprecated)]
impl RawStream for crate::Buffer {}
#[allow(deprecated)]
impl RawStream for &'_ mut crate::Buffer {}
/// Capability probe for streams: whether output goes to a terminal.
pub trait IsTerminal: private::Sealed {
    /// Returns `true` if the stream is attached to a terminal.
    fn is_terminal(&self) -> bool;
}
// Std handles defer to the real `std::io::IsTerminal` probe.
impl IsTerminal for std::io::Stdout {
    #[inline]
    fn is_terminal(&self) -> bool {
        std::io::IsTerminal::is_terminal(self)
    }
}
impl IsTerminal for std::io::StdoutLock<'_> {
    #[inline]
    fn is_terminal(&self) -> bool {
        std::io::IsTerminal::is_terminal(self)
    }
}
// Mutable references forward to the underlying value.
impl IsTerminal for &'_ mut std::io::StdoutLock<'_> {
    #[inline]
    fn is_terminal(&self) -> bool {
        (**self).is_terminal()
    }
}
impl IsTerminal for std::io::Stderr {
    #[inline]
    fn is_terminal(&self) -> bool {
        std::io::IsTerminal::is_terminal(self)
    }
}
impl IsTerminal for std::io::StderrLock<'_> {
    #[inline]
    fn is_terminal(&self) -> bool {
        std::io::IsTerminal::is_terminal(self)
    }
}
impl IsTerminal for &'_ mut std::io::StderrLock<'_> {
    #[inline]
    fn is_terminal(&self) -> bool {
        (**self).is_terminal()
    }
}
// Opaque and in-memory sinks are never considered terminals.
impl IsTerminal for Box<dyn std::io::Write> {
    #[inline]
    fn is_terminal(&self) -> bool {
        false
    }
}
impl IsTerminal for &'_ mut Box<dyn std::io::Write> {
    #[inline]
    fn is_terminal(&self) -> bool {
        false
    }
}
impl IsTerminal for Vec<u8> {
    #[inline]
    fn is_terminal(&self) -> bool {
        false
    }
}
impl IsTerminal for &'_ mut Vec<u8> {
    #[inline]
    fn is_terminal(&self) -> bool {
        false
    }
}
// Files defer to std's probe (a `File` can be a terminal device).
impl IsTerminal for std::fs::File {
    #[inline]
    fn is_terminal(&self) -> bool {
        std::io::IsTerminal::is_terminal(self)
    }
}
impl IsTerminal for &'_ mut std::fs::File {
    #[inline]
    fn is_terminal(&self) -> bool {
        (**self).is_terminal()
    }
}
// The deprecated in-memory `Buffer` is never a terminal.
#[allow(deprecated)]
impl IsTerminal for crate::Buffer {
    #[inline]
    fn is_terminal(&self) -> bool {
        false
    }
}
#[allow(deprecated)]
impl IsTerminal for &'_ mut crate::Buffer {
    #[inline]
    fn is_terminal(&self) -> bool {
        (**self).is_terminal()
    }
}
/// Obtain a locked [`RawStream`] writer for the duration of a call.
pub trait AsLockedWrite: private::Sealed {
    /// The locked writer type handed out by [`as_locked_write`][Self::as_locked_write].
    type Write<'w>: RawStream + 'w
    where
        Self: 'w;
    /// Lock (or mutably borrow) the underlying writer.
    fn as_locked_write(&mut self) -> Self::Write<'_>;
}
// Stdout/stderr acquire the std lock per call.
impl AsLockedWrite for std::io::Stdout {
    type Write<'w> = std::io::StdoutLock<'w>;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self.lock()
    }
}
// Already-locked handles just hand out a mutable borrow.
impl AsLockedWrite for std::io::StdoutLock<'static> {
    type Write<'w> = &'w mut Self;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self
    }
}
impl AsLockedWrite for std::io::Stderr {
    type Write<'w> = std::io::StderrLock<'w>;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self.lock()
    }
}
impl AsLockedWrite for std::io::StderrLock<'static> {
    type Write<'w> = &'w mut Self;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self
    }
}
// Plain writers need no locking; a mutable borrow suffices.
impl AsLockedWrite for Box<dyn std::io::Write> {
    type Write<'w> = &'w mut Self;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self
    }
}
impl AsLockedWrite for Vec<u8> {
    type Write<'w> = &'w mut Self;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self
    }
}
impl AsLockedWrite for std::fs::File {
    type Write<'w> = &'w mut Self;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self
    }
}
#[allow(deprecated)]
impl AsLockedWrite for crate::Buffer {
    type Write<'w> = &'w mut Self;
    #[inline]
    fn as_locked_write(&mut self) -> Self::Write<'_> {
        self
    }
}
mod private {
    /// Sealing trait: restricts which types may implement this module's
    /// public traits to the set enumerated below.
    pub trait Sealed {}
    impl Sealed for std::io::Stdout {}
    impl Sealed for std::io::StdoutLock<'_> {}
    impl Sealed for &'_ mut std::io::StdoutLock<'_> {}
    impl Sealed for std::io::Stderr {}
    impl Sealed for std::io::StderrLock<'_> {}
    impl Sealed for &'_ mut std::io::StderrLock<'_> {}
    impl Sealed for Box<dyn std::io::Write> {}
    impl Sealed for &'_ mut Box<dyn std::io::Write> {}
    impl Sealed for Vec<u8> {}
    impl Sealed for &'_ mut Vec<u8> {}
    impl Sealed for std::fs::File {}
    impl Sealed for &'_ mut std::fs::File {}
    #[allow(deprecated)]
    impl Sealed for crate::Buffer {}
    #[allow(deprecated)]
    impl Sealed for &'_ mut crate::Buffer {}
}

View File

@@ -1,219 +0,0 @@
use crate::adapter::StripBytes;
use crate::stream::AsLockedWrite;
use crate::stream::RawStream;
/// Only pass printable data to the inner `Write`
///
/// Carries incremental parser state so escape sequences split across
/// separate `write` calls are still stripped correctly.
#[derive(Debug)]
pub struct StripStream<S>
where
    S: RawStream,
{
    // The wrapped output stream.
    raw: S,
    // Incremental escape-stripping state carried across writes.
    state: StripBytes,
}
impl<S> StripStream<S>
where
S: RawStream,
{
/// Only pass printable data to the inner `Write`
#[inline]
pub fn new(raw: S) -> Self {
Self {
raw,
state: Default::default(),
}
}
/// Get the wrapped [`RawStream`]
#[inline]
pub fn into_inner(self) -> S {
self.raw
}
#[inline]
pub fn is_terminal(&self) -> bool {
self.raw.is_terminal()
}
}
impl StripStream<std::io::Stdout> {
    /// Get exclusive access to the `StripStream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    #[inline]
    pub fn lock(self) -> StripStream<std::io::StdoutLock<'static>> {
        StripStream {
            raw: self.raw.lock(),
            // Carry the parser state over so a partially-seen escape sequence
            // spanning the lock boundary is still stripped correctly.
            state: self.state,
        }
    }
}
impl StripStream<std::io::Stderr> {
    /// Get exclusive access to the `StripStream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    #[inline]
    pub fn lock(self) -> StripStream<std::io::StderrLock<'static>> {
        StripStream {
            raw: self.raw.lock(),
            // Carry the parser state over so a partially-seen escape sequence
            // spanning the lock boundary is still stripped correctly.
            state: self.state,
        }
    }
}
impl<S> std::io::Write for StripStream<S>
where
    S: RawStream + AsLockedWrite,
{
    // Must forward all calls to ensure locking happens appropriately
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        write(&mut self.raw.as_locked_write(), &mut self.state, buf)
    }
    // Degrades vectored writes to writing only the first non-empty buffer;
    // callers observe a short write and retry with the remainder.
    #[inline]
    fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
        let buf = bufs
            .iter()
            .find(|b| !b.is_empty())
            .map(|b| &**b)
            .unwrap_or(&[][..]);
        self.write(buf)
    }
    // is_write_vectored: nightly only
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        self.raw.as_locked_write().flush()
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        write_all(&mut self.raw.as_locked_write(), &mut self.state, buf)
    }
    // write_all_vectored: nightly only
    #[inline]
    fn write_fmt(&mut self, args: std::fmt::Arguments<'_>) -> std::io::Result<()> {
        write_fmt(&mut self.raw.as_locked_write(), &mut self.state, args)
    }
}
// Strip escape sequences from `buf` and write the printable chunks to `raw`.
//
// Returns how many bytes of `buf` were consumed. On a partial write of a
// printable chunk, the stripping state is rewound to `initial_state` and
// replayed over the prefix of `buf` that *was* consumed, so a later retry
// with the remaining bytes resumes mid-sequence correctly.
fn write(
    raw: &mut dyn std::io::Write,
    state: &mut StripBytes,
    buf: &[u8],
) -> std::io::Result<usize> {
    let initial_state = state.clone();
    for printable in state.strip_next(buf) {
        let possible = printable.len();
        let written = raw.write(printable)?;
        if possible != written {
            // Translate the unwritten tail of `printable` back into an
            // offset inside the caller's `buf`.
            let divergence = &printable[written..];
            let offset = offset_to(buf, divergence);
            // BUGFIX: replay the *consumed prefix* (`..offset`), not the
            // unconsumed tail, so `state` matches exactly what was written.
            let consumed = &buf[..offset];
            *state = initial_state;
            state.strip_next(consumed).last();
            return Ok(offset);
        }
    }
    Ok(buf.len())
}
// Strip escape sequences from `buf` and fully write every printable chunk,
// propagating the first I/O error.
fn write_all(
    raw: &mut dyn std::io::Write,
    state: &mut StripBytes,
    buf: &[u8],
) -> std::io::Result<()> {
    state
        .strip_next(buf)
        .try_for_each(|printable| raw.write_all(printable))
}
// Route formatted output through our own `write_all` so it is also stripped;
// `crate::fmt::Adapter` bridges `fmt::Write` onto the byte-writing closure.
fn write_fmt(
    raw: &mut dyn std::io::Write,
    state: &mut StripBytes,
    args: std::fmt::Arguments<'_>,
) -> std::io::Result<()> {
    let write_all = |buf: &[u8]| write_all(raw, state, buf);
    crate::fmt::Adapter::new(write_all).write_fmt(args)
}
/// Byte offset of `subslice` within `total`.
///
/// `subslice` must point into `total` (checked only in debug builds via the
/// pointer comparison below).
#[inline]
fn offset_to(total: &[u8], subslice: &[u8]) -> usize {
    let total = total.as_ptr();
    let subslice = subslice.as_ptr();
    // Fixed stale message: this is a free function, not `Offset::offset_to`.
    debug_assert!(
        total <= subslice,
        "`offset_to` only accepts subslices of `total`"
    );
    subslice as usize - total as usize
}
#[cfg(test)]
mod test {
    use super::*;
    use proptest::prelude::*;
    use std::io::Write as _;
    proptest! {
        // Printable-only input must round-trip unchanged in one write.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_all_no_escapes(s in "\\PC*") {
            let buffer = Vec::new();
            let mut stream = StripStream::new(buffer);
            stream.write_all(s.as_bytes()).unwrap();
            let buffer = stream.into_inner();
            let actual = std::str::from_utf8(buffer.as_ref()).unwrap();
            assert_eq!(s, actual);
        }
        // Same as above but fed one byte at a time, exercising the state
        // carried between `write_all` calls.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_byte_no_escapes(s in "\\PC*") {
            let buffer = Vec::new();
            let mut stream = StripStream::new(buffer);
            for byte in s.as_bytes() {
                stream.write_all(&[*byte]).unwrap();
            }
            let buffer = stream.into_inner();
            let actual = std::str::from_utf8(buffer.as_ref()).unwrap();
            assert_eq!(s, actual);
        }
        // Arbitrary bytes: if the output happens to be valid UTF-8, it must
        // contain no non-whitespace ASCII control characters.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_all_random(s in any::<Vec<u8>>()) {
            let buffer = Vec::new();
            let mut stream = StripStream::new(buffer);
            stream.write_all(s.as_slice()).unwrap();
            let buffer = stream.into_inner();
            if let Ok(actual) = std::str::from_utf8(buffer.as_ref()) {
                for char in actual.chars() {
                    assert!(!char.is_ascii() || !char.is_control() || char.is_ascii_whitespace(), "{:?} -> {:?}: {:?}", String::from_utf8_lossy(&s), actual, char);
                }
            }
        }
        // Byte-at-a-time variant of the random-input property.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_byte_random(s in any::<Vec<u8>>()) {
            let buffer = Vec::new();
            let mut stream = StripStream::new(buffer);
            for byte in s.as_slice() {
                stream.write_all(&[*byte]).unwrap();
            }
            let buffer = stream.into_inner();
            if let Ok(actual) = std::str::from_utf8(buffer.as_ref()) {
                for char in actual.chars() {
                    assert!(!char.is_ascii() || !char.is_control() || char.is_ascii_whitespace(), "{:?} -> {:?}: {:?}", String::from_utf8_lossy(&s), actual, char);
                }
            }
        }
    }
}

View File

@@ -1,210 +0,0 @@
use crate::adapter::WinconBytes;
use crate::stream::AsLockedWrite;
use crate::stream::RawStream;
/// Only pass printable data to the inner `Write`
// NOTE(review): this doc line appears copy-pasted from `StripStream`; here
// the escapes are translated into `write_colored` console calls rather than
// dropped — confirm intent against the crate's public docs.
#[cfg(feature = "wincon")] // here mostly for documentation purposes
#[derive(Debug)]
pub struct WinconStream<S>
where
    S: RawStream,
{
    // Underlying sink that receives the colored output.
    raw: S,
    // `WinconBytes` is especially large compared to other variants of `AutoStream`, so boxing it
    // here so `AutoStream` doesn't have to discard one allocation and create another one when
    // calling `AutoStream::lock`
    state: Box<WinconBytes>,
}
impl<S> WinconStream<S>
where
    S: RawStream,
{
    /// Only pass printable data to the inner `Write`
    #[inline]
    pub fn new(raw: S) -> Self {
        // Boxed so moving the stream (e.g. into a lock) stays cheap.
        let state = Box::default();
        Self { raw, state }
    }

    /// Get the wrapped [`RawStream`]
    #[inline]
    pub fn into_inner(self) -> S {
        let Self { raw, .. } = self;
        raw
    }

    /// Whether the wrapped stream is connected to a terminal.
    #[inline]
    pub fn is_terminal(&self) -> bool {
        let inner = &self.raw;
        inner.is_terminal()
    }
}
impl WinconStream<std::io::Stdout> {
    /// Get exclusive access to the `WinconStream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    #[inline]
    pub fn lock(self) -> WinconStream<std::io::StdoutLock<'static>> {
        // The boxed state moves across without reallocating.
        WinconStream {
            raw: self.raw.lock(),
            state: self.state,
        }
    }
}
impl WinconStream<std::io::Stderr> {
    /// Get exclusive access to the `WinconStream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    #[inline]
    pub fn lock(self) -> WinconStream<std::io::StderrLock<'static>> {
        // Mirrors the stdout impl; state is preserved across locking.
        WinconStream {
            raw: self.raw.lock(),
            state: self.state,
        }
    }
}
impl<S> std::io::Write for WinconStream<S>
where
    S: RawStream + AsLockedWrite,
{
    // Must forward all calls to ensure locking happens appropriately
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        write(&mut self.raw.as_locked_write(), &mut self.state, buf)
    }
    // Degrades vectored writes to the first non-empty buffer; callers see a
    // short write and retry with the rest.
    #[inline]
    fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
        let buf = bufs
            .iter()
            .find(|b| !b.is_empty())
            .map(|b| &**b)
            .unwrap_or(&[][..]);
        self.write(buf)
    }
    // is_write_vectored: nightly only
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        self.raw.as_locked_write().flush()
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        write_all(&mut self.raw.as_locked_write(), &mut self.state, buf)
    }
    // write_all_vectored: nightly only
    #[inline]
    fn write_fmt(&mut self, args: std::fmt::Arguments<'_>) -> std::io::Result<()> {
        write_fmt(&mut self.raw.as_locked_write(), &mut self.state, args)
    }
}
// Extract (style, text) runs from `buf` and emit each via the console-aware
// `write_colored`, capping colors to what the console API supports.
//
// Note: always reports `buf.len()` bytes consumed — partial underlying
// writes are not resumable here (see the HACK below), so the remainder of
// the current run is dropped rather than retried.
fn write(raw: &mut dyn RawStream, state: &mut WinconBytes, buf: &[u8]) -> std::io::Result<usize> {
    for (style, printable) in state.extract_next(buf) {
        let fg = style.get_fg_color().and_then(cap_wincon_color);
        let bg = style.get_bg_color().and_then(cap_wincon_color);
        let written = raw.write_colored(fg, bg, printable.as_bytes())?;
        let possible = printable.len();
        if possible != written {
            // HACK: Unsupported atm
            break;
        }
    }
    Ok(buf.len())
}
// Like `write`, but guarantees every styled run is fully written, mirroring
// the retry loop of `std::io::Write::write_all` (ignore `Interrupted`,
// treat a zero-byte write as `WriteZero`).
fn write_all(raw: &mut dyn RawStream, state: &mut WinconBytes, buf: &[u8]) -> std::io::Result<()> {
    for (style, printable) in state.extract_next(buf) {
        let mut buf = printable.as_bytes();
        let fg = style.get_fg_color().and_then(cap_wincon_color);
        let bg = style.get_bg_color().and_then(cap_wincon_color);
        while !buf.is_empty() {
            match raw.write_colored(fg, bg, buf) {
                Ok(0) => {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::WriteZero,
                        "failed to write whole buffer",
                    ));
                }
                Ok(n) => buf = &buf[n..],
                // Interrupted writes are transient; retry the same slice.
                Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
    }
    Ok(())
}
// Route formatted output through our own `write_all` so it is also
// style-translated; `crate::fmt::Adapter` bridges `fmt::Write` onto the
// byte-writing closure.
fn write_fmt(
    raw: &mut dyn RawStream,
    state: &mut WinconBytes,
    args: std::fmt::Arguments<'_>,
) -> std::io::Result<()> {
    let write_all = |buf: &[u8]| write_all(raw, state, buf);
    crate::fmt::Adapter::new(write_all).write_fmt(args)
}
fn cap_wincon_color(color: anstyle::Color) -> Option<anstyle::AnsiColor> {
match color {
anstyle::Color::Ansi(c) => Some(c),
anstyle::Color::Ansi256(c) => c.into_ansi(),
anstyle::Color::Rgb(_) => None,
}
}
#[cfg(test)]
mod test {
    use super::*;
    use proptest::prelude::*;
    use std::io::Write as _;
    proptest! {
        // Printable-only input must round-trip unchanged in one write.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_all_no_escapes(s in "\\PC*") {
            let buffer = Vec::new();
            let mut stream = WinconStream::new(buffer);
            stream.write_all(s.as_bytes()).unwrap();
            let buffer = stream.into_inner();
            let actual = std::str::from_utf8(buffer.as_ref()).unwrap();
            assert_eq!(s, actual);
        }
        // Same as above but fed one byte at a time, exercising carried state.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_byte_no_escapes(s in "\\PC*") {
            let buffer = Vec::new();
            let mut stream = WinconStream::new(buffer);
            for byte in s.as_bytes() {
                stream.write_all(&[*byte]).unwrap();
            }
            let buffer = stream.into_inner();
            let actual = std::str::from_utf8(buffer.as_ref()).unwrap();
            assert_eq!(s, actual);
        }
        // Arbitrary bytes: only asserts the stream does not panic or error.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_all_random(s in any::<Vec<u8>>()) {
            let buffer = Vec::new();
            let mut stream = WinconStream::new(buffer);
            stream.write_all(s.as_slice()).unwrap();
        }
        // Byte-at-a-time variant of the random-input smoke test.
        #[test]
        #[cfg_attr(miri, ignore)] // See https://github.com/AltSysrq/proptest/issues/253
        fn write_byte_random(s in any::<Vec<u8>>()) {
            let buffer = Vec::new();
            let mut stream = WinconStream::new(buffer);
            for byte in s.as_slice() {
                stream.write_all(&[*byte]).unwrap();
            }
        }
    }
}

View File

@@ -1 +0,0 @@
{"files":{"Cargo.lock":"7f68b5328c460caf1d2198b10fe1761e5f0282262f92d04076b30b25539970b0","Cargo.toml":"2834f39b7169c03b03da1e209f56133783ce00ea64d5f2c14381d93984ca20bf","LICENSE-APACHE":"b40930bbcf80744c86c46a12bc9da056641d722716c378f5659b9e555ef833e1","LICENSE-MIT":"c1d4bc00896473e0109ccb4c3c7d21addb55a4ff1a644be204dcfce26612af2a","README.md":"abc82171d436ee0eb221838e8d21a21a2e392504e87f0c130b5eca6a35671e1e","benches/parse.rs":"336c808d51c90db2497fa87e571df7f71c844a1b09be88839fe4255066c632f4","examples/parselog.rs":"58b7db739deed701aa0ab386d0d0c1772511b8aed1c08d31ec5b35a1c8cd4321","src/lib.rs":"c89f2afa0e982276dc47ca8d8a76d47516aa39aa9d3354254c87fdbf2f8ef4cc","src/params.rs":"8cfef4e2ab1961ca2d9f210da553fc6ac64bb6dbd03321f0ee7d6089ab45389c","src/state/codegen.rs":"8530124c8f998f391e47950f130590376321dcade810990f4312c3b1c0a61968","src/state/definitions.rs":"dc3dbb3244def74430a72b0108f019e22cc02e0ae5f563ee14d38300ff82b814","src/state/mod.rs":"be07c2ea393a971dd54117dc2ce8a3ffb5b803cb557ab468389b74570855fa37","src/state/table.rs":"673b7e9242c5248efc076086cc6923578ec2f059c0c26da21363528e20e4285c"},"package":"c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"}

1202
vendor/anstyle-parse/Cargo.lock generated vendored

File diff suppressed because it is too large Load Diff

View File

@@ -1,108 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.70.0"
name = "anstyle-parse"
version = "0.2.3"
include = [
"build.rs",
"src/**/*",
"Cargo.toml",
"Cargo.lock",
"LICENSE*",
"README.md",
"benches/**/*",
"examples/**/*",
]
description = "Parse ANSI Style Escapes"
homepage = "https://github.com/rust-cli/anstyle"
readme = "README.md"
keywords = [
"ansi",
"terminal",
"color",
"vte",
]
categories = ["command-line-interface"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-cli/anstyle.git"
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
min = 1
replace = "{{version}}"
search = "Unreleased"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = "...{{tag_name}}"
search = '\.\.\.HEAD'
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
min = 1
replace = "{{date}}"
search = "ReleaseDate"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = """
<!-- next-header -->
## [Unreleased] - ReleaseDate
"""
search = "<!-- next-header -->"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = """
<!-- next-url -->
[Unreleased]: https://github.com/rust-cli/anstyle/compare/{{tag_name}}...HEAD"""
search = "<!-- next-url -->"
[[bench]]
name = "parse"
harness = false
[dependencies.arrayvec]
version = "0.7.2"
optional = true
default-features = false
[dependencies.utf8parse]
version = "0.2.1"
optional = true
[dev-dependencies.codegenrs]
version = "3.0.1"
default-features = false
[dev-dependencies.criterion]
version = "0.5.1"
[dev-dependencies.proptest]
version = "1.4.0"
[dev-dependencies.snapbox]
version = "0.4.14"
features = ["path"]
[dev-dependencies.vte_generate_state_changes]
version = "0.1.1"
[features]
core = ["dep:arrayvec"]
default = ["utf8"]
utf8 = ["dep:utf8parse"]

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,25 +0,0 @@
Copyright (c) 2016 Joe Wilm and individual contributors
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@@ -1,33 +0,0 @@
# anstyle-parse
> Parse [ANSI Style Escapes](https://vt100.net/emu/dec_ansi_parser)
[![Documentation](https://img.shields.io/badge/docs-master-blue.svg)][Documentation]
![License](https://img.shields.io/crates/l/anstyle-parse.svg)
[![Crates Status](https://img.shields.io/crates/v/anstyle-parse.svg)](https://crates.io/crates/anstyle-parse)
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
### Special Thanks
[chrisduerr](https://github.com/alacritty/vte/commits?author=chrisduerr) and the
[alacritty project](https://github.com/alacritty/alacritty) for
[vte](https://crates.io/crates/vte) which
[this was forked from](https://github.com/alacritty/vte/issues/82)
[Crates.io]: https://crates.io/crates/anstyle-parse
[Documentation]: https://docs.rs/anstyle-parse

View File

@@ -1,169 +0,0 @@
use criterion::{black_box, Criterion};
use anstyle_parse::*;
// Dispatcher that discards every parser callback; `black_box` keeps the
// optimizer from eliding the parsing work being benchmarked.
struct BenchDispatcher;
impl Perform for BenchDispatcher {
    fn print(&mut self, c: char) {
        black_box(c);
    }
    fn execute(&mut self, byte: u8) {
        black_box(byte);
    }
    fn hook(&mut self, params: &Params, intermediates: &[u8], ignore: bool, c: u8) {
        black_box((params, intermediates, ignore, c));
    }
    fn put(&mut self, byte: u8) {
        black_box(byte);
    }
    fn osc_dispatch(&mut self, params: &[&[u8]], bell_terminated: bool) {
        black_box((params, bell_terminated));
    }
    fn csi_dispatch(&mut self, params: &Params, intermediates: &[u8], ignore: bool, c: u8) {
        black_box((params, intermediates, ignore, c));
    }
    fn esc_dispatch(&mut self, intermediates: &[u8], ignore: bool, byte: u8) {
        black_box((intermediates, ignore, byte));
    }
}
// `Perform` impl that accumulates only printable output, acting as an
// escape-stripping sink for the parser-based benchmark.
#[derive(Default)]
struct Strip(String);
impl Strip {
    fn with_capacity(capacity: usize) -> Self {
        Self(String::with_capacity(capacity))
    }
}
impl Perform for Strip {
    fn print(&mut self, c: char) {
        self.0.push(c);
    }
    fn execute(&mut self, byte: u8) {
        // Keep whitespace control bytes (\n, \t, ...); drop other controls.
        if byte.is_ascii_whitespace() {
            self.0.push(byte as char);
        }
    }
}
// Reference strip implementation built directly on the `state_change` table,
// used both as a benchmark and to cross-check the `Parser`-based `Strip`.
fn strip_str(content: &str) -> String {
    use anstyle_parse::state::state_change;
    use anstyle_parse::state::Action;
    use anstyle_parse::state::State;
    #[inline]
    fn is_utf8_continuation(b: u8) -> bool {
        matches!(b, 0x80..=0xbf)
    }
    #[inline]
    fn is_printable(action: Action, byte: u8) -> bool {
        action == Action::Print
            || action == Action::BeginUtf8
            // since we know the input is valid UTF-8, the only thing we can do with
            // continuations is to print them
            || is_utf8_continuation(byte)
            || (action == Action::Execute && byte.is_ascii_whitespace())
    }
    let mut stripped = Vec::with_capacity(content.len());
    let mut bytes = content.as_bytes();
    while !bytes.is_empty() {
        // Phase 1: copy the longest printable prefix, scanning each byte
        // from `Ground` since printable bytes don't change the state.
        let offset = bytes.iter().copied().position(|b| {
            let (_next_state, action) = state_change(State::Ground, b);
            !is_printable(action, b)
        });
        let (printable, next) = bytes.split_at(offset.unwrap_or(bytes.len()));
        stripped.extend(printable);
        bytes = next;
        // Phase 2: run the state machine through the escape sequence,
        // discarding bytes until a printable byte appears again.
        let mut state = State::Ground;
        let offset = bytes.iter().copied().position(|b| {
            let (next_state, action) = state_change(state, b);
            // `Anywhere` is a pseudo-state in the table; don't latch it.
            if next_state != State::Anywhere {
                state = next_state;
            }
            is_printable(action, b)
        });
        let (_, next) = bytes.split_at(offset.unwrap_or(bytes.len()));
        bytes = next;
    }
    String::from_utf8(stripped).unwrap()
}
// Benchmark entry point: runs several captured terminal transcripts through
// the full `Parser` (with both a no-op and a stripping dispatcher) and
// through the raw state-change table, for comparison.
fn parse(c: &mut Criterion) {
    for (name, content) in [
        #[cfg(feature = "utf8")]
        ("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
        ("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
        ("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
        (
            "state_changes",
            &b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
        ),
    ] {
        // Make sure the comparison is fair
        if let Ok(content) = std::str::from_utf8(content) {
            let mut stripped = Strip::with_capacity(content.len());
            let mut parser = Parser::<DefaultCharAccumulator>::new();
            for byte in content.as_bytes() {
                parser.advance(&mut stripped, *byte);
            }
            assert_eq!(stripped.0, strip_str(content));
        }
        let mut group = c.benchmark_group(name);
        // Raw parser throughput with a discard-everything dispatcher.
        group.bench_function("advance", |b| {
            b.iter(|| {
                let mut dispatcher = BenchDispatcher;
                let mut parser = Parser::<DefaultCharAccumulator>::new();
                for byte in content {
                    parser.advance(&mut dispatcher, *byte);
                }
            })
        });
        // Parser throughput when also collecting the stripped text.
        group.bench_function("advance_strip", |b| {
            b.iter(|| {
                let mut stripped = Strip::with_capacity(content.len());
                let mut parser = Parser::<DefaultCharAccumulator>::new();
                for byte in content {
                    parser.advance(&mut stripped, *byte);
                }
                black_box(stripped.0)
            })
        });
        // Lower bound: state-table lookups alone, no dispatch.
        group.bench_function("state_change", |b| {
            b.iter(|| {
                let mut state = anstyle_parse::state::State::default();
                for byte in content {
                    let (next_state, action) = anstyle_parse::state::state_change(state, *byte);
                    state = next_state;
                    black_box(action);
                }
            })
        });
        // Table-driven strip (only meaningful for valid UTF-8 transcripts).
        if let Ok(content) = std::str::from_utf8(content) {
            group.bench_function("state_change_strip_str", |b| {
                b.iter(|| {
                    let stripped = strip_str(content);
                    black_box(stripped)
                })
            });
        }
    }
}
criterion::criterion_group!(benches, parse);
criterion::criterion_main!(benches);

Some files were not shown because too many files have changed in this diff Show More