Compare commits
140 Commits
2023.01.08
...
5035d02220
| Author | SHA1 | Date | |
|---|---|---|---|
|
5035d02220
|
|||
|
ba1789f106
|
|||
|
842f4a8569
|
|||
|
ce6e30f727
|
|||
|
4af183ad74
|
|||
|
ab413bd751
|
|||
|
b5e6fad3c3
|
|||
|
c69cad6a26
|
|||
|
a24910791e
|
|||
|
371a060eb6
|
|||
|
e08b5f3853
|
|||
|
5a97f2e429
|
|||
|
9e2dcb44a6
|
|||
|
828106ba81
|
|||
|
a7dd18fa1d
|
|||
|
f8cca32968
|
|||
|
ef93237724
|
|||
|
58a896221f
|
|||
|
3f48f53bd5
|
|||
|
2953f0c8c9
|
|||
|
022ec608f5
|
|||
|
54c94fddb5
|
|||
|
0def311fd1
|
|||
|
2f157d0972
|
|||
|
8f57a8f0f9
|
|||
|
40e7d88fd0
|
|||
|
afe6b9a29b
|
|||
| 6a46fe9825 | |||
|
7818a7ef3f
|
|||
| 15f2a73e95 | |||
| 2890b69678 | |||
| 27e9d2b39c | |||
| b283e2a8df | |||
|
9dcce90201
|
|||
|
|
7c876faf12 | ||
|
|
39c66e698e | ||
|
|
abac84a008 | ||
|
|
b44217d4af | ||
|
|
c268e4c205 | ||
|
|
8aabe74eb2 | ||
| 84f2175fd2 | |||
|
|
307b9c6d90 | ||
|
|
7de26b16d4 | ||
| 52f2ad43e6 | |||
|
|
c4dec3fe4c | ||
|
e51edcb561
|
|||
| 2273fd4263 | |||
|
|
d4f104cf5e | ||
|
|
7f41a51f2a | ||
|
|
e97610a8ac | ||
|
|
ee02d922ae | ||
|
|
dbd7b6bf33 | ||
|
|
949c0aa087 | ||
|
|
4f29af53b6 | ||
|
|
1d62740d59 | ||
|
d274602104
|
|||
|
8bc39d10b1
|
|||
| 88faa6e3ea | |||
|
|
66705ba4f0 | ||
|
|
bb4c217ee2 | ||
|
|
c83822e353 | ||
|
|
130ee8df5b | ||
|
8d8653133b
|
|||
|
94d2f8a512
|
|||
|
215a093344
|
|||
| 3de1575082 | |||
|
|
aa8e1184bf | ||
| feb7ebe722 | |||
|
|
becadef5ee | ||
| a4b36e1aea | |||
|
|
c7b099b596 | ||
|
48a08445e7
|
|||
|
694de5edfa
|
|||
|
0dc37e9604
|
|||
| 3d2e970225 | |||
|
d90b9830bc
|
|||
| f91e1bda22 | |||
| e9a0fd718f | |||
| 509ce2d83d | |||
| 391756b77d | |||
| 035153c7c0 | |||
| 885a593829 | |||
| 7c3c8cc969 | |||
| 00c62a9909 | |||
| c2899d27af | |||
| e60fdd1958 | |||
| dd6d440ba5 | |||
| 36a082ba18 | |||
| 09689a937c | |||
| 39f6479415 | |||
| 01a2a47370 | |||
| 4cd42afa37 | |||
| 298aa954b9 | |||
| 910deb6c17 | |||
| 4a22e2177e | |||
| 729c972573 | |||
| 250d78a955 | |||
| 03f2d762bb | |||
| fcaa729544 | |||
| 8c2a6e2c19 | |||
| daa2efba89 | |||
| b5748505ef | |||
| d305b1f005 | |||
| 2cfba4891c | |||
| 777d3814d3 | |||
| 784ceeebdf | |||
| e3675555ea | |||
| 91104e214f | |||
| 9198b18652 | |||
| 1ad7949828 | |||
| b98f01a810 | |||
| fa88050a52 | |||
| 1123c8a56e | |||
| 2eb6333552 | |||
| c5224e006f | |||
| 79599f3cf4 | |||
| 7acf99b9d6 | |||
| ec542703b4 | |||
| ee1cdda38b | |||
| 293a1de413 | |||
| 6635d4da9a | |||
| f549769fcf | |||
| c0a56acc0c | |||
| a136dc5fa4 | |||
| 1b13f2acfc | |||
| 6c127ce028 | |||
| bc2e051741 | |||
| 9abd2a4558 | |||
| f267a56fd0 | |||
| 1d592418af | |||
| 3448f0f930 | |||
|
039ed238a6
|
|||
|
b7349f9df9
|
|||
|
12c7f0284e
|
|||
| 5c9a691495 | |||
| bf8be5c045 | |||
|
ee8a5fc02b
|
|||
|
a990de90fe
|
|||
|
3d48cd3f81
|
|||
|
78d6eca336
|
@@ -1,5 +0,0 @@
|
||||
[source.crates-io]
|
||||
replace-with = "vendored-sources"
|
||||
|
||||
[source.vendored-sources]
|
||||
directory = "vendor"
|
||||
9
.devcontainer/devcontainer.json
Normal file
9
.devcontainer/devcontainer.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"image": "mcr.microsoft.com/devcontainers/rust:latest",
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": ["rust-lang.rust-analyzer"]
|
||||
}
|
||||
},
|
||||
"runArgs": ["--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined"]
|
||||
}
|
||||
25
.gitea/workflows/renovate.yml
Normal file
25
.gitea/workflows/renovate.yml
Normal file
@@ -0,0 +1,25 @@
|
||||
name: RenovateBot
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "@daily"
|
||||
|
||||
jobs:
|
||||
renovate:
|
||||
container: ghcr.io/renovatebot/renovate:43
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Run renovate
|
||||
run: |
|
||||
renovate
|
||||
env:
|
||||
GITHUB_COM_TOKEN: ${{ secrets.RENOVATE_GITHUB_TOKEN }}
|
||||
LOG_LEVEL: ${{ vars.RENOVATE_LOG_LEVEL }}
|
||||
RENOVATE_CONFIG_FILE: renovate.config.cjs
|
||||
RENOVATE_LOG_LEVEL: ${{ vars.RENOVATE_LOG_LEVEL }}
|
||||
RENOVATE_REPOSITORIES: ${{ gitea.repository }}
|
||||
RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
|
||||
27
.gitea/workflows/test.yml
Normal file
27
.gitea/workflows/test.yml
Normal file
@@ -0,0 +1,27 @@
|
||||
name: Test
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
- name: Cargo check
|
||||
run: cargo check --workspace --all-targets --all-features
|
||||
- name: Clippy (deny warnings)
|
||||
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
|
||||
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
needs: lint
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- name: Cargo test
|
||||
run: cargo test --workspace --all-features -- --nocapture
|
||||
219
.gitignore
vendored
219
.gitignore
vendored
@@ -1 +1,218 @@
|
||||
/target
|
||||
*~
|
||||
|
||||
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||
.fuse_hidden*
|
||||
|
||||
# KDE directory preferences
|
||||
.directory
|
||||
|
||||
# Linux trash folder which might appear on any partition or disk
|
||||
.Trash-*
|
||||
|
||||
# .nfs files are created when an open file is removed but is still being accessed
|
||||
.nfs*
|
||||
|
||||
# General
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
.com.apple.timemachine.donotpresent
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
# Windows thumbnail cache files
|
||||
Thumbs.db
|
||||
Thumbs.db:encryptable
|
||||
ehthumbs.db
|
||||
ehthumbs_vista.db
|
||||
|
||||
# Dump file
|
||||
*.stackdump
|
||||
|
||||
# Folder config file
|
||||
[Dd]esktop.ini
|
||||
|
||||
# Recycle Bin used on file shares
|
||||
$RECYCLE.BIN/
|
||||
|
||||
# Windows Installer files
|
||||
*.cab
|
||||
*.msi
|
||||
*.msix
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
# Windows shortcuts
|
||||
*.lnk
|
||||
|
||||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
debug/
|
||||
target/
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
Cargo.lock
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
|
||||
# MSVC Windows builds of rustc generate these, which store debugging information
|
||||
*.pdb
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
tmp/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pdm
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# Poetry local configuration file
|
||||
poetry.toml
|
||||
|
||||
# ruff
|
||||
.ruff_cache/
|
||||
|
||||
# LSP config files
|
||||
pyrightconfig.json
|
||||
30
.renovaterc
Normal file
30
.renovaterc
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": [
|
||||
"config:recommended",
|
||||
":disableDependencyDashboard"
|
||||
],
|
||||
"assignees": [
|
||||
"valentineus"
|
||||
],
|
||||
"labels": [
|
||||
"dependencies",
|
||||
"automated"
|
||||
],
|
||||
"packageRules": [
|
||||
{
|
||||
"groupName": "all digest updates",
|
||||
"groupSlug": "all-digest",
|
||||
"matchUpdateTypes": [
|
||||
"minor",
|
||||
"patch",
|
||||
"pin",
|
||||
"digest"
|
||||
],
|
||||
"matchPackageNames": [
|
||||
"*"
|
||||
],
|
||||
"automerge": true
|
||||
}
|
||||
]
|
||||
}
|
||||
1182
Cargo.lock
generated
1182
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
10
Cargo.toml
10
Cargo.toml
@@ -1,12 +1,6 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"libnres",
|
||||
"nres-cli",
|
||||
"packer",
|
||||
"texture-decoder",
|
||||
"unpacker",
|
||||
]
|
||||
resolver = "3"
|
||||
members = ["crates/*"]
|
||||
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
|
||||
60
README.md
60
README.md
@@ -1,11 +1,55 @@
|
||||
# Utilities for the game "Parkan: Iron Strategy"
|
||||
# FParkan
|
||||
|
||||
This repository contains utilities, tools, and libraries for the game "Parkan: Iron Strategy."
|
||||
Open source проект с реализацией компонентов игрового движка игры **«Паркан: Железная Стратегия»** и набором [вспомогательных инструментов](tools) для исследования.
|
||||
|
||||
## List of projects
|
||||
## Описание
|
||||
|
||||
- [unpacker](unpacker): Text-based utility for unpacking game resources in the NRres format. Allows unpacking 100% of game resources.
|
||||
- [packer](packer): Text-based utility for packing game resources in the NRres format. Allows packing 100% of game resources.
|
||||
- [texture-decoder](texture-decoder): (WIP) Decoder for game textures. Decodes approximately 20% of game textures.
|
||||
- [libnres](libnres): _(Deprecation)_ Library for NRes files.
|
||||
- [nres-cli](nres-cli): _(Deprecation)_ Console tool for NRes files.
|
||||
Проект находится в активной разработке и включает:
|
||||
|
||||
- библиотеки для работы с форматами игровых архивов;
|
||||
- инструменты для валидации/подготовки тестовых данных;
|
||||
- спецификации форматов и сопутствующую документацию.
|
||||
|
||||
## Установка
|
||||
|
||||
Проект находится в начальной стадии, подробная инструкция по установке пока отсутствует.
|
||||
|
||||
## Документация
|
||||
|
||||
- локально: каталог [`docs/`](docs)
|
||||
- сайт: <https://fparkan.popov.link>
|
||||
|
||||
## Инструменты
|
||||
|
||||
Вспомогательные инструменты находятся в каталоге [`tools/`](tools).
|
||||
|
||||
- [tools/archive_roundtrip_validator.py](tools/archive_roundtrip_validator.py) — инструмент верификации документации по архивам `NRes`/`RsLi` на реальных файлах (включая `unpack -> repack -> byte-compare`).
|
||||
- [tools/init_testdata.py](tools/init_testdata.py) — подготовка тестовых данных по сигнатурам с раскладкой по каталогам.
|
||||
|
||||
## Библиотеки
|
||||
|
||||
- [crates/nres](crates/nres) — библиотека для работы с файлами архивов NRes (чтение, поиск, редактирование, сохранение).
|
||||
- [crates/rsli](crates/rsli) — библиотека для работы с файлами архивов RsLi (чтение, поиск, загрузка/распаковка поддерживаемых методов).
|
||||
|
||||
## Тестирование
|
||||
|
||||
Базовое тестирование проходит на синтетических тестах из репозитория.
|
||||
|
||||
Для дополнительного тестирования на реальных игровых ресурсах:
|
||||
|
||||
- используйте [tools/init_testdata.py](tools/init_testdata.py) для подготовки локального набора;
|
||||
- используйте оригинальную копию игры (диск или [GOG-версия](https://www.gog.com/en/game/parkan_iron_strategy));
|
||||
- игровые ресурсы в репозиторий не включаются, так как защищены авторским правом.
|
||||
|
||||
## Contributing & Support
|
||||
|
||||
Проект активно поддерживается и открыт для contribution. Issues и pull requests можно создавать в обоих репозиториях:
|
||||
|
||||
- **Primary development**: [valentineus/fparkan](https://code.popov.link/valentineus/fparkan)
|
||||
- **GitHub mirror**: [valentineus/fparkan](https://github.com/valentineus/fparkan)
|
||||
|
||||
Основная разработка ведётся в self-hosted репозитории.
|
||||
|
||||
## Лицензия
|
||||
|
||||
Проект распространяется под лицензией **[GNU GPL v2](LICENSE.txt)**.
|
||||
|
||||
6
crates/common/Cargo.toml
Normal file
6
crates/common/Cargo.toml
Normal file
@@ -0,0 +1,6 @@
|
||||
[package]
|
||||
name = "common"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
43
crates/common/src/lib.rs
Normal file
43
crates/common/src/lib.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use std::io;
|
||||
|
||||
/// Resource payload that can be either borrowed from mapped bytes or owned.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ResourceData<'a> {
|
||||
Borrowed(&'a [u8]),
|
||||
Owned(Vec<u8>),
|
||||
}
|
||||
|
||||
impl<'a> ResourceData<'a> {
|
||||
pub fn as_slice(&self) -> &[u8] {
|
||||
match self {
|
||||
Self::Borrowed(slice) => slice,
|
||||
Self::Owned(buf) => buf.as_slice(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_owned(self) -> Vec<u8> {
|
||||
match self {
|
||||
Self::Borrowed(slice) => slice.to_vec(),
|
||||
Self::Owned(buf) => buf,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for ResourceData<'_> {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
self.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
/// Output sink used by `read_into`/`load_into` APIs.
|
||||
pub trait OutputBuffer {
|
||||
fn write_exact(&mut self, data: &[u8]) -> io::Result<()>;
|
||||
}
|
||||
|
||||
impl OutputBuffer for Vec<u8> {
|
||||
fn write_exact(&mut self, data: &[u8]) -> io::Result<()> {
|
||||
self.clear();
|
||||
self.extend_from_slice(data);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
7
crates/nres/Cargo.toml
Normal file
7
crates/nres/Cargo.toml
Normal file
@@ -0,0 +1,7 @@
|
||||
[package]
|
||||
name = "nres"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
common = { path = "../common" }
|
||||
42
crates/nres/README.md
Normal file
42
crates/nres/README.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# nres
|
||||
|
||||
Rust-библиотека для работы с архивами формата **NRes**.
|
||||
|
||||
## Что умеет
|
||||
|
||||
- Открытие архива из файла (`open_path`) и из памяти (`open_bytes`).
|
||||
- Поддержка `raw_mode` (весь файл как единый ресурс).
|
||||
- Чтение метаданных и итерация по записям.
|
||||
- Поиск по имени без учёта регистра (`find`).
|
||||
- Чтение данных ресурса (`read`, `read_into`, `raw_slice`).
|
||||
- Редактирование архива через `Editor`:
|
||||
- `add`, `replace_data`, `remove`.
|
||||
- `commit` с пересчётом `sort_index`, выравниванием по 8 байт и атомарной записью файла.
|
||||
|
||||
## Модель ошибок
|
||||
|
||||
Библиотека возвращает типизированные ошибки (`InvalidMagic`, `UnsupportedVersion`, `TotalSizeMismatch`, `DirectoryOutOfBounds`, `EntryDataOutOfBounds`, и др.) без паник в production-коде.
|
||||
|
||||
## Покрытие тестами
|
||||
|
||||
### Реальные файлы
|
||||
|
||||
- Рекурсивный прогон по `testdata/nres/**`.
|
||||
- Сейчас в наборе: **120 архивов**.
|
||||
- Для каждого архива проверяется:
|
||||
- чтение всех записей;
|
||||
- `read`/`read_into`/`raw_slice`;
|
||||
- `find`;
|
||||
- `unpack -> repack (Editor::commit)` с проверкой **byte-to-byte**.
|
||||
|
||||
### Синтетические тесты
|
||||
|
||||
- Проверка основных сценариев редактирования (`add/replace/remove/commit`).
|
||||
- Проверка валидации и ошибок:
|
||||
- `InvalidMagic`, `UnsupportedVersion`, `TotalSizeMismatch`, `InvalidEntryCount`, `DirectoryOutOfBounds`, `NameTooLong`, `EntryDataOutOfBounds`, `EntryIdOutOfRange`, `NameContainsNul`.
|
||||
|
||||
## Быстрый запуск тестов
|
||||
|
||||
```bash
|
||||
cargo test -p nres -- --nocapture
|
||||
```
|
||||
103
crates/nres/src/error.rs
Normal file
103
crates/nres/src/error.rs
Normal file
@@ -0,0 +1,103 @@
|
||||
use core::fmt;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[non_exhaustive]
|
||||
pub enum Error {
|
||||
Io(std::io::Error),
|
||||
|
||||
InvalidMagic {
|
||||
got: [u8; 4],
|
||||
},
|
||||
UnsupportedVersion {
|
||||
got: u32,
|
||||
},
|
||||
TotalSizeMismatch {
|
||||
header: u32,
|
||||
actual: u64,
|
||||
},
|
||||
|
||||
InvalidEntryCount {
|
||||
got: i32,
|
||||
},
|
||||
TooManyEntries {
|
||||
got: usize,
|
||||
},
|
||||
DirectoryOutOfBounds {
|
||||
directory_offset: u64,
|
||||
directory_len: u64,
|
||||
file_len: u64,
|
||||
},
|
||||
|
||||
EntryIdOutOfRange {
|
||||
id: u32,
|
||||
entry_count: u32,
|
||||
},
|
||||
EntryDataOutOfBounds {
|
||||
id: u32,
|
||||
offset: u64,
|
||||
size: u32,
|
||||
directory_offset: u64,
|
||||
},
|
||||
NameTooLong {
|
||||
got: usize,
|
||||
max: usize,
|
||||
},
|
||||
NameContainsNul,
|
||||
BadNameEncoding,
|
||||
|
||||
IntegerOverflow,
|
||||
|
||||
RawModeDisallowsOperation(&'static str),
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for Error {
|
||||
fn from(value: std::io::Error) -> Self {
|
||||
Self::Io(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Error::Io(e) => write!(f, "I/O error: {e}"),
|
||||
Error::InvalidMagic { got } => write!(f, "invalid NRes magic: {got:02X?}"),
|
||||
Error::UnsupportedVersion { got } => {
|
||||
write!(f, "unsupported NRes version: {got:#x}")
|
||||
}
|
||||
Error::TotalSizeMismatch { header, actual } => {
|
||||
write!(f, "NRes total_size mismatch: header={header}, actual={actual}")
|
||||
}
|
||||
Error::InvalidEntryCount { got } => write!(f, "invalid entry_count: {got}"),
|
||||
Error::TooManyEntries { got } => write!(f, "too many entries: {got} exceeds u32::MAX"),
|
||||
Error::DirectoryOutOfBounds {
|
||||
directory_offset,
|
||||
directory_len,
|
||||
file_len,
|
||||
} => write!(
|
||||
f,
|
||||
"directory out of bounds: off={directory_offset}, len={directory_len}, file={file_len}"
|
||||
),
|
||||
Error::EntryIdOutOfRange { id, entry_count } => {
|
||||
write!(f, "entry id out of range: id={id}, count={entry_count}")
|
||||
}
|
||||
Error::EntryDataOutOfBounds {
|
||||
id,
|
||||
offset,
|
||||
size,
|
||||
directory_offset,
|
||||
} => write!(
|
||||
f,
|
||||
"entry data out of bounds: id={id}, off={offset}, size={size}, dir_off={directory_offset}"
|
||||
),
|
||||
Error::NameTooLong { got, max } => write!(f, "name too long: {got} > {max}"),
|
||||
Error::NameContainsNul => write!(f, "name contains NUL byte"),
|
||||
Error::BadNameEncoding => write!(f, "bad name encoding"),
|
||||
Error::IntegerOverflow => write!(f, "integer overflow"),
|
||||
Error::RawModeDisallowsOperation(op) => {
|
||||
write!(f, "operation not allowed in raw mode: {op}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
677
crates/nres/src/lib.rs
Normal file
677
crates/nres/src/lib.rs
Normal file
@@ -0,0 +1,677 @@
|
||||
pub mod error;
|
||||
|
||||
use crate::error::Error;
|
||||
use common::{OutputBuffer, ResourceData};
|
||||
use core::ops::Range;
|
||||
use std::cmp::Ordering;
|
||||
use std::fs::{self, OpenOptions as FsOpenOptions};
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
pub type Result<T> = core::result::Result<T, Error>;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct OpenOptions {
|
||||
pub raw_mode: bool,
|
||||
pub sequential_hint: bool,
|
||||
pub prefetch_pages: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub enum OpenMode {
|
||||
#[default]
|
||||
ReadOnly,
|
||||
ReadWrite,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Archive {
|
||||
bytes: Arc<[u8]>,
|
||||
entries: Vec<EntryRecord>,
|
||||
raw_mode: bool,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct EntryId(pub u32);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct EntryMeta {
|
||||
pub kind: u32,
|
||||
pub attr1: u32,
|
||||
pub attr2: u32,
|
||||
pub attr3: u32,
|
||||
pub name: String,
|
||||
pub data_offset: u64,
|
||||
pub data_size: u32,
|
||||
pub sort_index: u32,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct EntryRef<'a> {
|
||||
pub id: EntryId,
|
||||
pub meta: &'a EntryMeta,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct EntryRecord {
|
||||
meta: EntryMeta,
|
||||
name_raw: [u8; 36],
|
||||
}
|
||||
|
||||
impl Archive {
|
||||
pub fn open_path(path: impl AsRef<Path>) -> Result<Self> {
|
||||
Self::open_path_with(path, OpenMode::ReadOnly, OpenOptions::default())
|
||||
}
|
||||
|
||||
pub fn open_path_with(
|
||||
path: impl AsRef<Path>,
|
||||
_mode: OpenMode,
|
||||
opts: OpenOptions,
|
||||
) -> Result<Self> {
|
||||
let bytes = fs::read(path.as_ref())?;
|
||||
let arc: Arc<[u8]> = Arc::from(bytes.into_boxed_slice());
|
||||
Self::open_bytes(arc, opts)
|
||||
}
|
||||
|
||||
pub fn open_bytes(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Self> {
|
||||
let (entries, _) = parse_archive(&bytes, opts.raw_mode)?;
|
||||
if opts.prefetch_pages {
|
||||
prefetch_pages(&bytes);
|
||||
}
|
||||
Ok(Self {
|
||||
bytes,
|
||||
entries,
|
||||
raw_mode: opts.raw_mode,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn entry_count(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
|
||||
self.entries
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, entry)| EntryRef {
|
||||
id: EntryId(u32::try_from(idx).expect("entry count validated at parse")),
|
||||
meta: &entry.meta,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn find(&self, name: &str) -> Option<EntryId> {
|
||||
if self.entries.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
if !self.raw_mode {
|
||||
let mut low = 0usize;
|
||||
let mut high = self.entries.len();
|
||||
while low < high {
|
||||
let mid = low + (high - low) / 2;
|
||||
let Ok(target_idx) = usize::try_from(self.entries[mid].meta.sort_index) else {
|
||||
break;
|
||||
};
|
||||
if target_idx >= self.entries.len() {
|
||||
break;
|
||||
}
|
||||
let cmp = cmp_name_case_insensitive(
|
||||
name.as_bytes(),
|
||||
entry_name_bytes(&self.entries[target_idx].name_raw),
|
||||
);
|
||||
match cmp {
|
||||
Ordering::Less => high = mid,
|
||||
Ordering::Greater => low = mid + 1,
|
||||
Ordering::Equal => {
|
||||
return Some(EntryId(
|
||||
u32::try_from(target_idx).expect("entry count validated at parse"),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.entries.iter().enumerate().find_map(|(idx, entry)| {
|
||||
if cmp_name_case_insensitive(name.as_bytes(), entry_name_bytes(&entry.name_raw))
|
||||
== Ordering::Equal
|
||||
{
|
||||
Some(EntryId(
|
||||
u32::try_from(idx).expect("entry count validated at parse"),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get(&self, id: EntryId) -> Option<EntryRef<'_>> {
|
||||
let idx = usize::try_from(id.0).ok()?;
|
||||
let entry = self.entries.get(idx)?;
|
||||
Some(EntryRef {
|
||||
id,
|
||||
meta: &entry.meta,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn read(&self, id: EntryId) -> Result<ResourceData<'_>> {
|
||||
let range = self.entry_range(id)?;
|
||||
Ok(ResourceData::Borrowed(&self.bytes[range]))
|
||||
}
|
||||
|
||||
pub fn read_into(&self, id: EntryId, out: &mut dyn OutputBuffer) -> Result<usize> {
|
||||
let range = self.entry_range(id)?;
|
||||
out.write_exact(&self.bytes[range.clone()])?;
|
||||
Ok(range.len())
|
||||
}
|
||||
|
||||
pub fn raw_slice(&self, id: EntryId) -> Result<Option<&[u8]>> {
|
||||
let range = self.entry_range(id)?;
|
||||
Ok(Some(&self.bytes[range]))
|
||||
}
|
||||
|
||||
pub fn edit_path(path: impl AsRef<Path>) -> Result<Editor> {
|
||||
let path_buf = path.as_ref().to_path_buf();
|
||||
let bytes = fs::read(&path_buf)?;
|
||||
let arc: Arc<[u8]> = Arc::from(bytes.into_boxed_slice());
|
||||
let (entries, _) = parse_archive(&arc, false)?;
|
||||
let mut editable = Vec::with_capacity(entries.len());
|
||||
for entry in &entries {
|
||||
let range = checked_range(entry.meta.data_offset, entry.meta.data_size, arc.len())?;
|
||||
editable.push(EditableEntry {
|
||||
meta: entry.meta.clone(),
|
||||
name_raw: entry.name_raw,
|
||||
data: EntryData::Borrowed(range), // Copy-on-write: only store range
|
||||
});
|
||||
}
|
||||
Ok(Editor {
|
||||
path: path_buf,
|
||||
source: arc,
|
||||
entries: editable,
|
||||
})
|
||||
}
|
||||
|
||||
fn entry_range(&self, id: EntryId) -> Result<Range<usize>> {
|
||||
let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
|
||||
let Some(entry) = self.entries.get(idx) else {
|
||||
return Err(Error::EntryIdOutOfRange {
|
||||
id: id.0,
|
||||
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
|
||||
});
|
||||
};
|
||||
checked_range(
|
||||
entry.meta.data_offset,
|
||||
entry.meta.data_size,
|
||||
self.bytes.len(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Editor {
|
||||
path: PathBuf,
|
||||
source: Arc<[u8]>,
|
||||
entries: Vec<EditableEntry>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
enum EntryData {
|
||||
Borrowed(Range<usize>),
|
||||
Modified(Vec<u8>),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct EditableEntry {
|
||||
meta: EntryMeta,
|
||||
name_raw: [u8; 36],
|
||||
data: EntryData,
|
||||
}
|
||||
|
||||
impl EditableEntry {
|
||||
fn data_slice<'a>(&'a self, source: &'a Arc<[u8]>) -> &'a [u8] {
|
||||
match &self.data {
|
||||
EntryData::Borrowed(range) => &source[range.clone()],
|
||||
EntryData::Modified(vec) => vec.as_slice(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NewEntry<'a> {
|
||||
pub kind: u32,
|
||||
pub attr1: u32,
|
||||
pub attr2: u32,
|
||||
pub attr3: u32,
|
||||
pub name: &'a str,
|
||||
pub data: &'a [u8],
|
||||
}
|
||||
|
||||
impl Editor {
|
||||
pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
|
||||
self.entries
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, entry)| EntryRef {
|
||||
id: EntryId(u32::try_from(idx).expect("entry count validated at add")),
|
||||
meta: &entry.meta,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn add(&mut self, entry: NewEntry<'_>) -> Result<EntryId> {
|
||||
let name_raw = encode_name_field(entry.name)?;
|
||||
let id_u32 = u32::try_from(self.entries.len()).map_err(|_| Error::IntegerOverflow)?;
|
||||
let data_size = u32::try_from(entry.data.len()).map_err(|_| Error::IntegerOverflow)?;
|
||||
self.entries.push(EditableEntry {
|
||||
meta: EntryMeta {
|
||||
kind: entry.kind,
|
||||
attr1: entry.attr1,
|
||||
attr2: entry.attr2,
|
||||
attr3: entry.attr3,
|
||||
name: decode_name(entry_name_bytes(&name_raw)),
|
||||
data_offset: 0,
|
||||
data_size,
|
||||
sort_index: 0,
|
||||
},
|
||||
name_raw,
|
||||
data: EntryData::Modified(entry.data.to_vec()),
|
||||
});
|
||||
Ok(EntryId(id_u32))
|
||||
}
|
||||
|
||||
pub fn replace_data(&mut self, id: EntryId, data: &[u8]) -> Result<()> {
|
||||
let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
|
||||
let Some(entry) = self.entries.get_mut(idx) else {
|
||||
return Err(Error::EntryIdOutOfRange {
|
||||
id: id.0,
|
||||
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
|
||||
});
|
||||
};
|
||||
entry.meta.data_size = u32::try_from(data.len()).map_err(|_| Error::IntegerOverflow)?;
|
||||
// Replace with new data (triggers copy-on-write if borrowed)
|
||||
entry.data = EntryData::Modified(data.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, id: EntryId) -> Result<()> {
|
||||
let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
|
||||
if idx >= self.entries.len() {
|
||||
return Err(Error::EntryIdOutOfRange {
|
||||
id: id.0,
|
||||
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
|
||||
});
|
||||
}
|
||||
self.entries.remove(idx);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn commit(mut self) -> Result<()> {
|
||||
let count_u32 = u32::try_from(self.entries.len()).map_err(|_| Error::IntegerOverflow)?;
|
||||
|
||||
// Pre-calculate capacity to avoid reallocations
|
||||
let total_data_size: usize = self
|
||||
.entries
|
||||
.iter()
|
||||
.map(|e| e.data_slice(&self.source).len())
|
||||
.sum();
|
||||
let padding_estimate = self.entries.len() * 8; // Max 8 bytes padding per entry
|
||||
let directory_size = self.entries.len() * 64; // 64 bytes per entry
|
||||
let capacity = 16 + total_data_size + padding_estimate + directory_size;
|
||||
|
||||
let mut out = Vec::with_capacity(capacity);
|
||||
out.resize(16, 0); // Header
|
||||
|
||||
// Keep reference to source for copy-on-write
|
||||
let source = &self.source;
|
||||
|
||||
for entry in &mut self.entries {
|
||||
entry.meta.data_offset =
|
||||
u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?;
|
||||
|
||||
// Calculate size and get slice separately to avoid borrow conflicts
|
||||
let data_len = entry.data_slice(source).len();
|
||||
entry.meta.data_size = u32::try_from(data_len).map_err(|_| Error::IntegerOverflow)?;
|
||||
|
||||
// Now get the slice again for writing
|
||||
let data_slice = entry.data_slice(source);
|
||||
out.extend_from_slice(data_slice);
|
||||
|
||||
let padding = (8 - (out.len() % 8)) % 8;
|
||||
if padding > 0 {
|
||||
out.resize(out.len() + padding, 0);
|
||||
}
|
||||
}
|
||||
|
||||
let mut sort_order: Vec<usize> = (0..self.entries.len()).collect();
|
||||
sort_order.sort_by(|a, b| {
|
||||
cmp_name_case_insensitive(
|
||||
entry_name_bytes(&self.entries[*a].name_raw),
|
||||
entry_name_bytes(&self.entries[*b].name_raw),
|
||||
)
|
||||
});
|
||||
|
||||
for (idx, entry) in self.entries.iter_mut().enumerate() {
|
||||
entry.meta.sort_index =
|
||||
u32::try_from(sort_order[idx]).map_err(|_| Error::IntegerOverflow)?;
|
||||
}
|
||||
|
||||
for entry in &self.entries {
|
||||
let data_offset_u32 =
|
||||
u32::try_from(entry.meta.data_offset).map_err(|_| Error::IntegerOverflow)?;
|
||||
push_u32(&mut out, entry.meta.kind);
|
||||
push_u32(&mut out, entry.meta.attr1);
|
||||
push_u32(&mut out, entry.meta.attr2);
|
||||
push_u32(&mut out, entry.meta.data_size);
|
||||
push_u32(&mut out, entry.meta.attr3);
|
||||
out.extend_from_slice(&entry.name_raw);
|
||||
push_u32(&mut out, data_offset_u32);
|
||||
push_u32(&mut out, entry.meta.sort_index);
|
||||
}
|
||||
|
||||
let total_size_u32 = u32::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?;
|
||||
out[0..4].copy_from_slice(b"NRes");
|
||||
out[4..8].copy_from_slice(&0x100_u32.to_le_bytes());
|
||||
out[8..12].copy_from_slice(&count_u32.to_le_bytes());
|
||||
out[12..16].copy_from_slice(&total_size_u32.to_le_bytes());
|
||||
|
||||
write_atomic(&self.path, &out)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses an NRes archive image into its entry records.
///
/// Returns the parsed entries together with the directory offset — the byte
/// offset where the 64-byte-per-entry directory begins, which is also the
/// exclusive upper bound for entry payload data.
///
/// With `raw_mode` set, the bytes are not interpreted at all: a single
/// synthetic entry named "RAW" spanning the whole input is returned, and the
/// reported "directory offset" is simply the input length.
///
/// # Errors
/// Returns a descriptive `Error` variant for a bad magic, unsupported
/// version, negative/oversized entry count, header/file size mismatch,
/// out-of-bounds directory, over-long name, or out-of-bounds entry data.
fn parse_archive(bytes: &[u8], raw_mode: bool) -> Result<(Vec<EntryRecord>, u64)> {
    if raw_mode {
        // Raw mode: wrap the entire buffer as one pseudo-entry, no validation.
        let data_size = u32::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;
        let entry = EntryRecord {
            meta: EntryMeta {
                kind: 0,
                attr1: 0,
                attr2: 0,
                attr3: 0,
                name: String::from("RAW"),
                data_offset: 0,
                data_size,
                sort_index: 0,
            },
            name_raw: {
                // Fixed 36-byte, NUL-padded name field holding "RAW".
                let mut name = [0u8; 36];
                let bytes_name = b"RAW";
                name[..bytes_name.len()].copy_from_slice(bytes_name);
                name
            },
        };
        return Ok((
            vec![entry],
            u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
        ));
    }

    // Anything shorter than the 16-byte header cannot even hold a magic;
    // report whatever prefix bytes were present.
    if bytes.len() < 16 {
        let mut got = [0u8; 4];
        let copy_len = bytes.len().min(4);
        got[..copy_len].copy_from_slice(&bytes[..copy_len]);
        return Err(Error::InvalidMagic { got });
    }

    let mut magic = [0u8; 4];
    magic.copy_from_slice(&bytes[0..4]);
    if &magic != b"NRes" {
        return Err(Error::InvalidMagic { got: magic });
    }

    // Only format version 0x100 is understood.
    let version = read_u32(bytes, 4)?;
    if version != 0x100 {
        return Err(Error::UnsupportedVersion { got: version });
    }

    // The count field is signed on disk; a negative value is rejected
    // explicitly rather than reinterpreted as a huge unsigned count.
    let entry_count_i32 = i32::from_le_bytes(
        bytes[8..12]
            .try_into()
            .map_err(|_| Error::IntegerOverflow)?,
    );
    if entry_count_i32 < 0 {
        return Err(Error::InvalidEntryCount {
            got: entry_count_i32,
        });
    }
    let entry_count = usize::try_from(entry_count_i32).map_err(|_| Error::IntegerOverflow)?;

    // Validate entry_count fits in u32 (required for EntryId)
    if entry_count > u32::MAX as usize {
        return Err(Error::TooManyEntries { got: entry_count });
    }

    // The header's total size must match the actual byte length exactly.
    let total_size = read_u32(bytes, 12)?;
    let actual_size = u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;
    if u64::from(total_size) != actual_size {
        return Err(Error::TotalSizeMismatch {
            header: total_size,
            actual: actual_size,
        });
    }

    // The directory is 64 bytes per entry and sits at the end of the file.
    let directory_len = u64::try_from(entry_count)
        .map_err(|_| Error::IntegerOverflow)?
        .checked_mul(64)
        .ok_or(Error::IntegerOverflow)?;
    let directory_offset =
        u64::from(total_size)
            .checked_sub(directory_len)
            .ok_or(Error::DirectoryOutOfBounds {
                directory_offset: 0,
                directory_len,
                file_len: actual_size,
            })?;

    // The directory must start at or after the 16-byte header and fit
    // entirely inside the file.
    if directory_offset < 16 || directory_offset + directory_len > actual_size {
        return Err(Error::DirectoryOutOfBounds {
            directory_offset,
            directory_len,
            file_len: actual_size,
        });
    }

    let mut entries = Vec::with_capacity(entry_count);
    for index in 0..entry_count {
        // Byte offset of this entry's 64-byte directory row.
        let base = usize::try_from(directory_offset)
            .map_err(|_| Error::IntegerOverflow)?
            .checked_add(index.checked_mul(64).ok_or(Error::IntegerOverflow)?)
            .ok_or(Error::IntegerOverflow)?;

        // Row layout: kind, attr1, attr2, data_size, attr3 (u32 each),
        // then the 36-byte name field, then data_offset and sort_index.
        let kind = read_u32(bytes, base)?;
        let attr1 = read_u32(bytes, base + 4)?;
        let attr2 = read_u32(bytes, base + 8)?;
        let data_size = read_u32(bytes, base + 12)?;
        let attr3 = read_u32(bytes, base + 16)?;

        let mut name_raw = [0u8; 36];
        let name_slice = bytes
            .get(base + 20..base + 56)
            .ok_or(Error::IntegerOverflow)?;
        name_raw.copy_from_slice(name_slice);

        // A valid name occupies at most 35 bytes, leaving room for the
        // terminating NUL inside the 36-byte field.
        let name_bytes = entry_name_bytes(&name_raw);
        if name_bytes.len() > 35 {
            return Err(Error::NameTooLong {
                got: name_bytes.len(),
                max: 35,
            });
        }

        let data_offset = u64::from(read_u32(bytes, base + 56)?);
        let sort_index = read_u32(bytes, base + 60)?;

        // Payload must lie strictly between the header and the directory.
        let end = data_offset
            .checked_add(u64::from(data_size))
            .ok_or(Error::IntegerOverflow)?;
        if data_offset < 16 || end > directory_offset {
            return Err(Error::EntryDataOutOfBounds {
                id: u32::try_from(index).map_err(|_| Error::IntegerOverflow)?,
                offset: data_offset,
                size: data_size,
                directory_offset,
            });
        }

        entries.push(EntryRecord {
            meta: EntryMeta {
                kind,
                attr1,
                attr2,
                attr3,
                name: decode_name(name_bytes),
                data_offset,
                data_size,
                sort_index,
            },
            name_raw,
        });
    }

    Ok((entries, directory_offset))
}
|
||||
|
||||
fn checked_range(offset: u64, size: u32, bytes_len: usize) -> Result<Range<usize>> {
|
||||
let start = usize::try_from(offset).map_err(|_| Error::IntegerOverflow)?;
|
||||
let len = usize::try_from(size).map_err(|_| Error::IntegerOverflow)?;
|
||||
let end = start.checked_add(len).ok_or(Error::IntegerOverflow)?;
|
||||
if end > bytes_len {
|
||||
return Err(Error::IntegerOverflow);
|
||||
}
|
||||
Ok(start..end)
|
||||
}
|
||||
|
||||
fn read_u32(bytes: &[u8], offset: usize) -> Result<u32> {
|
||||
let data = bytes
|
||||
.get(offset..offset + 4)
|
||||
.ok_or(Error::IntegerOverflow)?;
|
||||
let arr: [u8; 4] = data.try_into().map_err(|_| Error::IntegerOverflow)?;
|
||||
Ok(u32::from_le_bytes(arr))
|
||||
}
|
||||
|
||||
/// Appends `value` to `out` in little-endian byte order.
fn push_u32(out: &mut Vec<u8>, value: u32) {
    let encoded = value.to_le_bytes();
    out.extend_from_slice(&encoded);
}
|
||||
|
||||
fn encode_name_field(name: &str) -> Result<[u8; 36]> {
|
||||
let bytes = name.as_bytes();
|
||||
if bytes.contains(&0) {
|
||||
return Err(Error::NameContainsNul);
|
||||
}
|
||||
if bytes.len() > 35 {
|
||||
return Err(Error::NameTooLong {
|
||||
got: bytes.len(),
|
||||
max: 35,
|
||||
});
|
||||
}
|
||||
|
||||
let mut out = [0u8; 36];
|
||||
out[..bytes.len()].copy_from_slice(bytes);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Returns the portion of a fixed name field before the first NUL byte,
/// or the whole field when no NUL is present.
fn entry_name_bytes(raw: &[u8; 36]) -> &[u8] {
    match raw.iter().position(|&b| b == 0) {
        Some(len) => &raw[..len],
        None => &raw[..],
    }
}
|
||||
|
||||
/// Decodes a raw name byte-for-byte: every byte maps to the Unicode scalar
/// of the same value (i.e. Latin-1 interpretation), so the decoding never
/// fails.
fn decode_name(name: &[u8]) -> String {
    let mut decoded = String::with_capacity(name.len());
    for &byte in name {
        decoded.push(char::from(byte));
    }
    decoded
}
|
||||
|
||||
/// Orders two raw names case-insensitively (ASCII only), comparing byte by
/// byte; when one is a prefix of the other, the shorter name sorts first.
fn cmp_name_case_insensitive(a: &[u8], b: &[u8]) -> Ordering {
    for (&x, &y) in a.iter().zip(b.iter()) {
        let ordering = x.to_ascii_lowercase().cmp(&y.to_ascii_lowercase());
        if ordering != Ordering::Equal {
            return ordering;
        }
    }
    a.len().cmp(&b.len())
}
|
||||
|
||||
/// Lowercases a single ASCII byte; all other byte values pass through
/// unchanged. Delegates to the standard library instead of hand-rolling
/// the `+ 32` arithmetic.
fn ascii_lower(value: u8) -> u8 {
    value.to_ascii_lowercase()
}
|
||||
|
||||
/// Touches one byte per 4096-byte step to pre-fault the backing pages.
/// The XOR accumulator plus a compiler fence keep the loads from being
/// optimized away.
fn prefetch_pages(bytes: &[u8]) {
    use std::sync::atomic::{compiler_fence, Ordering};

    let mut checksum = 0u8;
    let mut position = 0usize;
    loop {
        let Some(&byte) = bytes.get(position) else {
            break;
        };
        checksum ^= byte;
        position = position.saturating_add(4096);
    }
    compiler_fence(Ordering::SeqCst);
    let _ = checksum;
}
|
||||
|
||||
fn write_atomic(path: &Path, content: &[u8]) -> Result<()> {
|
||||
let file_name = path
|
||||
.file_name()
|
||||
.and_then(|name| name.to_str())
|
||||
.unwrap_or("archive");
|
||||
let parent = path.parent().unwrap_or_else(|| Path::new("."));
|
||||
|
||||
let mut temp_path = None;
|
||||
for attempt in 0..128u32 {
|
||||
let name = format!(
|
||||
".{}.tmp.{}.{}.{}",
|
||||
file_name,
|
||||
std::process::id(),
|
||||
unix_time_nanos(),
|
||||
attempt
|
||||
);
|
||||
let candidate = parent.join(name);
|
||||
let opened = FsOpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(&candidate);
|
||||
if let Ok(mut file) = opened {
|
||||
file.write_all(content)?;
|
||||
file.sync_all()?;
|
||||
temp_path = Some((candidate, file));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let Some((tmp_path, mut file)) = temp_path else {
|
||||
return Err(Error::Io(std::io::Error::new(
|
||||
std::io::ErrorKind::AlreadyExists,
|
||||
"failed to create temporary file for atomic write",
|
||||
)));
|
||||
};
|
||||
|
||||
file.flush()?;
|
||||
drop(file);
|
||||
|
||||
match fs::rename(&tmp_path, path) {
|
||||
Ok(()) => Ok(()),
|
||||
Err(rename_err) => {
|
||||
if path.exists() {
|
||||
fs::remove_file(path)?;
|
||||
fs::rename(&tmp_path, path)?;
|
||||
Ok(())
|
||||
} else {
|
||||
let _ = fs::remove_file(&tmp_path);
|
||||
Err(Error::Io(rename_err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Nanoseconds elapsed since the Unix epoch, or 0 when the system clock
/// reports a time before the epoch.
fn unix_time_nanos() -> u128 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|duration| duration.as_nanos())
        .unwrap_or(0)
}
|
||||
|
||||
// Unit tests live in src/tests.rs; compiled only for test builds.
#[cfg(test)]
mod tests;
|
||||
800
crates/nres/src/tests.rs
Normal file
800
crates/nres/src/tests.rs
Normal file
@@ -0,0 +1,800 @@
|
||||
use super::*;
|
||||
use std::any::Any;
|
||||
use std::fs;
|
||||
use std::panic::{catch_unwind, AssertUnwindSafe};
|
||||
|
||||
/// In-memory description of one archive entry, used to build synthetic
/// NRes fixtures for the tests (see `build_nres_bytes`).
#[derive(Clone)]
struct SyntheticEntry<'a> {
    // Entry kind tag written verbatim into the directory row.
    kind: u32,
    // Opaque attribute words carried through the format unchanged.
    attr1: u32,
    attr2: u32,
    attr3: u32,
    // Entry name; must fit in 35 bytes (36-byte NUL-padded field on disk).
    name: &'a str,
    // Entry payload bytes.
    data: &'a [u8],
}
|
||||
|
||||
/// Recursively appends every regular file under `root` to `out`.
/// Unreadable directories and failed directory entries are silently skipped.
fn collect_files_recursive(root: &Path, out: &mut Vec<PathBuf>) {
    let entries = match fs::read_dir(root) {
        Ok(entries) => entries,
        Err(_) => return,
    };
    for item in entries.flatten() {
        let child = item.path();
        if child.is_dir() {
            collect_files_recursive(&child, out);
        } else if child.is_file() {
            out.push(child);
        }
    }
}
|
||||
|
||||
fn nres_test_files() -> Vec<PathBuf> {
|
||||
let root = Path::new(env!("CARGO_MANIFEST_DIR"))
|
||||
.join("..")
|
||||
.join("..")
|
||||
.join("testdata")
|
||||
.join("nres");
|
||||
let mut files = Vec::new();
|
||||
collect_files_recursive(&root, &mut files);
|
||||
files.sort();
|
||||
files
|
||||
.into_iter()
|
||||
.filter(|path| {
|
||||
fs::read(path)
|
||||
.map(|data| data.get(0..4) == Some(b"NRes"))
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn make_temp_copy(original: &Path, bytes: &[u8]) -> PathBuf {
|
||||
let mut path = std::env::temp_dir();
|
||||
let file_name = original
|
||||
.file_name()
|
||||
.and_then(|v| v.to_str())
|
||||
.unwrap_or("archive");
|
||||
path.push(format!(
|
||||
"nres-test-{}-{}-{}",
|
||||
std::process::id(),
|
||||
unix_time_nanos(),
|
||||
file_name
|
||||
));
|
||||
fs::write(&path, bytes).expect("failed to create temp file");
|
||||
path
|
||||
}
|
||||
|
||||
/// Extracts a readable message from a panic payload. `String` payloads win
/// over `&str`; anything else yields a fixed placeholder.
fn panic_message(payload: Box<dyn Any + Send>) -> String {
    let any = payload.as_ref();
    match (any.downcast_ref::<String>(), any.downcast_ref::<&str>()) {
        (Some(owned), _) => owned.clone(),
        (None, Some(text)) => (*text).to_string(),
        (None, None) => String::from("panic without message"),
    }
}
|
||||
|
||||
/// Test-side little-endian `u32` reader; panics (instead of returning an
/// error) on out-of-bounds access so fixture bugs fail loudly.
fn read_u32_le(bytes: &[u8], offset: usize) -> u32 {
    let arr: [u8; 4] = bytes
        .get(offset..offset + 4)
        .expect("u32 read out of bounds in test")
        .try_into()
        .expect("u32 conversion failed in test");
    u32::from_le_bytes(arr)
}
|
||||
|
||||
/// Builds a complete, valid NRes archive image from synthetic entries:
/// 16-byte header, 8-byte-aligned payloads, then the 64-byte-per-entry
/// directory carrying the case-insensitive sort table.
fn build_nres_bytes(entries: &[SyntheticEntry<'_>]) -> Vec<u8> {
    // Reserve the 16-byte header up front; it is patched in at the end.
    let mut out = vec![0u8; 16];
    let mut offsets = Vec::with_capacity(entries.len());

    // Payload section: record each entry's offset, then pad to 8 bytes.
    for entry in entries {
        offsets.push(u32::try_from(out.len()).expect("offset overflow"));
        out.extend_from_slice(entry.data);
        let padding = (8 - (out.len() % 8)) % 8;
        if padding > 0 {
            out.resize(out.len() + padding, 0);
        }
    }

    // Sort table: original entry indexes ordered case-insensitively by name.
    let mut sort_order: Vec<usize> = (0..entries.len()).collect();
    sort_order.sort_by(|a, b| {
        cmp_name_case_insensitive(entries[*a].name.as_bytes(), entries[*b].name.as_bytes())
    });

    // Directory section: one 64-byte row per entry.
    for (index, entry) in entries.iter().enumerate() {
        let mut name_raw = [0u8; 36];
        let name_bytes = entry.name.as_bytes();
        assert!(name_bytes.len() <= 35, "name too long in fixture");
        name_raw[..name_bytes.len()].copy_from_slice(name_bytes);

        push_u32(&mut out, entry.kind);
        push_u32(&mut out, entry.attr1);
        push_u32(&mut out, entry.attr2);
        push_u32(
            &mut out,
            u32::try_from(entry.data.len()).expect("data size overflow"),
        );
        push_u32(&mut out, entry.attr3);
        out.extend_from_slice(&name_raw);
        push_u32(&mut out, offsets[index]);
        push_u32(
            &mut out,
            u32::try_from(sort_order[index]).expect("sort index overflow"),
        );
    }

    // Header: magic, version 0x100, entry count, total file size.
    out[0..4].copy_from_slice(b"NRes");
    out[4..8].copy_from_slice(&0x100_u32.to_le_bytes());
    out[8..12].copy_from_slice(
        &u32::try_from(entries.len())
            .expect("count overflow")
            .to_le_bytes(),
    );
    let total_size = u32::try_from(out.len()).expect("size overflow");
    out[12..16].copy_from_slice(&total_size.to_le_bytes());
    out
}
|
||||
|
||||
// Opens every NRes archive in testdata, validates all read paths against
// each other (read / read_into / raw_slice / find), then rewrites each
// archive through the editor and requires a byte-identical roundtrip.
// Each file is checked inside catch_unwind so one failure doesn't hide
// results for the rest; a summary is printed at the end.
#[test]
fn nres_read_and_roundtrip_all_files() {
    let files = nres_test_files();
    if files.is_empty() {
        // No fixtures checked in: skip instead of failing.
        eprintln!("skipping nres_read_and_roundtrip_all_files: no NRes archives in testdata/nres");
        return;
    }

    let checked = files.len();
    let mut success = 0usize;
    let mut failures = Vec::new();

    for path in files {
        let display_path = path.display().to_string();
        let result = catch_unwind(AssertUnwindSafe(|| {
            let original = fs::read(&path).expect("failed to read archive");
            let archive = Archive::open_path(&path)
                .unwrap_or_else(|err| panic!("failed to open {}: {err}", path.display()));

            let count = archive.entry_count();
            assert_eq!(
                count,
                archive.entries().count(),
                "entry count mismatch: {}",
                path.display()
            );

            for idx in 0..count {
                let id = EntryId(idx as u32);
                let entry = archive
                    .get(id)
                    .unwrap_or_else(|| panic!("missing entry #{idx} in {}", path.display()));

                let payload = archive.read(id).unwrap_or_else(|err| {
                    panic!("read failed for {} entry #{idx}: {err}", path.display())
                });

                // read_into must report the same size and bytes as read.
                let mut out = Vec::new();
                let written = archive.read_into(id, &mut out).unwrap_or_else(|err| {
                    panic!(
                        "read_into failed for {} entry #{idx}: {err}",
                        path.display()
                    )
                });
                assert_eq!(
                    written,
                    payload.as_slice().len(),
                    "size mismatch in {} entry #{idx}",
                    path.display()
                );
                assert_eq!(
                    out.as_slice(),
                    payload.as_slice(),
                    "payload mismatch in {} entry #{idx}",
                    path.display()
                );

                // raw_slice must agree with read for file-backed archives.
                let raw = archive
                    .raw_slice(id)
                    .unwrap_or_else(|err| {
                        panic!(
                            "raw_slice failed for {} entry #{idx}: {err}",
                            path.display()
                        )
                    })
                    .expect("raw_slice must return Some for file-backed archive");
                assert_eq!(
                    raw,
                    payload.as_slice(),
                    "raw slice mismatch in {} entry #{idx}",
                    path.display()
                );

                // find() by the entry's own name must resolve to an entry
                // with the same name (case-insensitively).
                let found = archive.find(&entry.meta.name).unwrap_or_else(|| {
                    panic!(
                        "find failed for name '{}' in {}",
                        entry.meta.name,
                        path.display()
                    )
                });
                let found_meta = archive.get(found).expect("find returned invalid id");
                assert!(
                    found_meta.meta.name.eq_ignore_ascii_case(&entry.meta.name),
                    "find returned unrelated entry in {}",
                    path.display()
                );
            }

            // Roundtrip: replace every entry with its own data and commit;
            // the rebuilt file must be byte-identical to the original.
            let temp_copy = make_temp_copy(&path, &original);
            let mut editor = Archive::edit_path(&temp_copy)
                .unwrap_or_else(|err| panic!("edit_path failed for {}: {err}", path.display()));

            for idx in 0..count {
                let data = archive
                    .read(EntryId(idx as u32))
                    .unwrap_or_else(|err| {
                        panic!(
                            "read before replace failed for {} entry #{idx}: {err}",
                            path.display()
                        )
                    })
                    .into_owned();
                editor
                    .replace_data(EntryId(idx as u32), &data)
                    .unwrap_or_else(|err| {
                        panic!(
                            "replace_data failed for {} entry #{idx}: {err}",
                            path.display()
                        )
                    });
            }

            editor
                .commit()
                .unwrap_or_else(|err| panic!("commit failed for {}: {err}", path.display()));
            let rebuilt = fs::read(&temp_copy).expect("failed to read rebuilt archive");
            let _ = fs::remove_file(&temp_copy);

            assert_eq!(
                original,
                rebuilt,
                "byte-to-byte roundtrip mismatch for {}",
                path.display()
            );
        }));

        match result {
            Ok(()) => success += 1,
            Err(payload) => {
                failures.push(format!("{}: {}", display_path, panic_message(payload)));
            }
        }
    }

    let failed = failures.len();
    eprintln!(
        "NRes summary: checked={}, success={}, failed={}",
        checked, success, failed
    );
    if !failures.is_empty() {
        panic!(
            "NRes validation failed.\nsummary: checked={}, success={}, failed={}\n{}",
            checked,
            success,
            failed,
            failures.join("\n")
        );
    }
}
|
||||
|
||||
// raw_mode must present the entire file as one entry whose payload is the
// complete, unmodified file content.
#[test]
fn nres_raw_mode_exposes_whole_file() {
    let files = nres_test_files();
    let Some(first) = files.first() else {
        eprintln!("skipping nres_raw_mode_exposes_whole_file: no NRes archives in testdata/nres");
        return;
    };
    let original = fs::read(first).expect("failed to read archive");
    let arc: Arc<[u8]> = Arc::from(original.clone().into_boxed_slice());

    let archive = Archive::open_bytes(
        arc,
        OpenOptions {
            raw_mode: true,
            sequential_hint: false,
            prefetch_pages: false,
        },
    )
    .expect("raw mode open failed");

    // Exactly one pseudo-entry covering the whole file.
    assert_eq!(archive.entry_count(), 1);
    let data = archive.read(EntryId(0)).expect("raw read failed");
    assert_eq!(data.as_slice(), original.as_slice());
}
|
||||
|
||||
// Non-NRes bytes must be rejected with InvalidMagic in normal mode, but
// accepted verbatim in raw_mode, where the single entry is findable by the
// case-insensitive name "raw".
#[test]
fn nres_raw_mode_accepts_non_nres_bytes() {
    let payload = b"not-an-nres-archive".to_vec();
    let bytes: Arc<[u8]> = Arc::from(payload.clone().into_boxed_slice());

    match Archive::open_bytes(bytes.clone(), OpenOptions::default()) {
        Err(Error::InvalidMagic { .. }) => {}
        other => panic!("expected InvalidMagic without raw_mode, got {other:?}"),
    }

    let archive = Archive::open_bytes(
        bytes,
        OpenOptions {
            raw_mode: true,
            sequential_hint: false,
            prefetch_pages: false,
        },
    )
    .expect("raw_mode should accept any bytes");

    assert_eq!(archive.entry_count(), 1);
    // Lookup is case-insensitive, so "raw" matches the synthetic "RAW" name.
    assert_eq!(archive.find("raw"), Some(EntryId(0)));
    assert_eq!(
        archive
            .read(EntryId(0))
            .expect("raw read failed")
            .as_slice(),
        payload.as_slice()
    );
}
|
||||
|
||||
// The sequential_hint / prefetch_pages open options are performance hints
// only: an archive opened with them must expose the same entries and bytes
// as a baseline open. The >64 KiB payload spans multiple pages so the
// prefetch path actually runs.
#[test]
fn nres_open_options_hints_do_not_change_payload() {
    let payload: Vec<u8> = (0..70_000u32).map(|v| (v % 251) as u8).collect();
    let src = build_nres_bytes(&[SyntheticEntry {
        kind: 7,
        attr1: 70,
        attr2: 700,
        attr3: 7000,
        name: "big.bin",
        data: &payload,
    }]);
    let arc: Arc<[u8]> = Arc::from(src.into_boxed_slice());

    let baseline = Archive::open_bytes(arc.clone(), OpenOptions::default())
        .expect("baseline open should succeed");
    let hinted = Archive::open_bytes(
        arc,
        OpenOptions {
            raw_mode: false,
            sequential_hint: true,
            prefetch_pages: true,
        },
    )
    .expect("open with hints should succeed");

    assert_eq!(baseline.entry_count(), 1);
    assert_eq!(hinted.entry_count(), 1);
    assert_eq!(baseline.find("BIG.BIN"), Some(EntryId(0)));
    assert_eq!(hinted.find("big.bin"), Some(EntryId(0)));
    assert_eq!(
        baseline
            .read(EntryId(0))
            .expect("baseline read failed")
            .as_slice(),
        hinted
            .read(EntryId(0))
            .expect("hinted read failed")
            .as_slice()
    );
}
|
||||
|
||||
// Committing an untouched empty archive must produce exactly the 16-byte
// header: magic, version 0x100, count 0, total size 16.
#[test]
fn nres_commit_empty_archive_has_minimal_layout() {
    let mut path = std::env::temp_dir();
    path.push(format!(
        "nres-empty-commit-{}-{}.lib",
        std::process::id(),
        unix_time_nanos()
    ));
    fs::write(&path, build_nres_bytes(&[])).expect("write empty archive failed");

    Archive::edit_path(&path)
        .expect("edit_path failed for empty archive")
        .commit()
        .expect("commit failed for empty archive");

    let bytes = fs::read(&path).expect("failed to read committed archive");
    assert_eq!(bytes.len(), 16, "empty archive must contain only header");
    assert_eq!(&bytes[0..4], b"NRes");
    assert_eq!(read_u32_le(&bytes, 4), 0x100);
    assert_eq!(read_u32_le(&bytes, 8), 0);
    assert_eq!(read_u32_le(&bytes, 12), 16);

    let _ = fs::remove_file(&path);
}
|
||||
|
||||
// After adding three entries and committing, the on-disk layout must be
// fully recomputed: correct header fields, 8-byte-aligned non-overlapping
// payloads, a directory at the end of the file, and a sort table holding
// the original indexes in case-insensitive alphabetical name order.
#[test]
fn nres_commit_recomputes_header_directory_and_sort_table() {
    let mut path = std::env::temp_dir();
    path.push(format!(
        "nres-commit-layout-{}-{}.lib",
        std::process::id(),
        unix_time_nanos()
    ));
    fs::write(&path, build_nres_bytes(&[])).expect("write empty archive failed");

    // Names deliberately mix cases so the sort table differs from insertion
    // order: alpha < Beta < Zulu case-insensitively.
    let mut editor = Archive::edit_path(&path).expect("edit_path failed");
    editor
        .add(NewEntry {
            kind: 10,
            attr1: 1,
            attr2: 2,
            attr3: 3,
            name: "Zulu",
            data: b"aaaaa",
        })
        .expect("add #0 failed");
    editor
        .add(NewEntry {
            kind: 11,
            attr1: 4,
            attr2: 5,
            attr3: 6,
            name: "alpha",
            data: b"bbbbbbbb",
        })
        .expect("add #1 failed");
    editor
        .add(NewEntry {
            kind: 12,
            attr1: 7,
            attr2: 8,
            attr3: 9,
            name: "Beta",
            data: b"cccc",
        })
        .expect("add #2 failed");
    editor.commit().expect("commit failed");

    let bytes = fs::read(&path).expect("failed to read committed archive");
    assert_eq!(&bytes[0..4], b"NRes");
    assert_eq!(read_u32_le(&bytes, 4), 0x100);

    let entry_count = usize::try_from(read_u32_le(&bytes, 8)).expect("entry_count overflow");
    let total_size = usize::try_from(read_u32_le(&bytes, 12)).expect("total_size overflow");
    assert_eq!(entry_count, 3);
    assert_eq!(total_size, bytes.len());

    // The 64-byte-per-entry directory sits at the end of the file.
    let directory_offset = total_size
        .checked_sub(entry_count * 64)
        .expect("invalid directory offset");
    assert!(directory_offset >= 16);

    let mut sort_indices = Vec::new();
    let mut prev_data_end = 16usize;
    for idx in 0..entry_count {
        let base = directory_offset + idx * 64;
        let data_size = usize::try_from(read_u32_le(&bytes, base + 12)).expect("size overflow");
        let data_offset = usize::try_from(read_u32_le(&bytes, base + 56)).expect("offset overflow");
        let sort_index =
            usize::try_from(read_u32_le(&bytes, base + 60)).expect("sort index overflow");

        assert_eq!(
            data_offset % 8,
            0,
            "entry #{idx} data offset must be 8-byte aligned"
        );
        assert!(
            data_offset >= prev_data_end,
            "entry #{idx} offset regressed"
        );
        assert!(
            data_offset + data_size <= directory_offset,
            "entry #{idx} overlaps directory"
        );
        prev_data_end = data_offset + data_size;
        sort_indices.push(sort_index);
    }

    // Expected sort table computed independently from the fixture names.
    let names = ["Zulu", "alpha", "Beta"];
    let mut expected_sort: Vec<usize> = (0..names.len()).collect();
    expected_sort
        .sort_by(|a, b| cmp_name_case_insensitive(names[*a].as_bytes(), names[*b].as_bytes()));
    assert_eq!(
        sort_indices, expected_sort,
        "sort table must contain original indexes in case-insensitive alphabetical order"
    );

    // Re-open and confirm case-insensitive lookup hits the right ids.
    let archive = Archive::open_path(&path).expect("re-open failed");
    assert_eq!(archive.find("zulu"), Some(EntryId(0)));
    assert_eq!(archive.find("ALPHA"), Some(EntryId(1)));
    assert_eq!(archive.find("beta"), Some(EntryId(2)));

    let _ = fs::remove_file(&path);
}
|
||||
|
||||
// End-to-end check on a synthetic three-entry archive (including an empty
// payload): case-insensitive find, exact reads, then an edit session that
// replaces, adds, and removes entries and verifies the committed result.
#[test]
fn nres_synthetic_read_find_and_edit() {
    let payload_a = b"alpha";
    let payload_b = b"B";
    let payload_c = b"";
    let src = build_nres_bytes(&[
        SyntheticEntry {
            kind: 1,
            attr1: 10,
            attr2: 20,
            attr3: 30,
            name: "Alpha.TXT",
            data: payload_a,
        },
        SyntheticEntry {
            kind: 2,
            attr1: 11,
            attr2: 21,
            attr3: 31,
            name: "beta.bin",
            data: payload_b,
        },
        SyntheticEntry {
            kind: 3,
            attr1: 12,
            attr2: 22,
            attr3: 32,
            name: "Gamma",
            data: payload_c,
        },
    ]);

    let archive = Archive::open_bytes(
        Arc::from(src.clone().into_boxed_slice()),
        OpenOptions::default(),
    )
    .expect("open synthetic nres failed");

    assert_eq!(archive.entry_count(), 3);
    // Lookups are case-insensitive in both directions.
    assert_eq!(archive.find("alpha.txt"), Some(EntryId(0)));
    assert_eq!(archive.find("BETA.BIN"), Some(EntryId(1)));
    assert_eq!(archive.find("gAmMa"), Some(EntryId(2)));
    assert_eq!(archive.find("missing"), None);

    assert_eq!(
        archive.read(EntryId(0)).expect("read #0 failed").as_slice(),
        payload_a
    );
    assert_eq!(
        archive.read(EntryId(1)).expect("read #1 failed").as_slice(),
        payload_b
    );
    assert_eq!(
        archive.read(EntryId(2)).expect("read #2 failed").as_slice(),
        payload_c
    );

    let mut path = std::env::temp_dir();
    path.push(format!(
        "nres-synth-edit-{}-{}.lib",
        std::process::id(),
        unix_time_nanos()
    ));
    fs::write(&path, &src).expect("write temp synthetic archive failed");

    // Edit session: replace entry #1, add "delta", remove "Gamma".
    let mut editor = Archive::edit_path(&path).expect("edit_path on synthetic archive failed");
    editor
        .replace_data(EntryId(1), b"replaced")
        .expect("replace_data failed");
    let added = editor
        .add(NewEntry {
            kind: 4,
            attr1: 13,
            attr2: 23,
            attr3: 33,
            name: "delta",
            data: b"new payload",
        })
        .expect("add failed");
    assert_eq!(added, EntryId(3));
    editor.remove(EntryId(2)).expect("remove failed");
    editor.commit().expect("commit failed");

    // Count stays 3: one removed, one added.
    let edited = Archive::open_path(&path).expect("re-open edited archive failed");
    assert_eq!(edited.entry_count(), 3);
    assert_eq!(
        edited
            .read(edited.find("beta.bin").expect("find beta.bin failed"))
            .expect("read beta.bin failed")
            .as_slice(),
        b"replaced"
    );
    assert_eq!(
        edited
            .read(edited.find("delta").expect("find delta failed"))
            .expect("read delta failed")
            .as_slice(),
        b"new payload"
    );
    assert_eq!(edited.find("gamma"), None);

    let _ = fs::remove_file(&path);
}
|
||||
|
||||
// Corrupting one on-disk sort index to u32::MAX must not break find():
// lookups for every entry (including the corrupted one) still resolve,
// implying a fallback path when the sort table is unusable.
#[test]
fn nres_find_falls_back_when_sort_index_is_out_of_range() {
    let mut bytes = build_nres_bytes(&[
        SyntheticEntry {
            kind: 1,
            attr1: 0,
            attr2: 0,
            attr3: 0,
            name: "Alpha",
            data: b"a",
        },
        SyntheticEntry {
            kind: 2,
            attr1: 0,
            attr2: 0,
            attr3: 0,
            name: "Beta",
            data: b"b",
        },
        SyntheticEntry {
            kind: 3,
            attr1: 0,
            attr2: 0,
            attr3: 0,
            name: "Gamma",
            data: b"c",
        },
    ]);

    // Overwrite the middle entry's sort index (offset 60 in its 64-byte
    // directory row) with an out-of-range value.
    let entry_count = 3usize;
    let directory_offset = bytes
        .len()
        .checked_sub(entry_count * 64)
        .expect("directory offset underflow");
    let mid_entry_sort_index = directory_offset + 64 + 60;
    bytes[mid_entry_sort_index..mid_entry_sort_index + 4].copy_from_slice(&u32::MAX.to_le_bytes());

    let archive = Archive::open_bytes(Arc::from(bytes.into_boxed_slice()), OpenOptions::default())
        .expect("open archive with corrupted sort index failed");

    assert_eq!(archive.find("alpha"), Some(EntryId(0)));
    assert_eq!(archive.find("BETA"), Some(EntryId(1)));
    assert_eq!(archive.find("gamma"), Some(EntryId(2)));
    assert_eq!(archive.find("missing"), None);
}
|
||||
|
||||
// Exercises each parser validation error by corrupting one field of a
// known-valid single-entry archive at a time: magic, version, total size,
// entry count, directory bounds, name length, and entry data bounds; plus
// an out-of-range EntryId on a valid archive.
#[test]
fn nres_validation_error_cases() {
    let valid = build_nres_bytes(&[SyntheticEntry {
        kind: 1,
        attr1: 2,
        attr2: 3,
        attr3: 4,
        name: "ok",
        data: b"1234",
    }]);

    // Wrong magic.
    let mut invalid_magic = valid.clone();
    invalid_magic[0..4].copy_from_slice(b"FAIL");
    match Archive::open_bytes(
        Arc::from(invalid_magic.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::InvalidMagic { .. }) => {}
        other => panic!("expected InvalidMagic, got {other:?}"),
    }

    // Unsupported version.
    let mut invalid_version = valid.clone();
    invalid_version[4..8].copy_from_slice(&0x200_u32.to_le_bytes());
    match Archive::open_bytes(
        Arc::from(invalid_version.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::UnsupportedVersion { got }) => assert_eq!(got, 0x200),
        other => panic!("expected UnsupportedVersion, got {other:?}"),
    }

    // Header total size disagreeing with the real file length.
    let mut bad_total = valid.clone();
    bad_total[12..16].copy_from_slice(&0_u32.to_le_bytes());
    match Archive::open_bytes(
        Arc::from(bad_total.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::TotalSizeMismatch { .. }) => {}
        other => panic!("expected TotalSizeMismatch, got {other:?}"),
    }

    // Negative entry count.
    let mut bad_count = valid.clone();
    bad_count[8..12].copy_from_slice(&(-1_i32).to_le_bytes());
    match Archive::open_bytes(
        Arc::from(bad_count.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::InvalidEntryCount { got }) => assert_eq!(got, -1),
        other => panic!("expected InvalidEntryCount, got {other:?}"),
    }

    // Entry count so large the directory cannot fit in the file.
    let mut bad_dir = valid.clone();
    bad_dir[8..12].copy_from_slice(&1000_u32.to_le_bytes());
    match Archive::open_bytes(
        Arc::from(bad_dir.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::DirectoryOutOfBounds { .. }) => {}
        other => panic!("expected DirectoryOutOfBounds, got {other:?}"),
    }

    // Name field filled with 36 non-NUL bytes: no room for the terminator.
    let mut long_name = valid.clone();
    let entry_base = long_name.len() - 64;
    for b in &mut long_name[entry_base + 20..entry_base + 56] {
        *b = b'X';
    }
    match Archive::open_bytes(
        Arc::from(long_name.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::NameTooLong { .. }) => {}
        other => panic!("expected NameTooLong, got {other:?}"),
    }

    // Entry data range (offset 12, size 32) extending into the directory.
    let mut bad_data = valid.clone();
    bad_data[entry_base + 56..entry_base + 60].copy_from_slice(&12_u32.to_le_bytes());
    bad_data[entry_base + 12..entry_base + 16].copy_from_slice(&32_u32.to_le_bytes());
    match Archive::open_bytes(
        Arc::from(bad_data.into_boxed_slice()),
        OpenOptions::default(),
    ) {
        Err(Error::EntryDataOutOfBounds { .. }) => {}
        other => panic!("expected EntryDataOutOfBounds, got {other:?}"),
    }

    // Reading a non-existent id on a valid archive.
    let archive = Archive::open_bytes(Arc::from(valid.into_boxed_slice()), OpenOptions::default())
        .expect("open valid archive failed");
    match archive.read(EntryId(99)) {
        Err(Error::EntryIdOutOfRange { .. }) => {}
        other => panic!("expected EntryIdOutOfRange, got {other:?}"),
    }
}
|
||||
|
||||
#[test]
|
||||
fn nres_editor_validation_error_cases() {
|
||||
let mut path = std::env::temp_dir();
|
||||
path.push(format!(
|
||||
"nres-editor-errors-{}-{}.lib",
|
||||
std::process::id(),
|
||||
unix_time_nanos()
|
||||
));
|
||||
let src = build_nres_bytes(&[]);
|
||||
fs::write(&path, src).expect("write empty archive failed");
|
||||
|
||||
let mut editor = Archive::edit_path(&path).expect("edit_path failed");
|
||||
|
||||
let long_name = "X".repeat(36);
|
||||
match editor.add(NewEntry {
|
||||
kind: 0,
|
||||
attr1: 0,
|
||||
attr2: 0,
|
||||
attr3: 0,
|
||||
name: &long_name,
|
||||
data: b"",
|
||||
}) {
|
||||
Err(Error::NameTooLong { .. }) => {}
|
||||
other => panic!("expected NameTooLong, got {other:?}"),
|
||||
}
|
||||
|
||||
match editor.add(NewEntry {
|
||||
kind: 0,
|
||||
attr1: 0,
|
||||
attr2: 0,
|
||||
attr3: 0,
|
||||
name: "bad\0name",
|
||||
data: b"",
|
||||
}) {
|
||||
Err(Error::NameContainsNul) => {}
|
||||
other => panic!("expected NameContainsNul, got {other:?}"),
|
||||
}
|
||||
|
||||
match editor.replace_data(EntryId(0), b"x") {
|
||||
Err(Error::EntryIdOutOfRange { .. }) => {}
|
||||
other => panic!("expected EntryIdOutOfRange, got {other:?}"),
|
||||
}
|
||||
|
||||
match editor.remove(EntryId(0)) {
|
||||
Err(Error::EntryIdOutOfRange { .. }) => {}
|
||||
other => panic!("expected EntryIdOutOfRange, got {other:?}"),
|
||||
}
|
||||
|
||||
let _ = fs::remove_file(&path);
|
||||
}
|
||||
8
crates/rsli/Cargo.toml
Normal file
8
crates/rsli/Cargo.toml
Normal file
@@ -0,0 +1,8 @@
|
||||
[package]
|
||||
name = "rsli"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
common = { path = "../common" }
|
||||
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
|
||||
58
crates/rsli/README.md
Normal file
58
crates/rsli/README.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# rsli
|
||||
|
||||
Rust-библиотека для чтения архивов формата **RsLi**.
|
||||
|
||||
## Что умеет
|
||||
|
||||
- Открытие библиотеки из файла (`open_path`, `open_path_with`).
|
||||
- Дешифрование таблицы записей (XOR stream cipher).
|
||||
- Поддержка AO-трейлера и media overlay (`allow_ao_trailer`).
|
||||
- Поддержка quirk для Deflate `EOF+1` (`allow_deflate_eof_plus_one`).
|
||||
- Поиск по имени (`find`, c приведением запроса к uppercase).
|
||||
- Загрузка данных:
|
||||
- `load`, `load_into`, `load_packed`, `unpack`, `load_fast`.
|
||||
|
||||
## Поддерживаемые методы упаковки
|
||||
|
||||
- `0x000` None
|
||||
- `0x020` XorOnly
|
||||
- `0x040` Lzss
|
||||
- `0x060` XorLzss
|
||||
- `0x080` LzssHuffman
|
||||
- `0x0A0` XorLzssHuffman
|
||||
- `0x100` Deflate
|
||||
|
||||
## Модель ошибок
|
||||
|
||||
Типизированные ошибки без паник в production-коде (`InvalidMagic`, `UnsupportedVersion`, `EntryTableOutOfBounds`, `PackedSizePastEof`, `DeflateEofPlusOneQuirkRejected`, `UnsupportedMethod`, и др.).
|
||||
|
||||
## Покрытие тестами
|
||||
|
||||
### Реальные файлы
|
||||
|
||||
- Рекурсивный прогон по `testdata/rsli/**`.
|
||||
- Сейчас в наборе: **2 архива**.
|
||||
- На реальных данных подтверждены и проходят byte-to-byte проверки методы:
|
||||
- `0x040` (LZSS)
|
||||
- `0x100` (Deflate)
|
||||
- Для каждого архива проверяется:
|
||||
- `load`/`load_into`/`load_packed`/`unpack`/`load_fast`;
|
||||
- `find`;
|
||||
- пересборка и сравнение **byte-to-byte**.
|
||||
|
||||
### Синтетические тесты
|
||||
|
||||
Из-за отсутствия реальных файлов для части методов добавлены синтетические архивы и тесты:
|
||||
|
||||
- Методы:
|
||||
- `0x000`, `0x020`, `0x060`, `0x080`, `0x0A0`.
|
||||
- Спецкейсы формата:
|
||||
- AO trailer + overlay;
|
||||
- Deflate `EOF+1` (оба режима: accepted/rejected);
|
||||
- некорректные заголовки/таблицы/смещения/методы.
|
||||
|
||||
## Быстрый запуск тестов
|
||||
|
||||
```bash
|
||||
cargo test -p rsli -- --nocapture
|
||||
```
|
||||
19
crates/rsli/src/compress/deflate.rs
Normal file
19
crates/rsli/src/compress/deflate.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
use crate::error::Error;
|
||||
use crate::Result;
|
||||
use flate2::read::{DeflateDecoder, ZlibDecoder};
|
||||
use std::io::Read;
|
||||
|
||||
/// Decode Deflate or Zlib compressed data
|
||||
pub fn decode_deflate(packed: &[u8]) -> Result<Vec<u8>> {
|
||||
let mut out = Vec::new();
|
||||
let mut decoder = DeflateDecoder::new(packed);
|
||||
if decoder.read_to_end(&mut out).is_ok() {
|
||||
return Ok(out);
|
||||
}
|
||||
|
||||
out.clear();
|
||||
let mut zlib = ZlibDecoder::new(packed);
|
||||
zlib.read_to_end(&mut out)
|
||||
.map_err(|_| Error::DecompressionFailed("deflate"))?;
|
||||
Ok(out)
|
||||
}
|
||||
298
crates/rsli/src/compress/lzh.rs
Normal file
298
crates/rsli/src/compress/lzh.rs
Normal file
@@ -0,0 +1,298 @@
|
||||
use super::xor::XorState;
|
||||
use crate::error::Error;
|
||||
use crate::Result;
|
||||
|
||||
pub(crate) const LZH_N: usize = 4096;
|
||||
pub(crate) const LZH_F: usize = 60;
|
||||
pub(crate) const LZH_THRESHOLD: usize = 2;
|
||||
pub(crate) const LZH_N_CHAR: usize = 256 - LZH_THRESHOLD + LZH_F;
|
||||
pub(crate) const LZH_T: usize = LZH_N_CHAR * 2 - 1;
|
||||
pub(crate) const LZH_R: usize = LZH_T - 1;
|
||||
pub(crate) const LZH_MAX_FREQ: u16 = 0x8000;
|
||||
|
||||
/// LZSS-Huffman decompression with optional on-the-fly XOR decryption.
|
||||
pub fn lzss_huffman_decompress(
|
||||
data: &[u8],
|
||||
expected_size: usize,
|
||||
xor_key: Option<u16>,
|
||||
) -> Result<Vec<u8>> {
|
||||
let mut decoder = LzhDecoder::new(data, xor_key);
|
||||
decoder.decode(expected_size)
|
||||
}
|
||||
|
||||
struct LzhDecoder<'a> {
|
||||
bit_reader: BitReader<'a>,
|
||||
text: [u8; LZH_N],
|
||||
freq: [u16; LZH_T + 1],
|
||||
parent: [usize; LZH_T + LZH_N_CHAR],
|
||||
son: [usize; LZH_T],
|
||||
d_code: [u8; 256],
|
||||
d_len: [u8; 256],
|
||||
ring_pos: usize,
|
||||
}
|
||||
|
||||
impl<'a> LzhDecoder<'a> {
|
||||
fn new(data: &'a [u8], xor_key: Option<u16>) -> Self {
|
||||
let mut decoder = Self {
|
||||
bit_reader: BitReader::new(data, xor_key),
|
||||
text: [0x20u8; LZH_N],
|
||||
freq: [0u16; LZH_T + 1],
|
||||
parent: [0usize; LZH_T + LZH_N_CHAR],
|
||||
son: [0usize; LZH_T],
|
||||
d_code: [0u8; 256],
|
||||
d_len: [0u8; 256],
|
||||
ring_pos: LZH_N - LZH_F,
|
||||
};
|
||||
decoder.init_tables();
|
||||
decoder.start_huff();
|
||||
decoder
|
||||
}
|
||||
|
||||
fn decode(&mut self, expected_size: usize) -> Result<Vec<u8>> {
|
||||
let mut out = Vec::with_capacity(expected_size);
|
||||
|
||||
while out.len() < expected_size {
|
||||
let c = self.decode_char();
|
||||
if c < 256 {
|
||||
let byte = c as u8;
|
||||
out.push(byte);
|
||||
self.text[self.ring_pos] = byte;
|
||||
self.ring_pos = (self.ring_pos + 1) & (LZH_N - 1);
|
||||
} else {
|
||||
let mut offset = self.decode_position();
|
||||
offset = (self.ring_pos.wrapping_sub(offset).wrapping_sub(1)) & (LZH_N - 1);
|
||||
let mut length = c.saturating_sub(253);
|
||||
|
||||
while length > 0 && out.len() < expected_size {
|
||||
let byte = self.text[offset];
|
||||
out.push(byte);
|
||||
self.text[self.ring_pos] = byte;
|
||||
self.ring_pos = (self.ring_pos + 1) & (LZH_N - 1);
|
||||
offset = (offset + 1) & (LZH_N - 1);
|
||||
length -= 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if out.len() != expected_size {
|
||||
return Err(Error::DecompressionFailed("lzss-huffman"));
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn init_tables(&mut self) {
|
||||
let d_code_group_counts = [1usize, 3, 8, 12, 24, 16];
|
||||
let d_len_group_counts = [32usize, 48, 64, 48, 48, 16];
|
||||
|
||||
let mut group_index = 0u8;
|
||||
let mut idx = 0usize;
|
||||
let mut run = 32usize;
|
||||
for count in d_code_group_counts {
|
||||
for _ in 0..count {
|
||||
for _ in 0..run {
|
||||
self.d_code[idx] = group_index;
|
||||
idx += 1;
|
||||
}
|
||||
group_index = group_index.wrapping_add(1);
|
||||
}
|
||||
run >>= 1;
|
||||
}
|
||||
|
||||
let mut len = 3u8;
|
||||
idx = 0;
|
||||
for count in d_len_group_counts {
|
||||
for _ in 0..count {
|
||||
self.d_len[idx] = len;
|
||||
idx += 1;
|
||||
}
|
||||
len = len.saturating_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
fn start_huff(&mut self) {
|
||||
for i in 0..LZH_N_CHAR {
|
||||
self.freq[i] = 1;
|
||||
self.son[i] = i + LZH_T;
|
||||
self.parent[i + LZH_T] = i;
|
||||
}
|
||||
|
||||
let mut i = 0usize;
|
||||
let mut j = LZH_N_CHAR;
|
||||
while j <= LZH_R {
|
||||
self.freq[j] = self.freq[i].saturating_add(self.freq[i + 1]);
|
||||
self.son[j] = i;
|
||||
self.parent[i] = j;
|
||||
self.parent[i + 1] = j;
|
||||
i += 2;
|
||||
j += 1;
|
||||
}
|
||||
|
||||
self.freq[LZH_T] = u16::MAX;
|
||||
self.parent[LZH_R] = 0;
|
||||
}
|
||||
|
||||
fn decode_char(&mut self) -> usize {
|
||||
let mut node = self.son[LZH_R];
|
||||
while node < LZH_T {
|
||||
let bit = usize::from(self.bit_reader.read_bit_or_zero());
|
||||
node = self.son[node + bit];
|
||||
}
|
||||
|
||||
let c = node - LZH_T;
|
||||
self.update(c);
|
||||
c
|
||||
}
|
||||
|
||||
fn decode_position(&mut self) -> usize {
|
||||
let i = self.bit_reader.read_bits_or_zero(8) as usize;
|
||||
let mut c = usize::from(self.d_code[i]) << 6;
|
||||
let mut j = usize::from(self.d_len[i]).saturating_sub(2);
|
||||
|
||||
while j > 0 {
|
||||
j -= 1;
|
||||
c |= usize::from(self.bit_reader.read_bit_or_zero()) << j;
|
||||
}
|
||||
|
||||
c | (i & 0x3F)
|
||||
}
|
||||
|
||||
fn update(&mut self, c: usize) {
|
||||
if self.freq[LZH_R] == LZH_MAX_FREQ {
|
||||
self.reconstruct();
|
||||
}
|
||||
|
||||
let mut current = self.parent[c + LZH_T];
|
||||
loop {
|
||||
self.freq[current] = self.freq[current].saturating_add(1);
|
||||
let freq = self.freq[current];
|
||||
|
||||
if current + 1 < self.freq.len() && freq > self.freq[current + 1] {
|
||||
let mut swap_idx = current + 1;
|
||||
while swap_idx + 1 < self.freq.len() && freq > self.freq[swap_idx + 1] {
|
||||
swap_idx += 1;
|
||||
}
|
||||
|
||||
self.freq.swap(current, swap_idx);
|
||||
|
||||
let left = self.son[current];
|
||||
let right = self.son[swap_idx];
|
||||
self.son[current] = right;
|
||||
self.son[swap_idx] = left;
|
||||
|
||||
self.parent[left] = swap_idx;
|
||||
if left < LZH_T {
|
||||
self.parent[left + 1] = swap_idx;
|
||||
}
|
||||
|
||||
self.parent[right] = current;
|
||||
if right < LZH_T {
|
||||
self.parent[right + 1] = current;
|
||||
}
|
||||
|
||||
current = swap_idx;
|
||||
}
|
||||
|
||||
current = self.parent[current];
|
||||
if current == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn reconstruct(&mut self) {
|
||||
let mut j = 0usize;
|
||||
for i in 0..LZH_T {
|
||||
if self.son[i] >= LZH_T {
|
||||
self.freq[j] = (self.freq[i].saturating_add(1)) / 2;
|
||||
self.son[j] = self.son[i];
|
||||
j += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let mut i = 0usize;
|
||||
let mut current = LZH_N_CHAR;
|
||||
while current < LZH_T {
|
||||
let sum = self.freq[i].saturating_add(self.freq[i + 1]);
|
||||
self.freq[current] = sum;
|
||||
|
||||
let mut insert_at = current;
|
||||
while insert_at > 0 && sum < self.freq[insert_at - 1] {
|
||||
insert_at -= 1;
|
||||
}
|
||||
|
||||
for move_idx in (insert_at..current).rev() {
|
||||
self.freq[move_idx + 1] = self.freq[move_idx];
|
||||
self.son[move_idx + 1] = self.son[move_idx];
|
||||
}
|
||||
|
||||
self.freq[insert_at] = sum;
|
||||
self.son[insert_at] = i;
|
||||
|
||||
i += 2;
|
||||
current += 1;
|
||||
}
|
||||
|
||||
for idx in 0..LZH_T {
|
||||
let node = self.son[idx];
|
||||
self.parent[node] = idx;
|
||||
if node < LZH_T {
|
||||
self.parent[node + 1] = idx;
|
||||
}
|
||||
}
|
||||
|
||||
self.freq[LZH_T] = u16::MAX;
|
||||
self.parent[LZH_R] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
struct BitReader<'a> {
|
||||
data: &'a [u8],
|
||||
byte_pos: usize,
|
||||
bit_mask: u8,
|
||||
current_byte: u8,
|
||||
xor_state: Option<XorState>,
|
||||
}
|
||||
|
||||
impl<'a> BitReader<'a> {
|
||||
fn new(data: &'a [u8], xor_key: Option<u16>) -> Self {
|
||||
Self {
|
||||
data,
|
||||
byte_pos: 0,
|
||||
bit_mask: 0x80,
|
||||
current_byte: 0,
|
||||
xor_state: xor_key.map(XorState::new),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_bit_or_zero(&mut self) -> u8 {
|
||||
if self.bit_mask == 0x80 {
|
||||
let Some(mut byte) = self.data.get(self.byte_pos).copied() else {
|
||||
return 0;
|
||||
};
|
||||
if let Some(state) = &mut self.xor_state {
|
||||
byte = state.decrypt_byte(byte);
|
||||
}
|
||||
self.current_byte = byte;
|
||||
}
|
||||
|
||||
let bit = if (self.current_byte & self.bit_mask) != 0 {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
};
|
||||
self.bit_mask >>= 1;
|
||||
if self.bit_mask == 0 {
|
||||
self.bit_mask = 0x80;
|
||||
self.byte_pos = self.byte_pos.saturating_add(1);
|
||||
}
|
||||
bit
|
||||
}
|
||||
|
||||
fn read_bits_or_zero(&mut self, bits: usize) -> u32 {
|
||||
let mut value = 0u32;
|
||||
for _ in 0..bits {
|
||||
value = (value << 1) | u32::from(self.read_bit_or_zero());
|
||||
}
|
||||
value
|
||||
}
|
||||
}
|
||||
79
crates/rsli/src/compress/lzss.rs
Normal file
79
crates/rsli/src/compress/lzss.rs
Normal file
@@ -0,0 +1,79 @@
|
||||
use super::xor::XorState;
|
||||
use crate::error::Error;
|
||||
use crate::Result;
|
||||
|
||||
/// Simple LZSS decompression with optional on-the-fly XOR decryption
|
||||
pub fn lzss_decompress_simple(
|
||||
data: &[u8],
|
||||
expected_size: usize,
|
||||
xor_key: Option<u16>,
|
||||
) -> Result<Vec<u8>> {
|
||||
let mut ring = [0x20u8; 0x1000];
|
||||
let mut ring_pos = 0xFEEusize;
|
||||
let mut out = Vec::with_capacity(expected_size);
|
||||
let mut in_pos = 0usize;
|
||||
|
||||
let mut control = 0u8;
|
||||
let mut bits_left = 0u8;
|
||||
|
||||
// XOR state for on-the-fly decryption
|
||||
let mut xor_state = xor_key.map(XorState::new);
|
||||
|
||||
// Helper to read byte with optional XOR decryption
|
||||
let read_byte = |pos: usize, state: &mut Option<XorState>| -> Option<u8> {
|
||||
let encrypted = data.get(pos).copied()?;
|
||||
Some(if let Some(ref mut s) = state {
|
||||
s.decrypt_byte(encrypted)
|
||||
} else {
|
||||
encrypted
|
||||
})
|
||||
};
|
||||
|
||||
while out.len() < expected_size {
|
||||
if bits_left == 0 {
|
||||
let byte = read_byte(in_pos, &mut xor_state)
|
||||
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
|
||||
control = byte;
|
||||
in_pos += 1;
|
||||
bits_left = 8;
|
||||
}
|
||||
|
||||
if (control & 1) != 0 {
|
||||
let byte = read_byte(in_pos, &mut xor_state)
|
||||
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
|
||||
in_pos += 1;
|
||||
|
||||
out.push(byte);
|
||||
ring[ring_pos] = byte;
|
||||
ring_pos = (ring_pos + 1) & 0x0FFF;
|
||||
} else {
|
||||
let low = read_byte(in_pos, &mut xor_state)
|
||||
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
|
||||
let high = read_byte(in_pos + 1, &mut xor_state)
|
||||
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
|
||||
in_pos += 2;
|
||||
|
||||
let offset = usize::from(low) | (usize::from(high & 0xF0) << 4);
|
||||
let length = usize::from((high & 0x0F) + 3);
|
||||
|
||||
for step in 0..length {
|
||||
let byte = ring[(offset + step) & 0x0FFF];
|
||||
out.push(byte);
|
||||
ring[ring_pos] = byte;
|
||||
ring_pos = (ring_pos + 1) & 0x0FFF;
|
||||
if out.len() >= expected_size {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
control >>= 1;
|
||||
bits_left -= 1;
|
||||
}
|
||||
|
||||
if out.len() != expected_size {
|
||||
return Err(Error::DecompressionFailed("lzss-simple"));
|
||||
}
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
9
crates/rsli/src/compress/mod.rs
Normal file
9
crates/rsli/src/compress/mod.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
pub mod deflate;
|
||||
pub mod lzh;
|
||||
pub mod lzss;
|
||||
pub mod xor;
|
||||
|
||||
pub use deflate::decode_deflate;
|
||||
pub use lzh::lzss_huffman_decompress;
|
||||
pub use lzss::lzss_decompress_simple;
|
||||
pub use xor::{xor_stream, XorState};
|
||||
29
crates/rsli/src/compress/xor.rs
Normal file
29
crates/rsli/src/compress/xor.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
/// XOR cipher state for RsLi format
|
||||
pub struct XorState {
|
||||
lo: u8,
|
||||
hi: u8,
|
||||
}
|
||||
|
||||
impl XorState {
|
||||
/// Create new XOR state from 16-bit key
|
||||
pub fn new(key16: u16) -> Self {
|
||||
Self {
|
||||
lo: (key16 & 0xFF) as u8,
|
||||
hi: ((key16 >> 8) & 0xFF) as u8,
|
||||
}
|
||||
}
|
||||
|
||||
/// Decrypt a single byte and update state
|
||||
pub fn decrypt_byte(&mut self, encrypted: u8) -> u8 {
|
||||
self.lo = self.hi ^ self.lo.wrapping_shl(1);
|
||||
let decrypted = encrypted ^ self.lo;
|
||||
self.hi = self.lo ^ (self.hi >> 1);
|
||||
decrypted
|
||||
}
|
||||
}
|
||||
|
||||
/// Decrypt entire buffer with XOR stream cipher
|
||||
pub fn xor_stream(data: &[u8], key16: u16) -> Vec<u8> {
|
||||
let mut state = XorState::new(key16);
|
||||
data.iter().map(|&b| state.decrypt_byte(b)).collect()
|
||||
}
|
||||
133
crates/rsli/src/error.rs
Normal file
133
crates/rsli/src/error.rs
Normal file
@@ -0,0 +1,133 @@
|
||||
use core::fmt;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[non_exhaustive]
|
||||
pub enum Error {
|
||||
Io(std::io::Error),
|
||||
|
||||
InvalidMagic {
|
||||
got: [u8; 2],
|
||||
},
|
||||
UnsupportedVersion {
|
||||
got: u8,
|
||||
},
|
||||
InvalidEntryCount {
|
||||
got: i16,
|
||||
},
|
||||
TooManyEntries {
|
||||
got: usize,
|
||||
},
|
||||
|
||||
EntryTableOutOfBounds {
|
||||
table_offset: u64,
|
||||
table_len: u64,
|
||||
file_len: u64,
|
||||
},
|
||||
EntryTableDecryptFailed,
|
||||
CorruptEntryTable(&'static str),
|
||||
|
||||
EntryIdOutOfRange {
|
||||
id: u32,
|
||||
entry_count: u32,
|
||||
},
|
||||
EntryDataOutOfBounds {
|
||||
id: u32,
|
||||
offset: u64,
|
||||
size: u32,
|
||||
file_len: u64,
|
||||
},
|
||||
|
||||
AoTrailerInvalid,
|
||||
MediaOverlayOutOfBounds {
|
||||
overlay: u32,
|
||||
file_len: u64,
|
||||
},
|
||||
|
||||
UnsupportedMethod {
|
||||
raw: u32,
|
||||
},
|
||||
PackedSizePastEof {
|
||||
id: u32,
|
||||
offset: u64,
|
||||
packed_size: u32,
|
||||
file_len: u64,
|
||||
},
|
||||
DeflateEofPlusOneQuirkRejected {
|
||||
id: u32,
|
||||
},
|
||||
|
||||
DecompressionFailed(&'static str),
|
||||
OutputSizeMismatch {
|
||||
expected: u32,
|
||||
got: u32,
|
||||
},
|
||||
|
||||
IntegerOverflow,
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for Error {
|
||||
fn from(value: std::io::Error) -> Self {
|
||||
Self::Io(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Error::Io(e) => write!(f, "I/O error: {e}"),
|
||||
Error::InvalidMagic { got } => write!(f, "invalid RsLi magic: {got:02X?}"),
|
||||
Error::UnsupportedVersion { got } => write!(f, "unsupported RsLi version: {got:#x}"),
|
||||
Error::InvalidEntryCount { got } => write!(f, "invalid entry_count: {got}"),
|
||||
Error::TooManyEntries { got } => write!(f, "too many entries: {got} exceeds u32::MAX"),
|
||||
Error::EntryTableOutOfBounds {
|
||||
table_offset,
|
||||
table_len,
|
||||
file_len,
|
||||
} => write!(
|
||||
f,
|
||||
"entry table out of bounds: off={table_offset}, len={table_len}, file={file_len}"
|
||||
),
|
||||
Error::EntryTableDecryptFailed => write!(f, "failed to decrypt entry table"),
|
||||
Error::CorruptEntryTable(s) => write!(f, "corrupt entry table: {s}"),
|
||||
Error::EntryIdOutOfRange { id, entry_count } => {
|
||||
write!(f, "entry id out of range: id={id}, count={entry_count}")
|
||||
}
|
||||
Error::EntryDataOutOfBounds {
|
||||
id,
|
||||
offset,
|
||||
size,
|
||||
file_len,
|
||||
} => write!(
|
||||
f,
|
||||
"entry data out of bounds: id={id}, off={offset}, size={size}, file={file_len}"
|
||||
),
|
||||
Error::AoTrailerInvalid => write!(f, "invalid AO trailer"),
|
||||
Error::MediaOverlayOutOfBounds { overlay, file_len } => {
|
||||
write!(
|
||||
f,
|
||||
"media overlay out of bounds: overlay={overlay}, file={file_len}"
|
||||
)
|
||||
}
|
||||
Error::UnsupportedMethod { raw } => write!(f, "unsupported packing method: {raw:#x}"),
|
||||
Error::PackedSizePastEof {
|
||||
id,
|
||||
offset,
|
||||
packed_size,
|
||||
file_len,
|
||||
} => write!(
|
||||
f,
|
||||
"packed range past EOF: id={id}, off={offset}, size={packed_size}, file={file_len}"
|
||||
),
|
||||
Error::DeflateEofPlusOneQuirkRejected { id } => {
|
||||
write!(f, "deflate EOF+1 quirk rejected for entry {id}")
|
||||
}
|
||||
Error::DecompressionFailed(s) => write!(f, "decompression failed: {s}"),
|
||||
Error::OutputSizeMismatch { expected, got } => {
|
||||
write!(f, "output size mismatch: expected={expected}, got={got}")
|
||||
}
|
||||
Error::IntegerOverflow => write!(f, "integer overflow"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
411
crates/rsli/src/lib.rs
Normal file
411
crates/rsli/src/lib.rs
Normal file
@@ -0,0 +1,411 @@
|
||||
pub mod compress;
|
||||
pub mod error;
|
||||
pub mod parse;
|
||||
|
||||
use crate::compress::{
|
||||
decode_deflate, lzss_decompress_simple, lzss_huffman_decompress, xor_stream,
|
||||
};
|
||||
use crate::error::Error;
|
||||
use crate::parse::{c_name_bytes, cmp_c_string, parse_library};
|
||||
use common::{OutputBuffer, ResourceData};
|
||||
use std::cmp::Ordering;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub type Result<T> = core::result::Result<T, Error>;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct OpenOptions {
|
||||
pub allow_ao_trailer: bool,
|
||||
pub allow_deflate_eof_plus_one: bool,
|
||||
}
|
||||
|
||||
impl Default for OpenOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
allow_ao_trailer: true,
|
||||
allow_deflate_eof_plus_one: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Library {
|
||||
bytes: Arc<[u8]>,
|
||||
entries: Vec<EntryRecord>,
|
||||
#[cfg(test)]
|
||||
pub(crate) header_raw: [u8; 32],
|
||||
#[cfg(test)]
|
||||
pub(crate) table_plain_original: Vec<u8>,
|
||||
#[cfg(test)]
|
||||
pub(crate) xor_seed: u32,
|
||||
#[cfg(test)]
|
||||
pub(crate) source_size: usize,
|
||||
#[cfg(test)]
|
||||
pub(crate) trailer_raw: Option<[u8; 6]>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct EntryId(pub u32);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct EntryMeta {
|
||||
pub name: String,
|
||||
pub flags: i32,
|
||||
pub method: PackMethod,
|
||||
pub data_offset: u64,
|
||||
pub packed_size: u32,
|
||||
pub unpacked_size: u32,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
pub enum PackMethod {
|
||||
None,
|
||||
XorOnly,
|
||||
Lzss,
|
||||
XorLzss,
|
||||
LzssHuffman,
|
||||
XorLzssHuffman,
|
||||
Deflate,
|
||||
Unknown(u32),
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct EntryRef<'a> {
|
||||
pub id: EntryId,
|
||||
pub meta: &'a EntryMeta,
|
||||
}
|
||||
|
||||
pub struct PackedResource {
|
||||
pub meta: EntryMeta,
|
||||
pub packed: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct EntryRecord {
|
||||
pub(crate) meta: EntryMeta,
|
||||
pub(crate) name_raw: [u8; 12],
|
||||
pub(crate) sort_to_original: i16,
|
||||
pub(crate) key16: u16,
|
||||
#[cfg(test)]
|
||||
pub(crate) data_offset_raw: u32,
|
||||
pub(crate) packed_size_declared: u32,
|
||||
pub(crate) packed_size_available: usize,
|
||||
pub(crate) effective_offset: usize,
|
||||
}
|
||||
|
||||
impl Library {
|
||||
pub fn open_path(path: impl AsRef<Path>) -> Result<Self> {
|
||||
Self::open_path_with(path, OpenOptions::default())
|
||||
}
|
||||
|
||||
pub fn open_path_with(path: impl AsRef<Path>, opts: OpenOptions) -> Result<Self> {
|
||||
let bytes = fs::read(path.as_ref())?;
|
||||
let arc: Arc<[u8]> = Arc::from(bytes.into_boxed_slice());
|
||||
parse_library(arc, opts)
|
||||
}
|
||||
|
||||
pub fn entry_count(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
|
||||
self.entries
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, entry)| EntryRef {
|
||||
id: EntryId(u32::try_from(idx).expect("entry count validated at parse")),
|
||||
meta: &entry.meta,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn find(&self, name: &str) -> Option<EntryId> {
|
||||
if self.entries.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
const MAX_INLINE_NAME: usize = 12;
|
||||
|
||||
// Fast path: use stack allocation for short ASCII names (95% of cases)
|
||||
if name.len() <= MAX_INLINE_NAME && name.is_ascii() {
|
||||
let mut buf = [0u8; MAX_INLINE_NAME];
|
||||
for (i, &b) in name.as_bytes().iter().enumerate() {
|
||||
buf[i] = b.to_ascii_uppercase();
|
||||
}
|
||||
return self.find_impl(&buf[..name.len()]);
|
||||
}
|
||||
|
||||
// Slow path: heap allocation for long or non-ASCII names
|
||||
let query = name.to_ascii_uppercase();
|
||||
self.find_impl(query.as_bytes())
|
||||
}
|
||||
|
||||
fn find_impl(&self, query_bytes: &[u8]) -> Option<EntryId> {
|
||||
// Binary search
|
||||
let mut low = 0usize;
|
||||
let mut high = self.entries.len();
|
||||
while low < high {
|
||||
let mid = low + (high - low) / 2;
|
||||
let idx = self.entries[mid].sort_to_original;
|
||||
if idx < 0 {
|
||||
break;
|
||||
}
|
||||
let idx = usize::try_from(idx).ok()?;
|
||||
if idx >= self.entries.len() {
|
||||
break;
|
||||
}
|
||||
|
||||
let cmp = cmp_c_string(query_bytes, c_name_bytes(&self.entries[idx].name_raw));
|
||||
match cmp {
|
||||
Ordering::Less => high = mid,
|
||||
Ordering::Greater => low = mid + 1,
|
||||
Ordering::Equal => {
|
||||
return Some(EntryId(
|
||||
u32::try_from(idx).expect("entry count validated at parse"),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Linear fallback search
|
||||
self.entries.iter().enumerate().find_map(|(idx, entry)| {
|
||||
if cmp_c_string(query_bytes, c_name_bytes(&entry.name_raw)) == Ordering::Equal {
|
||||
Some(EntryId(
|
||||
u32::try_from(idx).expect("entry count validated at parse"),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get(&self, id: EntryId) -> Option<EntryRef<'_>> {
|
||||
let idx = usize::try_from(id.0).ok()?;
|
||||
let entry = self.entries.get(idx)?;
|
||||
Some(EntryRef {
|
||||
id,
|
||||
meta: &entry.meta,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn load(&self, id: EntryId) -> Result<Vec<u8>> {
|
||||
let entry = self.entry_by_id(id)?;
|
||||
let packed = self.packed_slice(entry)?;
|
||||
decode_payload(
|
||||
packed,
|
||||
entry.meta.method,
|
||||
entry.key16,
|
||||
entry.meta.unpacked_size,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn load_into(&self, id: EntryId, out: &mut dyn OutputBuffer) -> Result<usize> {
|
||||
let decoded = self.load(id)?;
|
||||
out.write_exact(&decoded)?;
|
||||
Ok(decoded.len())
|
||||
}
|
||||
|
||||
pub fn load_packed(&self, id: EntryId) -> Result<PackedResource> {
|
||||
let entry = self.entry_by_id(id)?;
|
||||
let packed = self.packed_slice(entry)?.to_vec();
|
||||
Ok(PackedResource {
|
||||
meta: entry.meta.clone(),
|
||||
packed,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn unpack(&self, packed: &PackedResource) -> Result<Vec<u8>> {
|
||||
let key16 = self.resolve_key_for_meta(&packed.meta).unwrap_or(0);
|
||||
|
||||
let method = packed.meta.method;
|
||||
if needs_xor_key(method) && self.resolve_key_for_meta(&packed.meta).is_none() {
|
||||
return Err(Error::CorruptEntryTable(
|
||||
"cannot resolve XOR key for packed resource",
|
||||
));
|
||||
}
|
||||
|
||||
decode_payload(&packed.packed, method, key16, packed.meta.unpacked_size)
|
||||
}
|
||||
|
||||
pub fn load_fast(&self, id: EntryId) -> Result<ResourceData<'_>> {
|
||||
let entry = self.entry_by_id(id)?;
|
||||
if entry.meta.method == PackMethod::None {
|
||||
let packed = self.packed_slice(entry)?;
|
||||
let size =
|
||||
usize::try_from(entry.meta.unpacked_size).map_err(|_| Error::IntegerOverflow)?;
|
||||
if packed.len() < size {
|
||||
return Err(Error::OutputSizeMismatch {
|
||||
expected: entry.meta.unpacked_size,
|
||||
got: u32::try_from(packed.len()).unwrap_or(u32::MAX),
|
||||
});
|
||||
}
|
||||
return Ok(ResourceData::Borrowed(&packed[..size]));
|
||||
}
|
||||
Ok(ResourceData::Owned(self.load(id)?))
|
||||
}
|
||||
|
||||
fn entry_by_id(&self, id: EntryId) -> Result<&EntryRecord> {
|
||||
let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
|
||||
self.entries
|
||||
.get(idx)
|
||||
.ok_or_else(|| Error::EntryIdOutOfRange {
|
||||
id: id.0,
|
||||
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
|
||||
})
|
||||
}
|
||||
|
||||
fn packed_slice<'a>(&'a self, entry: &EntryRecord) -> Result<&'a [u8]> {
|
||||
let start = entry.effective_offset;
|
||||
let end = start
|
||||
.checked_add(entry.packed_size_available)
|
||||
.ok_or(Error::IntegerOverflow)?;
|
||||
self.bytes
|
||||
.get(start..end)
|
||||
.ok_or(Error::EntryDataOutOfBounds {
|
||||
id: 0,
|
||||
offset: u64::try_from(start).unwrap_or(u64::MAX),
|
||||
size: entry.packed_size_declared,
|
||||
file_len: u64::try_from(self.bytes.len()).unwrap_or(u64::MAX),
|
||||
})
|
||||
}
|
||||
|
||||
fn resolve_key_for_meta(&self, meta: &EntryMeta) -> Option<u16> {
|
||||
self.entries
|
||||
.iter()
|
||||
.find(|entry| {
|
||||
entry.meta.name == meta.name
|
||||
&& entry.meta.flags == meta.flags
|
||||
&& entry.meta.data_offset == meta.data_offset
|
||||
&& entry.meta.packed_size == meta.packed_size
|
||||
&& entry.meta.unpacked_size == meta.unpacked_size
|
||||
&& entry.meta.method == meta.method
|
||||
})
|
||||
.map(|entry| entry.key16)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn rebuild_from_parsed_metadata(&self) -> Result<Vec<u8>> {
|
||||
let trailer_len = usize::from(self.trailer_raw.is_some()) * 6;
|
||||
let pre_trailer_size = self
|
||||
.source_size
|
||||
.checked_sub(trailer_len)
|
||||
.ok_or(Error::IntegerOverflow)?;
|
||||
|
||||
let count = self.entries.len();
|
||||
let table_len = count.checked_mul(32).ok_or(Error::IntegerOverflow)?;
|
||||
let table_end = 32usize
|
||||
.checked_add(table_len)
|
||||
.ok_or(Error::IntegerOverflow)?;
|
||||
if pre_trailer_size < table_end {
|
||||
return Err(Error::EntryTableOutOfBounds {
|
||||
table_offset: 32,
|
||||
table_len: u64::try_from(table_len).map_err(|_| Error::IntegerOverflow)?,
|
||||
file_len: u64::try_from(pre_trailer_size).map_err(|_| Error::IntegerOverflow)?,
|
||||
});
|
||||
}
|
||||
|
||||
let mut out = vec![0u8; pre_trailer_size];
|
||||
out[0..32].copy_from_slice(&self.header_raw);
|
||||
let encrypted_table =
|
||||
xor_stream(&self.table_plain_original, (self.xor_seed & 0xFFFF) as u16);
|
||||
out[32..table_end].copy_from_slice(&encrypted_table);
|
||||
|
||||
let mut occupied = vec![false; pre_trailer_size];
|
||||
for byte in occupied.iter_mut().take(table_end) {
|
||||
*byte = true;
|
||||
}
|
||||
|
||||
for (idx, entry) in self.entries.iter().enumerate() {
|
||||
let packed = self
|
||||
.load_packed(EntryId(
|
||||
u32::try_from(idx).expect("entry count validated at parse"),
|
||||
))?
|
||||
.packed;
|
||||
let start =
|
||||
usize::try_from(entry.data_offset_raw).map_err(|_| Error::IntegerOverflow)?;
|
||||
for (offset, byte) in packed.iter().copied().enumerate() {
|
||||
let pos = start.checked_add(offset).ok_or(Error::IntegerOverflow)?;
|
||||
if pos >= out.len() {
|
||||
return Err(Error::PackedSizePastEof {
|
||||
id: u32::try_from(idx).expect("entry count validated at parse"),
|
||||
offset: u64::from(entry.data_offset_raw),
|
||||
packed_size: entry.packed_size_declared,
|
||||
file_len: u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?,
|
||||
});
|
||||
}
|
||||
if occupied[pos] && out[pos] != byte {
|
||||
return Err(Error::CorruptEntryTable("packed payload overlap conflict"));
|
||||
}
|
||||
out[pos] = byte;
|
||||
occupied[pos] = true;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(trailer) = self.trailer_raw {
|
||||
out.extend_from_slice(&trailer);
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_payload(
|
||||
packed: &[u8],
|
||||
method: PackMethod,
|
||||
key16: u16,
|
||||
unpacked_size: u32,
|
||||
) -> Result<Vec<u8>> {
|
||||
let expected = usize::try_from(unpacked_size).map_err(|_| Error::IntegerOverflow)?;
|
||||
|
||||
let out = match method {
|
||||
PackMethod::None => {
|
||||
if packed.len() < expected {
|
||||
return Err(Error::OutputSizeMismatch {
|
||||
expected: unpacked_size,
|
||||
got: u32::try_from(packed.len()).unwrap_or(u32::MAX),
|
||||
});
|
||||
}
|
||||
packed[..expected].to_vec()
|
||||
}
|
||||
PackMethod::XorOnly => {
|
||||
if packed.len() < expected {
|
||||
return Err(Error::OutputSizeMismatch {
|
||||
expected: unpacked_size,
|
||||
got: u32::try_from(packed.len()).unwrap_or(u32::MAX),
|
||||
});
|
||||
}
|
||||
xor_stream(&packed[..expected], key16)
|
||||
}
|
||||
PackMethod::Lzss => lzss_decompress_simple(packed, expected, None)?,
|
||||
PackMethod::XorLzss => {
|
||||
// Optimized: XOR on-the-fly during decompression instead of creating temp buffer
|
||||
lzss_decompress_simple(packed, expected, Some(key16))?
|
||||
}
|
||||
PackMethod::LzssHuffman => lzss_huffman_decompress(packed, expected, None)?,
|
||||
PackMethod::XorLzssHuffman => {
|
||||
// Optimized: XOR on-the-fly during decompression
|
||||
lzss_huffman_decompress(packed, expected, Some(key16))?
|
||||
}
|
||||
PackMethod::Deflate => decode_deflate(packed)?,
|
||||
PackMethod::Unknown(raw) => return Err(Error::UnsupportedMethod { raw }),
|
||||
};
|
||||
|
||||
if out.len() != expected {
|
||||
return Err(Error::OutputSizeMismatch {
|
||||
expected: unpacked_size,
|
||||
got: u32::try_from(out.len()).unwrap_or(u32::MAX),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn needs_xor_key(method: PackMethod) -> bool {
|
||||
matches!(
|
||||
method,
|
||||
PackMethod::XorOnly | PackMethod::XorLzss | PackMethod::XorLzssHuffman
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
249
crates/rsli/src/parse.rs
Normal file
249
crates/rsli/src/parse.rs
Normal file
@@ -0,0 +1,249 @@
|
||||
use crate::compress::xor::xor_stream;
|
||||
use crate::error::Error;
|
||||
use crate::{EntryMeta, EntryRecord, Library, OpenOptions, PackMethod, Result};
|
||||
use std::cmp::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Parses a complete RsLi archive from `bytes` into a `Library`.
///
/// Layout read here: a 32-byte header ("NL", version byte at offset 3,
/// i16 entry count at 4..6, presorted flag at 14..16, u32 XOR seed at
/// 20..24), followed by a XOR-encrypted table of 32-byte entry rows, then
/// entry payloads, optionally followed by a 6-byte "AO" overlay trailer.
///
/// # Errors
/// Returns the specific `Error` variant for each validation failure:
/// bad magic/version, negative entry count, table or payload out of
/// bounds, rejected deflate EOF+1 quirk, or arithmetic overflow.
pub fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
    // A file shorter than the fixed header cannot hold anything.
    if bytes.len() < 32 {
        return Err(Error::EntryTableOutOfBounds {
            table_offset: 32,
            table_len: 0,
            file_len: u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
        });
    }

    let mut header_raw = [0u8; 32];
    header_raw.copy_from_slice(&bytes[0..32]);

    // Magic is the two bytes "NL"; version lives at offset 3.
    // NOTE(review): byte 2 is not validated here — presumably a NUL; confirm.
    if &bytes[0..2] != b"NL" {
        let mut got = [0u8; 2];
        got.copy_from_slice(&bytes[0..2]);
        return Err(Error::InvalidMagic { got });
    }
    if bytes[3] != 0x01 {
        return Err(Error::UnsupportedVersion { got: bytes[3] });
    }

    // Entry count is a signed 16-bit field; negative counts are corrupt.
    let entry_count = i16::from_le_bytes([bytes[4], bytes[5]]);
    if entry_count < 0 {
        return Err(Error::InvalidEntryCount { got: entry_count });
    }
    let count = usize::try_from(entry_count).map_err(|_| Error::IntegerOverflow)?;

    // Validate entry_count fits in u32 (required for EntryId)
    if count > u32::MAX as usize {
        return Err(Error::TooManyEntries { got: count });
    }

    // Seed for the table XOR stream; only the low 16 bits are used below.
    let xor_seed = u32::from_le_bytes([bytes[20], bytes[21], bytes[22], bytes[23]]);

    // The entry table is `count` rows of 32 bytes, directly after the header.
    let table_len = count.checked_mul(32).ok_or(Error::IntegerOverflow)?;
    let table_offset = 32usize;
    let table_end = table_offset
        .checked_add(table_len)
        .ok_or(Error::IntegerOverflow)?;
    if table_end > bytes.len() {
        return Err(Error::EntryTableOutOfBounds {
            table_offset: u64::try_from(table_offset).map_err(|_| Error::IntegerOverflow)?,
            table_len: u64::try_from(table_len).map_err(|_| Error::IntegerOverflow)?,
            file_len: u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
        });
    }

    // Decrypt the table with the low 16 bits of the seed.
    let table_enc = &bytes[table_offset..table_end];
    let table_plain_original = xor_stream(table_enc, (xor_seed & 0xFFFF) as u16);
    if table_plain_original.len() != table_len {
        return Err(Error::EntryTableDecryptFailed);
    }

    // Optional "AO" trailer shifts every entry's raw data offset by `overlay`.
    let (overlay, trailer_raw) = parse_ao_trailer(&bytes, opts.allow_ao_trailer)?;
    #[cfg(not(test))]
    let _ = trailer_raw;

    let mut entries = Vec::with_capacity(count);
    for idx in 0..count {
        let row = &table_plain_original[idx * 32..(idx + 1) * 32];

        // Row layout: name[0..12], flags[16..18], sort index[18..20],
        // unpacked size[20..24], raw data offset[24..28], packed size[28..32].
        // NOTE(review): bytes 12..16 of each row are skipped — purpose unknown.
        let mut name_raw = [0u8; 12];
        name_raw.copy_from_slice(&row[0..12]);

        let flags_signed = i16::from_le_bytes([row[16], row[17]]);
        let sort_to_original = i16::from_le_bytes([row[18], row[19]]);
        let unpacked_size = u32::from_le_bytes([row[20], row[21], row[22], row[23]]);
        let data_offset_raw = u32::from_le_bytes([row[24], row[25], row[26], row[27]]);
        let packed_size_declared = u32::from_le_bytes([row[28], row[29], row[30], row[31]]);

        // Bits 0x1E0 of the (sign-extension-free) flags word select the method.
        let method_raw = (flags_signed as u16 as u32) & 0x1E0;
        let method = parse_method(method_raw);

        // Effective file offset = raw offset + media overlay.
        let effective_offset_u64 = u64::from(data_offset_raw)
            .checked_add(u64::from(overlay))
            .ok_or(Error::IntegerOverflow)?;
        let effective_offset =
            usize::try_from(effective_offset_u64).map_err(|_| Error::IntegerOverflow)?;

        let packed_size_usize =
            usize::try_from(packed_size_declared).map_err(|_| Error::IntegerOverflow)?;
        let mut packed_size_available = packed_size_usize;

        let end = effective_offset_u64
            .checked_add(u64::from(packed_size_declared))
            .ok_or(Error::IntegerOverflow)?;
        let file_len_u64 = u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;

        if end > file_len_u64 {
            // Known producer quirk: deflate entries (0x100) sometimes declare
            // one byte more than the file holds. Accept by shrinking the
            // available size when the caller opted in; reject otherwise.
            if method_raw == 0x100 && end == file_len_u64 + 1 {
                if opts.allow_deflate_eof_plus_one {
                    packed_size_available = packed_size_available
                        .checked_sub(1)
                        .ok_or(Error::IntegerOverflow)?;
                } else {
                    return Err(Error::DeflateEofPlusOneQuirkRejected {
                        id: u32::try_from(idx).expect("entry count validated at parse"),
                    });
                }
            } else {
                return Err(Error::PackedSizePastEof {
                    id: u32::try_from(idx).expect("entry count validated at parse"),
                    offset: effective_offset_u64,
                    packed_size: packed_size_declared,
                    file_len: file_len_u64,
                });
            }
        }

        // Re-check with the (possibly reduced) available size.
        let available_end = effective_offset
            .checked_add(packed_size_available)
            .ok_or(Error::IntegerOverflow)?;
        if available_end > bytes.len() {
            return Err(Error::EntryDataOutOfBounds {
                id: u32::try_from(idx).expect("entry count validated at parse"),
                offset: effective_offset_u64,
                size: packed_size_declared,
                file_len: file_len_u64,
            });
        }

        // Name is NUL-terminated inside the fixed 12-byte field.
        let name = decode_name(c_name_bytes(&name_raw));

        entries.push(EntryRecord {
            meta: EntryMeta {
                name,
                flags: i32::from(flags_signed),
                method,
                data_offset: effective_offset_u64,
                packed_size: packed_size_declared,
                unpacked_size,
            },
            name_raw,
            sort_to_original,
            // The per-entry XOR key is the sort index reinterpreted as u16.
            key16: sort_to_original as u16,
            #[cfg(test)]
            data_offset_raw,
            packed_size_declared,
            packed_size_available,
            effective_offset,
        });
    }

    // 0xABBA marks archives whose sort indices were written by the producer;
    // otherwise the indices are derived here by sorting names.
    let presorted_flag = u16::from_le_bytes([bytes[14], bytes[15]]);
    if presorted_flag == 0xABBA {
        // Only validate the stored permutation indices are in range.
        for entry in &entries {
            let idx = i32::from(entry.sort_to_original);
            if idx < 0 || usize::try_from(idx).map_err(|_| Error::IntegerOverflow)? >= count {
                return Err(Error::CorruptEntryTable(
                    "sort_to_original is not a valid permutation index",
                ));
            }
        }
    } else {
        // Derive the sort order from the raw C-string names.
        let mut sorted: Vec<usize> = (0..count).collect();
        sorted.sort_by(|a, b| {
            cmp_c_string(
                c_name_bytes(&entries[*a].name_raw),
                c_name_bytes(&entries[*b].name_raw),
            )
        });
        for (idx, entry) in entries.iter_mut().enumerate() {
            entry.sort_to_original =
                i16::try_from(sorted[idx]).map_err(|_| Error::IntegerOverflow)?;
            entry.key16 = entry.sort_to_original as u16;
        }
    }

    #[cfg(test)]
    let source_size = bytes.len();

    Ok(Library {
        bytes,
        entries,
        // The remaining fields exist only to support the test-only
        // byte-for-byte rebuild.
        #[cfg(test)]
        header_raw,
        #[cfg(test)]
        table_plain_original,
        #[cfg(test)]
        xor_seed,
        #[cfg(test)]
        source_size,
        #[cfg(test)]
        trailer_raw,
    })
}
|
||||
|
||||
fn parse_ao_trailer(bytes: &[u8], allow: bool) -> Result<(u32, Option<[u8; 6]>)> {
|
||||
if !allow || bytes.len() < 6 {
|
||||
return Ok((0, None));
|
||||
}
|
||||
|
||||
if &bytes[bytes.len() - 6..bytes.len() - 4] != b"AO" {
|
||||
return Ok((0, None));
|
||||
}
|
||||
|
||||
let mut trailer = [0u8; 6];
|
||||
trailer.copy_from_slice(&bytes[bytes.len() - 6..]);
|
||||
let overlay = u32::from_le_bytes([trailer[2], trailer[3], trailer[4], trailer[5]]);
|
||||
|
||||
if u64::from(overlay) > u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)? {
|
||||
return Err(Error::MediaOverlayOutOfBounds {
|
||||
overlay,
|
||||
file_len: u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
|
||||
});
|
||||
}
|
||||
|
||||
Ok((overlay, Some(trailer)))
|
||||
}
|
||||
|
||||
pub fn parse_method(raw: u32) -> PackMethod {
|
||||
match raw {
|
||||
0x000 => PackMethod::None,
|
||||
0x020 => PackMethod::XorOnly,
|
||||
0x040 => PackMethod::Lzss,
|
||||
0x060 => PackMethod::XorLzss,
|
||||
0x080 => PackMethod::LzssHuffman,
|
||||
0x0A0 => PackMethod::XorLzssHuffman,
|
||||
0x100 => PackMethod::Deflate,
|
||||
other => PackMethod::Unknown(other),
|
||||
}
|
||||
}
|
||||
|
||||
/// Decodes a raw name as Latin-1: every byte maps to the Unicode code
/// point with the same value, so the result always has one char per byte.
fn decode_name(name: &[u8]) -> String {
    name.iter().copied().map(char::from).collect()
}
|
||||
|
||||
/// Returns the portion of a fixed 12-byte name field before the first NUL
/// byte, or the whole field when no NUL is present.
pub fn c_name_bytes(raw: &[u8; 12]) -> &[u8] {
    match raw.iter().position(|&b| b == 0) {
        Some(nul) => &raw[..nul],
        None => &raw[..],
    }
}
|
||||
|
||||
/// Orders two C-string byte sequences: byte-wise lexicographic, with a
/// shorter string sorting before any longer string it is a prefix of.
///
/// This is exactly the semantics of `Ord` for byte slices, so the previous
/// hand-rolled compare loop is replaced by the standard-library comparison.
pub fn cmp_c_string(a: &[u8], b: &[u8]) -> Ordering {
    a.cmp(b)
}
|
||||
892
crates/rsli/src/tests.rs
Normal file
892
crates/rsli/src/tests.rs
Normal file
@@ -0,0 +1,892 @@
|
||||
use super::*;
|
||||
use crate::compress::lzh::{LZH_MAX_FREQ, LZH_N_CHAR, LZH_R, LZH_T};
|
||||
use crate::compress::xor::xor_stream;
|
||||
use flate2::write::DeflateEncoder;
|
||||
use flate2::Compression;
|
||||
use std::any::Any;
|
||||
use std::fs;
|
||||
use std::io::Write as _;
|
||||
use std::panic::{catch_unwind, AssertUnwindSafe};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// One entry of a synthetic in-memory RsLi fixture built by
/// `build_rsli_bytes`.
#[derive(Clone, Debug)]
struct SyntheticRsliEntry {
    // Entry name; uppercased when written into the 12-byte table field.
    name: String,
    // Raw flags word for the table row; bits 0x1E0 select the pack method.
    method_raw: u16,
    // Unpacked payload the entry should decode back to.
    plain: Vec<u8>,
    // Overrides the packed size written to the table row; `None` means the
    // actual packed length (used to fabricate the deflate EOF+1 quirk).
    declared_packed_size: Option<u32>,
}
|
||||
|
||||
/// Archive-level knobs for `build_rsli_bytes`.
#[derive(Clone, Debug)]
struct RsliBuildOptions {
    // XOR seed written to header bytes 20..24.
    seed: u32,
    // Write sort indices and the 0xABBA presorted flag into the file.
    presorted: bool,
    // Media overlay subtracted from raw data offsets.
    overlay: u32,
    // Append the 6-byte "AO" trailer carrying `overlay`.
    add_ao_trailer: bool,
}

impl Default for RsliBuildOptions {
    /// Defaults: fixed seed, presorted table, no overlay, no AO trailer.
    fn default() -> Self {
        RsliBuildOptions {
            seed: 0x1234_5678,
            presorted: true,
            overlay: 0,
            add_ao_trailer: false,
        }
    }
}
|
||||
|
||||
/// Recursively appends every regular file under `root` to `out`.
/// Unreadable directories are skipped silently (best-effort test helper).
fn collect_files_recursive(root: &Path, out: &mut Vec<PathBuf>) {
    let entries = match fs::read_dir(root) {
        Ok(entries) => entries,
        Err(_) => return,
    };
    for entry in entries.flatten() {
        let path = entry.path();
        if path.is_dir() {
            collect_files_recursive(&path, out);
        } else if path.is_file() {
            out.push(path);
        }
    }
}
|
||||
|
||||
fn rsli_test_files() -> Vec<PathBuf> {
|
||||
let root = Path::new(env!("CARGO_MANIFEST_DIR"))
|
||||
.join("..")
|
||||
.join("..")
|
||||
.join("testdata")
|
||||
.join("rsli");
|
||||
let mut files = Vec::new();
|
||||
collect_files_recursive(&root, &mut files);
|
||||
files.sort();
|
||||
files
|
||||
.into_iter()
|
||||
.filter(|path| {
|
||||
fs::read(path)
|
||||
.map(|data| data.get(0..4) == Some(b"NL\0\x01"))
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Best-effort extraction of a human-readable message from a panic payload
/// (`String` or `&str`); anything else yields a fixed placeholder.
fn panic_message(payload: Box<dyn Any + Send>) -> String {
    let any = payload.as_ref();
    any.downcast_ref::<String>()
        .cloned()
        .or_else(|| any.downcast_ref::<&str>().map(|s| (*s).to_string()))
        .unwrap_or_else(|| String::from("panic without message"))
}
|
||||
|
||||
/// Writes `bytes` to a uniquely named file (prefix + pid + nanos) in the
/// OS temp directory and returns its path. Panics on I/O failure — this is
/// a test helper.
fn write_temp_file(prefix: &str, bytes: &[u8]) -> PathBuf {
    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_nanos())
        .unwrap_or(0);
    let file_name = format!("{}-{}-{}.bin", prefix, std::process::id(), nanos);
    let path = std::env::temp_dir().join(file_name);
    fs::write(&path, bytes).expect("failed to write temp archive");
    path
}
|
||||
|
||||
/// Compresses `data` as a raw DEFLATE stream (no zlib wrapper) at the
/// default compression level. Test helper: panics on encoder failure.
fn deflate_raw(data: &[u8]) -> Vec<u8> {
    let mut encoder = DeflateEncoder::new(Vec::new(), Compression::default());
    encoder
        .write_all(data)
        .expect("deflate encoder write failed");
    // `finish` flushes the stream and hands back the underlying Vec.
    encoder.finish().expect("deflate encoder finish failed")
}
|
||||
|
||||
/// Encodes `data` as an LZSS stream containing only literals: each group of
/// up to eight bytes is preceded by a flag byte with one "literal" bit set
/// (low bits first) per byte in the group.
fn lzss_pack_literals(data: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(data.len() + data.len() / 8 + 1);
    for chunk in data.chunks(8) {
        // (1 << len) - 1 sets the low `len` bits; for a full chunk of 8
        // this is 0xFF, matching the special case in earlier revisions.
        let mask = ((1u16 << chunk.len()) - 1) as u8;
        out.push(mask);
        out.extend_from_slice(chunk);
    }
    out
}
|
||||
|
||||
/// MSB-first bit accumulator used by the synthetic Huffman encoder.
struct BitWriter {
    // Completed output bytes.
    bytes: Vec<u8>,
    // Byte currently being filled.
    current: u8,
    // Single-bit mask marking the next position to write; 0x80 = MSB.
    mask: u8,
}

impl BitWriter {
    /// Starts with empty output and the write cursor at the MSB.
    fn new() -> Self {
        BitWriter {
            bytes: Vec::new(),
            current: 0,
            mask: 0x80,
        }
    }

    /// Appends one bit (any nonzero `bit` counts as 1); a completed byte is
    /// pushed to the output and the cursor returns to the MSB.
    fn write_bit(&mut self, bit: u8) {
        let byte_complete = self.mask == 0x01;
        if bit != 0 {
            self.current |= self.mask;
        }
        if byte_complete {
            self.bytes.push(self.current);
            self.current = 0;
            self.mask = 0x80;
        } else {
            self.mask >>= 1;
        }
    }

    /// Flushes a partially filled final byte (zero-padded in the low bits)
    /// and returns the finished stream.
    fn finish(mut self) -> Vec<u8> {
        if self.mask != 0x80 {
            self.bytes.push(self.current);
        }
        self.bytes
    }
}
|
||||
|
||||
/// Test-only adaptive Huffman model that encodes literal bytes only,
/// mirroring the decoder's tree layout so `lzss_huffman_decompress` can
/// round-trip the output. Leaves live at indices `LZH_T..`, internal nodes
/// below `LZH_T`, with the root at `LZH_R`.
struct LzhLiteralModel {
    // Node frequencies; kept non-decreasing by index (sibling property).
    freq: [u16; LZH_T + 1],
    // Parent index for every node (leaves are offset by LZH_T).
    parent: [usize; LZH_T + LZH_N_CHAR],
    // Left-child index per internal node; the right child is son[i] + 1.
    son: [usize; LZH_T + 1],
}

impl LzhLiteralModel {
    /// Builds the initial balanced tree (all literal frequencies = 1).
    fn new() -> Self {
        let mut model = Self {
            freq: [0; LZH_T + 1],
            parent: [0; LZH_T + LZH_N_CHAR],
            son: [0; LZH_T + 1],
        };
        model.start_huff();
        model
    }

    /// Emits the code bits for `literal` into `writer`, then bumps the
    /// model so encoder and decoder trees stay in lockstep.
    fn encode_literal(&mut self, literal: u8, writer: &mut BitWriter) {
        // Target is the literal's leaf node.
        let target = usize::from(literal) + LZH_T;
        let mut path = Vec::new();
        let mut visited = [false; LZH_T + 1];
        // Root-to-leaf DFS recovers the code; a missing path would mean the
        // tree invariants are broken, which is a bug in this helper.
        let found = self.find_path(self.son[LZH_R], target, &mut path, &mut visited);
        assert!(found, "failed to encode literal {literal}");
        for bit in path {
            writer.write_bit(bit);
        }

        self.update(usize::from(literal));
    }

    /// Depth-first search from `node` for the leaf `target`, recording the
    /// 0/1 branch choices in `path`. `visited` guards against cycles in a
    /// corrupted tree. Returns true when `target` was reached.
    fn find_path(
        &self,
        node: usize,
        target: usize,
        path: &mut Vec<u8>,
        visited: &mut [bool; LZH_T + 1],
    ) -> bool {
        if node == target {
            return true;
        }
        // Indices >= LZH_T are leaves; no children to descend into.
        if node >= LZH_T {
            return false;
        }
        if visited[node] {
            return false;
        }
        visited[node] = true;

        // Bit 0 selects son[node], bit 1 selects son[node] + 1 — hmm, here
        // the children are son[node + bit]; this mirrors the decoder's
        // traversal. NOTE(review): confirm against compress::lzh.
        for bit in [0u8, 1u8] {
            let child = self.son[node + usize::from(bit)];
            path.push(bit);
            if self.find_path(child, target, path, visited) {
                visited[node] = false;
                return true;
            }
            path.pop();
        }

        visited[node] = false;
        false
    }

    /// Initializes the tree: one leaf per literal with frequency 1, then
    /// internal nodes built by pairing consecutive children up to the root.
    fn start_huff(&mut self) {
        for i in 0..LZH_N_CHAR {
            self.freq[i] = 1;
            self.son[i] = i + LZH_T;
            self.parent[i + LZH_T] = i;
        }

        let mut i = 0usize;
        let mut j = LZH_N_CHAR;
        while j <= LZH_R {
            self.freq[j] = self.freq[i].saturating_add(self.freq[i + 1]);
            self.son[j] = i;
            self.parent[i] = j;
            self.parent[i + 1] = j;
            i += 2;
            j += 1;
        }

        // Sentinel above the root keeps the swap scan bounded.
        self.freq[LZH_T] = u16::MAX;
        self.parent[LZH_R] = 0;
    }

    /// Increments the frequency of literal `c` and restores the sibling
    /// property by swapping nodes upward, walking parent links to the root.
    fn update(&mut self, c: usize) {
        // Halve all frequencies before they can overflow the u16 counters.
        if self.freq[LZH_R] == LZH_MAX_FREQ {
            self.reconstruct();
        }

        let mut current = self.parent[c + LZH_T];
        loop {
            self.freq[current] = self.freq[current].saturating_add(1);
            let freq = self.freq[current];

            // If the bumped node now outranks its right neighbors, find the
            // furthest node it surpasses and swap subtrees with it.
            if current + 1 < self.freq.len() && freq > self.freq[current + 1] {
                let mut swap_idx = current + 1;
                while swap_idx + 1 < self.freq.len() && freq > self.freq[swap_idx + 1] {
                    swap_idx += 1;
                }

                self.freq.swap(current, swap_idx);

                let left = self.son[current];
                let right = self.son[swap_idx];
                self.son[current] = right;
                self.son[swap_idx] = left;

                // Re-point parents of the moved subtrees; an internal child
                // drags its sibling (index + 1) along.
                self.parent[left] = swap_idx;
                if left < LZH_T {
                    self.parent[left + 1] = swap_idx;
                }

                self.parent[right] = current;
                if right < LZH_T {
                    self.parent[right + 1] = current;
                }

                current = swap_idx;
            }

            current = self.parent[current];
            if current == 0 {
                break;
            }
        }
    }

    /// Rebuilds the whole tree after frequency saturation: collects the
    /// leaves with halved (rounded-up) frequencies, re-pairs them into
    /// internal nodes kept sorted by frequency, then fixes parent links.
    fn reconstruct(&mut self) {
        // Compact surviving leaves to the front with halved frequencies.
        let mut j = 0usize;
        for i in 0..LZH_T {
            if self.son[i] >= LZH_T {
                self.freq[j] = self.freq[i].div_ceil(2);
                self.son[j] = self.son[i];
                j += 1;
            }
        }

        // Pair consecutive nodes; insertion keeps `freq` non-decreasing.
        let mut i = 0usize;
        let mut current = LZH_N_CHAR;
        while current < LZH_T {
            let sum = self.freq[i].saturating_add(self.freq[i + 1]);
            self.freq[current] = sum;

            let mut insert_at = current;
            while insert_at > 0 && sum < self.freq[insert_at - 1] {
                insert_at -= 1;
            }

            // Shift the tail right to make room at the insertion point.
            for move_idx in (insert_at..current).rev() {
                self.freq[move_idx + 1] = self.freq[move_idx];
                self.son[move_idx + 1] = self.son[move_idx];
            }

            self.freq[insert_at] = sum;
            self.son[insert_at] = i;
            i += 2;
            current += 1;
        }

        // Recompute every parent pointer from the child table.
        for idx in 0..LZH_T {
            let node = self.son[idx];
            self.parent[node] = idx;
            if node < LZH_T {
                self.parent[node + 1] = idx;
            }
        }

        self.freq[LZH_T] = u16::MAX;
        self.parent[LZH_R] = 0;
    }
}
|
||||
|
||||
fn lzh_pack_literals(data: &[u8]) -> Vec<u8> {
|
||||
let mut writer = BitWriter::new();
|
||||
let mut model = LzhLiteralModel::new();
|
||||
for byte in data {
|
||||
model.encode_literal(*byte, &mut writer);
|
||||
}
|
||||
writer.finish()
|
||||
}
|
||||
|
||||
fn packed_for_method(method_raw: u16, plain: &[u8], key16: u16) -> Vec<u8> {
|
||||
match (u32::from(method_raw)) & 0x1E0 {
|
||||
0x000 => plain.to_vec(),
|
||||
0x020 => xor_stream(plain, key16),
|
||||
0x040 => lzss_pack_literals(plain),
|
||||
0x060 => xor_stream(&lzss_pack_literals(plain), key16),
|
||||
0x080 => lzh_pack_literals(plain),
|
||||
0x0A0 => xor_stream(&lzh_pack_literals(plain), key16),
|
||||
0x100 => deflate_raw(plain),
|
||||
_ => plain.to_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds a complete synthetic RsLi archive from `entries` and the
/// archive-level `opts`: header, XOR-encrypted entry table, packed payloads
/// placed sequentially after an optional media overlay gap, and an optional
/// "AO" trailer.
fn build_rsli_bytes(entries: &[SyntheticRsliEntry], opts: &RsliBuildOptions) -> Vec<u8> {
    let count = entries.len();
    // One 32-byte row per entry; the table starts right after the header.
    let mut rows_plain = vec![0u8; count * 32];
    let table_end = 32 + rows_plain.len();

    // sort_lookup[i] = rank of entry i when names are sorted byte-wise.
    // NOTE(review): sorts by the original (not uppercased) name — the
    // parser sorts the uppercased on-disk names; fine while fixtures use
    // uppercase names only.
    let mut sort_lookup: Vec<usize> = (0..count).collect();
    sort_lookup.sort_by(|a, b| entries[*a].name.as_bytes().cmp(entries[*b].name.as_bytes()));

    // Pack each payload with its per-entry key (the sort rank as u16),
    // matching how the parser derives key16.
    let mut packed_blobs = Vec::with_capacity(count);
    for index in 0..count {
        let key16 = u16::try_from(sort_lookup[index]).expect("sort index overflow");
        let packed = packed_for_method(entries[index].method_raw, &entries[index].plain, key16);
        packed_blobs.push(packed);
    }

    // Payloads start after the table plus the overlay gap; table offsets
    // are recorded relative to the overlay (raw = absolute - overlay).
    let overlay = usize::try_from(opts.overlay).expect("overlay overflow");
    let mut cursor = table_end + overlay;
    let mut output = vec![0u8; cursor];

    let mut data_offsets = Vec::with_capacity(count);
    for (index, packed) in packed_blobs.iter().enumerate() {
        let raw_offset = cursor
            .checked_sub(overlay)
            .expect("overlay larger than cursor");
        data_offsets.push(raw_offset);

        // Append this payload, growing the file as needed.
        let end = cursor.checked_add(packed.len()).expect("cursor overflow");
        if output.len() < end {
            output.resize(end, 0);
        }
        output[cursor..end].copy_from_slice(packed);
        cursor = end;

        // Fill this entry's 32-byte table row.
        let base = index * 32;
        let mut name_raw = [0u8; 12];
        let uppercase = entries[index].name.to_ascii_uppercase();
        let name_bytes = uppercase.as_bytes();
        assert!(name_bytes.len() <= 12, "name too long in synthetic fixture");
        name_raw[..name_bytes.len()].copy_from_slice(name_bytes);

        rows_plain[base..base + 12].copy_from_slice(&name_raw);

        // Presorted archives carry the sort rank; otherwise the field is 0
        // and the parser derives ranks itself.
        let sort_field: i16 = if opts.presorted {
            i16::try_from(sort_lookup[index]).expect("sort field overflow")
        } else {
            0
        };

        let packed_size = entries[index]
            .declared_packed_size
            .unwrap_or_else(|| u32::try_from(packed.len()).expect("packed size overflow"));

        // Row layout mirrors the parser: flags[16..18], sort[18..20],
        // unpacked size[20..24], raw offset[24..28], packed size[28..32].
        rows_plain[base + 16..base + 18].copy_from_slice(&entries[index].method_raw.to_le_bytes());
        rows_plain[base + 18..base + 20].copy_from_slice(&sort_field.to_le_bytes());
        rows_plain[base + 20..base + 24].copy_from_slice(
            &u32::try_from(entries[index].plain.len())
                .expect("unpacked size overflow")
                .to_le_bytes(),
        );
        rows_plain[base + 24..base + 28].copy_from_slice(
            &u32::try_from(data_offsets[index])
                .expect("data offset overflow")
                .to_le_bytes(),
        );
        rows_plain[base + 28..base + 32].copy_from_slice(&packed_size.to_le_bytes());
    }

    // An archive with zero entries still needs room for the header/table.
    if output.len() < table_end {
        output.resize(table_end, 0);
    }

    // Header: magic "NL", version 1, entry count, presorted flag, XOR seed.
    output[0..2].copy_from_slice(b"NL");
    output[2] = 0;
    output[3] = 1;
    output[4..6].copy_from_slice(
        &i16::try_from(count)
            .expect("entry count overflow")
            .to_le_bytes(),
    );

    let presorted_flag = if opts.presorted { 0xABBA_u16 } else { 0_u16 };
    output[14..16].copy_from_slice(&presorted_flag.to_le_bytes());
    output[20..24].copy_from_slice(&opts.seed.to_le_bytes());

    // Encrypt the table with the low 16 bits of the seed, as the parser expects.
    let encrypted_table = xor_stream(&rows_plain, (opts.seed & 0xFFFF) as u16);
    output[32..table_end].copy_from_slice(&encrypted_table);

    if opts.add_ao_trailer {
        output.extend_from_slice(b"AO");
        output.extend_from_slice(&opts.overlay.to_le_bytes());
    }

    output
}
|
||||
|
||||
/// Integration sweep over every real RsLi archive in `testdata/rsli`:
/// exercises each accessor (`load`, `load_packed`+`unpack`, `load_into`,
/// `load_fast`, `find`) for every entry, then verifies a byte-for-byte
/// rebuild of the archive. Panics inside each file are caught so one bad
/// archive cannot hide failures in the rest; a summary is printed at the end.
#[test]
fn rsli_read_unpack_and_repack_all_files() {
    let files = rsli_test_files();
    // Skip quietly (not fail) when the corpus is absent, e.g. in CI
    // checkouts without testdata.
    if files.is_empty() {
        eprintln!(
            "skipping rsli_read_unpack_and_repack_all_files: no RsLi archives in testdata/rsli"
        );
        return;
    }

    let checked = files.len();
    let mut success = 0usize;
    let mut failures = Vec::new();

    for path in files {
        let display_path = path.display().to_string();
        // AssertUnwindSafe: the closure only reads `path`; panics are
        // converted into failure strings below.
        let result = catch_unwind(AssertUnwindSafe(|| {
            let original = fs::read(&path).expect("failed to read archive");
            let library = Library::open_path(&path)
                .unwrap_or_else(|err| panic!("failed to open {}: {err}", path.display()));

            let count = library.entry_count();
            assert_eq!(
                count,
                library.entries().count(),
                "entry count mismatch: {}",
                path.display()
            );

            for idx in 0..count {
                let id = EntryId(idx as u32);
                let meta_ref = library
                    .get(id)
                    .unwrap_or_else(|| panic!("missing entry #{idx} in {}", path.display()));

                let loaded = library.load(id).unwrap_or_else(|err| {
                    panic!("load failed for {} entry #{idx}: {err}", path.display())
                });

                // load_packed + unpack must agree with the one-shot load.
                let packed = library.load_packed(id).unwrap_or_else(|err| {
                    panic!(
                        "load_packed failed for {} entry #{idx}: {err}",
                        path.display()
                    )
                });
                let unpacked = library.unpack(&packed).unwrap_or_else(|err| {
                    panic!("unpack failed for {} entry #{idx}: {err}", path.display())
                });
                assert_eq!(
                    loaded,
                    unpacked,
                    "load != unpack in {} entry #{idx}",
                    path.display()
                );

                // load_into must report the written size and the same bytes.
                let mut out = Vec::new();
                let written = library.load_into(id, &mut out).unwrap_or_else(|err| {
                    panic!(
                        "load_into failed for {} entry #{idx}: {err}",
                        path.display()
                    )
                });
                assert_eq!(
                    written,
                    loaded.len(),
                    "load_into size mismatch in {} entry #{idx}",
                    path.display()
                );
                assert_eq!(
                    out,
                    loaded,
                    "load_into payload mismatch in {} entry #{idx}",
                    path.display()
                );

                // The fast path must produce identical bytes.
                let fast = library.load_fast(id).unwrap_or_else(|err| {
                    panic!(
                        "load_fast failed for {} entry #{idx}: {err}",
                        path.display()
                    )
                });
                assert_eq!(
                    fast.as_slice(),
                    loaded.as_slice(),
                    "load_fast mismatch in {} entry #{idx}",
                    path.display()
                );

                // Lookup by name must resolve to an entry with that name.
                let found = library.find(&meta_ref.meta.name).unwrap_or_else(|| {
                    panic!(
                        "find failed for '{}' in {}",
                        meta_ref.meta.name,
                        path.display()
                    )
                });
                let found_meta = library.get(found).expect("find returned invalid entry id");
                assert_eq!(
                    found_meta.meta.name,
                    meta_ref.meta.name,
                    "find returned a different entry in {}",
                    path.display()
                );
            }

            // The parsed metadata must reproduce the file byte-for-byte.
            let rebuilt = library
                .rebuild_from_parsed_metadata()
                .unwrap_or_else(|err| panic!("rebuild failed for {}: {err}", path.display()));
            assert_eq!(
                rebuilt,
                original,
                "byte-to-byte roundtrip mismatch for {}",
                path.display()
            );
        }));

        match result {
            Ok(()) => success += 1,
            Err(payload) => failures.push(format!("{}: {}", display_path, panic_message(payload))),
        }
    }

    let failed = failures.len();
    eprintln!(
        "RsLi summary: checked={}, success={}, failed={}",
        checked, success, failed
    );
    if !failures.is_empty() {
        panic!(
            "RsLi validation failed.\nsummary: checked={}, success={}, failed={}\n{}",
            checked,
            success,
            failed,
            failures.join("\n")
        );
    }
}
|
||||
|
||||
/// Builds one synthetic archive containing one entry per supported pack
/// method (stored, XOR, LZSS, XOR+LZSS, LZSS+Huffman, XOR+LZSS+Huffman,
/// deflate) and checks every entry decodes back to its plaintext via both
/// `load` and `load_packed` + `unpack`.
#[test]
fn rsli_synthetic_all_methods_roundtrip() {
    // One entry per method selector (bits 0x1E0 of the flags word).
    let entries = vec![
        SyntheticRsliEntry {
            name: "M_NONE".to_string(),
            method_raw: 0x000,
            plain: b"plain-data".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "M_XOR".to_string(),
            method_raw: 0x020,
            plain: b"xor-only".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "M_LZSS".to_string(),
            method_raw: 0x040,
            plain: b"lzss literals payload".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "M_XLZS".to_string(),
            method_raw: 0x060,
            plain: b"xor lzss payload".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "M_LZHU".to_string(),
            method_raw: 0x080,
            plain: b"huffman literals payload".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "M_XLZH".to_string(),
            method_raw: 0x0A0,
            plain: b"xor huffman payload".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "M_DEFL".to_string(),
            method_raw: 0x100,
            plain: b"deflate payload with repetition repetition repetition".to_vec(),
            declared_packed_size: None,
        },
    ];

    // presorted: false forces the parser to derive sort order itself.
    let bytes = build_rsli_bytes(
        &entries,
        &RsliBuildOptions {
            seed: 0xA1B2_C3D4,
            presorted: false,
            overlay: 0,
            add_ao_trailer: false,
        },
    );
    let path = write_temp_file("rsli-all-methods", &bytes);

    let library = Library::open_path(&path).expect("open synthetic rsli failed");
    assert_eq!(library.entry_count(), entries.len());

    for entry in &entries {
        let id = library
            .find(&entry.name)
            .unwrap_or_else(|| panic!("find failed for {}", entry.name));
        let loaded = library
            .load(id)
            .unwrap_or_else(|err| panic!("load failed for {}: {err}", entry.name));
        assert_eq!(
            loaded, entry.plain,
            "decoded payload mismatch for {}",
            entry.name
        );

        // The two-step path must agree with the one-shot load.
        let packed = library
            .load_packed(id)
            .unwrap_or_else(|err| panic!("load_packed failed for {}: {err}", entry.name));
        let unpacked = library
            .unpack(&packed)
            .unwrap_or_else(|err| panic!("unpack failed for {}: {err}", entry.name));
        assert_eq!(unpacked, entry.plain, "unpack mismatch for {}", entry.name);
    }

    let _ = fs::remove_file(&path);
}
|
||||
|
||||
/// Round-trips a larger XOR+LZSS+Huffman (0x0A0) entry, exercising the
/// on-the-fly XOR path in the Huffman decoder with enough data to move the
/// adaptive model.
#[test]
fn rsli_xorlzss_huffman_on_the_fly_roundtrip() {
    // 512 bytes cycling through A..Z.
    let plain: Vec<u8> = (0..512u16).map(|i| b'A' + (i % 26) as u8).collect();
    let entries = vec![SyntheticRsliEntry {
        name: "XLZH_ONFLY".to_string(),
        method_raw: 0x0A0,
        plain: plain.clone(),
        declared_packed_size: None,
    }];

    let build_opts = RsliBuildOptions {
        seed: 0x0BAD_C0DE,
        presorted: true,
        overlay: 0,
        add_ao_trailer: false,
    };
    let bytes = build_rsli_bytes(&entries, &build_opts);
    let path = write_temp_file("rsli-xorlzh-onfly", &bytes);

    let library = Library::open_path(&path).expect("open synthetic XLZH archive failed");
    let id = library
        .find("XLZH_ONFLY")
        .expect("find XLZH_ONFLY entry failed");

    let loaded = library.load(id).expect("load XLZH_ONFLY failed");
    assert_eq!(loaded, plain);

    // The explicit packed+unpack path must match the one-shot load.
    let packed = library
        .load_packed(id)
        .expect("load_packed XLZH_ONFLY failed");
    let unpacked = library.unpack(&packed).expect("unpack XLZH_ONFLY failed");
    assert_eq!(unpacked, loaded);

    let _ = fs::remove_file(&path);
}
|
||||
|
||||
#[test]
fn rsli_synthetic_overlay_and_ao_trailer() {
    // One LZSS (0x040) entry in an archive built with a 128-byte media
    // overlay and an appended "AO" trailer.
    let archive_bytes = build_rsli_bytes(
        &[SyntheticRsliEntry {
            name: "OVERLAY".to_string(),
            method_raw: 0x040,
            plain: b"overlay-data".to_vec(),
            declared_packed_size: None,
        }],
        &RsliBuildOptions {
            seed: 0x4433_2211,
            presorted: true,
            overlay: 128,
            add_ao_trailer: true,
        },
    );
    let tmp = write_temp_file("rsli-overlay", &archive_bytes);

    // Opening must succeed once AO-trailer handling is switched on.
    let archive = Library::open_path_with(
        &tmp,
        OpenOptions {
            allow_ao_trailer: true,
            allow_deflate_eof_plus_one: true,
        },
    )
    .expect("open with AO trailer enabled failed");

    let entry_id = archive.find("OVERLAY").expect("find overlay entry failed");
    let payload = archive.load(entry_id).expect("load overlay entry failed");
    assert_eq!(payload, b"overlay-data");

    let _ = fs::remove_file(&tmp);
}
|
||||
|
||||
#[test]
fn rsli_deflate_eof_plus_one_quirk() {
    // Build a Deflate (0x100) entry whose declared packed size points one
    // byte past the real compressed stream — the EOF+1 quirk.
    let plain = b"quirk deflate payload".to_vec();
    let stream = deflate_raw(&plain);
    let declared = u32::try_from(stream.len() + 1).expect("declared size overflow");

    let archive_bytes = build_rsli_bytes(
        &[SyntheticRsliEntry {
            name: "QUIRK".to_string(),
            method_raw: 0x100,
            plain,
            declared_packed_size: Some(declared),
        }],
        &RsliBuildOptions::default(),
    );
    let tmp = write_temp_file("rsli-deflate-quirk", &archive_bytes);

    // With the quirk allowed the entry decodes normally.
    let permissive = Library::open_path_with(
        &tmp,
        OpenOptions {
            allow_ao_trailer: true,
            allow_deflate_eof_plus_one: true,
        },
    )
    .expect("open with EOF+1 quirk enabled failed");
    let entry_id = permissive.find("QUIRK").expect("find quirk entry failed");
    let decoded = permissive.load(entry_id).expect("load quirk entry failed");
    assert_eq!(decoded, b"quirk deflate payload");

    // With the quirk disallowed, opening must reject entry 0.
    let strict = Library::open_path_with(
        &tmp,
        OpenOptions {
            allow_ao_trailer: true,
            allow_deflate_eof_plus_one: false,
        },
    );
    match strict {
        Err(Error::DeflateEofPlusOneQuirkRejected { id }) => assert_eq!(id, 0),
        other => panic!("expected DeflateEofPlusOneQuirkRejected, got {other:?}"),
    }

    let _ = fs::remove_file(&tmp);
}
|
||||
|
||||
#[test]
fn rsli_validation_error_cases() {
    // Baseline archive: one stored (0x000) entry. Each case below corrupts a
    // copy of these bytes and checks that opening/loading fails with the
    // expected error variant.
    let valid = build_rsli_bytes(
        &[SyntheticRsliEntry {
            name: "BASE".to_string(),
            method_raw: 0x000,
            plain: b"abc".to_vec(),
            declared_packed_size: None,
        }],
        &RsliBuildOptions::default(),
    );

    // Corrupted magic signature (first two bytes of the header).
    let mut bad_magic = valid.clone();
    bad_magic[0..2].copy_from_slice(b"XX");
    let path = write_temp_file("rsli-bad-magic", &bad_magic);
    match Library::open_path(&path) {
        Err(Error::InvalidMagic { .. }) => {}
        other => panic!("expected InvalidMagic, got {other:?}"),
    }
    let _ = fs::remove_file(&path);

    // Unsupported format version (byte at header offset 3).
    let mut bad_version = valid.clone();
    bad_version[3] = 2;
    let path = write_temp_file("rsli-bad-version", &bad_version);
    match Library::open_path(&path) {
        Err(Error::UnsupportedVersion { got }) => assert_eq!(got, 2),
        other => panic!("expected UnsupportedVersion, got {other:?}"),
    }
    let _ = fs::remove_file(&path);

    // Negative entry count (int16 at header offset 4).
    let mut bad_count = valid.clone();
    bad_count[4..6].copy_from_slice(&(-1_i16).to_le_bytes());
    let path = write_temp_file("rsli-bad-count", &bad_count);
    match Library::open_path(&path) {
        Err(Error::InvalidEntryCount { got }) => assert_eq!(got, -1),
        other => panic!("expected InvalidEntryCount, got {other:?}"),
    }
    let _ = fs::remove_file(&path);

    // Entry count larger than the file can actually hold.
    let mut bad_table = valid.clone();
    bad_table[4..6].copy_from_slice(&100_i16.to_le_bytes());
    let path = write_temp_file("rsli-bad-table", &bad_table);
    match Library::open_path(&path) {
        Err(Error::EntryTableOutOfBounds { .. }) => {}
        other => panic!("expected EntryTableOutOfBounds, got {other:?}"),
    }
    let _ = fs::remove_file(&path);

    // Force a truly unknown method by writing 0x1C0 mask bits: decrypt the
    // 32-byte table row, patch the flags word in plaintext, then re-encrypt
    // with the same seed (the keystream is data-independent, so XOR-ing twice
    // restores ciphertext form). Patching the plaintext row is sufficient —
    // the entire encrypted row [32..64] is rewritten below, so no separate
    // write into the encrypted bytes is needed.
    let mut unknown_method = build_rsli_bytes(
        &[SyntheticRsliEntry {
            name: "UNK".to_string(),
            method_raw: 0x120,
            plain: b"x".to_vec(),
            declared_packed_size: None,
        }],
        &RsliBuildOptions::default(),
    );
    // Cipher seed lives at header offset 20 (see the RsLi header layout).
    let seed = u32::from_le_bytes([
        unknown_method[20],
        unknown_method[21],
        unknown_method[22],
        unknown_method[23],
    ]);
    let mut plain_row = vec![0u8; 32];
    plain_row.copy_from_slice(&unknown_method[32..64]);
    plain_row = xor_stream(&plain_row, (seed & 0xFFFF) as u16);
    plain_row[16..18].copy_from_slice(&(0x1C0_u16).to_le_bytes());
    let encrypted_row = xor_stream(&plain_row, (seed & 0xFFFF) as u16);
    unknown_method[32..64].copy_from_slice(&encrypted_row);

    let path = write_temp_file("rsli-unknown-method", &unknown_method);
    let lib = Library::open_path(&path).expect("open archive with unknown method failed");
    match lib.load(EntryId(0)) {
        Err(Error::UnsupportedMethod { raw }) => assert_eq!(raw, 0x1C0),
        other => panic!("expected UnsupportedMethod, got {other:?}"),
    }
    let _ = fs::remove_file(&path);

    // Packed size that runs past EOF (u32 at entry offset 28, first row
    // starting at byte 32).
    let mut bad_packed = valid.clone();
    bad_packed[32 + 28..32 + 32].copy_from_slice(&0xFFFF_FFF0_u32.to_le_bytes());
    let path = write_temp_file("rsli-bad-packed", &bad_packed);
    match Library::open_path(&path) {
        Err(Error::PackedSizePastEof { .. }) => {}
        other => panic!("expected PackedSizePastEof, got {other:?}"),
    }
    let _ = fs::remove_file(&path);

    // AO trailer whose overlay offset lies outside the file.
    let mut with_bad_overlay = valid;
    with_bad_overlay.extend_from_slice(b"AO");
    with_bad_overlay.extend_from_slice(&0xFFFF_FFFF_u32.to_le_bytes());
    let path = write_temp_file("rsli-bad-overlay", &with_bad_overlay);
    match Library::open_path_with(
        &path,
        OpenOptions {
            allow_ao_trailer: true,
            allow_deflate_eof_plus_one: true,
        },
    ) {
        Err(Error::MediaOverlayOutOfBounds { .. }) => {}
        other => panic!("expected MediaOverlayOutOfBounds, got {other:?}"),
    }
    let _ = fs::remove_file(&path);
}
|
||||
17
docs/index.md
Normal file
17
docs/index.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# Welcome to MkDocs
|
||||
|
||||
For full documentation visit [mkdocs.org](https://www.mkdocs.org).
|
||||
|
||||
## Commands
|
||||
|
||||
* `mkdocs new [dir-name]` - Create a new project.
|
||||
* `mkdocs serve` - Start the live-reloading docs server.
|
||||
* `mkdocs build` - Build the documentation site.
|
||||
* `mkdocs -h` - Print help message and exit.
|
||||
|
||||
## Project layout
|
||||
|
||||
mkdocs.yml # The configuration file.
|
||||
docs/
|
||||
index.md # The documentation homepage.
|
||||
... # Other markdown pages, images and other files.
|
||||
1418
docs/specs/msh.md
Normal file
1418
docs/specs/msh.md
Normal file
File diff suppressed because it is too large
Load Diff
718
docs/specs/nres.md
Normal file
718
docs/specs/nres.md
Normal file
@@ -0,0 +1,718 @@
|
||||
# Форматы игровых ресурсов
|
||||
|
||||
## Обзор
|
||||
|
||||
Библиотека `Ngi32.dll` реализует два различных формата архивов ресурсов:
|
||||
|
||||
1. **NRes** — основной формат архива ресурсов, используемый через API `niOpenResFile` / `niCreateResFile`. Каталог файлов расположен в **конце** файла. Поддерживает создание, редактирование, добавление и удаление записей.
|
||||
|
||||
2. **RsLi** — формат библиотеки ресурсов, используемый через API `rsOpenLib` / `rsLoad`. Таблица записей расположена **в начале** файла (сразу после заголовка) и зашифрована XOR-шифром. Поддерживает несколько методов сжатия. Только чтение.
|
||||
|
||||
---
|
||||
|
||||
# Часть 1. Формат NRes
|
||||
|
||||
## 1.1. Общая структура файла
|
||||
|
||||
```
|
||||
┌──────────────────────────┐ Смещение 0
|
||||
│ Заголовок (16 байт) │
|
||||
├──────────────────────────┤ Смещение 16
|
||||
│ │
|
||||
│ Данные ресурсов │
|
||||
│ (выровнены по 8 байт) │
|
||||
│ │
|
||||
├──────────────────────────┤ Смещение = total_size - entry_count × 64
|
||||
│ Каталог записей │
|
||||
│ (entry_count × 64 байт) │
|
||||
└──────────────────────────┘ Смещение = total_size
|
||||
```
|
||||
|
||||
## 1.2. Заголовок файла (16 байт)
|
||||
|
||||
| Смещение | Размер | Тип | Значение | Описание |
|
||||
| -------- | ------ | ------- | ------------------- | ------------------------------------ |
|
||||
| 0 | 4 | char[4] | `NRes` (0x4E526573) | Магическая сигнатура (little-endian) |
|
||||
| 4 | 4 | uint32 | `0x00000100` (256) | Версия формата (1.0) |
|
||||
| 8 | 4 | int32 | — | Количество записей в каталоге |
|
||||
| 12 | 4 | int32 | — | Полный размер файла в байтах |
|
||||
|
||||
**Валидация при открытии:** магическая сигнатура и версия должны совпадать точно. Поле `total_size` (смещение 12) **проверяется на равенство** с фактическим размером файла (`GetFileSize`). Если значения не совпадают — файл отклоняется.
|
||||
|
||||
## 1.3. Положение каталога в файле
|
||||
|
||||
Каталог располагается в самом конце файла. Его смещение вычисляется по формуле:
|
||||
|
||||
```
|
||||
directory_offset = total_size - entry_count × 64
|
||||
```
|
||||
|
||||
Данные ресурсов занимают пространство между заголовком (16 байт) и каталогом.
|
||||
|
||||
## 1.4. Запись каталога (64 байта)
|
||||
|
||||
Каждая запись каталога занимает ровно **64 байта** (0x40):
|
||||
|
||||
| Смещение | Размер | Тип | Описание |
|
||||
| -------- | ------ | -------- | ------------------------------------------------- |
|
||||
| 0 | 4 | uint32 | Тип / идентификатор ресурса |
|
||||
| 4 | 4 | uint32 | Атрибут 1 (например, формат, дата, категория) |
|
||||
| 8 | 4 | uint32 | Атрибут 2 (например, подтип, метка времени) |
|
||||
| 12 | 4 | uint32 | Размер данных ресурса в байтах |
|
||||
| 16 | 4 | uint32 | Атрибут 3 (дополнительный параметр) |
|
||||
| 20 | 36 | char[36] | Имя файла (null-terminated, макс. 35 символов) |
|
||||
| 56 | 4 | uint32 | Смещение данных от начала файла |
|
||||
| 60 | 4 | uint32 | Индекс сортировки (для двоичного поиска по имени) |
|
||||
|
||||
### Поле «Имя файла» (смещение 20, 36 байт)
|
||||
|
||||
- Максимальная длина имени: **35 символов** + 1 байт null-терминатор.
|
||||
- При записи поле сначала обнуляется (`memset(0, 36 байт)`), затем копируется имя (`strncpy`, макс. 35 символов).
|
||||
- Поиск по имени выполняется **без учёта регистра** (`_strcmpi`).
|
||||
|
||||
### Поле «Индекс сортировки» (смещение 60)
|
||||
|
||||
Используется для **двоичного поиска по имени**. Содержит индекс оригинальной записи, отсортированной в алфавитном порядке (регистронезависимо). Индекс строится при сохранении файла функцией `sub_10013260` с помощью **пузырьковой сортировки** по именам.
|
||||
|
||||
**Алгоритм поиска** (`sub_10011E60`): классический двоичный поиск по отсортированному массиву индексов. Возвращает оригинальный индекс записи или `-1` при отсутствии.
|
||||
|
||||
### Поле «Смещение данных» (смещение 56)
|
||||
|
||||
Абсолютное смещение от начала файла. Данные читаются из mapped view: `pointer = mapped_base + data_offset`.
|
||||
|
||||
## 1.5. Выравнивание данных
|
||||
|
||||
При добавлении ресурса его данные записываются последовательно, после чего выполняется **выравнивание по 8-байтной границе**:
|
||||
|
||||
```c
|
||||
padding = ((data_size + 7) & ~7) - data_size;
|
||||
// Если padding > 0, записываются нулевые байты
|
||||
```
|
||||
|
||||
Таким образом, каждый блок данных начинается с адреса, кратного 8.
|
||||
|
||||
При изменении размера данных ресурса выполняется сдвиг всех последующих данных и обновление смещений всех затронутых записей каталога.
|
||||
|
||||
## 1.6. Создание файла (API `niCreateResFile`)
|
||||
|
||||
При создании нового файла:
|
||||
|
||||
1. Если файл уже существует и содержит корректный NRes-архив, существующий каталог считывается с конца файла, а файл усекается до начала каталога.
|
||||
2. Если файл пуст или не является NRes-архивом, создаётся новый с пустым каталогом. Поля `entry_count = 0`, `total_size = 16`.
|
||||
|
||||
При закрытии файла (`sub_100122D0`):
|
||||
|
||||
1. Заголовок переписывается в начало файла (16 байт).
|
||||
2. Вычисляется `total_size = data_end_offset + entry_count × 64`.
|
||||
3. Индексы сортировки пересчитываются.
|
||||
4. Каталог записей записывается в конец файла.
|
||||
|
||||
## 1.7. Режимы сортировки каталога
|
||||
|
||||
Функция `sub_10012560` поддерживает 12 режимов сортировки (0–11):
|
||||
|
||||
| Режим | Порядок сортировки |
|
||||
| ----- | --------------------------------- |
|
||||
| 0 | Без сортировки (сброс) |
|
||||
| 1 | По атрибуту 1 (смещение 4) |
|
||||
| 2 | По атрибуту 2 (смещение 8) |
|
||||
| 3 | По (атрибут 1, атрибут 2) |
|
||||
| 4 | По типу ресурса (смещение 0) |
|
||||
| 5 | По (тип, атрибут 1) |
|
||||
| 6 | По (тип, атрибут 1) — идентичен 5 |
|
||||
| 7 | По (тип, атрибут 1, атрибут 2) |
|
||||
| 8 | По имени (регистронезависимо) |
|
||||
| 9 | По (тип, имя) |
|
||||
| 10 | По (атрибут 1, имя) |
|
||||
| 11 | По (атрибут 2, имя) |
|
||||
|
||||
## 1.8. Операция `niOpenResFileEx` — флаги открытия
|
||||
|
||||
Второй параметр — битовые флаги:
|
||||
|
||||
| Бит | Маска | Описание |
|
||||
| --- | ----- | ----------------------------------------------------------------------------------- |
|
||||
| 0 | 0x01 | Sequential scan hint (`FILE_FLAG_SEQUENTIAL_SCAN` вместо `FILE_FLAG_RANDOM_ACCESS`) |
|
||||
| 1 | 0x02 | Открыть для записи (read-write). Без флага — только чтение |
|
||||
| 2 | 0x04 | Пометить файл как «кэшируемый» (не выгружать при refcount=0) |
|
||||
| 3 | 0x08 | Raw-режим: не проверять заголовок NRes, трактовать весь файл как единый ресурс |
|
||||
|
||||
## 1.9. Виртуальное касание страниц
|
||||
|
||||
Функция `sub_100197D0` выполняет «касание» страниц памяти для принудительной загрузки из memory-mapped файла. Она обходит адресное пространство с шагом 4096 байт (размер страницы), начиная с 0x10000 (64 КБ):
|
||||
|
||||
```
|
||||
for (result = 0x10000; result < size; result += 4096);
|
||||
```
|
||||
|
||||
Вызывается при чтении данных ресурса с флагом `a3 != 0` для предзагрузки данных в оперативную память.
|
||||
|
||||
---
|
||||
|
||||
# Часть 2. Формат RsLi
|
||||
|
||||
## 2.1. Общая структура файла
|
||||
|
||||
```
|
||||
┌───────────────────────────────┐ Смещение 0
|
||||
│ Заголовок файла (32 байта) │
|
||||
├───────────────────────────────┤ Смещение 32
|
||||
│ Таблица записей (зашифрована)│
|
||||
│ (entry_count × 32 байт) │
|
||||
├───────────────────────────────┤ Смещение 32 + entry_count × 32
|
||||
│ │
|
||||
│ Данные ресурсов │
|
||||
│ │
|
||||
├───────────────────────────────┤
|
||||
│ [Опциональный трейлер — 6 б] │
|
||||
└───────────────────────────────┘
|
||||
```
|
||||
|
||||
## 2.2. Заголовок файла (32 байта)
|
||||
|
||||
| Смещение | Размер | Тип | Значение | Описание |
|
||||
| -------- | ------ | ------- | ----------------- | --------------------------------------------- |
|
||||
| 0 | 2 | char[2] | `NL` (0x4C4E) | Магическая сигнатура |
|
||||
| 2 | 1 | uint8 | `0x00` | Зарезервировано (должно быть 0) |
|
||||
| 3 | 1 | uint8 | `0x01` | Версия формата |
|
||||
| 4 | 2 | int16 | — | Количество записей (sign-extended при чтении) |
|
||||
| 6 | 8 | — | — | Зарезервировано / не используется |
|
||||
| 14 | 2 | uint16 | `0xABBA` или иное | Флаг предсортировки (см. ниже) |
|
||||
| 16 | 4 | — | — | Зарезервировано |
|
||||
| 20 | 4 | uint32 | — | **Начальное состояние XOR-шифра** (seed) |
|
||||
| 24 | 8 | — | — | Зарезервировано |
|
||||
|
||||
### Флаг предсортировки (смещение 14)
|
||||
|
||||
- Если `*(uint16*)(header + 14) == 0xABBA` — движок **не строит** таблицу индексов в памяти. Значения `entry[i].sort_to_original` используются **как есть** (и для двоичного поиска, и как XOR‑ключ для данных).
|
||||
- Если значение **отлично от 0xABBA** — после загрузки выполняется **пузырьковая сортировка** имён и строится перестановка `sort_to_original[]`, которая затем **записывается в `entry[i].sort_to_original`**, перетирая значения из файла. Именно эта перестановка далее используется и для поиска, и как XOR‑ключ (младшие 16 бит).
|
||||
|
||||
## 2.3. XOR-шифр таблицы записей
|
||||
|
||||
Таблица записей начинается со смещения 32 и зашифрована поточным XOR-шифром. Ключ инициализируется из DWORD по смещению 20 заголовка.
|
||||
|
||||
### Начальное состояние
|
||||
|
||||
```
|
||||
seed = *(uint32*)(header + 20)
|
||||
lo = seed & 0xFF // Младший байт
|
||||
hi = (seed >> 8) & 0xFF // Второй байт
|
||||
```
|
||||
|
||||
### Алгоритм дешифровки (побайтовый)
|
||||
|
||||
Для каждого зашифрованного байта `encrypted[i]`, начиная с `i = 0`:
|
||||
|
||||
```
|
||||
step 1: lo = hi ^ ((lo << 1) & 0xFF) // Сдвиг lo влево на 1, XOR с hi
|
||||
step 2: decrypted[i] = lo ^ encrypted[i] // Расшифровка байта
|
||||
step 3: hi = lo ^ ((hi >> 1) & 0xFF) // Сдвиг hi вправо на 1, XOR с lo
|
||||
```
|
||||
|
||||
**Пример реализации:**
|
||||
|
||||
```python
|
||||
def decrypt_rs_entries(encrypted_data: bytes, seed: int) -> bytes:
|
||||
lo = seed & 0xFF
|
||||
hi = (seed >> 8) & 0xFF
|
||||
result = bytearray(len(encrypted_data))
|
||||
for i in range(len(encrypted_data)):
|
||||
lo = (hi ^ ((lo << 1) & 0xFF)) & 0xFF
|
||||
result[i] = lo ^ encrypted_data[i]
|
||||
hi = (lo ^ ((hi >> 1) & 0xFF)) & 0xFF
|
||||
return bytes(result)
|
||||
```
|
||||
|
||||
Этот же алгоритм используется для шифрования данных ресурсов с методом XOR (флаги 0x20, 0x60, 0xA0), но с другим начальным ключом из записи.
|
||||
|
||||
## 2.4. Запись таблицы (32 байта, на диске, до дешифровки)
|
||||
|
||||
После дешифровки каждая запись имеет следующую структуру:
|
||||
|
||||
| Смещение | Размер | Тип | Описание |
|
||||
| -------- | ------ | -------- | -------------------------------------------------------------- |
|
||||
| 0 | 12 | char[12] | Имя ресурса (ASCII, обычно uppercase; строка читается до `\0`) |
|
||||
| 12 | 4 | — | Зарезервировано (движком игнорируется) |
|
||||
| 16 | 2 | int16 | **Флаги** (метод сжатия и атрибуты) |
|
||||
| 18 | 2 | int16 | **`sort_to_original[i]` / XOR‑ключ** (см. ниже) |
|
||||
| 20 | 4 | uint32 | **Размер распакованных данных** (`unpacked_size`) |
|
||||
| 24 | 4 | uint32 | Смещение данных от начала файла (`data_offset`) |
|
||||
| 28 | 4 | uint32 | Размер упакованных данных в байтах (`packed_size`) |
|
||||
|
||||
### Имена ресурсов
|
||||
|
||||
- Поле `name[12]` копируется побайтно. Внутренне движок всегда имеет `\0` сразу после этих 12 байт (зарезервированные 4 байта в памяти принудительно обнуляются), поэтому имя **может быть длиной до 12 символов** даже без `\0` внутри `name[12]`.
|
||||
- На практике имена обычно **uppercase ASCII**. `rsFind` приводит запрос к верхнему регистру (`_strupr`) и сравнивает побайтно.
|
||||
- `rsFind` копирует имя запроса `strncpy(..., 16)` и принудительно ставит `\0` в `Destination[15]`, поэтому запрос длиннее 15 символов будет усечён.
|
||||
|
||||
### Поле `sort_to_original[i]` (смещение 18)
|
||||
|
||||
Это **не “свойство записи”**, а элемент таблицы индексов, по которой `rsFind` делает двоичный поиск:
|
||||
|
||||
- Таблица реализована “внутри записей”: значение берётся как `entry[i].sort_to_original` (где `i` — позиция двоичного поиска), а реальная запись для сравнения берётся как `entry[ sort_to_original[i] ]`.
|
||||
- Тем же значением (младшие 16 бит) инициализируется XOR‑шифр данных для методов, где он используется (0x20/0x60/0xA0). Поэтому при упаковке/шифровании данных ключ должен совпадать с итоговым `sort_to_original[i]` (см. флаг 0xABBA в разделе 2.2).
|
||||
|
||||
Поиск выполняется **двоичным поиском** по этой таблице, с фолбэком на **линейный поиск** если двоичный не нашёл (поведение `rsFind`).
|
||||
|
||||
## 2.5. Поле флагов (смещение 16 записи)
|
||||
|
||||
Биты поля флагов кодируют метод сжатия и дополнительные атрибуты:
|
||||
|
||||
```
|
||||
Биты [8:5] (маска 0x1E0): Метод сжатия/шифрования
|
||||
Бит [6] (маска 0x040): Флаг realloc (буфер декомпрессии может быть больше)
|
||||
```
|
||||
|
||||
### Методы сжатия (биты 8–5, маска 0x1E0)
|
||||
|
||||
| Значение | Hex | Описание |
|
||||
| -------- | ----- | --------------------------------------- |
|
||||
| 0x000 | 0x00 | Без сжатия (копирование) |
|
||||
| 0x020 | 0x20 | Только XOR-шифр |
|
||||
| 0x040 | 0x40 | LZSS (простой вариант) |
|
||||
| 0x060 | 0x60 | XOR-шифр + LZSS (простой вариант) |
|
||||
| 0x080 | 0x80 | LZSS с адаптивным кодированием Хаффмана |
|
||||
| 0x0A0 | 0xA0 | XOR-шифр + LZSS с Хаффманом |
|
||||
| 0x100 | 0x100 | Deflate (аналог zlib/RFC 1951) |
|
||||
|
||||
Примечание: `rsGetPackMethod()` возвращает `flags & 0x1C0` (без бита 0x20). Поэтому:
|
||||
|
||||
- для 0x20 вернётся 0x00,
|
||||
- для 0x60 вернётся 0x40,
|
||||
- для 0xA0 вернётся 0x80.
|
||||
|
||||
### Бит 0x40 (выделение +0x12 и последующее `realloc`)
|
||||
|
||||
Бит 0x40 проверяется отдельно (`flags & 0x40`). Если он установлен, выходной буфер выделяется с запасом `+0x12` (18 байт), а после распаковки вызывается `realloc` для усечения до точного `unpacked_size`.
|
||||
|
||||
Важно: этот же бит входит в код методов 0x40/0x60, поэтому для них поведение “+0x12 и shrink” включено автоматически.
|
||||
|
||||
## 2.6. Размеры данных
|
||||
|
||||
В каждой записи на диске хранятся оба значения:
|
||||
|
||||
- `unpacked_size` (смещение 20) — размер распакованных данных.
|
||||
- `packed_size` (смещение 28) — размер упакованных данных (байт во входном потоке для выбранного метода).
|
||||
|
||||
Для метода 0x00 (без сжатия) обычно `packed_size == unpacked_size`.
|
||||
|
||||
`rsGetInfo` возвращает именно `unpacked_size` (то, сколько байт выдаст `rsLoad`).
|
||||
|
||||
Практический нюанс для метода `0x100` (Deflate): в реальных игровых данных встречается запись, где `packed_size` указывает на диапазон до `EOF + 1`. Поток успешно декодируется и без последнего байта; это похоже на lookahead-поведение декодера.
|
||||
|
||||
## 2.7. Опциональный трейлер медиа (6 байт)
|
||||
|
||||
При открытии с флагом `a2 & 2`:
|
||||
|
||||
| Смещение от конца | Размер | Тип | Описание |
|
||||
| ----------------- | ------ | ------- | ----------------------- |
|
||||
| −6 | 2 | char[2] | Сигнатура `AO` (0x4F41) |
|
||||
| −4 | 4 | uint32 | Смещение медиа-оверлея |
|
||||
|
||||
Если трейлер присутствует, все смещения данных в записях корректируются: `effective_offset = entry_offset + media_overlay_offset`.
|
||||
|
||||
---
|
||||
|
||||
# Часть 3. Алгоритмы сжатия (формат RsLi)
|
||||
|
||||
## 3.1. XOR-шифр данных (метод 0x20)
|
||||
|
||||
Алгоритм идентичен XOR‑шифру таблицы записей (раздел 2.3), но начальный ключ берётся из `entry[i].sort_to_original` (смещение 18 записи, младшие 16 бит).
|
||||
|
||||
Важно про размер входа:
|
||||
|
||||
- В ветке **0x20** движок XOR‑ит ровно `unpacked_size` байт (и ожидает, что поток данных имеет ту же длину; на практике `packed_size == unpacked_size`).
|
||||
- В ветках **0x60/0xA0** XOR применяется к **упакованному** потоку длиной `packed_size` перед декомпрессией.
|
||||
|
||||
### Инициализация
|
||||
|
||||
```
|
||||
key16 = (uint16)entry.sort_to_original // int16 на диске по смещению 18
|
||||
lo = key16 & 0xFF
|
||||
hi = (key16 >> 8) & 0xFF
|
||||
```
|
||||
|
||||
### Дешифровка (псевдокод)
|
||||
|
||||
```
|
||||
for i in range(N): # N = unpacked_size (для 0x20) или packed_size (для 0x60/0xA0)
|
||||
lo = (hi ^ ((lo << 1) & 0xFF)) & 0xFF
|
||||
out[i] = in[i] ^ lo
|
||||
hi = (lo ^ ((hi >> 1) & 0xFF)) & 0xFF
|
||||
```
|
||||
|
||||
## 3.2. LZSS — простой вариант (метод 0x40)
|
||||
|
||||
Классический алгоритм LZSS (Lempel-Ziv-Storer-Szymanski) с кольцевым буфером.
|
||||
|
||||
### Параметры
|
||||
|
||||
| Параметр | Значение |
|
||||
| ----------------------------- | ------------------ |
|
||||
| Размер кольцевого буфера | 4096 байт (0x1000) |
|
||||
| Начальная позиция записи | 4078 (0xFEE) |
|
||||
| Начальное заполнение | 0x20 (пробел) |
|
||||
| Минимальная длина совпадения | 3 |
|
||||
| Максимальная длина совпадения | 18 (4 бита + 3) |
|
||||
|
||||
### Алгоритм декомпрессии
|
||||
|
||||
```
|
||||
Инициализация:
|
||||
ring_buffer[0..4095] = 0x20 (заполнить пробелами)
|
||||
ring_pos = 4078
|
||||
flags_byte = 0
|
||||
flags_bits_remaining = 0
|
||||
|
||||
Цикл (пока не заполнен выходной буфер И не исчерпан входной):
|
||||
|
||||
1. Если flags_bits_remaining == 0:
|
||||
- Прочитать 1 байт из входного потока → flags_byte
|
||||
- flags_bits_remaining = 8
|
||||
|
||||
Декодировать как:
|
||||
- Старший бит устанавливается в 0x7F (маркер)
|
||||
- Оставшиеся 7 бит — флаги текущей группы
|
||||
|
||||
Реально в коде: control_word = (flags_byte) | (0x7F << 8)
|
||||
Каждый бит проверяется сдвигом вправо.
|
||||
|
||||
2. Проверить младший бит control_word:
|
||||
|
||||
Если бит = 1 (литерал):
|
||||
- Прочитать 1 байт из входного потока → byte
|
||||
- ring_buffer[ring_pos] = byte
|
||||
- ring_pos = (ring_pos + 1) & 0xFFF
|
||||
- Записать byte в выходной буфер
|
||||
|
||||
Если бит = 0 (ссылка):
|
||||
- Прочитать 2 байта: low_byte, high_byte
|
||||
- offset = low_byte | ((high_byte & 0xF0) << 4) // 12 бит
|
||||
- length = (high_byte & 0x0F) + 3 // 4 бита + 3
|
||||
- Скопировать length байт из ring_buffer[offset...]:
|
||||
для j от 0 до length-1:
|
||||
byte = ring_buffer[(offset + j) & 0xFFF]
|
||||
ring_buffer[ring_pos] = byte
|
||||
ring_pos = (ring_pos + 1) & 0xFFF
|
||||
записать byte в выходной буфер
|
||||
|
||||
3. Сдвинуть control_word вправо на 1 бит
|
||||
4. flags_bits_remaining -= 1
|
||||
```
|
||||
|
||||
### Подробная раскладка пары ссылки (2 байта)
|
||||
|
||||
```
|
||||
Байт 0 (low): OOOOOOOO (биты [7:0] смещения)
|
||||
Байт 1 (high): OOOOLLLL O = биты [11:8] смещения, L = длина − 3
|
||||
|
||||
offset = low | ((high & 0xF0) << 4) // Диапазон: 0–4095
|
||||
length = (high & 0x0F) + 3 // Диапазон: 3–18
|
||||
```
|
||||
|
||||
## 3.3. LZSS с адаптивным кодированием Хаффмана (метод 0x80)
|
||||
|
||||
Расширенный вариант LZSS, где литералы и длины совпадений кодируются с помощью адаптивного дерева Хаффмана.
|
||||
|
||||
### Параметры
|
||||
|
||||
| Параметр | Значение |
|
||||
| -------------------------------- | ------------------------------ |
|
||||
| Размер кольцевого буфера | 4096 байт |
|
||||
| Начальная позиция записи | **4036** (0xFC4) |
|
||||
| Начальное заполнение | 0x20 (пробел) |
|
||||
| Количество листовых узлов дерева | 314 |
|
||||
| Символы литералов | 0–255 (байты) |
|
||||
| Символы длин | 256–313 (длина = символ − 253) |
|
||||
| Начальная длина | 3 (при символе 256) |
|
||||
| Максимальная длина | 60 (при символе 313) |
|
||||
|
||||
### Дерево Хаффмана
|
||||
|
||||
Дерево строится как **адаптивное** (dynamic, self-adjusting):
|
||||
|
||||
- **627 узлов**: 314 листовых + 313 внутренних.
|
||||
- Все листья изначально имеют **вес 1**.
|
||||
- Корень дерева — узел с индексом 0 (в массиве `parent`).
|
||||
- После декодирования каждого символа дерево **обновляется** (функция `sub_1001B0AE`): вес узла инкрементируется, и при нарушении порядка узлы **переставляются** для поддержания свойства.
|
||||
- При достижении суммарного веса **0x8000 (32768)** — все веса **делятся на 2** (с округлением вверх) и дерево полностью перестраивается.
|
||||
|
||||
### Кодирование позиции
|
||||
|
||||
Позиция в кольцевом буфере кодируется с помощью **d-кода** (таблица дистанций):
|
||||
|
||||
- 8 бит позиции ищутся в таблице `d_code[256]`, определяя базовое значение и количество дополнительных битов.
|
||||
- Из потока считываются дополнительные биты, которые объединяются с базовым значением.
|
||||
- Финальная позиция: `pos = (ring_pos − 1 − decoded_position) & 0xFFF`
|
||||
|
||||
**Таблицы инициализации** (d-коды):
|
||||
|
||||
```
|
||||
Таблица базовых значений — byte_100371D0[6]:
|
||||
{ 0x01, 0x03, 0x08, 0x0C, 0x18, 0x10 }
|
||||
|
||||
Таблица дополнительных битов — byte_100371D6[6]:
|
||||
{ 0x20, 0x30, 0x40, 0x30, 0x30, 0x10 }
|
||||
```
|
||||
|
||||
### Алгоритм декомпрессии (высокоуровневый)
|
||||
|
||||
```
|
||||
Инициализация:
|
||||
ring_buffer[0..4095] = 0x20
|
||||
ring_pos = 4036
|
||||
Инициализировать дерево Хаффмана (314 листьев, все веса = 1)
|
||||
Инициализировать таблицы d-кодов
|
||||
|
||||
Цикл:
|
||||
1. Декодировать символ из потока по дереву Хаффмана:
|
||||
- Начать с корня
|
||||
- Читать биты, спускаться по дереву (0 = левый, 1 = правый)
|
||||
- Пока не достигнут лист → символ = лист − 627
|
||||
|
||||
2. Обновить дерево Хаффмана для декодированного символа
|
||||
|
||||
3. Если символ < 256 (литерал):
|
||||
- ring_buffer[ring_pos] = символ
|
||||
- ring_pos = (ring_pos + 1) & 0xFFF
|
||||
- Записать символ в выходной буфер
|
||||
|
||||
4. Если символ >= 256 (ссылка):
|
||||
- length = символ − 253
|
||||
- Декодировать позицию через d-код:
|
||||
a) Прочитать 8 бит из потока
|
||||
b) Найти d-код и дополнительные биты по таблице
|
||||
c) Прочитать дополнительные биты
|
||||
d) position = (ring_pos − 1 − full_position) & 0xFFF
|
||||
- Скопировать length байт из ring_buffer[position...]
|
||||
|
||||
5. Если выходной буфер заполнен → завершить
|
||||
```
|
||||
|
||||
## 3.4. XOR + LZSS (методы 0x60 и 0xA0)
|
||||
|
||||
Комбинированный метод: сначала XOR-дешифровка, затем LZSS-декомпрессия.
|
||||
|
||||
### Алгоритм
|
||||
|
||||
1. Выделить временный буфер размером `compressed_size` (поле из записи, смещение 28).
|
||||
2. Дешифровать сжатые данные XOR-шифром (раздел 3.1) с ключом из записи во временный буфер.
|
||||
3. Применить LZSS-декомпрессию (простую или с Хаффманом, в зависимости от конкретного метода) из временного буфера в выходной.
|
||||
4. Освободить временный буфер.
|
||||
|
||||
- **0x60** — XOR + простой LZSS (раздел 3.2)
|
||||
- **0xA0** — XOR + LZSS с Хаффманом (раздел 3.3)
|
||||
|
||||
### Начальное состояние XOR для данных
|
||||
|
||||
При комбинированном методе seed берётся из поля `sort_to_original` по смещению 18 записи (16-битный, см. разделы 2.4 и 3.1). Ключ обрабатывается как 16-битный: `lo = seed & 0xFF`, `hi = (seed >> 8) & 0xFF`.
|
||||
|
||||
## 3.5. Deflate (метод 0x100)
|
||||
|
||||
Полноценная реализация алгоритма **Deflate** (RFC 1951) с блочной структурой.
|
||||
|
||||
### Общая структура
|
||||
|
||||
Данные состоят из последовательности блоков. Каждый блок начинается с:
|
||||
|
||||
- **1 бит** — `is_final`: признак последнего блока
|
||||
- **2 бита** — `block_type`: тип блока
|
||||
|
||||
### Типы блоков
|
||||
|
||||
| block_type | Описание | Функция |
|
||||
| ---------- | --------------------------- | ---------------- |
|
||||
| 0 | Без сжатия (stored) | `sub_1001A750` |
|
||||
| 1 | Фиксированные коды Хаффмана | `sub_1001A8C0` |
|
||||
| 2 | Динамические коды Хаффмана | `sub_1001AA30` |
|
||||
| 3 | Зарезервировано (ошибка) | Возвращает код 2 |
|
||||
|
||||
### Блок типа 0 (stored)
|
||||
|
||||
1. Отбросить оставшиеся биты до границы байта (выравнивание).
|
||||
2. Прочитать 16 бит — `LEN` (длина блока).
|
||||
3. Прочитать 16 бит — `NLEN` (дополнение длины, `NLEN == ~LEN & 0xFFFF`).
|
||||
4. Проверить: `LEN == (uint16)(~NLEN)`. При несовпадении — ошибка.
|
||||
5. Скопировать `LEN` байт из входного потока в выходной.
|
||||
|
||||
Декомпрессор использует внутренний буфер размером **32768 байт** (0x8000). При заполнении — промежуточная запись результата.
|
||||
|
||||
### Блок типа 1 (фиксированные коды)
|
||||
|
||||
Стандартные коды Deflate:
|
||||
|
||||
- Литералы/длины: 288 кодов
|
||||
- 0–143: 8-битные коды
|
||||
- 144–255: 9-битные коды
|
||||
- 256–279: 7-битные коды
|
||||
- 280–287: 8-битные коды
|
||||
- Дистанции: 30 кодов, все 5-битные
|
||||
|
||||
Используются предопределённые таблицы длин и дистанций (`unk_100370AC`, `unk_1003712C` и соответствующие экстра-биты).
|
||||
|
||||
### Блок типа 2 (динамические коды)
|
||||
|
||||
1. Прочитать 5 бит → `HLIT` (количество литералов/длин − 257). Диапазон: 257–286.
|
||||
2. Прочитать 5 бит → `HDIST` (количество дистанций − 1). Диапазон: 1–30.
|
||||
3. Прочитать 4 бита → `HCLEN` (количество кодов длин − 4). Диапазон: 4–19.
|
||||
4. Прочитать `HCLEN` × 3 бит — длины кодов для алфавита длин.
|
||||
5. Построить дерево Хаффмана для алфавита длин (19 символов).
|
||||
6. С помощью этого дерева декодировать длины кодов для литералов/длин и дистанций.
|
||||
7. Построить два дерева Хаффмана: для литералов/длин и для дистанций.
|
||||
8. Декодировать данные.
|
||||
|
||||
**Порядок кодов длин** (стандартный Deflate):
|
||||
|
||||
```
|
||||
{ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }
|
||||
```
|
||||
|
||||
Хранится в `dword_10037060`.
|
||||
|
||||
### Валидации
|
||||
|
||||
- `HLIT + 257 <= 286` (max 0x11E)
|
||||
- `HDIST + 1 <= 30` (max 0x1E)
|
||||
- При нарушении — возвращается ошибка 1.
|
||||
|
||||
## 3.6. Метод 0x00 (без сжатия)
|
||||
|
||||
Данные копируются «как есть» напрямую из файла. Вызывается через указатель на функцию `dword_1003A1B8` (фактически `memcpy` или аналог).
|
||||
|
||||
---
|
||||
|
||||
# Часть 4. Внутренние структуры в памяти
|
||||
|
||||
## 4.1. Внутренняя структура NRes-архива (opened, 0x68 байт = 104)
|
||||
|
||||
```c
|
||||
struct NResArchive { // Размер: 0x68 (104 байта)
|
||||
void* vtable; // +0: Указатель на таблицу виртуальных методов
|
||||
int32_t entry_count; // +4: Количество записей
|
||||
void* mapped_base; // +8: Базовый адрес mapped view
|
||||
void* directory_ptr; // +12: Указатель на каталог записей в памяти
|
||||
char* filename; // +16: Путь к файлу (_strdup)
|
||||
int32_t ref_count; // +20: Счётчик ссылок
|
||||
uint32_t last_release_time; // +24: timeGetTime() при последнем Release
|
||||
// +28..+91: Для raw-режима — встроенная запись (единственный File entry)
|
||||
NResArchive* next; // +92: Следующий архив в связном списке
|
||||
uint8_t is_writable; // +100: Файл открыт для записи
|
||||
uint8_t is_cacheable; // +101: Не выгружать при refcount = 0
|
||||
};
|
||||
```
|
||||
|
||||
## 4.2. Внутренняя структура RsLi-архива (56 + 64 × N байт)
|
||||
|
||||
```c
|
||||
struct RsLibHeader { // 56 байт (14 DWORD)
|
||||
uint32_t magic; // +0: 'RsLi' (0x694C7352)
|
||||
int32_t entry_count; // +4: Количество записей
|
||||
uint32_t media_offset; // +8: Смещение медиа-оверлея
|
||||
uint32_t reserved_0C; // +12: 0
|
||||
HANDLE file_handle_2; // +16: -1 (дополнительный хэндл)
|
||||
uint32_t reserved_14; // +20: 0
|
||||
uint32_t reserved_18; // +24: —
|
||||
uint32_t reserved_1C; // +28: 0
|
||||
HANDLE mapping_handle_2; // +32: -1
|
||||
uint32_t reserved_24; // +36: 0
|
||||
uint32_t flag_28; // +40: (flags >> 7) & 1
|
||||
HANDLE file_handle; // +44: Хэндл файла
|
||||
HANDLE mapping_handle; // +48: Хэндл файлового маппинга
|
||||
void* mapped_view; // +52: Указатель на mapped view
|
||||
};
|
||||
// Далее следуют entry_count записей по 64 байта каждая
|
||||
```
|
||||
|
||||
### Внутренняя запись RsLi (64 байта)
|
||||
|
||||
```c
|
||||
struct RsLibEntry { // 64 байта (16 DWORD)
|
||||
char name[16]; // +0: Имя (12 из файла + 4 нуля)
|
||||
int32_t flags; // +16: Флаги (sign-extended из int16)
|
||||
int32_t sort_index; // +20: sort_to_original[i] (таблица индексов / XOR‑ключ)
|
||||
uint32_t uncompressed_size; // +24: Размер несжатых данных (из поля 20 записи)
|
||||
void* data_ptr; // +28: Указатель на данные в mapped view
|
||||
uint32_t compressed_size; // +32: Размер сжатых данных (из поля 28 записи)
|
||||
uint32_t reserved_24; // +36: 0
|
||||
uint32_t reserved_28; // +40: 0
|
||||
uint32_t reserved_2C; // +44: 0
|
||||
void* loaded_data; // +48: Указатель на декомпрессированные данные
|
||||
// +52..+63: дополнительные поля
|
||||
};
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
# Часть 5. Экспортируемые API-функции
|
||||
|
||||
## 5.1. NRes API
|
||||
|
||||
| Функция | Описание |
|
||||
| ------------------------------ | ------------------------------------------------------------------------- |
|
||||
| `niOpenResFile(path)` | Открыть NRes-архив (только чтение), эквивалент `niOpenResFileEx(path, 0)` |
|
||||
| `niOpenResFileEx(path, flags)` | Открыть NRes-архив с флагами |
|
||||
| `niOpenResInMem(ptr, size)` | Открыть NRes-архив из памяти |
|
||||
| `niCreateResFile(path)` | Создать/открыть NRes-архив для записи |
|
||||
|
||||
## 5.2. RsLi API
|
||||
|
||||
| Функция | Описание |
|
||||
| ------------------------------- | -------------------------------------------------------- |
|
||||
| `rsOpenLib(path, flags)` | Открыть RsLi-библиотеку |
|
||||
| `rsCloseLib(lib)` | Закрыть библиотеку |
|
||||
| `rsLibNum(lib)` | Получить количество записей |
|
||||
| `rsFind(lib, name)` | Найти запись по имени (→ индекс или −1) |
|
||||
| `rsLoad(lib, index)` | Загрузить и декомпрессировать ресурс |
|
||||
| `rsLoadFast(lib, index, flags)` | Быстрая загрузка (без декомпрессии если возможно) |
|
||||
| `rsLoadPacked(lib, index)` | Загрузить в «упакованном» виде (отложенная декомпрессия) |
|
||||
| `rsLoadByName(lib, name)` | `rsFind` + `rsLoad` |
|
||||
| `rsGetInfo(lib, index, out)` | Получить имя и размер ресурса |
|
||||
| `rsGetPackMethod(lib, index)` | Получить метод сжатия (`flags & 0x1C0`) |
|
||||
| `ngiUnpack(packed)` | Декомпрессировать ранее загруженный упакованный ресурс |
|
||||
| `ngiAlloc(size)` | Выделить память (с обработкой ошибок) |
|
||||
| `ngiFree(ptr)` | Освободить память |
|
||||
| `ngiGetMemSize(ptr)` | Получить размер выделенного блока |
|
||||
|
||||
---
|
||||
|
||||
# Часть 6. Контрольные заметки для реализации
|
||||
|
||||
## 6.1. Кодировки и регистр
|
||||
|
||||
- **NRes**: имена хранятся **как есть** (case-insensitive при поиске через `_strcmpi`).
|
||||
- **RsLi**: имена хранятся в **верхнем регистре**. Перед поиском запрос приводится к верхнему регистру (`_strupr`). Сравнение — через `strcmp` (case-sensitive для уже uppercase строк).
|
||||
|
||||
## 6.2. Порядок байт
|
||||
|
||||
Все значения хранятся в **little-endian** порядке (платформа x86/Win32).
|
||||
|
||||
## 6.3. Выравнивание
|
||||
|
||||
- **NRes**: данные каждого ресурса выровнены по границе **8 байт** (0-padding между файлами).
|
||||
- **RsLi**: выравнивание данных не описано в коде (данные идут подряд).
|
||||
|
||||
## 6.4. Размер записей на диске
|
||||
|
||||
- **NRes**: каталог — **64 байта** на запись, расположен в конце файла.
|
||||
- **RsLi**: таблица — **32 байта** на запись (зашифрованная), расположена в начале файла (сразу после 32-байтного заголовка).
|
||||
|
||||
## 6.5. Кэширование и memory mapping
|
||||
|
||||
Оба формата используют Windows Memory-Mapped Files (`CreateFileMapping` + `MapViewOfFile`). NRes-архивы организованы в глобальный **связный список** (`dword_1003A66C`) со счётчиком ссылок и таймером неактивности (10 секунд = 0x2710 мс). При refcount == 0 и истечении таймера архив автоматически выгружается (если не установлен флаг `is_cacheable`).
|
||||
|
||||
## 6.6. Размер seed XOR
|
||||
|
||||
- **Заголовок RsLi**: seed — **4 байта** (DWORD) по смещению 20, но используются только младшие 2 байта (`lo = byte[0]`, `hi = byte[1]`).
|
||||
- **Запись RsLi**: sort_to_original[i] — **2 байта** (int16) по смещению 18 записи.
|
||||
- **Данные при комбинированном XOR+LZSS**: seed — **4 байта** (DWORD) из поля по смещению 20 записи, но опять используются только 2 байта.
|
||||
|
||||
## 6.7. Эмпирическая проверка на данных игры
|
||||
|
||||
- Найдено архивов по сигнатуре: **122** (`NRes`: 120, `RsLi`: 2).
|
||||
- Выполнен полный roundtrip `unpack -> pack -> byte-compare`: **122/122** архивов совпали побайтно.
|
||||
- Для `RsLi` в проверенном наборе встретились методы: `0x040` и `0x100`.
|
||||
|
||||
Подтверждённые нюансы:
|
||||
|
||||
- Для LZSS (метод `0x040`) рабочая раскладка нибблов в ссылке: `OOOO LLLL`, а не `LLLL OOOO`.
|
||||
- Для Deflate (метод `0x100`) возможен случай `packed_size == фактический_конец + 1` на последней записи файла.
|
||||
@@ -1,16 +0,0 @@
|
||||
[package]
|
||||
name = "libnres"
|
||||
version = "0.1.4"
|
||||
description = "Library for NRes files"
|
||||
authors = ["Valentin Popov <valentin@popov.link>"]
|
||||
homepage = "https://git.popov.link/valentineus/fparkan"
|
||||
repository = "https://git.popov.link/valentineus/fparkan.git"
|
||||
license = "GPL-2.0"
|
||||
edition = "2021"
|
||||
keywords = ["gamedev", "library", "nres"]
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4"
|
||||
log = "0.4"
|
||||
miette = "5.6"
|
||||
thiserror = "1.0"
|
||||
@@ -1,25 +0,0 @@
|
||||
# Library for NRes files (Deprecated)
|
||||
|
||||
Library for viewing and retrieving game resources of the game **"Parkan: Iron Strategy"**.
|
||||
All versions of the game are supported: Demo, IS, IS: Part 1, IS: Part 2.
|
||||
Supports files with `lib`, `trf`, `rlb` extensions.
|
||||
|
||||
The files `gamefont.rlb` and `sprites.lib` are not supported.
|
||||
These files have an unknown signature.
|
||||
|
||||
## Example
|
||||
|
||||
Example of extracting game resources:
|
||||
|
||||
```rust
|
||||
fn main() {
|
||||
let file = std::fs::File::open("./voices.lib").unwrap();
|
||||
// Extracting the list of files
|
||||
let list = libnres::reader::get_list(&file).unwrap();
|
||||
|
||||
for element in list {
|
||||
// Extracting the contents of the file
|
||||
let data = libnres::reader::get_file(&file, &element).unwrap();
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -1,33 +0,0 @@
|
||||
use crate::error::ConverterError;
|
||||
|
||||
/// Method for converting u32 to u64.
|
||||
pub fn u32_to_u64(value: u32) -> Result<u64, ConverterError> {
|
||||
match u64::try_from(value) {
|
||||
Err(error) => Err(ConverterError::Infallible(error)),
|
||||
Ok(result) => Ok(result),
|
||||
}
|
||||
}
|
||||
|
||||
/// Method for converting u32 to usize.
|
||||
pub fn u32_to_usize(value: u32) -> Result<usize, ConverterError> {
|
||||
match usize::try_from(value) {
|
||||
Err(error) => Err(ConverterError::TryFromIntError(error)),
|
||||
Ok(result) => Ok(result),
|
||||
}
|
||||
}
|
||||
|
||||
/// Method for converting u64 to u32.
|
||||
pub fn u64_to_u32(value: u64) -> Result<u32, ConverterError> {
|
||||
match u32::try_from(value) {
|
||||
Err(error) => Err(ConverterError::TryFromIntError(error)),
|
||||
Ok(result) => Ok(result),
|
||||
}
|
||||
}
|
||||
|
||||
/// Method for converting usize to u32.
|
||||
pub fn usize_to_u32(value: usize) -> Result<u32, ConverterError> {
|
||||
match u32::try_from(value) {
|
||||
Err(error) => Err(ConverterError::TryFromIntError(error)),
|
||||
Ok(result) => Ok(result),
|
||||
}
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
extern crate miette;
|
||||
extern crate thiserror;
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Diagnostic, Debug)]
|
||||
pub enum ConverterError {
|
||||
#[error("error converting an value")]
|
||||
#[diagnostic(code(libnres::infallible))]
|
||||
Infallible(#[from] std::convert::Infallible),
|
||||
|
||||
#[error("error converting an value")]
|
||||
#[diagnostic(code(libnres::try_from_int_error))]
|
||||
TryFromIntError(#[from] std::num::TryFromIntError),
|
||||
}
|
||||
|
||||
/// Errors returned by the NRes reader functions.
#[derive(Error, Diagnostic, Debug)]
pub enum ReaderError {
    /// A numeric conversion failed while computing offsets or sizes.
    #[error(transparent)]
    #[diagnostic(code(libnres::convert_error))]
    ConvertValue(#[from] ConverterError),

    /// The magic values in the first 8 bytes did not match an NRes file.
    #[error("incorrect header format")]
    #[diagnostic(code(libnres::list_type_error))]
    IncorrectHeader,

    /// The size recorded in the header differs from the real file size.
    #[error("incorrect file size (expected {expected:?} bytes, received {received:?} bytes)")]
    #[diagnostic(code(libnres::file_size_error))]
    IncorrectSizeFile { expected: u32, received: u32 },

    /// The directory length is not a multiple of the 64-byte record size.
    #[error(
        "incorrect size of the file list (not a multiple of {expected:?}, received {received:?})"
    )]
    #[diagnostic(code(libnres::list_size_error))]
    IncorrectSizeList { expected: u32, received: u32 },

    /// An underlying I/O operation (open, seek, read) failed.
    #[error("resource file reading error")]
    #[diagnostic(code(libnres::io_error))]
    ReadFile(#[from] std::io::Error),

    /// The file is smaller than the 16-byte header.
    #[error("file is too small (must be at least {expected:?} bytes, received {received:?} byte)")]
    #[diagnostic(code(libnres::file_size_error))]
    SmallFile { expected: u32, received: u32 },
}
|
||||
@@ -1,24 +0,0 @@
|
||||
/// First magic value of an NRes file: the ASCII bytes "NRes" read as a
/// little-endian u32 (equal to the former literal 1936020046).
pub const FILE_TYPE_1: u32 = u32::from_le_bytes(*b"NRes");
/// Second constant value of the NRes file header (bytes 4..8)
pub const FILE_TYPE_2: u32 = 256;
/// Size of one directory record (in bytes)
pub const LIST_ELEMENT_SIZE: u32 = 64;
/// Minimum allowed file size — the 16-byte header (in bytes)
pub const MINIMUM_FILE_SIZE: u32 = 16;

/// Process-wide debug flag, read via `get_debug` and toggled via `set_debug`.
static DEBUG: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
|
||||
|
||||
mod converter;
|
||||
mod error;
|
||||
pub mod reader;
|
||||
|
||||
/// Get debug status value
|
||||
pub fn get_debug() -> bool {
|
||||
DEBUG.load(std::sync::atomic::Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Change debug status value
|
||||
pub fn set_debug(value: bool) {
|
||||
DEBUG.store(value, std::sync::atomic::Ordering::Relaxed)
|
||||
}
|
||||
@@ -1,227 +0,0 @@
|
||||
use std::io::{Read, Seek};
|
||||
|
||||
use byteorder::ByteOrder;
|
||||
|
||||
use crate::error::ReaderError;
|
||||
use crate::{converter, FILE_TYPE_1, FILE_TYPE_2, LIST_ELEMENT_SIZE, MINIMUM_FILE_SIZE};
|
||||
|
||||
/// One directory entry of an NRes archive.
#[derive(Debug)]
pub struct ListElement {
    /// Unknown record field (bytes 4..8 of the on-disk entry)
    _unknown0: i32,
    /// Unknown record field (bytes 8..12)
    _unknown1: i32,
    /// Unknown record field (bytes 16..20)
    _unknown2: i32,
    /// File extension (up to 4 ASCII characters, NUL padding stripped)
    pub extension: String,
    /// Identifier or sequence number of the entry
    pub index: u32,
    /// Base file name (NUL padding stripped)
    pub name: String,
    /// Byte offset of the file data inside the archive
    pub position: u32,
    /// Size of the file data (in bytes)
    pub size: u32,
}

impl ListElement {
    /// Builds the full file name as `name.extension`.
    pub fn get_filename(&self) -> String {
        let mut filename = String::with_capacity(self.name.len() + self.extension.len() + 1);
        filename.push_str(&self.name);
        filename.push('.');
        filename.push_str(&self.extension);
        filename
    }
}
|
||||
|
||||
/// Header of an NRes file, decoded from its first 16 bytes.
#[derive(Debug)]
pub struct FileHeader {
    /// Total file size in bytes (header bytes 12..16)
    size: u32,
    /// Number of packed files (header bytes 8..12)
    total: u32,
    /// First magic value, must equal `FILE_TYPE_1` (header bytes 0..4)
    type1: u32,
    /// Second magic value, must equal `FILE_TYPE_2` (header bytes 4..8)
    type2: u32,
}
|
||||
|
||||
/// Get a packed file data
|
||||
pub fn get_file(file: &std::fs::File, element: &ListElement) -> Result<Vec<u8>, ReaderError> {
|
||||
let size = get_file_size(file)?;
|
||||
check_file_size(size)?;
|
||||
|
||||
let header = get_file_header(file)?;
|
||||
check_file_header(&header, size)?;
|
||||
|
||||
let data = get_element_data(file, element)?;
|
||||
Ok(data)
|
||||
}
|
||||
|
||||
/// Get a list of packed files
|
||||
pub fn get_list(file: &std::fs::File) -> Result<Vec<ListElement>, ReaderError> {
|
||||
let mut list: Vec<ListElement> = Vec::new();
|
||||
|
||||
let size = get_file_size(file)?;
|
||||
check_file_size(size)?;
|
||||
|
||||
let header = get_file_header(file)?;
|
||||
check_file_header(&header, size)?;
|
||||
|
||||
get_file_list(file, &header, &mut list)?;
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
fn check_file_header(header: &FileHeader, size: u32) -> Result<(), ReaderError> {
|
||||
if header.type1 != FILE_TYPE_1 || header.type2 != FILE_TYPE_2 {
|
||||
return Err(ReaderError::IncorrectHeader);
|
||||
}
|
||||
|
||||
if header.size != size {
|
||||
return Err(ReaderError::IncorrectSizeFile {
|
||||
expected: size,
|
||||
received: header.size,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_file_size(size: u32) -> Result<(), ReaderError> {
|
||||
if size < MINIMUM_FILE_SIZE {
|
||||
return Err(ReaderError::SmallFile {
|
||||
expected: MINIMUM_FILE_SIZE,
|
||||
received: size,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_element_data(file: &std::fs::File, element: &ListElement) -> Result<Vec<u8>, ReaderError> {
|
||||
let position = converter::u32_to_u64(element.position)?;
|
||||
let size = converter::u32_to_usize(element.size)?;
|
||||
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
let mut buffer = vec![0u8; size];
|
||||
|
||||
if let Err(error) = reader.seek(std::io::SeekFrom::Start(position)) {
|
||||
return Err(ReaderError::ReadFile(error));
|
||||
};
|
||||
|
||||
if let Err(error) = reader.read_exact(&mut buffer) {
|
||||
return Err(ReaderError::ReadFile(error));
|
||||
};
|
||||
|
||||
Ok(buffer)
|
||||
}
|
||||
|
||||
fn get_element_position(index: u32) -> Result<(usize, usize), ReaderError> {
|
||||
let from = converter::u32_to_usize(index * LIST_ELEMENT_SIZE)?;
|
||||
let to = converter::u32_to_usize((index * LIST_ELEMENT_SIZE) + LIST_ELEMENT_SIZE)?;
|
||||
Ok((from, to))
|
||||
}
|
||||
|
||||
fn get_file_header(file: &std::fs::File) -> Result<FileHeader, ReaderError> {
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
let mut buffer = vec![0u8; MINIMUM_FILE_SIZE as usize];
|
||||
|
||||
if let Err(error) = reader.seek(std::io::SeekFrom::Start(0)) {
|
||||
return Err(ReaderError::ReadFile(error));
|
||||
};
|
||||
|
||||
if let Err(error) = reader.read_exact(&mut buffer) {
|
||||
return Err(ReaderError::ReadFile(error));
|
||||
};
|
||||
|
||||
let header = FileHeader {
|
||||
size: byteorder::LittleEndian::read_u32(&buffer[12..16]),
|
||||
total: byteorder::LittleEndian::read_u32(&buffer[8..12]),
|
||||
type1: byteorder::LittleEndian::read_u32(&buffer[0..4]),
|
||||
type2: byteorder::LittleEndian::read_u32(&buffer[4..8]),
|
||||
};
|
||||
|
||||
buffer.clear();
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
fn get_file_list(
|
||||
file: &std::fs::File,
|
||||
header: &FileHeader,
|
||||
list: &mut Vec<ListElement>,
|
||||
) -> Result<(), ReaderError> {
|
||||
let (start_position, list_size) = get_list_position(header)?;
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
let mut buffer = vec![0u8; list_size];
|
||||
|
||||
if let Err(error) = reader.seek(std::io::SeekFrom::Start(start_position)) {
|
||||
return Err(ReaderError::ReadFile(error));
|
||||
};
|
||||
|
||||
if let Err(error) = reader.read_exact(&mut buffer) {
|
||||
return Err(ReaderError::ReadFile(error));
|
||||
}
|
||||
|
||||
let buffer_size = converter::usize_to_u32(buffer.len())?;
|
||||
|
||||
if buffer_size % LIST_ELEMENT_SIZE != 0 {
|
||||
return Err(ReaderError::IncorrectSizeList {
|
||||
expected: LIST_ELEMENT_SIZE,
|
||||
received: buffer_size,
|
||||
});
|
||||
}
|
||||
|
||||
for i in 0..(buffer_size / LIST_ELEMENT_SIZE) {
|
||||
let (from, to) = get_element_position(i)?;
|
||||
let chunk: &[u8] = &buffer[from..to];
|
||||
|
||||
let element = get_list_element(chunk)?;
|
||||
list.push(element);
|
||||
}
|
||||
|
||||
buffer.clear();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_file_size(file: &std::fs::File) -> Result<u32, ReaderError> {
|
||||
let metadata = match file.metadata() {
|
||||
Err(error) => return Err(ReaderError::ReadFile(error)),
|
||||
Ok(value) => value,
|
||||
};
|
||||
|
||||
let result = converter::u64_to_u32(metadata.len())?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn get_list_element(buffer: &[u8]) -> Result<ListElement, ReaderError> {
|
||||
let index = byteorder::LittleEndian::read_u32(&buffer[60..64]);
|
||||
let position = byteorder::LittleEndian::read_u32(&buffer[56..60]);
|
||||
let size = byteorder::LittleEndian::read_u32(&buffer[12..16]);
|
||||
let unknown0 = byteorder::LittleEndian::read_i32(&buffer[4..8]);
|
||||
let unknown1 = byteorder::LittleEndian::read_i32(&buffer[8..12]);
|
||||
let unknown2 = byteorder::LittleEndian::read_i32(&buffer[16..20]);
|
||||
|
||||
let extension = String::from_utf8_lossy(&buffer[0..4])
|
||||
.trim_matches(char::from(0))
|
||||
.to_string();
|
||||
|
||||
let name = String::from_utf8_lossy(&buffer[20..56])
|
||||
.trim_matches(char::from(0))
|
||||
.to_string();
|
||||
|
||||
Ok(ListElement {
|
||||
_unknown0: unknown0,
|
||||
_unknown1: unknown1,
|
||||
_unknown2: unknown2,
|
||||
extension,
|
||||
index,
|
||||
name,
|
||||
position,
|
||||
size,
|
||||
})
|
||||
}
|
||||
|
||||
fn get_list_position(header: &FileHeader) -> Result<(u64, usize), ReaderError> {
|
||||
let position = converter::u32_to_u64(header.size - (header.total * LIST_ELEMENT_SIZE))?;
|
||||
let size = converter::u32_to_usize(header.total * LIST_ELEMENT_SIZE)?;
|
||||
Ok((position, size))
|
||||
}
|
||||
33
mkdocs.yml
Normal file
33
mkdocs.yml
Normal file
@@ -0,0 +1,33 @@
|
||||
# Project information
|
||||
site_name: FParkan
|
||||
site_url: https://fparkan.popov.link/
|
||||
site_author: Valentin Popov
|
||||
site_description: >-
|
||||
Utilities and tools for the game “Parkan: Iron Strategy”.
|
||||
|
||||
# Repository
|
||||
repo_name: valentineus/fparkan
|
||||
repo_url: https://github.com/valentineus/fparkan
|
||||
|
||||
# Copyright
|
||||
copyright: Copyright © 2023 — 2024 Valentin Popov
|
||||
|
||||
# Configuration
|
||||
theme:
|
||||
name: material
|
||||
language: ru
|
||||
palette:
|
||||
scheme: slate
|
||||
|
||||
# Navigation
|
||||
nav:
|
||||
- Home: index.md
|
||||
- Specs:
|
||||
- NRes / RsLi: specs/nres.md
|
||||
- Форматы 3D‑ресурсов: specs/msh.md
|
||||
|
||||
# Additional configuration
|
||||
extra:
|
||||
social:
|
||||
- icon: fontawesome/brands/github
|
||||
link: https://github.com/valentineus/fparkan
|
||||
@@ -1,20 +0,0 @@
|
||||
[package]
|
||||
name = "nres-cli"
|
||||
version = "0.2.3"
|
||||
description = "Console tool for NRes files"
|
||||
authors = ["Valentin Popov <valentin@popov.link>"]
|
||||
homepage = "https://git.popov.link/valentineus/fparkan"
|
||||
repository = "https://git.popov.link/valentineus/fparkan.git"
|
||||
license = "GPL-2.0"
|
||||
edition = "2021"
|
||||
keywords = ["cli", "gamedev", "nres"]
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4"
|
||||
clap = { version = "4.2", features = ["derive"] }
|
||||
console = "0.15"
|
||||
dialoguer = { version = "0.10", features = ["completion"] }
|
||||
indicatif = "0.17"
|
||||
libnres = { version = "0.1", path = "../libnres" }
|
||||
miette = { version = "5.6", features = ["fancy"] }
|
||||
tempdir = "0.3"
|
||||
@@ -1,6 +0,0 @@
|
||||
# Console tool for NRes files (Deprecated)
|
||||
|
||||
## Commands
|
||||
|
||||
- `extract` - Extract game resources from a "NRes" file.
|
||||
- `ls` - Get a list of files in a "NRes" file.
|
||||
@@ -1,198 +0,0 @@
|
||||
extern crate core;
|
||||
extern crate libnres;
|
||||
|
||||
use std::io::Write;
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use miette::{IntoDiagnostic, Result};
|
||||
|
||||
// Top-level CLI definition; the clap derive generates the parser.
// NOTE(review): plain `//` comments are used here on purpose — a `///`
// doc comment would be picked up by clap as help text and change the
// program's output.
#[derive(Parser, Debug)]
#[command(name = "NRes CLI")]
#[command(about, author, version, long_about = None)]
struct Cli {
    // The selected subcommand (see `Commands`).
    #[command(subcommand)]
    command: Commands,
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum Commands {
|
||||
/// Check if the "NRes" file can be extract
|
||||
Check {
|
||||
/// "NRes" file
|
||||
file: String,
|
||||
},
|
||||
/// Print debugging information on the "NRes" file
|
||||
#[command(arg_required_else_help = true)]
|
||||
Debug {
|
||||
/// "NRes" file
|
||||
file: String,
|
||||
/// Filter results by file name
|
||||
#[arg(long)]
|
||||
name: Option<String>,
|
||||
},
|
||||
/// Extract files or a file from the "NRes" file
|
||||
#[command(arg_required_else_help = true)]
|
||||
Extract {
|
||||
/// "NRes" file
|
||||
file: String,
|
||||
/// Overwrite files
|
||||
#[arg(short, long, default_value_t = false, value_name = "TRUE|FALSE")]
|
||||
force: bool,
|
||||
/// Outbound directory
|
||||
#[arg(short, long, value_name = "DIR")]
|
||||
out: String,
|
||||
},
|
||||
/// Print a list of files in the "NRes" file
|
||||
#[command(arg_required_else_help = true)]
|
||||
Ls {
|
||||
/// "NRes" file
|
||||
file: String,
|
||||
},
|
||||
}
|
||||
|
||||
/// Entry point: parses the command line with clap and dispatches to the
/// handler of the selected subcommand. Errors surface as `miette`
/// diagnostics via the `Result` return.
pub fn main() -> Result<()> {
    let stdout = console::Term::stdout();
    let cli = Cli::parse();

    match cli.command {
        Commands::Check { file } => command_check(stdout, file)?,
        Commands::Debug { file, name } => command_debug(stdout, file, name)?,
        Commands::Extract { file, force, out } => command_extract(stdout, file, out, force)?,
        Commands::Ls { file } => command_ls(stdout, file)?,
    }

    Ok(())
}
|
||||
|
||||
fn command_check(_stdout: console::Term, file: String) -> Result<()> {
|
||||
let file = std::fs::File::open(file).into_diagnostic()?;
|
||||
let list = libnres::reader::get_list(&file).into_diagnostic()?;
|
||||
let tmp = tempdir::TempDir::new("nres").into_diagnostic()?;
|
||||
let bar = indicatif::ProgressBar::new(list.len() as u64);
|
||||
|
||||
bar.set_style(get_bar_style()?);
|
||||
|
||||
for element in list {
|
||||
bar.set_message(element.get_filename());
|
||||
|
||||
let path = tmp.path().join(element.get_filename());
|
||||
let mut output = std::fs::File::create(path).into_diagnostic()?;
|
||||
let mut buffer = libnres::reader::get_file(&file, &element).into_diagnostic()?;
|
||||
|
||||
output.write_all(&buffer).into_diagnostic()?;
|
||||
buffer.clear();
|
||||
bar.inc(1);
|
||||
}
|
||||
|
||||
bar.finish();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn command_debug(stdout: console::Term, file: String, name: Option<String>) -> Result<()> {
|
||||
let file = std::fs::File::open(file).into_diagnostic()?;
|
||||
let mut list = libnres::reader::get_list(&file).into_diagnostic()?;
|
||||
|
||||
let mut total_files_size: u32 = 0;
|
||||
let mut total_files_gap: u32 = 0;
|
||||
let mut total_files: u32 = 0;
|
||||
|
||||
for (index, item) in list.iter().enumerate() {
|
||||
total_files_size += item.size;
|
||||
total_files += 1;
|
||||
let mut gap = 0;
|
||||
|
||||
if index > 1 {
|
||||
let previous_item = &list[index - 1];
|
||||
gap = item.position - (previous_item.position + previous_item.size);
|
||||
}
|
||||
|
||||
total_files_gap += gap;
|
||||
}
|
||||
|
||||
if let Some(name) = name {
|
||||
list.retain(|item| item.name.contains(&name));
|
||||
};
|
||||
|
||||
for (index, item) in list.iter().enumerate() {
|
||||
let mut gap = 0;
|
||||
|
||||
if index > 1 {
|
||||
let previous_item = &list[index - 1];
|
||||
gap = item.position - (previous_item.position + previous_item.size);
|
||||
}
|
||||
|
||||
let text = format!("Index: {};\nGap: {};\nItem: {:#?};\n", index, gap, item);
|
||||
stdout.write_line(&text).into_diagnostic()?;
|
||||
}
|
||||
|
||||
let text = format!(
|
||||
"Total files: {};\nTotal files gap: {} (bytes);\nTotal files size: {} (bytes);",
|
||||
total_files, total_files_gap, total_files_size
|
||||
);
|
||||
|
||||
stdout.write_line(&text).into_diagnostic()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn command_extract(_stdout: console::Term, file: String, out: String, force: bool) -> Result<()> {
|
||||
let file = std::fs::File::open(file).into_diagnostic()?;
|
||||
let list = libnres::reader::get_list(&file).into_diagnostic()?;
|
||||
let bar = indicatif::ProgressBar::new(list.len() as u64);
|
||||
|
||||
bar.set_style(get_bar_style()?);
|
||||
|
||||
for element in list {
|
||||
bar.set_message(element.get_filename());
|
||||
|
||||
let path = format!("{}/{}", out, element.get_filename());
|
||||
|
||||
if !force && is_exist_file(&path) {
|
||||
let message = format!("File \"{}\" exists. Overwrite it?", path);
|
||||
|
||||
if !dialoguer::Confirm::new()
|
||||
.with_prompt(message)
|
||||
.interact()
|
||||
.into_diagnostic()?
|
||||
{
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let mut output = std::fs::File::create(path).into_diagnostic()?;
|
||||
let mut buffer = libnres::reader::get_file(&file, &element).into_diagnostic()?;
|
||||
|
||||
output.write_all(&buffer).into_diagnostic()?;
|
||||
buffer.clear();
|
||||
bar.inc(1);
|
||||
}
|
||||
|
||||
bar.finish();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn command_ls(stdout: console::Term, file: String) -> Result<()> {
|
||||
let file = std::fs::File::open(file).into_diagnostic()?;
|
||||
let list = libnres::reader::get_list(&file).into_diagnostic()?;
|
||||
|
||||
for element in list {
|
||||
stdout.write_line(&element.name).into_diagnostic()?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Builds the progress-bar style shared by `check` and `extract`:
/// a 32-character bar, a position/length counter, and a message slot.
fn get_bar_style() -> Result<indicatif::ProgressStyle> {
    Ok(
        indicatif::ProgressStyle::with_template("[{bar:32}] {pos:>7}/{len:7} {msg}")
            .into_diagnostic()?
            .progress_chars("=>-"),
    )
}
|
||||
|
||||
/// Returns true when `path` exists on disk (file or directory).
///
/// Takes `&str` rather than `&String` (the idiomatic borrow); existing
/// call sites passing `&String` still compile via deref coercion.
fn is_exist_file(path: &str) -> bool {
    std::path::Path::new(path).exists()
}
|
||||
@@ -1,9 +0,0 @@
|
||||
[package]
|
||||
name = "packer"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
serde = { version = "1.0.160", features = ["derive"] }
|
||||
serde_json = "1.0.96"
|
||||
@@ -1,27 +0,0 @@
|
||||
# NRes Game Resource Packer
|
||||
|
||||
At the moment, this is a demonstration of the NRes game resource packing algorithm in action.
|
||||
It packs 100% of the NRes game resources for the game "Parkan: Iron Strategy".
|
||||
The hash sums of the resulting files match the original game files.
|
||||
|
||||
__Attention!__
|
||||
This is a test version of the utility. It overwrites the specified final file without asking.
|
||||
|
||||
## Building
|
||||
|
||||
To build the tools, you need to run the following command in the root directory:
|
||||
|
||||
```bash
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
## Running
|
||||
|
||||
You can run the utility with the following command:
|
||||
|
||||
```bash
|
||||
./target/release/packer /path/to/unpack /path/to/file.ex
|
||||
```
|
||||
|
||||
- `/path/to/unpack`: This is the directory with the resources unpacked by the [unpacker](../unpacker) utility.
|
||||
- `/path/to/file.ex`: This is the final file that will be created.
|
||||
@@ -1,175 +0,0 @@
|
||||
use std::env;
|
||||
use std::{
|
||||
fs::{self, File},
|
||||
io::{BufReader, Read},
|
||||
};
|
||||
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Directory entry as stored in the `index.json` written by the unpacker.
#[derive(Serialize, Deserialize, Debug)]
pub struct ImportListElement {
    /// File extension (written into bytes 0..4 of the on-disk record)
    pub extension: String,
    /// Identifier / sequence number; also the suffix of the unpacked file
    pub index: u32,
    /// Base file name
    pub name: String,
    /// Preserved unknown field (record bytes 4..8)
    pub unknown0: u32,
    /// Preserved unknown field (record bytes 8..12)
    pub unknown1: u32,
    /// Preserved unknown field (record bytes 16..20)
    pub unknown2: u32,
}
|
||||
|
||||
/// Fully-resolved directory entry, ready to be serialized into the
/// 64-byte on-disk record by `pack`.
#[derive(Debug)]
pub struct ListElement {
    /// File extension (record bytes 0..4)
    pub extension: String,
    /// Identifier / sequence number (record bytes 60..64)
    pub index: u32,
    /// Base file name (record bytes 20..56)
    pub name: String,
    /// Byte offset of the file data in the output archive (bytes 56..60)
    pub position: u32,
    /// Size of the file data in bytes (record bytes 12..16)
    pub size: u32,
    /// Preserved unknown field (record bytes 4..8)
    pub unknown0: u32,
    /// Preserved unknown field (record bytes 8..12)
    pub unknown1: u32,
    /// Preserved unknown field (record bytes 16..20)
    pub unknown2: u32,
}
|
||||
|
||||
fn main() {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
|
||||
let input = &args[1];
|
||||
let output = &args[2];
|
||||
|
||||
pack(String::from(input), String::from(output));
|
||||
}
|
||||
|
||||
fn pack(input: String, output: String) {
|
||||
// Загружаем индекс-файл
|
||||
let index_file = format!("{}/{}", input, "index.json");
|
||||
let data = fs::read_to_string(index_file).unwrap();
|
||||
let list: Vec<ImportListElement> = serde_json::from_str(&data).unwrap();
|
||||
|
||||
// Общий буфер хранения файлов
|
||||
let mut content_buffer: Vec<u8> = Vec::new();
|
||||
let mut list_buffer: Vec<u8> = Vec::new();
|
||||
|
||||
// Общее количество файлов
|
||||
let total_files: u32 = list.len() as u32;
|
||||
|
||||
for (index, item) in list.iter().enumerate() {
|
||||
// Открываем дескриптор файла
|
||||
let path = format!("{}/{}.{}", input, item.name, item.index);
|
||||
let file = File::open(path).unwrap();
|
||||
let metadata = file.metadata().unwrap();
|
||||
|
||||
// Считываем файл в буфер
|
||||
let mut reader = BufReader::new(file);
|
||||
let mut file_buffer: Vec<u8> = Vec::new();
|
||||
reader.read_to_end(&mut file_buffer).unwrap();
|
||||
|
||||
// Выравнивание буфера
|
||||
if index != 0 {
|
||||
while content_buffer.len() % 8 != 0 {
|
||||
content_buffer.push(0);
|
||||
}
|
||||
}
|
||||
|
||||
// Получение позиции файла
|
||||
let position = content_buffer.len() + 16;
|
||||
|
||||
// Записываем файл в буфер
|
||||
content_buffer.extend(file_buffer);
|
||||
|
||||
// Формируем элемент
|
||||
let element = ListElement {
|
||||
extension: item.extension.to_string(),
|
||||
index: item.index,
|
||||
name: item.name.to_string(),
|
||||
position: position as u32,
|
||||
size: metadata.len() as u32,
|
||||
unknown0: item.unknown0,
|
||||
unknown1: item.unknown1,
|
||||
unknown2: item.unknown2,
|
||||
};
|
||||
|
||||
// Создаем буфер из элемента
|
||||
let mut element_buffer: Vec<u8> = Vec::new();
|
||||
|
||||
// Пишем тип файла
|
||||
let mut extension_buffer: [u8; 4] = [0; 4];
|
||||
let mut file_extension_buffer = element.extension.into_bytes();
|
||||
file_extension_buffer.resize(4, 0);
|
||||
extension_buffer.copy_from_slice(&file_extension_buffer);
|
||||
element_buffer.extend(extension_buffer);
|
||||
|
||||
// Пишем неизвестное значение #1
|
||||
let mut unknown0_buffer: [u8; 4] = [0; 4];
|
||||
LittleEndian::write_u32(&mut unknown0_buffer, element.unknown0);
|
||||
element_buffer.extend(unknown0_buffer);
|
||||
|
||||
// Пишем неизвестное значение #2
|
||||
let mut unknown1_buffer: [u8; 4] = [0; 4];
|
||||
LittleEndian::write_u32(&mut unknown1_buffer, element.unknown1);
|
||||
element_buffer.extend(unknown1_buffer);
|
||||
|
||||
// Пишем размер файла
|
||||
let mut file_size_buffer: [u8; 4] = [0; 4];
|
||||
LittleEndian::write_u32(&mut file_size_buffer, element.size);
|
||||
element_buffer.extend(file_size_buffer);
|
||||
|
||||
// Пишем неизвестное значение #3
|
||||
let mut unknown2_buffer: [u8; 4] = [0; 4];
|
||||
LittleEndian::write_u32(&mut unknown2_buffer, element.unknown2);
|
||||
element_buffer.extend(unknown2_buffer);
|
||||
|
||||
// Пишем название файла
|
||||
let mut name_buffer: [u8; 36] = [0; 36];
|
||||
let mut file_name_buffer = element.name.into_bytes();
|
||||
file_name_buffer.resize(36, 0);
|
||||
name_buffer.copy_from_slice(&file_name_buffer);
|
||||
element_buffer.extend(name_buffer);
|
||||
|
||||
// Пишем позицию файла
|
||||
let mut position_buffer: [u8; 4] = [0; 4];
|
||||
LittleEndian::write_u32(&mut position_buffer, element.position);
|
||||
element_buffer.extend(position_buffer);
|
||||
|
||||
// Пишем индекс файла
|
||||
let mut index_buffer: [u8; 4] = [0; 4];
|
||||
LittleEndian::write_u32(&mut index_buffer, element.index);
|
||||
element_buffer.extend(index_buffer);
|
||||
|
||||
// Добавляем итоговый буфер в буфер элементов списка
|
||||
list_buffer.extend(element_buffer);
|
||||
}
|
||||
|
||||
// Выравнивание буфера
|
||||
while content_buffer.len() % 8 != 0 {
|
||||
content_buffer.push(0);
|
||||
}
|
||||
|
||||
let mut header_buffer: Vec<u8> = Vec::new();
|
||||
|
||||
// Пишем первый тип файла
|
||||
let mut header_type_1 = [0; 4];
|
||||
LittleEndian::write_u32(&mut header_type_1, 1936020046_u32);
|
||||
header_buffer.extend(header_type_1);
|
||||
|
||||
// Пишем второй тип файла
|
||||
let mut header_type_2 = [0; 4];
|
||||
LittleEndian::write_u32(&mut header_type_2, 256_u32);
|
||||
header_buffer.extend(header_type_2);
|
||||
|
||||
// Пишем количество файлов
|
||||
let mut header_total_files = [0; 4];
|
||||
LittleEndian::write_u32(&mut header_total_files, total_files);
|
||||
header_buffer.extend(header_total_files);
|
||||
|
||||
// Пишем общий размер файла
|
||||
let mut header_total_size = [0; 4];
|
||||
let total_size: u32 = ((content_buffer.len() + 16) as u32) + (total_files * 64);
|
||||
LittleEndian::write_u32(&mut header_total_size, total_size);
|
||||
header_buffer.extend(header_total_size);
|
||||
|
||||
let mut result_buffer: Vec<u8> = Vec::new();
|
||||
result_buffer.extend(header_buffer);
|
||||
result_buffer.extend(content_buffer);
|
||||
result_buffer.extend(list_buffer);
|
||||
|
||||
fs::write(output, result_buffer).unwrap();
|
||||
}
|
||||
6
renovate.config.cjs
Normal file
6
renovate.config.cjs
Normal file
@@ -0,0 +1,6 @@
|
||||
// Renovate bot configuration for the self-hosted Gitea instance.
module.exports = {
  // Base URL of the Gitea server the bot talks to.
  endpoint: "https://code.popov.link",
  // Author identity used for commits created by the bot.
  gitAuthor: "renovate[bot] <renovatebot@noreply.localhost>",
  // NOTE(review): presumably skips work for repos with Renovate disabled —
  // confirm against the Renovate self-hosted configuration docs.
  optimizeForDisabled: true,
  platform: "gitea",
};
|
||||
1
requirements.txt
Normal file
1
requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
mkdocs-material
|
||||
2
testdata/nres/.gitignore
vendored
Normal file
2
testdata/nres/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*
|
||||
!.gitignore
|
||||
2
testdata/rsli/.gitignore
vendored
Normal file
2
testdata/rsli/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*
|
||||
!.gitignore
|
||||
@@ -1,8 +0,0 @@
|
||||
[package]
|
||||
name = "texture-decoder"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
image = "0.24.7"
|
||||
@@ -1,13 +0,0 @@
|
||||
# Декодировщик текстур
|
||||
|
||||
Сборка:
|
||||
|
||||
```bash
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
Запуск:
|
||||
|
||||
```bash
|
||||
./target/release/texture-decoder ./out/AIM_02.0 ./out/AIM_02.0.png
|
||||
```
|
||||
@@ -1,41 +0,0 @@
|
||||
use std::io::Read;
|
||||
|
||||
use byteorder::ReadBytesExt;
|
||||
use image::Rgba;
|
||||
|
||||
fn decode_texture(file_path: &str, output_path: &str) -> Result<(), std::io::Error> {
|
||||
// Читаем файл
|
||||
let mut file = std::fs::File::open(file_path)?;
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
file.read_to_end(&mut buffer)?;
|
||||
|
||||
// Декодируем метаданные
|
||||
let mut cursor = std::io::Cursor::new(&buffer[4..]);
|
||||
let img_width = cursor.read_u32::<byteorder::LittleEndian>()?;
|
||||
let img_height = cursor.read_u32::<byteorder::LittleEndian>()?;
|
||||
|
||||
// Пропустить оставшиеся байты метаданных
|
||||
cursor.set_position(20);
|
||||
|
||||
// Извлекаем данные изображения
|
||||
let image_data = buffer[cursor.position() as usize..].to_vec();
|
||||
let img =
|
||||
image::ImageBuffer::<Rgba<u8>, _>::from_raw(img_width, img_height, image_data.to_vec())
|
||||
.expect("Failed to decode image");
|
||||
|
||||
// Сохраняем изображение
|
||||
img.save(output_path).unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
|
||||
let input = &args[1];
|
||||
let output = &args[2];
|
||||
|
||||
if let Err(err) = decode_texture(input, output) {
|
||||
eprintln!("Error: {}", err)
|
||||
}
|
||||
}
|
||||
201
tools/README.md
Normal file
201
tools/README.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Инструменты в каталоге `tools`
|
||||
|
||||
## `archive_roundtrip_validator.py`
|
||||
|
||||
Скрипт предназначен для **валидации документации по форматам NRes и RsLi на реальных данных игры**.
|
||||
|
||||
Что делает утилита:
|
||||
|
||||
- находит архивы по сигнатуре заголовка (а не по расширению файла);
|
||||
- распаковывает архивы в структуру `manifest.json + entries/*`;
|
||||
- собирает архивы обратно из `manifest.json`;
|
||||
- выполняет проверку `unpack -> repack -> byte-compare`;
|
||||
- формирует отчёт о расхождениях со спецификацией.
|
||||
|
||||
Скрипт не изменяет оригинальные файлы игры. Рабочие файлы создаются только в указанном `--workdir` (или во временной папке).
|
||||
|
||||
## Поддерживаемые сигнатуры
|
||||
|
||||
- `NRes` (`4E 52 65 73`)
|
||||
- `RsLi` в файловом формате библиотеки: `NL 00 01`
|
||||
|
||||
## Основные команды
|
||||
|
||||
Сканирование архива по сигнатурам:
|
||||
|
||||
```bash
|
||||
python3 tools/archive_roundtrip_validator.py scan --input tmp/gamedata
|
||||
```
|
||||
|
||||
Распаковка/упаковка одного NRes:
|
||||
|
||||
```bash
|
||||
python3 tools/archive_roundtrip_validator.py nres-unpack \
|
||||
--archive tmp/gamedata/sounds.lib \
|
||||
--output tmp/work/nres_sounds
|
||||
|
||||
python3 tools/archive_roundtrip_validator.py nres-pack \
|
||||
--manifest tmp/work/nres_sounds/manifest.json \
|
||||
--output tmp/work/sounds.repacked.lib
|
||||
```
|
||||
|
||||
Распаковка/упаковка одного RsLi:
|
||||
|
||||
```bash
|
||||
python3 tools/archive_roundtrip_validator.py rsli-unpack \
|
||||
--archive tmp/gamedata/sprites.lib \
|
||||
--output tmp/work/rsli_sprites
|
||||
|
||||
python3 tools/archive_roundtrip_validator.py rsli-pack \
|
||||
--manifest tmp/work/rsli_sprites/manifest.json \
|
||||
--output tmp/work/sprites.repacked.lib
|
||||
```
|
||||
|
||||
Полная валидация документации на всём наборе данных:
|
||||
|
||||
```bash
|
||||
python3 tools/archive_roundtrip_validator.py validate \
|
||||
--input tmp/gamedata \
|
||||
--workdir tmp/validation_work \
|
||||
--report tmp/validation_report.json \
|
||||
--fail-on-diff
|
||||
```
|
||||
|
||||
## Формат распаковки
|
||||
|
||||
Для каждого архива создаются:
|
||||
|
||||
- `manifest.json` — все поля заголовка, записи, индексы, смещения, контрольные суммы;
|
||||
- `entries/*.bin` — payload-файлы.
|
||||
|
||||
Имена файлов в `entries` включают индекс записи, поэтому коллизии одинаковых имён внутри архива обрабатываются корректно.
|
||||
|
||||
## `init_testdata.py`
|
||||
|
||||
Скрипт инициализирует тестовые данные по сигнатурам архивов из спецификации:
|
||||
|
||||
- `NRes` (`4E 52 65 73`);
|
||||
- `RsLi` (`NL 00 01`).
|
||||
|
||||
Что делает утилита:
|
||||
|
||||
- рекурсивно сканирует все файлы в `--input`;
|
||||
- копирует найденные `NRes` в `--output/nres/`;
|
||||
- копирует найденные `RsLi` в `--output/rsli/`;
|
||||
- сохраняет относительный путь исходного файла внутри целевого каталога;
|
||||
- создаёт целевые каталоги автоматически, если их нет.
|
||||
|
||||
Базовый запуск:
|
||||
|
||||
```bash
|
||||
python3 tools/init_testdata.py --input tmp/gamedata --output testdata
|
||||
```
|
||||
|
||||
Если целевой файл уже существует, скрипт спрашивает подтверждение перезаписи (`yes/no/all/quit`).
|
||||
|
||||
Для перезаписи без вопросов используйте `--force`:
|
||||
|
||||
```bash
|
||||
python3 tools/init_testdata.py --input tmp/gamedata --output testdata --force
|
||||
```
|
||||
|
||||
Проверки надёжности:
|
||||
|
||||
- `--input` должен существовать и быть каталогом;
|
||||
- если `--output` указывает на существующий файл, скрипт завершится с ошибкой;
|
||||
- если `--output` расположен внутри `--input`, каталог вывода исключается из сканирования;
|
||||
- если `stdin` неинтерактивный и требуется перезапись, нужно явно указать `--force`.
|
||||
|
||||
## `msh_doc_validator.py`
|
||||
|
||||
Скрипт валидирует ключевые инварианты из документации `docs/specs/msh.md` на реальных данных.
|
||||
|
||||
Проверяемые группы:
|
||||
|
||||
- модели `*.msh` (вложенные `NRes` в архивах `NRes`);
|
||||
- текстуры `Texm` (`type_id = 0x6D786554`);
|
||||
- эффекты `FXID` (`type_id = 0x44495846`).
|
||||
|
||||
Что проверяет для моделей:
|
||||
|
||||
- обязательные ресурсы (`Res1/2/3/6/13`) и известные опциональные (`Res4/5/7/8/10/15/16/18/19`);
|
||||
- `size/attr1/attr3` и шаги структур по таблицам;
|
||||
- диапазоны индексов, батчей и ссылок между таблицами;
|
||||
- разбор `Res10` как `len + bytes + NUL` для каждого узла;
|
||||
- матрицу слотов в `Res1` (LOD/group) и границы по `Res2/Res7/Res13/Res19`.
|
||||
|
||||
Быстрый запуск:
|
||||
|
||||
```bash
|
||||
python3 tools/msh_doc_validator.py scan --input testdata/nres
|
||||
python3 tools/msh_doc_validator.py validate --input testdata/nres --print-limit 20
|
||||
```
|
||||
|
||||
С отчётом в JSON:
|
||||
|
||||
```bash
|
||||
python3 tools/msh_doc_validator.py validate \
|
||||
--input testdata/nres \
|
||||
--report tmp/msh_validation_report.json \
|
||||
--fail-on-warnings
|
||||
```
|
||||
|
||||
## `msh_preview_renderer.py`
|
||||
|
||||
Примитивный программный рендерер моделей `*.msh` без внешних зависимостей.
|
||||
|
||||
- вход: архив `NRes` (например `animals.rlb`) или прямой payload модели;
|
||||
- выход: изображение `PPM` (`P6`);
|
||||
- использует `Res3` (позиции), `Res6` (индексы), `Res13` (батчи), `Res1/Res2` (выбор слотов по `lod/group`).
|
||||
|
||||
Показать доступные модели в архиве:
|
||||
|
||||
```bash
|
||||
python3 tools/msh_preview_renderer.py list-models --archive testdata/nres/animals.rlb
|
||||
```
|
||||
|
||||
Сгенерировать тестовый рендер:
|
||||
|
||||
```bash
|
||||
python3 tools/msh_preview_renderer.py render \
|
||||
--archive testdata/nres/animals.rlb \
|
||||
--model A_L_01.msh \
|
||||
--output tmp/renders/A_L_01.ppm \
|
||||
--width 800 \
|
||||
--height 600 \
|
||||
--lod 0 \
|
||||
--group 0 \
|
||||
--wireframe
|
||||
```
|
||||
|
||||
Ограничения:
|
||||
|
||||
- инструмент предназначен для smoke-теста геометрии, а не для пиксельно-точного рендера движка;
|
||||
- текстуры/материалы/эффектные проходы не эмулируются.
|
||||
|
||||
## `msh_export_obj.py`
|
||||
|
||||
Экспортирует геометрию `*.msh` в `Wavefront OBJ`, чтобы открыть модель в Blender/MeshLab.
|
||||
|
||||
- вход: `NRes` архив (например `animals.rlb`) или прямой payload модели;
|
||||
- выбор геометрии: через `Res1` slot matrix (`lod/group`) как в рендерере;
|
||||
- опция `--all-batches` экспортирует все батчи, игнорируя slot matrix.
|
||||
|
||||
Показать модели в архиве:
|
||||
|
||||
```bash
|
||||
python3 tools/msh_export_obj.py list-models --archive testdata/nres/animals.rlb
|
||||
```
|
||||
|
||||
Экспорт в OBJ:
|
||||
|
||||
```bash
|
||||
python3 tools/msh_export_obj.py export \
|
||||
--archive testdata/nres/animals.rlb \
|
||||
--model A_L_01.msh \
|
||||
--output tmp/renders/A_L_01.obj \
|
||||
--lod 0 \
|
||||
--group 0
|
||||
```
|
||||
|
||||
Файл `OBJ` можно открыть напрямую в Blender (`File -> Import -> Wavefront (.obj)`).
|
||||
944
tools/archive_roundtrip_validator.py
Normal file
944
tools/archive_roundtrip_validator.py
Normal file
@@ -0,0 +1,944 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Roundtrip tools for NRes and RsLi archives.
|
||||
|
||||
The script can:
|
||||
1) scan archives by header signature (ignores file extensions),
|
||||
2) unpack / pack NRes archives,
|
||||
3) unpack / pack RsLi archives,
|
||||
4) validate docs assumptions by full roundtrip and byte-to-byte comparison.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import hashlib
|
||||
import json
|
||||
import re
|
||||
import shutil
|
||||
import struct
|
||||
import tempfile
|
||||
import zlib
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
MAGIC_NRES = b"NRes"
|
||||
MAGIC_RSLI = b"NL\x00\x01"
|
||||
|
||||
|
||||
class ArchiveFormatError(RuntimeError):
    """Raised when archive bytes violate the documented NRes/RsLi format."""

    pass
|
||||
|
||||
|
||||
def sha256_hex(data: bytes) -> str:
    """Return the lowercase hexadecimal SHA-256 digest of *data*."""
    digest = hashlib.sha256(data)
    return digest.hexdigest()
|
||||
|
||||
|
||||
def safe_component(value: str, fallback: str = "item", max_len: int = 80) -> str:
    """Sanitize *value* into a filesystem-safe name component.

    Runs of characters outside ``[A-Za-z0-9._-]`` collapse to a single
    underscore, leading/trailing separators are stripped, and the result
    is clamped to *max_len* characters. *fallback* is returned (clamped)
    when nothing survives sanitization.
    """
    sanitized = re.sub(r"[^A-Za-z0-9._-]+", "_", value)
    sanitized = sanitized.strip("._-")
    return (sanitized or fallback)[:max_len]
|
||||
|
||||
|
||||
def first_diff(a: bytes, b: bytes) -> tuple[int | None, str | None]:
|
||||
if a == b:
|
||||
return None, None
|
||||
limit = min(len(a), len(b))
|
||||
for idx in range(limit):
|
||||
if a[idx] != b[idx]:
|
||||
return idx, f"{a[idx]:02x}!={b[idx]:02x}"
|
||||
return limit, f"len {len(a)}!={len(b)}"
|
||||
|
||||
|
||||
def load_json(path: Path) -> dict[str, Any]:
    """Read *path* as UTF-8 text and return the parsed JSON object."""
    return json.loads(path.read_text(encoding="utf-8"))
|
||||
|
||||
|
||||
def dump_json(path: Path, payload: dict[str, Any]) -> None:
    """Write *payload* as pretty-printed UTF-8 JSON with a trailing newline.

    Parent directories are created on demand; non-ASCII text is written
    verbatim (``ensure_ascii=False``).
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    text = json.dumps(payload, indent=2, ensure_ascii=False) + "\n"
    path.write_text(text, encoding="utf-8")
|
||||
|
||||
|
||||
def xor_stream(data: bytes, key16: int) -> bytes:
    """Apply the RsLi 16-bit rolling XOR keystream to *data*.

    The keystream is derived only from *key16* and byte position (the
    input bytes never feed back into the state), so the transform is its
    own inverse: ``xor_stream(xor_stream(d, k), k) == d``.
    """
    lo = key16 & 0xFF
    hi = (key16 >> 8) & 0xFF
    result = bytearray()
    for value in data:
        # Advance the two 8-bit state registers, then XOR with `lo`.
        lo = (hi ^ ((lo << 1) & 0xFF)) & 0xFF
        result.append(value ^ lo)
        hi = (lo ^ ((hi >> 1) & 0xFF)) & 0xFF
    return bytes(result)
|
||||
|
||||
|
||||
def lzss_decompress_simple(data: bytes, expected_size: int) -> bytes:
    """Decompress an LZSS stream as used by RsLi methods 0x040/0x060.

    Uses a classic 4 KiB ring buffer pre-filled with spaces (0x20) and
    an initial write position of 0xFEE. Raises ArchiveFormatError when
    the stream does not expand to exactly *expected_size* bytes.
    """
    ring = bytearray([0x20] * 0x1000)  # 4 KiB sliding window, space-filled
    ring_pos = 0xFEE  # conventional LZSS initial window write position
    out = bytearray()
    in_pos = 0
    control = 0
    bits_left = 0

    while len(out) < expected_size and in_pos < len(data):
        # Each control byte supplies 8 flag bits, consumed LSB-first:
        # 1 = literal byte follows, 0 = (offset, length) back-reference.
        if bits_left == 0:
            control = data[in_pos]
            in_pos += 1
            bits_left = 8

        if control & 1:
            # Literal: copy one byte and record it in the window.
            if in_pos >= len(data):
                break
            byte = data[in_pos]
            in_pos += 1
            out.append(byte)
            ring[ring_pos] = byte
            ring_pos = (ring_pos + 1) & 0x0FFF
        else:
            # Back-reference: two bytes encode a window offset and length.
            if in_pos + 1 >= len(data):
                break
            low = data[in_pos]
            high = data[in_pos + 1]
            in_pos += 2
            # Real files indicate nibble layout opposite to common LZSS variant:
            # high nibble extends offset, low nibble stores (length - 3).
            offset = low | ((high & 0xF0) << 4)
            length = (high & 0x0F) + 3
            for step in range(length):
                byte = ring[(offset + step) & 0x0FFF]
                out.append(byte)
                ring[ring_pos] = byte
                ring_pos = (ring_pos + 1) & 0x0FFF
                # Stop mid-copy once the declared output size is reached.
                if len(out) >= expected_size:
                    break

        control >>= 1
        bits_left -= 1

    if len(out) != expected_size:
        raise ArchiveFormatError(
            f"LZSS size mismatch: expected {expected_size}, got {len(out)}"
        )
    return bytes(out)
|
||||
|
||||
|
||||
def decode_rsli_payload(
    packed: bytes, method: int, sort_to_original: int, unpacked_size: int
) -> bytes:
    """Decode one RsLi entry payload according to its method bits.

    Supported methods: 0x000 stored, 0x020 XOR, 0x040 LZSS, 0x060
    XOR-then-LZSS, 0x100 deflate. The low 16 bits of *sort_to_original*
    seed the XOR keystream. Raises ArchiveFormatError for unknown
    methods or when the decoded size disagrees with *unpacked_size*.
    """
    key16 = sort_to_original & 0xFFFF

    if method == 0x000:
        # Stored verbatim.
        decoded = packed
    elif method == 0x020:
        # XOR-only: the payload must hold at least the unpacked size.
        if len(packed) < unpacked_size:
            raise ArchiveFormatError(
                f"method 0x20 packed too short: {len(packed)} < {unpacked_size}"
            )
        decoded = xor_stream(packed[:unpacked_size], key16)
    elif method == 0x040:
        decoded = lzss_decompress_simple(packed, unpacked_size)
    elif method == 0x060:
        # Remove the XOR layer first, then LZSS-decompress.
        decoded = lzss_decompress_simple(xor_stream(packed, key16), unpacked_size)
    elif method == 0x100:
        # Raw deflate stream; fall back to a zlib-wrapped stream.
        try:
            decoded = zlib.decompress(packed, -15)
        except zlib.error:
            decoded = zlib.decompress(packed)
    else:
        raise ArchiveFormatError(f"unsupported RsLi method: 0x{method:03X}")

    if len(decoded) != unpacked_size:
        raise ArchiveFormatError(
            f"unpacked_size mismatch: expected {unpacked_size}, got {len(decoded)}"
        )
    return decoded
|
||||
|
||||
|
||||
def detect_archive_type(path: Path) -> str | None:
    """Classify *path* by its first four bytes: "nres", "rsli", or None.

    Unreadable paths (missing file, permission error) yield None rather
    than raising.
    """
    try:
        with path.open("rb") as handle:
            magic = handle.read(4)
    except OSError:
        return None

    signatures = {MAGIC_NRES: "nres", MAGIC_RSLI: "rsli"}
    return signatures.get(magic)
|
||||
|
||||
|
||||
def scan_archives(root: Path) -> list[dict[str, Any]]:
    """Recursively locate NRes/RsLi archives under *root* by signature.

    File extensions are ignored; only the 4-byte magic decides. Records
    are returned in sorted path order with the absolute path, the path
    relative to *root*, the detected type, and the size in bytes.
    """
    records: list[dict[str, Any]] = []
    for candidate in sorted(root.rglob("*")):
        if not candidate.is_file():
            continue
        kind = detect_archive_type(candidate)
        if not kind:
            continue
        records.append(
            {
                "path": str(candidate),
                "relative_path": str(candidate.relative_to(root)),
                "type": kind,
                "size": candidate.stat().st_size,
            }
        )
    return records
|
||||
|
||||
|
||||
def parse_nres(data: bytes, source: str = "<memory>") -> dict[str, Any]:
    """Parse an in-memory NRes archive and check it against the spec.

    Returns a dict with the decoded header, one dict per directory
    entry, and a list of human-readable spec violations ("issues").
    Raises ArchiveFormatError only for damage that makes parsing
    impossible; mere spec deviations are reported as issues.
    """
    if len(data) < 16:
        raise ArchiveFormatError(f"{source}: NRes too short ({len(data)} bytes)")

    # 16-byte header: magic, version, entry count, declared total size.
    magic, version, entry_count, total_size = struct.unpack_from("<4sIII", data, 0)
    if magic != MAGIC_NRES:
        raise ArchiveFormatError(f"{source}: invalid NRes magic")

    issues: list[str] = []
    if total_size != len(data):
        issues.append(
            f"header.total_size={total_size} != actual_size={len(data)} (spec 1.2)"
        )
    if version != 0x100:
        issues.append(f"version=0x{version:08X} != 0x00000100 (spec 1.2)")

    # The 64-byte-per-entry directory sits at the end of the file.
    directory_offset = total_size - entry_count * 64
    if directory_offset < 16 or directory_offset > len(data):
        raise ArchiveFormatError(
            f"{source}: invalid directory offset {directory_offset} for entry_count={entry_count}"
        )
    if directory_offset + entry_count * 64 != len(data):
        issues.append(
            "directory_offset + entry_count*64 != file_size (spec 1.3)"
        )

    entries: list[dict[str, Any]] = []
    for index in range(entry_count):
        offset = directory_offset + index * 64
        if offset + 64 > len(data):
            raise ArchiveFormatError(f"{source}: truncated directory entry {index}")

        # 64-byte entry: five u32 attributes, 36-byte NUL-padded name,
        # payload offset, and the entry's position in name-sorted order.
        (
            type_id,
            attr1,
            attr2,
            size,
            attr3,
            name_raw,
            data_offset,
            sort_index,
        ) = struct.unpack_from("<IIIII36sII", data, offset)
        name_bytes = name_raw.split(b"\x00", 1)[0]
        name = name_bytes.decode("latin1", errors="replace")
        entries.append(
            {
                "index": index,
                "type_id": type_id,
                "attr1": attr1,
                "attr2": attr2,
                "size": size,
                "attr3": attr3,
                "name": name,
                "name_bytes_hex": name_bytes.hex(),
                "name_raw_hex": name_raw.hex(),
                "data_offset": data_offset,
                "sort_index": sort_index,
            }
        )

    # Spec checks.
    # sort_index must reflect case-insensitive ordering of the raw names.
    expected_sort = sorted(
        range(entry_count),
        key=lambda idx: bytes.fromhex(entries[idx]["name_bytes_hex"]).lower(),
    )
    current_sort = [item["sort_index"] for item in entries]
    if current_sort != expected_sort:
        issues.append(
            "sort_index table does not match case-insensitive name order (spec 1.4)"
        )

    # NOTE(review): regions are sorted by entry index, not by data_offset,
    # so the overlap/padding checks below assume payloads are stored in
    # directory order — confirm against the spec.
    data_regions = sorted(
        (
            item["index"],
            item["data_offset"],
            item["size"],
        )
        for item in entries
    )
    for idx, data_offset, size in data_regions:
        if data_offset % 8 != 0:
            issues.append(f"entry {idx}: data_offset={data_offset} not aligned to 8 (spec 1.5)")
        if data_offset < 16 or data_offset + size > directory_offset:
            issues.append(
                f"entry {idx}: data range [{data_offset}, {data_offset + size}) out of data area (spec 1.3)"
            )
    # Consecutive regions must not overlap, and inter-block padding must be zero.
    for i in range(len(data_regions) - 1):
        _, start, size = data_regions[i]
        _, next_start, _ = data_regions[i + 1]
        if start + size > next_start:
            issues.append(
                f"entry overlap at data_offset={start}, next={next_start}"
            )
        padding = data[start + size : next_start]
        if any(padding):
            issues.append(
                f"non-zero padding after data block at offset={start + size} (spec 1.5)"
            )

    return {
        "format": "NRes",
        "header": {
            "magic": "NRes",
            "version": version,
            "entry_count": entry_count,
            "total_size": total_size,
            "directory_offset": directory_offset,
        },
        "entries": entries,
        "issues": issues,
    }
|
||||
|
||||
|
||||
def build_nres_name_field(entry: dict[str, Any]) -> bytes:
    """Build the fixed 36-byte, NUL-padded name field for an NRes entry.

    Prefers the exact on-disk bytes ("name_bytes_hex") and falls back to
    a latin-1 encoding of "name". At most 35 name bytes are kept so the
    field always ends with at least one NUL byte.
    """
    if "name_bytes_hex" in entry:
        raw = bytes.fromhex(entry["name_bytes_hex"])
    else:
        raw = entry.get("name", "").encode("latin1", errors="replace")
    truncated = raw[:35]
    return truncated.ljust(36, b"\x00")
|
||||
|
||||
|
||||
def unpack_nres_file(archive_path: Path, out_dir: Path, source_root: Path | None = None) -> dict[str, Any]:
    """Unpack an NRes archive into ``out_dir`` as manifest.json + entries/*.

    Each entry payload is written to ``entries/`` with its index, a
    sanitized name, and the type/attr fields embedded in the filename.
    The returned manifest mirrors what is written to manifest.json and
    carries SHA-256 digests for the whole file and each payload so a
    later repack can be byte-verified. *source_root*, when given, is
    used to record a root-relative source path.
    """
    data = archive_path.read_bytes()
    parsed = parse_nres(data, source=str(archive_path))

    out_dir.mkdir(parents=True, exist_ok=True)
    entries_dir = out_dir / "entries"
    entries_dir.mkdir(parents=True, exist_ok=True)

    manifest: dict[str, Any] = {
        "format": "NRes",
        "source_path": str(archive_path),
        "source_relative_path": str(archive_path.relative_to(source_root)) if source_root else str(archive_path),
        "header": parsed["header"],
        "entries": [],
        "issues": parsed["issues"],
        "source_sha256": sha256_hex(data),
    }

    for entry in parsed["entries"]:
        begin = entry["data_offset"]
        end = begin + entry["size"]
        if begin < 0 or end > len(data):
            raise ArchiveFormatError(
                f"{archive_path}: entry {entry['index']} data range outside file"
            )
        payload = data[begin:end]
        # The index prefix keeps same-named entries from colliding on disk.
        base = safe_component(entry["name"], fallback=f"entry_{entry['index']:05d}")
        file_name = (
            f"{entry['index']:05d}__{base}"
            f"__t{entry['type_id']:08X}_a1{entry['attr1']:08X}_a2{entry['attr2']:08X}.bin"
        )
        (entries_dir / file_name).write_bytes(payload)

        manifest_entry = dict(entry)
        manifest_entry["data_file"] = f"entries/{file_name}"
        manifest_entry["sha256"] = sha256_hex(payload)
        manifest["entries"].append(manifest_entry)

    dump_json(out_dir / "manifest.json", manifest)
    return manifest
|
||||
|
||||
|
||||
def pack_nres_manifest(manifest_path: Path, out_file: Path) -> bytes:
    """Rebuild an NRes archive from a manifest produced by unpack_nres_file.

    Payloads are laid out 8-byte aligned after the 16-byte header, then
    the 64-byte directory entries, and finally the header is patched in
    place with the real totals. The archive bytes are both written to
    *out_file* and returned.

    Note: sort_index values are recomputed from the case-insensitive
    name order (matching the parse_nres spec check) rather than copied
    from the manifest.

    Raises ArchiveFormatError when the manifest is not an NRes manifest.
    """
    manifest = load_json(manifest_path)
    if manifest.get("format") != "NRes":
        raise ArchiveFormatError(f"{manifest_path}: not an NRes manifest")

    entries = manifest["entries"]
    count = len(entries)
    version = int(manifest.get("header", {}).get("version", 0x100))

    # Reserve the 16-byte header; it is patched with real values at the end.
    out = bytearray(b"\x00" * 16)
    data_offsets: list[int] = []
    data_sizes: list[int] = []

    for entry in entries:
        payload_path = manifest_path.parent / entry["data_file"]
        payload = payload_path.read_bytes()
        offset = len(out)
        out.extend(payload)
        # Pad each payload so the next one starts 8-byte aligned (spec 1.5).
        padding = (-len(out)) % 8
        if padding:
            out.extend(b"\x00" * padding)
        data_offsets.append(offset)
        data_sizes.append(len(payload))

    # The directory begins here; recompute the name-sorted permutation.
    expected_sort = sorted(
        range(count),
        key=lambda idx: bytes.fromhex(entries[idx].get("name_bytes_hex", "")).lower(),
    )

    for index, entry in enumerate(entries):
        name_field = build_nres_name_field(entry)
        out.extend(
            struct.pack(
                "<IIIII36sII",
                int(entry["type_id"]),
                int(entry["attr1"]),
                int(entry["attr2"]),
                data_sizes[index],
                int(entry["attr3"]),
                name_field,
                data_offsets[index],
                expected_sort[index],
            )
        )

    # Patch the header in place now that the total size is known.
    total_size = len(out)
    struct.pack_into("<4sIII", out, 0, MAGIC_NRES, version, count, total_size)

    out_file.parent.mkdir(parents=True, exist_ok=True)
    out_file.write_bytes(out)
    return bytes(out)
|
||||
|
||||
|
||||
def parse_rsli(data: bytes, source: str = "<memory>") -> dict[str, Any]:
    """Parse an in-memory RsLi library and check it against the spec.

    Decrypts the 32-byte-per-row entry table with the XOR keystream
    seeded from the header, decodes an optional "AO" overlay trailer,
    and returns header fields, per-entry dicts, and a list of spec
    violations ("issues"). Raises ArchiveFormatError only for damage
    that makes parsing impossible.
    """
    if len(data) < 32:
        raise ArchiveFormatError(f"{source}: RsLi too short ({len(data)} bytes)")
    if data[:4] != MAGIC_RSLI:
        raise ArchiveFormatError(f"{source}: invalid RsLi magic")

    issues: list[str] = []
    # Header fields at fixed byte offsets (spec 2.2).
    reserved_zero = data[2]
    version = data[3]
    entry_count = struct.unpack_from("<h", data, 4)[0]
    presorted_flag = struct.unpack_from("<H", data, 14)[0]
    seed = struct.unpack_from("<I", data, 20)[0]

    if reserved_zero != 0:
        issues.append(f"header[2]={reserved_zero} != 0 (spec 2.2)")
    if version != 1:
        issues.append(f"version={version} != 1 (spec 2.2)")
    if entry_count < 0:
        raise ArchiveFormatError(f"{source}: negative entry_count={entry_count}")

    # The XOR-encrypted entry table follows the 32-byte header.
    table_offset = 32
    table_size = entry_count * 32
    if table_offset + table_size > len(data):
        raise ArchiveFormatError(
            f"{source}: encrypted table out of file bounds ({table_offset}+{table_size}>{len(data)})"
        )

    table_encrypted = data[table_offset : table_offset + table_size]
    table_plain = xor_stream(table_encrypted, seed & 0xFFFF)

    # Optional 6-byte "AO" trailer: shifts all entry data offsets.
    trailer: dict[str, Any] = {"present": False}
    overlay_offset = 0
    if len(data) >= 6 and data[-6:-4] == b"AO":
        overlay_offset = struct.unpack_from("<I", data, len(data) - 4)[0]
        trailer = {
            "present": True,
            "signature": "AO",
            "overlay_offset": overlay_offset,
            "raw_hex": data[-6:].hex(),
        }

    entries: list[dict[str, Any]] = []
    sort_values: list[int] = []
    for index in range(entry_count):
        # 32-byte row: 12-byte name, 4 reserved bytes, flags, sort key,
        # unpacked size, data offset, packed size.
        row = table_plain[index * 32 : (index + 1) * 32]
        name_raw = row[0:12]
        reserved4 = row[12:16]
        flags_signed, sort_to_original = struct.unpack_from("<hh", row, 16)
        unpacked_size, data_offset, packed_size = struct.unpack_from("<III", row, 20)
        # Compression method lives in bits 5-8 of the flags word.
        method = flags_signed & 0x1E0
        name = name_raw.split(b"\x00", 1)[0].decode("latin1", errors="replace")
        effective_offset = data_offset + overlay_offset
        entries.append(
            {
                "index": index,
                "name": name,
                "name_raw_hex": name_raw.hex(),
                "reserved_raw_hex": reserved4.hex(),
                "flags_signed": flags_signed,
                "flags_u16": flags_signed & 0xFFFF,
                "method": method,
                "sort_to_original": sort_to_original,
                "unpacked_size": unpacked_size,
                "data_offset": data_offset,
                "effective_data_offset": effective_offset,
                "packed_size": packed_size,
            }
        )
        sort_values.append(sort_to_original)

        # Bounds checks on the packed payload range.
        if effective_offset < 0:
            issues.append(f"entry {index}: negative effective_data_offset={effective_offset}")
        elif effective_offset + packed_size > len(data):
            end = effective_offset + packed_size
            # Deflate entries in real game data may overrun by one byte.
            if method == 0x100 and end == len(data) + 1:
                issues.append(
                    f"entry {index}: deflate packed_size reaches EOF+1 ({end}); "
                    "observed in game data, likely decoder lookahead byte"
                )
            else:
                issues.append(
                    f"entry {index}: packed range [{effective_offset}, {end}) out of file"
                )

    # When the presorted marker is set, sort keys must permute [0..N-1].
    if presorted_flag == 0xABBA:
        if sorted(sort_values) != list(range(entry_count)):
            issues.append(
                "presorted flag is 0xABBA but sort_to_original is not a permutation [0..N-1] (spec 2.2/2.4)"
            )

    return {
        "format": "RsLi",
        "header_raw_hex": data[:32].hex(),
        "header": {
            "magic": "NL\\x00\\x01",
            "entry_count": entry_count,
            "seed": seed,
            "presorted_flag": presorted_flag,
        },
        "entries": entries,
        "issues": issues,
        "trailer": trailer,
    }
|
||||
|
||||
|
||||
def unpack_rsli_file(archive_path: Path, out_dir: Path, source_root: Path | None = None) -> dict[str, Any]:
    """Unpack an RsLi library into ``out_dir`` as manifest.json + entries/*.

    For each entry the raw packed bytes are always written; a decoded
    copy is written too when the compression method is supported.
    Decode failures are recorded per-entry ("unpack_error") and in the
    manifest's issue list instead of aborting the whole unpack.
    *source_root*, when given, is used to record a root-relative path.
    """
    data = archive_path.read_bytes()
    parsed = parse_rsli(data, source=str(archive_path))

    out_dir.mkdir(parents=True, exist_ok=True)
    entries_dir = out_dir / "entries"
    entries_dir.mkdir(parents=True, exist_ok=True)

    manifest: dict[str, Any] = {
        "format": "RsLi",
        "source_path": str(archive_path),
        "source_relative_path": str(archive_path.relative_to(source_root)) if source_root else str(archive_path),
        "source_size": len(data),
        "header_raw_hex": parsed["header_raw_hex"],
        "header": parsed["header"],
        "entries": [],
        "issues": list(parsed["issues"]),
        "trailer": parsed["trailer"],
        "source_sha256": sha256_hex(data),
    }

    for entry in parsed["entries"]:
        begin = int(entry["effective_data_offset"])
        end = begin + int(entry["packed_size"])
        packed = data[begin:end]
        # The index prefix keeps same-named entries from colliding on disk.
        base = safe_component(entry["name"], fallback=f"entry_{entry['index']:05d}")
        packed_name = f"{entry['index']:05d}__{base}__packed.bin"
        (entries_dir / packed_name).write_bytes(packed)

        manifest_entry = dict(entry)
        manifest_entry["packed_file"] = f"entries/{packed_name}"
        manifest_entry["packed_file_size"] = len(packed)
        manifest_entry["packed_sha256"] = sha256_hex(packed)

        # Best-effort decode: unsupported/corrupt entries are reported,
        # not fatal.
        try:
            unpacked = decode_rsli_payload(
                packed=packed,
                method=int(entry["method"]),
                sort_to_original=int(entry["sort_to_original"]),
                unpacked_size=int(entry["unpacked_size"]),
            )
            unpacked_name = f"{entry['index']:05d}__{base}__unpacked.bin"
            (entries_dir / unpacked_name).write_bytes(unpacked)
            manifest_entry["unpacked_file"] = f"entries/{unpacked_name}"
            manifest_entry["unpacked_sha256"] = sha256_hex(unpacked)
        except ArchiveFormatError as exc:
            manifest_entry["unpack_error"] = str(exc)
            manifest["issues"].append(
                f"entry {entry['index']}: cannot decode method 0x{entry['method']:03X}: {exc}"
            )

        manifest["entries"].append(manifest_entry)

    dump_json(out_dir / "manifest.json", manifest)
    return manifest
|
||||
|
||||
|
||||
def _pack_i16(value: int) -> int:
|
||||
if not (-32768 <= int(value) <= 32767):
|
||||
raise ArchiveFormatError(f"int16 overflow: {value}")
|
||||
return int(value)
|
||||
|
||||
|
||||
def pack_rsli_manifest(manifest_path: Path, out_file: Path) -> bytes:
    """Rebuild an RsLi archive byte-for-byte from an unpack manifest.

    Reads manifest.json (produced by the RsLi unpacker), reassembles the
    32-byte header, the XOR-encrypted 32-byte-per-entry table, every entry's
    packed payload at its original offset, and the optional 6-byte trailer.
    Writes the result to *out_file* and returns the bytes.

    Raises ArchiveFormatError on any structural inconsistency.
    """
    manifest = load_json(manifest_path)
    if manifest.get("format") != "RsLi":
        raise ArchiveFormatError(f"{manifest_path}: not an RsLi manifest")

    entries = manifest["entries"]
    count = len(entries)

    # Header: start from the raw captured bytes, then re-stamp the fields the
    # packer owns (magic, entry count at +4, seed at +20) so edits to the
    # manifest stay consistent with the emitted archive.
    header_raw = bytes.fromhex(manifest["header_raw_hex"])
    if len(header_raw) != 32:
        raise ArchiveFormatError(f"{manifest_path}: header_raw_hex must be 32 bytes")
    header = bytearray(header_raw)
    header[:4] = MAGIC_RSLI
    struct.pack_into("<h", header, 4, count)
    seed = int(manifest["header"]["seed"])
    struct.pack_into("<I", header, 20, seed)

    rows = bytearray()
    packed_chunks: list[tuple[dict[str, Any], bytes]] = []

    for entry in entries:
        # Per-entry payloads live next to the manifest, as written by unpack.
        packed_path = manifest_path.parent / entry["packed_file"]
        packed = packed_path.read_bytes()
        declared_size = int(entry["packed_size"])
        # A shorter payload is tolerated (the remainder stays zero/overlapped);
        # a longer one cannot fit the declared table slot.
        if len(packed) > declared_size:
            raise ArchiveFormatError(
                f"{packed_path}: packed size {len(packed)} > manifest packed_size {declared_size}"
            )

        data_offset = int(entry["data_offset"])
        packed_chunks.append((entry, packed))

        # One 32-byte table row: 12-byte name, 4 reserved bytes, then
        # <hhIII> = flags, sort_to_original, unpacked_size, offset, size.
        row = bytearray(32)
        name_raw = bytes.fromhex(entry["name_raw_hex"])
        reserved_raw = bytes.fromhex(entry["reserved_raw_hex"])
        if len(name_raw) != 12 or len(reserved_raw) != 4:
            raise ArchiveFormatError(
                f"entry {entry['index']}: invalid name/reserved raw length"
            )
        row[0:12] = name_raw
        row[12:16] = reserved_raw
        struct.pack_into(
            "<hhIII",
            row,
            16,
            _pack_i16(int(entry["flags_signed"])),
            _pack_i16(int(entry["sort_to_original"])),
            int(entry["unpacked_size"]),
            data_offset,
            declared_size,
        )
        rows.extend(row)

    # The on-disk table is XOR-obfuscated with the low 16 bits of the seed.
    encrypted_table = xor_stream(bytes(rows), seed & 0xFFFF)
    trailer = manifest.get("trailer", {})
    trailer_raw = b""
    if trailer.get("present"):
        raw_hex = trailer.get("raw_hex", "")
        trailer_raw = bytes.fromhex(raw_hex)
        if len(trailer_raw) != 6:
            raise ArchiveFormatError("trailer raw length must be 6 bytes")

    # Output size: prefer the recorded source_size (minus trailer) so the
    # roundtrip is byte-exact; otherwise derive it from the furthest entry end.
    source_size = manifest.get("source_size")
    table_end = 32 + count * 32
    if source_size is not None:
        pre_trailer_size = int(source_size) - len(trailer_raw)
        if pre_trailer_size < table_end:
            raise ArchiveFormatError(
                f"invalid source_size={source_size}: smaller than header+table"
            )
    else:
        pre_trailer_size = table_end
        for entry, packed in packed_chunks:
            pre_trailer_size = max(
                pre_trailer_size, int(entry["data_offset"]) + len(packed)
            )

    out = bytearray(pre_trailer_size)
    out[0:32] = header
    out[32:table_end] = encrypted_table
    # Shadow map of already-written positions, used to detect conflicting
    # overlaps while still allowing entries that legitimately share bytes.
    occupied = bytearray(pre_trailer_size)
    occupied[0:table_end] = b"\x01" * table_end

    for entry, packed in packed_chunks:
        base_offset = int(entry["data_offset"])
        for index, byte in enumerate(packed):
            pos = base_offset + index
            if pos >= pre_trailer_size:
                raise ArchiveFormatError(
                    f"entry {entry['index']}: data write at {pos} beyond output size {pre_trailer_size}"
                )
            # Overlap is only an error if the overlapping bytes disagree.
            if occupied[pos] and out[pos] != byte:
                raise ArchiveFormatError(
                    f"entry {entry['index']}: overlapping packed data conflict at offset {pos}"
                )
            out[pos] = byte
            occupied[pos] = 1

    out.extend(trailer_raw)
    # Final byte-exactness check against the recorded original size.
    if source_size is not None and len(out) != int(source_size):
        raise ArchiveFormatError(
            f"packed size {len(out)} != source_size {source_size} from manifest"
        )

    out_file.parent.mkdir(parents=True, exist_ok=True)
    out_file.write_bytes(out)
    return bytes(out)
|
||||
|
||||
|
||||
def cmd_scan(args: argparse.Namespace) -> int:
    """CLI handler: list archives found under --input, optionally as JSON."""
    root = Path(args.input).resolve()
    archives = scan_archives(root)
    if args.json:
        print(json.dumps(archives, ensure_ascii=False, indent=2))
        return 0
    print(f"Found {len(archives)} archive(s) in {root}")
    for item in archives:
        print(f"{item['type']:4} {item['size']:10d} {item['relative_path']}")
    return 0
|
||||
|
||||
|
||||
def cmd_nres_unpack(args: argparse.Namespace) -> int:
    """CLI handler: unpack a single NRes archive and summarize the result."""
    archive = Path(args.archive).resolve()
    destination = Path(args.output).resolve()
    manifest = unpack_nres_file(archive, destination)

    print(f"NRes unpacked: {archive}")
    print(f"Manifest: {destination / 'manifest.json'}")
    print(f"Entries : {len(manifest['entries'])}")
    issues = manifest["issues"]
    if issues:
        print("Issues:")
        for issue in issues:
            print(f"- {issue}")
    return 0
|
||||
|
||||
|
||||
def cmd_nres_pack(args: argparse.Namespace) -> int:
    """CLI handler: repack an NRes archive from an unpack manifest."""
    packed = pack_nres_manifest(Path(args.manifest).resolve(), Path(args.output).resolve())
    out_file = Path(args.output).resolve()
    print(f"NRes packed: {out_file} ({len(packed)} bytes, sha256={sha256_hex(packed)})")
    return 0
|
||||
|
||||
|
||||
def cmd_rsli_unpack(args: argparse.Namespace) -> int:
    """CLI handler: unpack a single RsLi archive and summarize the result."""
    archive = Path(args.archive).resolve()
    destination = Path(args.output).resolve()
    manifest = unpack_rsli_file(archive, destination)

    print(f"RsLi unpacked: {archive}")
    print(f"Manifest: {destination / 'manifest.json'}")
    print(f"Entries : {len(manifest['entries'])}")
    issues = manifest["issues"]
    if issues:
        print("Issues:")
        for issue in issues:
            print(f"- {issue}")
    return 0
|
||||
|
||||
|
||||
def cmd_rsli_pack(args: argparse.Namespace) -> int:
    """CLI handler: repack an RsLi archive from an unpack manifest."""
    out_file = Path(args.output).resolve()
    packed = pack_rsli_manifest(Path(args.manifest).resolve(), out_file)
    print(f"RsLi packed: {out_file} ({len(packed)} bytes, sha256={sha256_hex(packed)})")
    return 0
|
||||
|
||||
|
||||
def cmd_validate(args: argparse.Namespace) -> int:
    """CLI handler: unpack -> repack -> byte-compare every archive under --input.

    For each NRes/RsLi archive found, unpacks it into the work directory,
    repacks from the manifest, and compares the repacked bytes against the
    original. Prints a summary, optionally dumps a JSON report, and returns
    non-zero on failures.
    """
    input_root = Path(args.input).resolve()
    archives = scan_archives(input_root)

    # Either use the caller-supplied workdir or a throwaway temp directory
    # (which is always removed in the finally block below).
    temp_created = False
    if args.workdir:
        workdir = Path(args.workdir).resolve()
        workdir.mkdir(parents=True, exist_ok=True)
    else:
        workdir = Path(tempfile.mkdtemp(prefix="nres-rsli-validate-"))
        temp_created = True

    report: dict[str, Any] = {
        "input_root": str(input_root),
        "workdir": str(workdir),
        "archives_total": len(archives),
        "results": [],
        "summary": {},
    }

    failures = 0
    try:
        for idx, item in enumerate(archives):
            rel = item["relative_path"]
            archive_path = input_root / rel
            # Unique, filesystem-safe per-archive marker for workdir paths.
            marker = f"{idx:04d}_{safe_component(rel, fallback='archive')}"
            unpack_dir = workdir / "unpacked" / marker
            repacked_file = workdir / "repacked" / f"{marker}.bin"
            try:
                if item["type"] == "nres":
                    manifest = unpack_nres_file(archive_path, unpack_dir, source_root=input_root)
                    repacked = pack_nres_manifest(unpack_dir / "manifest.json", repacked_file)
                elif item["type"] == "rsli":
                    manifest = unpack_rsli_file(archive_path, unpack_dir, source_root=input_root)
                    repacked = pack_rsli_manifest(unpack_dir / "manifest.json", repacked_file)
                else:
                    # Unknown type from the scanner: nothing to validate.
                    continue

                original = archive_path.read_bytes()
                match = original == repacked
                diff_offset, diff_desc = first_diff(original, repacked)
                issues = list(manifest.get("issues", []))
                result = {
                    "relative_path": rel,
                    "type": item["type"],
                    "size_original": len(original),
                    "size_repacked": len(repacked),
                    "sha256_original": sha256_hex(original),
                    "sha256_repacked": sha256_hex(repacked),
                    "match": match,
                    "first_diff_offset": diff_offset,
                    "first_diff": diff_desc,
                    "issues": issues,
                    "entries": len(manifest.get("entries", [])),
                    "error": None,
                }
            except Exception as exc:  # pylint: disable=broad-except
                # One broken archive must not abort the whole validation run;
                # record the error as a non-matching result instead.
                result = {
                    "relative_path": rel,
                    "type": item["type"],
                    "size_original": item["size"],
                    "size_repacked": None,
                    "sha256_original": None,
                    "sha256_repacked": None,
                    "match": False,
                    "first_diff_offset": None,
                    "first_diff": None,
                    "issues": [f"processing error: {exc}"],
                    "entries": None,
                    "error": str(exc),
                }

            report["results"].append(result)

            # NOTE(review): every mismatch increments `failures`, and
            # `failures > 0` alone forces exit code 1 below — this makes the
            # later `--fail-on-diff` check effectively dead code. Confirm
            # whether mismatches should only fail when --fail-on-diff is set.
            if not result["match"]:
                failures += 1
            if result["issues"] and args.fail_on_issues:
                failures += 1

        matches = sum(1 for row in report["results"] if row["match"])
        mismatches = len(report["results"]) - matches
        nres_count = sum(1 for row in report["results"] if row["type"] == "nres")
        rsli_count = sum(1 for row in report["results"] if row["type"] == "rsli")
        issues_total = sum(len(row["issues"]) for row in report["results"])
        report["summary"] = {
            "nres_count": nres_count,
            "rsli_count": rsli_count,
            "matches": matches,
            "mismatches": mismatches,
            "issues_total": issues_total,
        }

        if args.report:
            dump_json(Path(args.report).resolve(), report)

        print(f"Input root : {input_root}")
        print(f"Work dir : {workdir}")
        print(f"NRes archives : {nres_count}")
        print(f"RsLi archives : {rsli_count}")
        print(f"Roundtrip match: {matches}/{len(report['results'])}")
        print(f"Doc issues : {issues_total}")

        if mismatches:
            print("\nMismatches:")
            for row in report["results"]:
                if row["match"]:
                    continue
                print(
                    f"- {row['relative_path']} [{row['type']}] "
                    f"diff@{row['first_diff_offset']}: {row['first_diff']}"
                )

        if issues_total:
            print("\nIssues:")
            for row in report["results"]:
                if not row["issues"]:
                    continue
                print(f"- {row['relative_path']} [{row['type']}]")
                for issue in row["issues"]:
                    print(f" * {issue}")

    finally:
        # Temp dirs are always removed; an explicit workdir only on --cleanup.
        if temp_created or args.cleanup:
            shutil.rmtree(workdir, ignore_errors=True)

    if failures > 0:
        return 1
    if report["summary"].get("mismatches", 0) > 0 and args.fail_on_diff:
        return 1
    return 0
|
||||
|
||||
|
||||
def build_parser() -> argparse.ArgumentParser:
    """Construct the CLI: scan, nres/rsli unpack/pack, and validate."""
    parser = argparse.ArgumentParser(
        description="NRes/RsLi tools: scan, unpack, repack, and roundtrip validation."
    )
    sub = parser.add_subparsers(dest="command", required=True)

    scan = sub.add_parser("scan", help="Scan files by header signatures.")
    scan.add_argument("--input", required=True, help="Root directory to scan.")
    scan.add_argument("--json", action="store_true", help="Print JSON output.")
    scan.set_defaults(func=cmd_scan)

    # The four unpack/pack commands all take one input flag plus --output,
    # so they are declared table-driven.
    two_arg_commands = (
        ("nres-unpack", "Unpack a single NRes archive.",
         "--archive", "Path to NRes file.", "Output directory.", cmd_nres_unpack),
        ("nres-pack", "Pack NRes archive from manifest.",
         "--manifest", "Path to manifest.json.", "Output file path.", cmd_nres_pack),
        ("rsli-unpack", "Unpack a single RsLi archive.",
         "--archive", "Path to RsLi file.", "Output directory.", cmd_rsli_unpack),
        ("rsli-pack", "Pack RsLi archive from manifest.",
         "--manifest", "Path to manifest.json.", "Output file path.", cmd_rsli_pack),
    )
    for name, help_text, in_flag, in_help, out_help, handler in two_arg_commands:
        cmd = sub.add_parser(name, help=help_text)
        cmd.add_argument(in_flag, required=True, help=in_help)
        cmd.add_argument("--output", required=True, help=out_help)
        cmd.set_defaults(func=handler)

    validate = sub.add_parser(
        "validate",
        help="Scan all archives and run unpack->repack->byte-compare validation.",
    )
    validate.add_argument("--input", required=True, help="Root with game data files.")
    validate.add_argument(
        "--workdir",
        help="Working directory for temporary unpack/repack files. "
        "If omitted, a temporary directory is used and removed automatically.",
    )
    validate.add_argument("--report", help="Optional JSON report output path.")
    validate.add_argument(
        "--fail-on-diff",
        action="store_true",
        help="Return non-zero exit code if any byte mismatch exists.",
    )
    validate.add_argument(
        "--fail-on-issues",
        action="store_true",
        help="Return non-zero exit code if any spec issue was detected.",
    )
    validate.add_argument(
        "--cleanup",
        action="store_true",
        help="Remove --workdir after completion.",
    )
    validate.set_defaults(func=cmd_validate)

    return parser
|
||||
|
||||
|
||||
def main() -> int:
    """Parse arguments and dispatch to the selected subcommand handler."""
    namespace = build_parser().parse_args()
    return int(namespace.func(namespace))
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate the subcommand's exit status to the shell.
    raise SystemExit(main())
|
||||
204
tools/init_testdata.py
Normal file
204
tools/init_testdata.py
Normal file
@@ -0,0 +1,204 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Initialize test data folders by archive signatures.
|
||||
|
||||
The script scans all files in --input and copies matching archives into:
|
||||
--output/nres/<relative path>
|
||||
--output/rsli/<relative path>
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import shutil
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
MAGIC_NRES = b"NRes"
|
||||
MAGIC_RSLI = b"NL\x00\x01"
|
||||
|
||||
|
||||
def is_relative_to(path: Path, base: Path) -> bool:
    """Return True if *path* equals *base* or lies beneath it.

    Delegates to :meth:`pathlib.PurePath.is_relative_to` (available since
    Python 3.9; this file already requires 3.10+ for ``X | None`` syntax),
    which performs exactly the relative_to/ValueError dance the previous
    hand-rolled version did.
    """
    return path.is_relative_to(base)
|
||||
|
||||
|
||||
def detect_archive_type(path: Path) -> str | None:
    """Classify *path* by its 4-byte magic: 'nres', 'rsli', or None.

    Unreadable files are reported to stderr and treated as unrecognized.
    """
    try:
        with path.open("rb") as handle:
            magic = handle.read(4)
    except OSError as exc:
        print(f"[warn] cannot read {path}: {exc}", file=sys.stderr)
        return None

    for signature, archive_type in ((MAGIC_NRES, "nres"), (MAGIC_RSLI, "rsli")):
        if magic == signature:
            return archive_type
    return None
|
||||
|
||||
|
||||
def scan_archives(input_root: Path, excluded_root: Path | None) -> list[tuple[Path, str]]:
    """Recursively collect (path, archive_type) pairs under *input_root*.

    Files below *excluded_root* (if given) are skipped, as are files whose
    magic is not recognized by detect_archive_type().
    """
    matches: list[tuple[Path, str]] = []
    for candidate in sorted(input_root.rglob("*")):
        if not candidate.is_file():
            continue
        if excluded_root and is_relative_to(candidate.resolve(), excluded_root):
            continue
        kind = detect_archive_type(candidate)
        if kind:
            matches.append((candidate, kind))
    return matches
|
||||
|
||||
|
||||
def confirm_overwrite(path: Path) -> str:
    """Interactively ask whether to overwrite *path*.

    Returns one of "no", "yes", "all", "quit"; EOF on stdin maps to "quit".
    """
    prompt = (
        f"File exists: {path}\n"
        "Overwrite? [y]es / [n]o / [a]ll / [q]uit (default: n): "
    )
    # Accepted answers (empty input defaults to "no").
    replies = {
        "": "no", "n": "no", "no": "no",
        "y": "yes", "yes": "yes",
        "a": "all", "all": "all",
        "q": "quit", "quit": "quit",
    }
    while True:
        try:
            answer = input(prompt).strip().lower()
        except EOFError:
            return "quit"
        decision = replies.get(answer)
        if decision is not None:
            return decision
        print("Please answer with y, n, a, or q.")
|
||||
|
||||
|
||||
def copy_archives(
    archives: list[tuple[Path, str]],
    input_root: Path,
    output_root: Path,
    force: bool,
) -> int:
    """Copy detected archives into output_root/<type>/<relative path>.

    Prompts before overwriting existing files unless *force* is set (or the
    user answered "all"). Returns 0 on success, 2 on errors, 130 when the
    user aborts at a prompt.
    """
    # NOTE(review): files that are overwritten are counted in BOTH `copied`
    # and `overwritten`, and `overwritten` is incremented before the copy is
    # attempted — confirm this is the intended summary semantics.
    copied = 0
    skipped = 0
    overwritten = 0
    overwrite_all = force

    type_counts = {"nres": 0, "rsli": 0}
    for _, archive_type in archives:
        type_counts[archive_type] += 1

    print(
        f"Found archives: total={len(archives)}, "
        f"nres={type_counts['nres']}, rsli={type_counts['rsli']}"
    )

    for source, archive_type in archives:
        # Mirror the source layout under the per-type output subdirectory.
        rel_path = source.relative_to(input_root)
        destination = output_root / archive_type / rel_path
        destination.parent.mkdir(parents=True, exist_ok=True)

        if destination.exists():
            if destination.is_dir():
                print(
                    f"[error] destination is a directory, expected file: {destination}",
                    file=sys.stderr,
                )
                return 2

            if not overwrite_all:
                # Without a TTY we cannot prompt, so refuse rather than guess.
                if not sys.stdin.isatty():
                    print(
                        "[error] destination file exists but stdin is not interactive. "
                        "Use --force to overwrite without prompts.",
                        file=sys.stderr,
                    )
                    return 2

                decision = confirm_overwrite(destination)
                if decision == "quit":
                    print("Aborted by user.")
                    return 130
                if decision == "no":
                    skipped += 1
                    continue
                if decision == "all":
                    # Suppress further prompts for the rest of this run.
                    overwrite_all = True

            overwritten += 1

        try:
            # copy2 preserves metadata (timestamps, mode) alongside contents.
            shutil.copy2(source, destination)
        except OSError as exc:
            print(f"[error] failed to copy {source} -> {destination}: {exc}", file=sys.stderr)
            return 2
        copied += 1

    print(
        f"Done: copied={copied}, overwritten={overwritten}, skipped={skipped}, "
        f"output={output_root}"
    )
    return 0
|
||||
|
||||
|
||||
def build_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the test-data initializer."""
    parser = argparse.ArgumentParser(
        description="Initialize test data by scanning NRes/RsLi signatures."
    )
    parser.add_argument(
        "--input",
        required=True,
        help="Input directory to scan recursively.",
    )
    parser.add_argument(
        "--output",
        required=True,
        help="Output root directory (archives go to nres/ and rsli/ subdirs).",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Overwrite destination files without confirmation prompts.",
    )
    return parser
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: validate paths, scan for archives, and copy them."""
    args = build_parser().parse_args()

    def fail(message: str) -> int:
        # All fatal argument problems exit with status 2.
        print(message, file=sys.stderr)
        return 2

    input_root = Path(args.input)
    if not input_root.exists():
        return fail(f"[error] input directory does not exist: {input_root}")
    if not input_root.is_dir():
        return fail(f"[error] input path is not a directory: {input_root}")

    output_root = Path(args.output)
    if output_root.exists() and not output_root.is_dir():
        return fail(f"[error] output path exists and is not a directory: {output_root}")

    input_resolved = input_root.resolve()
    output_resolved = output_root.resolve()
    if input_resolved == output_resolved:
        return fail("[error] input and output directories must be different.")

    # If the output tree lives inside the input tree, exclude it from the
    # scan so freshly copied archives are not picked up again.
    excluded_root: Path | None = None
    if is_relative_to(output_resolved, input_resolved):
        excluded_root = output_resolved
        print(f"Notice: output is inside input, skipping scan under: {excluded_root}")

    archives = scan_archives(input_root, excluded_root)

    output_root.mkdir(parents=True, exist_ok=True)
    return copy_archives(archives, input_root, output_root, force=args.force)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate the initializer's exit status to the shell.
    raise SystemExit(main())
|
||||
1000
tools/msh_doc_validator.py
Normal file
1000
tools/msh_doc_validator.py
Normal file
File diff suppressed because it is too large
Load Diff
357
tools/msh_export_obj.py
Normal file
357
tools/msh_export_obj.py
Normal file
@@ -0,0 +1,357 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Export NGI MSH geometry to Wavefront OBJ.
|
||||
|
||||
The exporter is intended for inspection/debugging and uses the same
|
||||
batch/slot selection logic as msh_preview_renderer.py.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import math
|
||||
import struct
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import archive_roundtrip_validator as arv
|
||||
|
||||
MAGIC_NRES = b"NRes"
|
||||
|
||||
|
||||
def _entry_payload(blob: bytes, entry: dict[str, Any]) -> bytes:
|
||||
start = int(entry["data_offset"])
|
||||
end = start + int(entry["size"])
|
||||
return blob[start:end]
|
||||
|
||||
|
||||
def _parse_nres(blob: bytes, source: str) -> dict[str, Any]:
    """Parse *blob* as an NRes archive, failing fast on a wrong magic."""
    if not blob.startswith(MAGIC_NRES):
        raise RuntimeError(f"{source}: not an NRes payload")
    return arv.parse_nres(blob, source=source)
|
||||
|
||||
|
||||
def _by_type(entries: list[dict[str, Any]]) -> dict[int, list[dict[str, Any]]]:
|
||||
out: dict[int, list[dict[str, Any]]] = {}
|
||||
for row in entries:
|
||||
out.setdefault(int(row["type_id"]), []).append(row)
|
||||
return out
|
||||
|
||||
|
||||
def _get_single(by_type: dict[int, list[dict[str, Any]]], type_id: int, label: str) -> dict[str, Any]:
|
||||
rows = by_type.get(type_id, [])
|
||||
if not rows:
|
||||
raise RuntimeError(f"missing resource type {type_id} ({label})")
|
||||
return rows[0]
|
||||
|
||||
|
||||
def _pick_model_payload(archive_path: Path, model_name: str | None) -> tuple[bytes, str]:
    """Locate the model payload to export and return (payload_bytes, label).

    Selection order: if the archive contains ``*.msh`` entries, pick the one
    matching *model_name* (exact match first, then prefix match; first entry
    when no name was given). Otherwise, if the blob itself carries the
    resource types a model needs (1, 2, 3, 6, 13), treat the whole file as a
    direct model payload. Raises RuntimeError when nothing matches.
    """
    root_blob = archive_path.read_bytes()
    parsed = _parse_nres(root_blob, str(archive_path))

    msh_entries = [row for row in parsed["entries"] if str(row["name"]).lower().endswith(".msh")]
    if msh_entries:
        chosen: dict[str, Any] | None = None
        if model_name:
            # Case-insensitive exact match wins; fall back to prefix match.
            model_l = model_name.lower()
            for row in msh_entries:
                name_l = str(row["name"]).lower()
                if name_l == model_l:
                    chosen = row
                    break
            if chosen is None:
                for row in msh_entries:
                    if str(row["name"]).lower().startswith(model_l):
                        chosen = row
                        break
        else:
            chosen = msh_entries[0]

        if chosen is None:
            # Show up to a dozen candidates to keep the error actionable.
            names = ", ".join(str(row["name"]) for row in msh_entries[:12])
            raise RuntimeError(
                f"model '{model_name}' not found in {archive_path}. Available: {names}"
            )
        return _entry_payload(root_blob, chosen), str(chosen["name"])

    by_type = _by_type(parsed["entries"])
    if all(k in by_type for k in (1, 2, 3, 6, 13)):
        return root_blob, archive_path.name

    raise RuntimeError(
        f"{archive_path} does not contain .msh entries and does not look like a direct model payload"
    )
|
||||
|
||||
|
||||
def _extract_geometry(
    model_blob: bytes,
    *,
    lod: int,
    group: int,
    max_faces: int,
    all_batches: bool,
) -> tuple[list[tuple[float, float, float]], list[tuple[int, int, int]], dict[str, int]]:
    """Decode MSH geometry from an NRes model payload.

    Returns (positions, triangle faces, meta counters). Faces are selected
    via the node -> slot -> batch tables for the given *lod*/*group*, unless
    *all_batches* forces every batch; output is capped at *max_faces*.
    Raises RuntimeError on structural inconsistencies.
    """
    parsed = _parse_nres(model_blob, "<model>")
    by_type = _by_type(parsed["entries"])

    # Resource types used here (names follow the project's Res<N> convention):
    # 1=node table, 2=slot table, 3=positions, 6=indices, 13=batches.
    res1 = _get_single(by_type, 1, "Res1")
    res2 = _get_single(by_type, 2, "Res2")
    res3 = _get_single(by_type, 3, "Res3")
    res6 = _get_single(by_type, 6, "Res6")
    res13 = _get_single(by_type, 13, "Res13")

    # Res3: tightly packed little-endian float3 vertex positions.
    pos_blob = _entry_payload(model_blob, res3)
    if len(pos_blob) % 12 != 0:
        raise RuntimeError(f"Res3 size is not divisible by 12: {len(pos_blob)}")
    vertex_count = len(pos_blob) // 12
    positions = [struct.unpack_from("<3f", pos_blob, i * 12) for i in range(vertex_count)]

    # Res6: 16-bit triangle indices.
    idx_blob = _entry_payload(model_blob, res6)
    if len(idx_blob) % 2 != 0:
        raise RuntimeError(f"Res6 size is not divisible by 2: {len(idx_blob)}")
    index_count = len(idx_blob) // 2
    indices = list(struct.unpack_from(f"<{index_count}H", idx_blob, 0))

    # Res13: 20-byte batch records; only the index count (+8), index start
    # (+10) and base vertex (+16) fields are consumed here.
    batch_blob = _entry_payload(model_blob, res13)
    if len(batch_blob) % 20 != 0:
        raise RuntimeError(f"Res13 size is not divisible by 20: {len(batch_blob)}")
    batch_count = len(batch_blob) // 20
    batches: list[tuple[int, int, int, int]] = []
    for i in range(batch_count):
        off = i * 20
        idx_count = struct.unpack_from("<H", batch_blob, off + 8)[0]
        idx_start = struct.unpack_from("<I", batch_blob, off + 10)[0]
        base_vertex = struct.unpack_from("<I", batch_blob, off + 16)[0]
        batches.append((idx_count, idx_start, base_vertex, i))

    # Res2: 0x8C-byte header followed by 68-byte slot records whose first
    # four uint16s are (tri_start, tri_count, batch_start, batch_count).
    res2_blob = _entry_payload(model_blob, res2)
    if len(res2_blob) < 0x8C:
        raise RuntimeError("Res2 is too small (< 0x8C)")
    slot_blob = res2_blob[0x8C:]
    if len(slot_blob) % 68 != 0:
        raise RuntimeError(f"Res2 slot area is not divisible by 68: {len(slot_blob)}")
    slot_count = len(slot_blob) // 68
    slots: list[tuple[int, int, int, int]] = []
    for i in range(slot_count):
        off = i * 68
        tri_start, tri_count, batch_start, slot_batch_count = struct.unpack_from("<4H", slot_blob, off)
        slots.append((tri_start, tri_count, batch_start, slot_batch_count))

    # Res1 nodes carry a 15-entry (3 LODs x 5 groups) matrix of slot indices
    # at +8; 0xFFFF marks "no slot". attr1/attr3 supply count and stride.
    res1_blob = _entry_payload(model_blob, res1)
    node_stride = int(res1["attr3"])
    node_count = int(res1["attr1"])
    node_slot_indices: list[int] = []
    if not all_batches and node_stride >= 38 and len(res1_blob) >= node_count * node_stride:
        if lod < 0 or lod > 2:
            raise RuntimeError(f"lod must be 0..2 (got {lod})")
        if group < 0 or group > 4:
            raise RuntimeError(f"group must be 0..4 (got {group})")
        matrix_index = lod * 5 + group
        for n in range(node_count):
            off = n * node_stride + 8 + matrix_index * 2
            slot_idx = struct.unpack_from("<H", res1_blob, off)[0]
            if slot_idx == 0xFFFF:
                continue
            # Out-of-range slot indices are silently ignored (best effort).
            if slot_idx >= slot_count:
                continue
            node_slot_indices.append(slot_idx)

    faces: list[tuple[int, int, int]] = []
    used_batches = 0
    used_slots = 0

    def append_batch(batch_idx: int) -> None:
        # Emit one batch's triangles, skipping malformed or out-of-range data
        # and stopping once the face cap is reached.
        nonlocal used_batches
        if batch_idx < 0 or batch_idx >= len(batches):
            return
        idx_count, idx_start, base_vertex, _ = batches[batch_idx]
        if idx_count < 3:
            return
        end = idx_start + idx_count
        if end > len(indices):
            return
        used_batches += 1
        tri_count = idx_count // 3
        for t in range(tri_count):
            i0 = indices[idx_start + t * 3 + 0] + base_vertex
            i1 = indices[idx_start + t * 3 + 1] + base_vertex
            i2 = indices[idx_start + t * 3 + 2] + base_vertex
            if i0 >= vertex_count or i1 >= vertex_count or i2 >= vertex_count:
                continue
            faces.append((i0, i1, i2))
            if len(faces) >= max_faces:
                return

    if node_slot_indices:
        # LOD/group-driven selection: walk each referenced slot's batch range.
        for slot_idx in node_slot_indices:
            if len(faces) >= max_faces:
                break
            _tri_start, _tri_count, batch_start, slot_batch_count = slots[slot_idx]
            used_slots += 1
            for bi in range(batch_start, batch_start + slot_batch_count):
                append_batch(bi)
                if len(faces) >= max_faces:
                    break
    else:
        # Fallback (or --all-batches): dump every batch.
        for bi in range(batch_count):
            append_batch(bi)
            if len(faces) >= max_faces:
                break

    if not faces:
        raise RuntimeError("no faces selected for export")

    meta = {
        "vertex_count": vertex_count,
        "index_count": index_count,
        "batch_count": batch_count,
        "slot_count": slot_count,
        "node_count": node_count,
        "used_slots": used_slots,
        "used_batches": used_batches,
        "face_count": len(faces),
    }
    return positions, faces, meta
|
||||
|
||||
|
||||
def _compute_vertex_normals(
|
||||
positions: list[tuple[float, float, float]],
|
||||
faces: list[tuple[int, int, int]],
|
||||
) -> list[tuple[float, float, float]]:
|
||||
acc = [[0.0, 0.0, 0.0] for _ in positions]
|
||||
for i0, i1, i2 in faces:
|
||||
p0 = positions[i0]
|
||||
p1 = positions[i1]
|
||||
p2 = positions[i2]
|
||||
ux = p1[0] - p0[0]
|
||||
uy = p1[1] - p0[1]
|
||||
uz = p1[2] - p0[2]
|
||||
vx = p2[0] - p0[0]
|
||||
vy = p2[1] - p0[1]
|
||||
vz = p2[2] - p0[2]
|
||||
nx = uy * vz - uz * vy
|
||||
ny = uz * vx - ux * vz
|
||||
nz = ux * vy - uy * vx
|
||||
acc[i0][0] += nx
|
||||
acc[i0][1] += ny
|
||||
acc[i0][2] += nz
|
||||
acc[i1][0] += nx
|
||||
acc[i1][1] += ny
|
||||
acc[i1][2] += nz
|
||||
acc[i2][0] += nx
|
||||
acc[i2][1] += ny
|
||||
acc[i2][2] += nz
|
||||
|
||||
normals: list[tuple[float, float, float]] = []
|
||||
for nx, ny, nz in acc:
|
||||
ln = math.sqrt(nx * nx + ny * ny + nz * nz)
|
||||
if ln <= 1e-12:
|
||||
normals.append((0.0, 1.0, 0.0))
|
||||
else:
|
||||
normals.append((nx / ln, ny / ln, nz / ln))
|
||||
return normals
|
||||
|
||||
|
||||
def _write_obj(
    output_path: Path,
    object_name: str,
    positions: list[tuple[float, float, float]],
    faces: list[tuple[int, int, int]],
) -> None:
    """Serialize positions/faces (plus derived normals) as a Wavefront OBJ."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    normals = _compute_vertex_normals(positions, faces)

    lines = ["# Exported by msh_export_obj.py\n", f"o {object_name}\n"]
    for x, y, z in positions:
        lines.append(f"v {x:.9g} {y:.9g} {z:.9g}\n")
    for nx, ny, nz in normals:
        lines.append(f"vn {nx:.9g} {ny:.9g} {nz:.9g}\n")
    # OBJ indices are 1-based; the vertex index doubles as the normal index.
    for i0, i1, i2 in faces:
        a, b, c = i0 + 1, i1 + 1, i2 + 1
        lines.append(f"f {a}//{a} {b}//{b} {c}//{c}\n")

    with output_path.open("w", encoding="utf-8", newline="\n") as out:
        out.writelines(lines)
|
||||
|
||||
|
||||
def cmd_list_models(args: argparse.Namespace) -> int:
    """CLI handler: print every *.msh entry name found in the archive."""
    archive_path = Path(args.archive).resolve()
    parsed = _parse_nres(archive_path.read_bytes(), str(archive_path))
    msh_rows = [row for row in parsed["entries"] if str(row["name"]).lower().endswith(".msh")]
    print(f"Archive: {archive_path}")
    print(f"MSH entries: {len(msh_rows)}")
    for row in msh_rows:
        print(f"- {row['name']}")
    return 0
|
||||
|
||||
|
||||
def cmd_export(args: argparse.Namespace) -> int:
    """CLI handler: extract one model's geometry and write it as OBJ."""
    archive_path = Path(args.archive).resolve()
    output_path = Path(args.output).resolve()

    model_blob, model_label = _pick_model_payload(archive_path, args.model)
    positions, faces, meta = _extract_geometry(
        model_blob,
        lod=int(args.lod),
        group=int(args.group),
        max_faces=int(args.max_faces),
        all_batches=bool(args.all_batches),
    )
    # Fall back to a fixed object name when the label has no usable stem.
    obj_name = Path(model_label).stem or "msh_model"
    _write_obj(output_path, obj_name, positions, faces)

    print(f"Exported model : {model_label}")
    print(f"Output OBJ : {output_path}")
    print(f"Object name : {obj_name}")
    print(
        "Geometry : "
        f"vertices={meta['vertex_count']}, faces={meta['face_count']}, "
        f"batches={meta['used_batches']}/{meta['batch_count']}, slots={meta['used_slots']}/{meta['slot_count']}"
    )
    print(
        "Mode : "
        f"lod={args.lod}, group={args.group}, all_batches={bool(args.all_batches)}"
    )
    return 0
|
||||
|
||||
|
||||
def build_parser() -> argparse.ArgumentParser:
    """Build the CLI: list-models and export subcommands."""
    parser = argparse.ArgumentParser(
        description="Export NGI MSH geometry to Wavefront OBJ."
    )
    sub = parser.add_subparsers(dest="command", required=True)

    list_models = sub.add_parser("list-models", help="List .msh entries in an NRes archive.")
    list_models.add_argument("--archive", required=True, help="Path to archive (e.g. animals.rlb).")
    list_models.set_defaults(func=cmd_list_models)

    export = sub.add_parser("export", help="Export one model to OBJ.")
    export.add_argument("--archive", required=True, help="Path to NRes archive or direct model payload.")
    export.add_argument(
        "--model",
        help="Model entry name (*.msh) inside archive. If omitted, first .msh is used.",
    )
    export.add_argument("--output", required=True, help="Output .obj path.")
    export.add_argument("--lod", type=int, default=0, help="LOD index 0..2 (default: 0).")
    export.add_argument("--group", type=int, default=0, help="Group index 0..4 (default: 0).")
    export.add_argument("--max-faces", type=int, default=120000, help="Face limit (default: 120000).")
    export.add_argument(
        "--all-batches",
        action="store_true",
        help="Ignore slot matrix selection and export all batches.",
    )
    export.set_defaults(func=cmd_export)

    return parser
|
||||
|
||||
|
||||
def main() -> int:
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    args = build_parser().parse_args()
    return int(args.func(args))
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
481
tools/msh_preview_renderer.py
Normal file
481
tools/msh_preview_renderer.py
Normal file
@@ -0,0 +1,481 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Primitive software renderer for NGI MSH models.
|
||||
|
||||
Output format: binary PPM (P6), no external dependencies.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import math
|
||||
import struct
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import archive_roundtrip_validator as arv
|
||||
|
||||
# Four-byte magic tag at the start of every NRes container.
MAGIC_NRES = b"NRes"
|
||||
|
||||
|
||||
def _entry_payload(blob: bytes, entry: dict[str, Any]) -> bytes:
|
||||
start = int(entry["data_offset"])
|
||||
end = start + int(entry["size"])
|
||||
return blob[start:end]
|
||||
|
||||
|
||||
def _parse_nres(blob: bytes, source: str) -> dict[str, Any]:
    """Check the NRes magic bytes, then delegate parsing to the shared validator."""
    if not blob.startswith(MAGIC_NRES):
        raise RuntimeError(f"{source}: not an NRes payload")
    return arv.parse_nres(blob, source=source)
|
||||
|
||||
|
||||
def _by_type(entries: list[dict[str, Any]]) -> dict[int, list[dict[str, Any]]]:
|
||||
out: dict[int, list[dict[str, Any]]] = {}
|
||||
for row in entries:
|
||||
out.setdefault(int(row["type_id"]), []).append(row)
|
||||
return out
|
||||
|
||||
|
||||
def _pick_model_payload(archive_path: Path, model_name: str | None) -> tuple[bytes, str]:
    """Resolve the model payload to render.

    If the archive contains .msh entries, pick one (exact name match first,
    then prefix match, or the first entry when no name was given) and return
    its raw bytes plus its entry name. Otherwise, if the file itself looks
    like a direct model payload (has resource types 1, 2, 3, 6 and 13),
    return the whole file. Raises RuntimeError when neither applies.
    """
    root_blob = archive_path.read_bytes()
    parsed = _parse_nres(root_blob, str(archive_path))

    msh_entries = [
        entry for entry in parsed["entries"]
        if str(entry["name"]).lower().endswith(".msh")
    ]
    if msh_entries:
        if not model_name:
            chosen = msh_entries[0]
        else:
            wanted = model_name.lower()
            # Exact (case-insensitive) match takes priority over prefix match.
            chosen = next(
                (e for e in msh_entries if str(e["name"]).lower() == wanted),
                None,
            )
            if chosen is None:
                chosen = next(
                    (e for e in msh_entries if str(e["name"]).lower().startswith(wanted)),
                    None,
                )
        if chosen is None:
            names = ", ".join(str(e["name"]) for e in msh_entries[:12])
            raise RuntimeError(
                f"model '{model_name}' not found in {archive_path}. Available: {names}"
            )
        return _entry_payload(root_blob, chosen), str(chosen["name"])

    # Fallback: treat file itself as a model NRes payload.
    by_type = _by_type(parsed["entries"])
    if all(type_id in by_type for type_id in (1, 2, 3, 6, 13)):
        return root_blob, archive_path.name

    raise RuntimeError(
        f"{archive_path} does not contain .msh entries and does not look like a direct model payload"
    )
|
||||
|
||||
|
||||
def _get_single(by_type: dict[int, list[dict[str, Any]]], type_id: int, label: str) -> dict[str, Any]:
|
||||
rows = by_type.get(type_id, [])
|
||||
if not rows:
|
||||
raise RuntimeError(f"missing resource type {type_id} ({label})")
|
||||
return rows[0]
|
||||
|
||||
|
||||
def _extract_geometry(
    model_blob: bytes,
    *,
    lod: int,
    group: int,
    max_faces: int,
) -> tuple[list[tuple[float, float, float]], list[tuple[int, int, int]], dict[str, int]]:
    """Decode triangle geometry from a model NRes payload.

    Returns (positions, faces, meta):
      * positions — (x, y, z) float triples decoded from Res3 (12 bytes/vertex),
      * faces     — (i0, i1, i2) vertex-index triples assembled from Res6/Res13,
      * meta      — counters describing what was parsed and selected.

    Selection model (reverse-engineered; strides/offsets below are assumed
    from observed data — TODO confirm against format notes): each Res1 node
    carries a 15-entry slot matrix (3 LODs x 5 groups, 2 bytes per cell at
    node offset 8); the (lod, group) cell names a Res2 slot, each slot names
    a contiguous run of Res13 batches, and each batch is a range of Res6
    indices. If the slot matrix is unusable, all batches are drawn.

    Raises RuntimeError on malformed resource sizes, out-of-range lod/group,
    or when no faces end up selected.
    """
    parsed = _parse_nres(model_blob, "<model>")
    by_type = _by_type(parsed["entries"])

    res1 = _get_single(by_type, 1, "Res1")
    res2 = _get_single(by_type, 2, "Res2")
    res3 = _get_single(by_type, 3, "Res3")
    res6 = _get_single(by_type, 6, "Res6")
    res13 = _get_single(by_type, 13, "Res13")

    # Positions: Res3 is a flat array of little-endian float32 (x, y, z).
    pos_blob = _entry_payload(model_blob, res3)
    if len(pos_blob) % 12 != 0:
        raise RuntimeError(f"Res3 size is not divisible by 12: {len(pos_blob)}")
    vertex_count = len(pos_blob) // 12
    positions = [struct.unpack_from("<3f", pos_blob, i * 12) for i in range(vertex_count)]

    # Indices: Res6 is a flat array of little-endian uint16.
    idx_blob = _entry_payload(model_blob, res6)
    if len(idx_blob) % 2 != 0:
        raise RuntimeError(f"Res6 size is not divisible by 2: {len(idx_blob)}")
    index_count = len(idx_blob) // 2
    indices = list(struct.unpack_from(f"<{index_count}H", idx_blob, 0))

    # Batches: Res13 records are 20 bytes each (fields read at unaligned offsets).
    batch_blob = _entry_payload(model_blob, res13)
    if len(batch_blob) % 20 != 0:
        raise RuntimeError(f"Res13 size is not divisible by 20: {len(batch_blob)}")
    batch_count = len(batch_blob) // 20
    batches: list[tuple[int, int, int, int]] = []
    for i in range(batch_count):
        off = i * 20
        # Keep only fields used by renderer:
        # indexCount, indexStart, baseVertex
        idx_count = struct.unpack_from("<H", batch_blob, off + 8)[0]
        idx_start = struct.unpack_from("<I", batch_blob, off + 10)[0]
        base_vertex = struct.unpack_from("<I", batch_blob, off + 16)[0]
        batches.append((idx_count, idx_start, base_vertex, i))

    # Slots: Res2 has a 0x8C-byte fixed header, then 68-byte slot records.
    res2_blob = _entry_payload(model_blob, res2)
    if len(res2_blob) < 0x8C:
        raise RuntimeError("Res2 is too small (< 0x8C)")
    slot_blob = res2_blob[0x8C:]
    if len(slot_blob) % 68 != 0:
        raise RuntimeError(f"Res2 slot area is not divisible by 68: {len(slot_blob)}")
    slot_count = len(slot_blob) // 68
    slots: list[tuple[int, int, int, int]] = []
    for i in range(slot_count):
        off = i * 68
        tri_start, tri_count, batch_start, slot_batch_count = struct.unpack_from("<4H", slot_blob, off)
        slots.append((tri_start, tri_count, batch_start, slot_batch_count))

    # Nodes / slot matrix (Res1 node stride and count come from entry attributes).
    res1_blob = _entry_payload(model_blob, res1)
    node_stride = int(res1["attr3"])
    node_count = int(res1["attr1"])
    node_slot_indices: list[int] = []
    # Stride must be big enough to hold the 15-cell matrix (8 + 15*2 = 38 bytes).
    if node_stride >= 38 and len(res1_blob) >= node_count * node_stride:
        if lod < 0 or lod > 2:
            raise RuntimeError(f"lod must be 0..2 (got {lod})")
        if group < 0 or group > 4:
            raise RuntimeError(f"group must be 0..4 (got {group})")
        matrix_index = lod * 5 + group
        for n in range(node_count):
            off = n * node_stride + 8 + matrix_index * 2
            slot_idx = struct.unpack_from("<H", res1_blob, off)[0]
            # 0xFFFF marks "no slot" for this node/cell; out-of-range is skipped.
            if slot_idx == 0xFFFF:
                continue
            if slot_idx >= slot_count:
                continue
            node_slot_indices.append(slot_idx)

    # Build triangle list.
    faces: list[tuple[int, int, int]] = []
    used_batches = 0
    used_slots = 0

    # Append one batch's triangles, silently skipping malformed/out-of-range
    # data, and stopping once the max_faces budget is exhausted.
    def append_batch(batch_idx: int) -> None:
        nonlocal used_batches
        if batch_idx < 0 or batch_idx >= len(batches):
            return
        idx_count, idx_start, base_vertex, _ = batches[batch_idx]
        if idx_count < 3:
            return
        end = idx_start + idx_count
        if end > len(indices):
            return
        used_batches += 1
        tri_count = idx_count // 3
        for t in range(tri_count):
            i0 = indices[idx_start + t * 3 + 0] + base_vertex
            i1 = indices[idx_start + t * 3 + 1] + base_vertex
            i2 = indices[idx_start + t * 3 + 2] + base_vertex
            if i0 >= vertex_count or i1 >= vertex_count or i2 >= vertex_count:
                continue
            faces.append((i0, i1, i2))
            if len(faces) >= max_faces:
                return

    if node_slot_indices:
        for slot_idx in node_slot_indices:
            if len(faces) >= max_faces:
                break
            _tri_start, _tri_count, batch_start, slot_batch_count = slots[slot_idx]
            used_slots += 1
            for bi in range(batch_start, batch_start + slot_batch_count):
                append_batch(bi)
                if len(faces) >= max_faces:
                    break
    else:
        # Fallback if slot matrix is unavailable: draw all batches.
        for bi in range(batch_count):
            append_batch(bi)
            if len(faces) >= max_faces:
                break

    meta = {
        "vertex_count": vertex_count,
        "index_count": index_count,
        "batch_count": batch_count,
        "slot_count": slot_count,
        "node_count": node_count,
        "used_slots": used_slots,
        "used_batches": used_batches,
        "face_count": len(faces),
    }
    if not faces:
        raise RuntimeError("no faces selected for rendering")
    return positions, faces, meta
|
||||
|
||||
|
||||
def _write_ppm(path: Path, width: int, height: int, rgb: bytearray) -> None:
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with path.open("wb") as handle:
|
||||
handle.write(f"P6\n{width} {height}\n255\n".encode("ascii"))
|
||||
handle.write(rgb)
|
||||
|
||||
|
||||
def _render_software(
    positions: list[tuple[float, float, float]],
    faces: list[tuple[int, int, int]],
    *,
    width: int,
    height: int,
    yaw_deg: float,
    pitch_deg: float,
    wireframe: bool,
) -> bytearray:
    """Rasterize the mesh into a row-major RGB framebuffer (3 bytes/pixel).

    Camera: orbits the mesh's bounding-box center at distance 3.2 * radius,
    rotated by yaw (about Y) then pitch (about X), projected with a simple
    perspective divide. Shading is flat per-face from a fixed light
    direction; hidden surfaces are resolved with a float z-buffer. When
    `wireframe` is set, triangle edges are overdrawn with Bresenham lines.

    Assumes `positions` is non-empty and all face indices are in range
    (guaranteed by _extract_geometry).
    """
    # Bounding box -> center and radius, used to frame the camera.
    xs = [p[0] for p in positions]
    ys = [p[1] for p in positions]
    zs = [p[2] for p in positions]
    cx = (min(xs) + max(xs)) * 0.5
    cy = (min(ys) + max(ys)) * 0.5
    cz = (min(zs) + max(zs)) * 0.5
    span = max(max(xs) - min(xs), max(ys) - min(ys), max(zs) - min(zs))
    radius = max(span * 0.5, 1e-3)  # floor avoids div-by-zero on degenerate meshes

    yaw = math.radians(yaw_deg)
    pitch = math.radians(pitch_deg)
    cyaw = math.cos(yaw)
    syaw = math.sin(yaw)
    cpitch = math.cos(pitch)
    spitch = math.sin(pitch)

    camera_dist = radius * 3.2
    scale = min(width, height) * 0.95

    # Transform all vertices once.
    vx: list[float] = []
    vy: list[float] = []
    vz: list[float] = []
    sx: list[float] = []
    sy: list[float] = []
    for x, y, z in positions:
        x0 = x - cx
        y0 = y - cy
        z0 = z - cz
        # Yaw about Y, then pitch about X, then push away from the camera.
        x1 = cyaw * x0 + syaw * z0
        z1 = -syaw * x0 + cyaw * z0
        y2 = cpitch * y0 - spitch * z1
        z2 = spitch * y0 + cpitch * z1 + camera_dist
        if z2 < 1e-3:
            z2 = 1e-3  # clamp to keep the perspective divide finite
        vx.append(x1)
        vy.append(y2)
        vz.append(z2)
        # Perspective projection to screen space (y flipped: screen y grows down).
        sx.append(width * 0.5 + (x1 / z2) * scale)
        sy.append(height * 0.5 - (y2 / z2) * scale)

    # Dark blue-gray background; z-buffer starts at +inf (nothing drawn yet).
    rgb = bytearray([16, 18, 24] * (width * height))
    zbuf = [float("inf")] * (width * height)
    light_dir = (0.35, 0.45, 1.0)
    l_len = math.sqrt(light_dir[0] ** 2 + light_dir[1] ** 2 + light_dir[2] ** 2)
    light = (light_dir[0] / l_len, light_dir[1] / l_len, light_dir[2] / l_len)

    # Signed area of (a, b, p): edge function used for point-in-triangle tests.
    def edge(ax: float, ay: float, bx: float, by: float, px: float, py: float) -> float:
        return (px - ax) * (by - ay) - (py - ay) * (bx - ax)

    for i0, i1, i2 in faces:
        x0 = sx[i0]
        y0 = sy[i0]
        x1 = sx[i1]
        y1 = sy[i1]
        x2 = sx[i2]
        y2 = sy[i2]
        area = edge(x0, y0, x1, y1, x2, y2)
        if area == 0.0:
            continue  # degenerate (zero-area) triangle

        # Shading from camera-space normal.
        ux = vx[i1] - vx[i0]
        uy = vy[i1] - vy[i0]
        uz = vz[i1] - vz[i0]
        wx = vx[i2] - vx[i0]
        wy = vy[i2] - vy[i0]
        wz = vz[i2] - vz[i0]
        nx = uy * wz - uz * wy
        ny = uz * wx - ux * wz
        nz = ux * wy - uy * wx
        n_len = math.sqrt(nx * nx + ny * ny + nz * nz)
        if n_len > 0.0:
            nx /= n_len
            ny /= n_len
            nz /= n_len
        intensity = nx * light[0] + ny * light[1] + nz * light[2]
        if intensity < 0.0:
            intensity = 0.0
        # 45 = ambient floor; slight blue tint on the third channel.
        shade = int(45 + 200 * intensity)
        color = (shade, shade, min(255, shade + 18))

        # Screen-space bounding box, clipped to the image.
        minx = int(max(0, math.floor(min(x0, x1, x2))))
        maxx = int(min(width - 1, math.ceil(max(x0, x1, x2))))
        miny = int(max(0, math.floor(min(y0, y1, y2))))
        maxy = int(min(height - 1, math.ceil(max(y0, y1, y2))))
        if minx > maxx or miny > maxy:
            continue  # triangle is entirely off-screen

        z0 = vz[i0]
        z1 = vz[i1]
        z2 = vz[i2]

        for py in range(miny, maxy + 1):
            fy = py + 0.5  # sample at pixel centers
            row = py * width
            for px in range(minx, maxx + 1):
                fx = px + 0.5
                w0 = edge(x1, y1, x2, y2, fx, fy)
                w1 = edge(x2, y2, x0, y0, fx, fy)
                w2 = edge(x0, y0, x1, y1, fx, fy)
                # Accept either winding: all edge values must share area's sign.
                if area > 0:
                    if w0 < 0 or w1 < 0 or w2 < 0:
                        continue
                else:
                    if w0 > 0 or w1 > 0 or w2 > 0:
                        continue
                inv_area = 1.0 / area
                bz0 = w0 * inv_area
                bz1 = w1 * inv_area
                bz2 = w2 * inv_area
                # Barycentric-interpolated camera-space depth for the z-test.
                depth = bz0 * z0 + bz1 * z1 + bz2 * z2
                idx = row + px
                if depth >= zbuf[idx]:
                    continue
                zbuf[idx] = depth
                p = idx * 3
                rgb[p + 0] = color[0]
                rgb[p + 1] = color[1]
                rgb[p + 2] = color[2]

    if wireframe:
        # Bresenham line rasterizer; draws over the shaded image (no z-test).
        def draw_line(xa: float, ya: float, xb: float, yb: float) -> None:
            x0i = int(round(xa))
            y0i = int(round(ya))
            x1i = int(round(xb))
            y1i = int(round(yb))
            dx = abs(x1i - x0i)
            sx_step = 1 if x0i < x1i else -1
            dy = -abs(y1i - y0i)
            sy_step = 1 if y0i < y1i else -1
            err = dx + dy
            x = x0i
            y = y0i
            while True:
                if 0 <= x < width and 0 <= y < height:
                    p = (y * width + x) * 3
                    rgb[p + 0] = 240
                    rgb[p + 1] = 245
                    rgb[p + 2] = 255
                if x == x1i and y == y1i:
                    break
                e2 = 2 * err
                if e2 >= dy:
                    err += dy
                    x += sx_step
                if e2 <= dx:
                    err += dx
                    y += sy_step

        for i0, i1, i2 in faces:
            draw_line(sx[i0], sy[i0], sx[i1], sy[i1])
            draw_line(sx[i1], sy[i1], sx[i2], sy[i2])
            draw_line(sx[i2], sy[i2], sx[i0], sy[i0])

    return rgb
|
||||
|
||||
|
||||
def cmd_list_models(args: argparse.Namespace) -> int:
    """CLI handler: print every .msh entry contained in the archive."""
    archive = Path(args.archive).resolve()
    parsed = _parse_nres(archive.read_bytes(), str(archive))
    msh_rows = [
        row for row in parsed["entries"]
        if str(row["name"]).lower().endswith(".msh")
    ]
    print(f"Archive: {archive}")
    print(f"MSH entries: {len(msh_rows)}")
    for row in msh_rows:
        print(f"- {row['name']}")
    return 0
|
||||
|
||||
|
||||
def cmd_render(args: argparse.Namespace) -> int:
    """CLI handler: rasterize one model into a binary PPM image."""
    archive = Path(args.archive).resolve()
    image_path = Path(args.output).resolve()
    width = int(args.width)
    height = int(args.height)

    payload, label = _pick_model_payload(archive, args.model)
    positions, faces, meta = _extract_geometry(
        payload,
        lod=int(args.lod),
        group=int(args.group),
        max_faces=int(args.max_faces),
    )
    pixels = _render_software(
        positions,
        faces,
        width=width,
        height=height,
        yaw_deg=float(args.yaw),
        pitch_deg=float(args.pitch),
        wireframe=bool(args.wireframe),
    )
    _write_ppm(image_path, width, height, pixels)

    print(f"Rendered model: {label}")
    print(f"Output : {image_path}")
    geometry_summary = (
        f"vertices={meta['vertex_count']}, faces={meta['face_count']}, "
        f"batches={meta['used_batches']}/{meta['batch_count']}, "
        f"slots={meta['used_slots']}/{meta['slot_count']}"
    )
    print("Geometry : " + geometry_summary)
    print(f"Mode : lod={args.lod}, group={args.group}, wireframe={bool(args.wireframe)}")
    return 0
|
||||
|
||||
|
||||
def build_parser() -> argparse.ArgumentParser:
    """Build the CLI with two subcommands: `list-models` and `render`."""
    parser = argparse.ArgumentParser(
        description="Primitive NGI MSH renderer (software, dependency-free)."
    )
    sub = parser.add_subparsers(dest="command", required=True)

    lister = sub.add_parser("list-models", help="List .msh entries in an NRes archive.")
    lister.add_argument("--archive", required=True, help="Path to archive (e.g. animals.rlb).")
    lister.set_defaults(func=cmd_list_models)

    renderer = sub.add_parser("render", help="Render one model to PPM image.")
    renderer.add_argument(
        "--archive", required=True, help="Path to NRes archive or direct model payload."
    )
    renderer.add_argument(
        "--model",
        help="Model entry name (*.msh) inside archive. If omitted, first .msh is used.",
    )
    renderer.add_argument("--output", required=True, help="Output .ppm file path.")
    renderer.add_argument("--lod", type=int, default=0, help="LOD index 0..2 (default: 0).")
    renderer.add_argument("--group", type=int, default=0, help="Group index 0..4 (default: 0).")
    renderer.add_argument(
        "--max-faces", type=int, default=120000, help="Face limit (default: 120000)."
    )
    renderer.add_argument("--width", type=int, default=1280, help="Image width (default: 1280).")
    renderer.add_argument("--height", type=int, default=720, help="Image height (default: 720).")
    renderer.add_argument(
        "--yaw", type=float, default=35.0, help="Yaw angle in degrees (default: 35)."
    )
    renderer.add_argument(
        "--pitch", type=float, default=18.0, help="Pitch angle in degrees (default: 18)."
    )
    renderer.add_argument(
        "--wireframe", action="store_true", help="Draw white wireframe overlay."
    )
    renderer.set_defaults(func=cmd_render)

    return parser
|
||||
|
||||
|
||||
def main() -> int:
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    args = build_parser().parse_args()
    return int(args.func(args))
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
@@ -1,9 +0,0 @@
|
||||
[package]
|
||||
name = "unpacker"
|
||||
version = "0.1.1"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
serde = { version = "1.0.160", features = ["derive"] }
|
||||
serde_json = "1.0.96"
|
||||
@@ -1,41 +0,0 @@
|
||||
# NRes Game Resource Unpacker
|
||||
|
||||
At the moment, this is a demonstration of the NRes game resource unpacking algorithm in action.
|
||||
It unpacks 100% of the NRes game resources for the game "Parkan: Iron Strategy".
|
||||
The unpacked resources can be packed again using the [packer](../packer) utility and replace the original game files.
|
||||
|
||||
__Attention!__
|
||||
This is a test version of the utility.
|
||||
It overwrites existing files without asking.
|
||||
|
||||
## Building
|
||||
|
||||
To build the tools, you need to run the following command in the root directory:
|
||||
|
||||
```bash
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
## Running
|
||||
|
||||
You can run the utility with the following command:
|
||||
|
||||
```bash
|
||||
./target/release/unpacker /path/to/file.ex /path/to/output
|
||||
```
|
||||
|
||||
- `/path/to/file.ex`: This is the file containing the game resources that will be unpacked.
|
||||
- `/path/to/output`: This is the directory where the unpacked files will be placed.
|
||||
|
||||
## How it Works
|
||||
|
||||
The structure describing the packed game resources is not fully understood yet.
|
||||
Therefore, the utility saves unpacked files in the format `file_name.file_index` because some files have the same name.
|
||||
|
||||
Additionally, an `index.json` file is created, which is important for re-packing the files.
|
||||
This file lists all the fields that game resources have in their packed form.
|
||||
It is essential to preserve the file index for the game to function correctly, as the game engine looks for the necessary files by index.
|
||||
|
||||
Files can be replaced and packed back using the [packer](../packer).
|
||||
The newly obtained game resource files are correctly processed by the game engine.
|
||||
For example, sounds and 3D models of warbots' weapons were successfully replaced.
|
||||
@@ -1,124 +0,0 @@
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
|
||||
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// The 16-byte header at the start of an NRes archive.
#[derive(Serialize, Deserialize, Debug)]
pub struct FileHeader {
    /// Total archive size in bytes (header bytes 12..16); checked against
    /// the on-disk file length.
    pub size: u32,
    /// Number of entries in the trailing file list (header bytes 8..12).
    pub total: u32,
    /// Magic value (header bytes 0..4); must equal 1936020046, which is the
    /// ASCII bytes "NRes" read as a little-endian u32.
    pub type1: u32,
    /// Second header field (bytes 4..8); must equal 256 — presumably a
    /// format version marker (TODO confirm).
    pub type2: u32,
}
|
||||
|
||||
/// One record from the 64-byte-per-entry file list stored at the end of
/// the archive. Byte ranges refer to offsets within each 64-byte record.
#[derive(Serialize, Deserialize, Debug)]
pub struct ListElement {
    /// Four-byte extension tag (bytes 0..4), NUL-trimmed.
    pub extension: String,
    /// Entry index (bytes 60..64); appended to the output file name so
    /// entries with duplicate names stay distinct.
    pub index: u32,
    /// Entry name (bytes 20..56), NUL-trimmed.
    pub name: String,
    /// Absolute offset of the payload within the archive (bytes 56..60).
    /// Not written to index.json (skip_serializing).
    #[serde(skip_serializing)]
    pub position: u32,
    /// Payload size in bytes (bytes 12..16). Not written to index.json.
    #[serde(skip_serializing)]
    pub size: u32,
    /// Unparsed field at bytes 4..8 — meaning unknown; preserved for repacking.
    pub unknown0: u32,
    /// Unparsed field at bytes 8..12 — meaning unknown; preserved for repacking.
    pub unknown1: u32,
    /// Unparsed field at bytes 16..20 — meaning unknown; preserved for repacking.
    pub unknown2: u32,
}
|
||||
|
||||
fn main() {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
|
||||
let input = &args[1];
|
||||
let output = &args[2];
|
||||
|
||||
unpack(String::from(input), String::from(output));
|
||||
}
|
||||
|
||||
/// Unpack every entry of the NRes archive at `input` into the `output`
/// directory (as `name.index` files) and write an `index.json` manifest of
/// all entries for later repacking.
///
/// Panics on any I/O failure, on a bad magic/size header, or on a malformed
/// file list. Existing output files are overwritten without prompting.
fn unpack(input: String, output: String) {
    let file = File::open(input).unwrap();
    let metadata = file.metadata().unwrap();

    let mut reader = BufReader::new(file);
    let mut list: Vec<ListElement> = Vec::new();

    // Read the 16-byte file header.
    let mut header_buffer = [0u8; 16];
    reader.seek(SeekFrom::Start(0)).unwrap();
    reader.read_exact(&mut header_buffer).unwrap();

    let file_header = FileHeader {
        size: LittleEndian::read_u32(&header_buffer[12..16]),
        total: LittleEndian::read_u32(&header_buffer[8..12]),
        type1: LittleEndian::read_u32(&header_buffer[0..4]),
        type2: LittleEndian::read_u32(&header_buffer[4..8]),
    };

    // 1936020046 is "NRes" as a little-endian u32; 256 is the expected marker.
    if file_header.type1 != 1936020046 || file_header.type2 != 256 {
        panic!("this isn't NRes file");
    }

    // The header's size field must match the actual file length.
    if metadata.len() != file_header.size as u64 {
        panic!("incorrect size")
    }

    // Read the file list: `total` records of 64 bytes each, located at the
    // very end of the archive.
    let list_files_start_position = file_header.size - (file_header.total * 64);
    let list_files_size = file_header.total * 64;

    let mut list_buffer = vec![0u8; list_files_size as usize];
    reader
        .seek(SeekFrom::Start(list_files_start_position as u64))
        .unwrap();
    reader.read_exact(&mut list_buffer).unwrap();

    if list_buffer.len() % 64 != 0 {
        panic!("invalid files list")
    }

    // Decode each 64-byte record into a ListElement (see struct docs for
    // the byte layout).
    for i in 0..(list_buffer.len() / 64) {
        let from = i * 64;
        let to = (i * 64) + 64;
        let chunk: &[u8] = &list_buffer[from..to];

        let element_list = ListElement {
            extension: String::from_utf8_lossy(&chunk[0..4])
                .trim_matches(char::from(0))
                .to_string(),
            index: LittleEndian::read_u32(&chunk[60..64]),
            name: String::from_utf8_lossy(&chunk[20..56])
                .trim_matches(char::from(0))
                .to_string(),
            position: LittleEndian::read_u32(&chunk[56..60]),
            size: LittleEndian::read_u32(&chunk[12..16]),
            unknown0: LittleEndian::read_u32(&chunk[4..8]),
            unknown1: LittleEndian::read_u32(&chunk[8..12]),
            unknown2: LittleEndian::read_u32(&chunk[16..20]),
        };

        list.push(element_list)
    }

    // Extract each entry's payload into the output directory. The file is
    // named `name.index` because several entries can share the same name.
    for element in &list {
        let path = format!("{}/{}.{}", output, element.name, element.index);
        let mut file = File::create(path).unwrap();

        let mut file_buffer = vec![0u8; element.size as usize];
        reader
            .seek(SeekFrom::Start(element.position as u64))
            .unwrap();
        reader.read_exact(&mut file_buffer).unwrap();

        file.write_all(&file_buffer).unwrap();
        file_buffer.clear();
    }

    // Dump the entry list to index.json — required by the companion packer
    // to rebuild the archive with the original indices and fields.
    let path = format!("{}/{}", output, "index.json");
    let file = File::create(path).unwrap();
    let mut writer = BufWriter::new(file);
    serde_json::to_writer_pretty(&mut writer, &list).unwrap();
    writer.flush().unwrap();
}
|
||||
1
vendor/addr2line/.cargo-checksum.json
vendored
1
vendor/addr2line/.cargo-checksum.json
vendored
@@ -1 +0,0 @@
|
||||
{"files":{"CHANGELOG.md":"ef9fa958318e442f1da7d204494cefec75c144aa6d5d5c93b0a5d6fcdf4ef6c6","Cargo.lock":"20b23c454fc3127f08a1bcd2864bbf029793759e6411fba24d44d8f4b7831ad0","Cargo.toml":"d0f15fde73d42bdf00e93f960dff908447225bede9364cb1659e44740a536c04","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e99d88d232bf57d70f0fb87f6b496d44b6653f99f8a63d250a54c61ea4bcde40","README.md":"76d28502bd2e83f6a9e3576bd45e9a7fe5308448c4b5384b0d249515b5f67a5c","bench.plot.r":"6a5d7a4d36ed6b3d9919be703a479bef47698bf947818b483ff03951df2d4e01","benchmark.sh":"b35f89b1ca2c1dc0476cdd07f0284b72d41920d1c7b6054072f50ffba296d78d","coverage.sh":"4677e81922d08a82e83068a911717a247c66af12e559f37b78b6be3337ac9f07","examples/addr2line.rs":"3c5eb5a6726634df6cf53e4d67ee9f90c9ac09838303947f45c3bea1e84548b5","rustfmt.toml":"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b","src/builtin_split_dwarf_loader.rs":"dc6979de81b35f82e97275e6be27ec61f3c4225ea10574a9e031813e00185174","src/function.rs":"68f047e0c78afe18ad165db255c8254ee74c35cd6df0cc07e400252981f661ed","src/lazy.rs":"0bf23f7098f1902f181e43c2ffa82a3f86df2c0dbcb9bc0ebce6a0168dd8b060","src/lib.rs":"9d6531f71fd138d31cc7596db9ab234198d0895a21ea9cb116434c19ec78b660","tests/correctness.rs":"4081f8019535305e3aa254c6a4e1436272dd873f9717c687ca0e66ea8d5871ed","tests/output_equivalence.rs":"b2cd7c59fa55808a2e66e9fe7f160d846867e3ecefe22c22a818f822c3c41f23","tests/parse.rs":"c2f7362e4679c1b4803b12ec6e8dca6da96aed7273fd210a857524a4182c30e7"},"package":"8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"}
|
||||
336
vendor/addr2line/CHANGELOG.md
vendored
336
vendor/addr2line/CHANGELOG.md
vendored
@@ -1,336 +0,0 @@
|
||||
# `addr2line` Change Log
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.21.0 (2023/08/12)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli`, `object`, and `fallible-iterator` dependencies.
|
||||
|
||||
### Changed
|
||||
|
||||
* The minimum supported rust version is 1.65.0.
|
||||
|
||||
* Store boxed slices instead of `Vec` objects in `Context`.
|
||||
[#278](https://github.com/gimli-rs/addr2line/pull/278)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.20.0 (2023/04/15)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* The minimum supported rust version is 1.58.0.
|
||||
|
||||
* Changed `Context::find_frames` to return `LookupResult`.
|
||||
Use `LookupResult::skip_all_loads` to obtain the result without loading split DWARF.
|
||||
[#260](https://github.com/gimli-rs/addr2line/pull/260)
|
||||
|
||||
* Replaced `Context::find_dwarf_unit` with `Context::find_dwarf_and_unit`.
|
||||
[#260](https://github.com/gimli-rs/addr2line/pull/260)
|
||||
|
||||
* Updated `object` dependency.
|
||||
|
||||
### Changed
|
||||
|
||||
* Fix handling of file index 0 for DWARF 5.
|
||||
[#264](https://github.com/gimli-rs/addr2line/pull/264)
|
||||
|
||||
### Added
|
||||
|
||||
* Added types and methods to support loading split DWARF:
|
||||
`LookupResult`, `SplitDwarfLoad`, `SplitDwarfLoader`, `Context::preload_units`.
|
||||
[#260](https://github.com/gimli-rs/addr2line/pull/260)
|
||||
[#262](https://github.com/gimli-rs/addr2line/pull/262)
|
||||
[#263](https://github.com/gimli-rs/addr2line/pull/263)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.19.0 (2022/11/24)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.18.0 (2022/07/16)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `object` dependency.
|
||||
|
||||
### Changed
|
||||
|
||||
* Fixed handling of relative path for `DW_AT_comp_dir`.
|
||||
[#239](https://github.com/gimli-rs/addr2line/pull/239)
|
||||
|
||||
* Fixed handling of `DW_FORM_addrx` for DWARF 5 support.
|
||||
[#243](https://github.com/gimli-rs/addr2line/pull/243)
|
||||
|
||||
* Fixed handling of units that are missing range information.
|
||||
[#249](https://github.com/gimli-rs/addr2line/pull/249)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.17.0 (2021/10/24)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
### Changed
|
||||
|
||||
* Use `skip_attributes` to improve performance.
|
||||
[#236](https://github.com/gimli-rs/addr2line/pull/236)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.16.0 (2021/07/26)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.15.2 (2021/06/04)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Allow `Context` to be `Send`.
|
||||
[#219](https://github.com/gimli-rs/addr2line/pull/219)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.15.1 (2021/05/02)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Don't ignore aranges with address 0.
|
||||
[#217](https://github.com/gimli-rs/addr2line/pull/217)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.15.0 (2021/05/02)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
[#215](https://github.com/gimli-rs/addr2line/pull/215)
|
||||
|
||||
* Added `debug_aranges` parameter to `Context::from_sections`.
|
||||
[#200](https://github.com/gimli-rs/addr2line/pull/200)
|
||||
|
||||
### Added
|
||||
|
||||
* Added `.debug_aranges` support.
|
||||
[#200](https://github.com/gimli-rs/addr2line/pull/200)
|
||||
|
||||
* Added supplementary object file support.
|
||||
[#208](https://github.com/gimli-rs/addr2line/pull/208)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fixed handling of Windows paths in locations.
|
||||
[#209](https://github.com/gimli-rs/addr2line/pull/209)
|
||||
|
||||
* examples/addr2line: Flush stdout after each response.
|
||||
[#210](https://github.com/gimli-rs/addr2line/pull/210)
|
||||
|
||||
* examples/addr2line: Avoid copying every section.
|
||||
[#213](https://github.com/gimli-rs/addr2line/pull/213)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.14.1 (2020/12/31)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fix location lookup for skeleton units.
|
||||
[#201](https://github.com/gimli-rs/addr2line/pull/201)
|
||||
|
||||
### Added
|
||||
|
||||
* Added `Context::find_location_range`.
|
||||
[#196](https://github.com/gimli-rs/addr2line/pull/196)
|
||||
[#199](https://github.com/gimli-rs/addr2line/pull/199)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.14.0 (2020/10/27)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Handle units that only have line information.
|
||||
[#188](https://github.com/gimli-rs/addr2line/pull/188)
|
||||
|
||||
* Handle DWARF units with version <= 4 and no `DW_AT_name`.
|
||||
[#191](https://github.com/gimli-rs/addr2line/pull/191)
|
||||
|
||||
* Fix handling of `DW_FORM_ref_addr`.
|
||||
[#193](https://github.com/gimli-rs/addr2line/pull/193)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.13.0 (2020/07/07)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
* Added `rustc-dep-of-std` feature.
|
||||
[#166](https://github.com/gimli-rs/addr2line/pull/166)
|
||||
|
||||
### Changed
|
||||
|
||||
* Improve performance by parsing function contents lazily.
|
||||
[#178](https://github.com/gimli-rs/addr2line/pull/178)
|
||||
|
||||
* Don't skip `.debug_info` and `.debug_line` entries with a zero address.
|
||||
[#182](https://github.com/gimli-rs/addr2line/pull/182)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.12.2 (2020/06/21)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Avoid linear search for `DW_FORM_ref_addr`.
|
||||
[#175](https://github.com/gimli-rs/addr2line/pull/175)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.12.1 (2020/05/19)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Handle units with overlapping address ranges.
|
||||
[#163](https://github.com/gimli-rs/addr2line/pull/163)
|
||||
|
||||
* Don't assert for functions with overlapping address ranges.
|
||||
[#168](https://github.com/gimli-rs/addr2line/pull/168)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.12.0 (2020/05/12)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
* Added more optional features: `smallvec` and `fallible-iterator`.
|
||||
[#160](https://github.com/gimli-rs/addr2line/pull/160)
|
||||
|
||||
### Added
|
||||
|
||||
* Added `Context::dwarf` and `Context::find_dwarf_unit`.
|
||||
[#159](https://github.com/gimli-rs/addr2line/pull/159)
|
||||
|
||||
### Changed
|
||||
|
||||
* Removed `lazycell` dependency.
|
||||
[#160](https://github.com/gimli-rs/addr2line/pull/160)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.11.0 (2020/01/11)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Updated `gimli` and `object` dependencies.
|
||||
|
||||
* [#130](https://github.com/gimli-rs/addr2line/pull/130)
|
||||
Changed `Location::file` from `Option<String>` to `Option<&str>`.
|
||||
This required adding lifetime parameters to `Location` and other structs that
|
||||
contain it.
|
||||
|
||||
* [#152](https://github.com/gimli-rs/addr2line/pull/152)
|
||||
Changed `Location::line` and `Location::column` from `Option<u64>`to `Option<u32>`.
|
||||
|
||||
* [#156](https://github.com/gimli-rs/addr2line/pull/156)
|
||||
Deleted `alloc` feature, and fixed `no-std` builds with stable rust.
|
||||
Removed default `Reader` parameter for `Context`, and added `ObjectContext` instead.
|
||||
|
||||
### Added
|
||||
|
||||
* [#134](https://github.com/gimli-rs/addr2line/pull/134)
|
||||
Added `Context::from_dwarf`.
|
||||
|
||||
### Changed
|
||||
|
||||
* [#133](https://github.com/gimli-rs/addr2line/pull/133)
|
||||
Fixed handling of units that can't be parsed.
|
||||
|
||||
* [#155](https://github.com/gimli-rs/addr2line/pull/155)
|
||||
Fixed `addr2line` output to match binutils.
|
||||
|
||||
* [#130](https://github.com/gimli-rs/addr2line/pull/130)
|
||||
Improved `.debug_line` parsing performance.
|
||||
|
||||
* [#148](https://github.com/gimli-rs/addr2line/pull/148)
|
||||
[#150](https://github.com/gimli-rs/addr2line/pull/150)
|
||||
[#151](https://github.com/gimli-rs/addr2line/pull/151)
|
||||
[#152](https://github.com/gimli-rs/addr2line/pull/152)
|
||||
Improved `.debug_info` parsing performance.
|
||||
|
||||
* [#137](https://github.com/gimli-rs/addr2line/pull/137)
|
||||
[#138](https://github.com/gimli-rs/addr2line/pull/138)
|
||||
[#139](https://github.com/gimli-rs/addr2line/pull/139)
|
||||
[#140](https://github.com/gimli-rs/addr2line/pull/140)
|
||||
[#146](https://github.com/gimli-rs/addr2line/pull/146)
|
||||
Improved benchmarks.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.10.0 (2019/07/07)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* [#127](https://github.com/gimli-rs/addr2line/pull/127)
|
||||
Update `gimli`.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.9.0 (2019/05/02)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* [#121](https://github.com/gimli-rs/addr2line/pull/121)
|
||||
Update `gimli`, `object`, and `fallible-iterator` dependencies.
|
||||
|
||||
### Added
|
||||
|
||||
* [#121](https://github.com/gimli-rs/addr2line/pull/121)
|
||||
Reexport `gimli`, `object`, and `fallible-iterator`.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.8.0 (2019/02/06)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* [#107](https://github.com/gimli-rs/addr2line/pull/107)
|
||||
Update `object` dependency to 0.11. This is part of the public API.
|
||||
|
||||
### Added
|
||||
|
||||
* [#101](https://github.com/gimli-rs/addr2line/pull/101)
|
||||
Add `object` feature (enabled by default). Disable this feature to remove
|
||||
the `object` dependency and `Context::new` API.
|
||||
|
||||
* [#102](https://github.com/gimli-rs/addr2line/pull/102)
|
||||
Add `std` (enabled by default) and `alloc` features.
|
||||
|
||||
### Changed
|
||||
|
||||
* [#108](https://github.com/gimli-rs/addr2line/issues/108)
|
||||
`demangle` no longer outputs the hash for rust symbols.
|
||||
|
||||
* [#109](https://github.com/gimli-rs/addr2line/issues/109)
|
||||
Set default `R` for `Context<R>`.
|
||||
704
vendor/addr2line/Cargo.lock
generated
vendored
704
vendor/addr2line/Cargo.lock
generated
vendored
@@ -1,704 +0,0 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.19.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
|
||||
dependencies = [
|
||||
"gimli 0.27.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.21.0"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"clap",
|
||||
"compiler_builtins",
|
||||
"cpp_demangle",
|
||||
"fallible-iterator",
|
||||
"findshlibs",
|
||||
"gimli 0.28.0",
|
||||
"libtest-mimic",
|
||||
"memmap2",
|
||||
"object 0.32.0",
|
||||
"rustc-demangle",
|
||||
"rustc-std-workspace-alloc",
|
||||
"rustc-std-workspace-core",
|
||||
"smallvec",
|
||||
"typed-arena",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
|
||||
|
||||
[[package]]
|
||||
name = "anstream"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
|
||||
dependencies = [
|
||||
"anstyle",
|
||||
"anstyle-parse",
|
||||
"anstyle-query",
|
||||
"anstyle-wincon",
|
||||
"colorchoice",
|
||||
"is-terminal",
|
||||
"utf8parse",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstyle"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-parse"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
|
||||
dependencies = [
|
||||
"utf8parse",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-query"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
|
||||
dependencies = [
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-wincon"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
|
||||
dependencies = [
|
||||
"anstyle",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.67"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca"
|
||||
dependencies = [
|
||||
"addr2line 0.19.0",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"miniz_oxide",
|
||||
"object 0.30.3",
|
||||
"rustc-demangle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "2.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.79"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
"clap_derive",
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_builder"
|
||||
version = "4.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
"clap_lex",
|
||||
"strsim",
|
||||
"terminal_size",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_derive"
|
||||
version = "4.3.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.15",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
|
||||
|
||||
[[package]]
|
||||
name = "colorchoice"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
|
||||
|
||||
[[package]]
|
||||
name = "compiler_builtins"
|
||||
version = "0.1.91"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "571298a3cce7e2afbd3d61abb91a18667d5ab25993ec577a88ee8ac45f00cc3a"
|
||||
|
||||
[[package]]
|
||||
name = "cpp_demangle"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f"
|
||||
dependencies = [
|
||||
"errno-dragonfly",
|
||||
"libc",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno-dragonfly"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fallible-iterator"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
|
||||
|
||||
[[package]]
|
||||
name = "findshlibs"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "flate2"
|
||||
version = "1.0.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
|
||||
dependencies = [
|
||||
"crc32fast",
|
||||
"miniz_oxide",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.27.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.28.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
|
||||
dependencies = [
|
||||
"compiler_builtins",
|
||||
"fallible-iterator",
|
||||
"rustc-std-workspace-alloc",
|
||||
"rustc-std-workspace-core",
|
||||
"stable_deref_trait",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
|
||||
|
||||
[[package]]
|
||||
name = "io-lifetimes"
|
||||
version = "1.0.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
|
||||
dependencies = [
|
||||
"hermit-abi 0.3.2",
|
||||
"libc",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "is-terminal"
|
||||
version = "0.4.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
|
||||
dependencies = [
|
||||
"hermit-abi 0.3.2",
|
||||
"rustix 0.38.8",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.147"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
|
||||
|
||||
[[package]]
|
||||
name = "libtest-mimic"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"termcolor",
|
||||
"threadpool",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.3.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
|
||||
|
||||
[[package]]
|
||||
name = "memmap2"
|
||||
version = "0.5.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.6.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
|
||||
dependencies = [
|
||||
"adler",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
|
||||
dependencies = [
|
||||
"hermit-abi 0.2.6",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.30.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.32.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
|
||||
dependencies = [
|
||||
"flate2",
|
||||
"memchr",
|
||||
"ruzstd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.17.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.56"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-std-workspace-alloc"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ff66d57013a5686e1917ed6a025d54dd591fcda71a41fe07edf4d16726aefa86"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-std-workspace-core"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c"
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "0.37.23"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"errno",
|
||||
"io-lifetimes",
|
||||
"libc",
|
||||
"linux-raw-sys 0.3.8",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "0.38.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f"
|
||||
dependencies = [
|
||||
"bitflags 2.4.0",
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys 0.4.5",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ruzstd"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3ffab8f9715a0d455df4bbb9d21e91135aab3cd3ca187af0cd0c3c3f868fdc"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"thiserror-core",
|
||||
"twox-hash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
|
||||
|
||||
[[package]]
|
||||
name = "stable_deref_trait"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
|
||||
|
||||
[[package]]
|
||||
name = "static_assertions"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.109"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "terminal_size"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
|
||||
dependencies = [
|
||||
"rustix 0.37.23",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-core"
|
||||
version = "1.0.38"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d97345f6437bb2004cd58819d8a9ef8e36cdd7661c2abc4bbde0a7c40d9f497"
|
||||
dependencies = [
|
||||
"thiserror-core-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-core-impl"
|
||||
version = "1.0.38"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "threadpool"
|
||||
version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
|
||||
dependencies = [
|
||||
"num_cpus",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "twox-hash"
|
||||
version = "1.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"static_assertions",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typed-arena"
|
||||
version = "2.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
|
||||
|
||||
[[package]]
|
||||
name = "utf8parse"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu",
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
|
||||
dependencies = [
|
||||
"windows-targets",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.48.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
|
||||
147
vendor/addr2line/Cargo.toml
vendored
147
vendor/addr2line/Cargo.toml
vendored
@@ -1,147 +0,0 @@
|
||||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
rust-version = "1.65"
|
||||
name = "addr2line"
|
||||
version = "0.21.0"
|
||||
exclude = [
|
||||
"/benches/*",
|
||||
"/fixtures/*",
|
||||
".github",
|
||||
]
|
||||
description = "A cross-platform symbolication library written in Rust, using `gimli`"
|
||||
documentation = "https://docs.rs/addr2line"
|
||||
readme = "./README.md"
|
||||
keywords = [
|
||||
"DWARF",
|
||||
"debug",
|
||||
"elf",
|
||||
"symbolicate",
|
||||
"atos",
|
||||
]
|
||||
categories = ["development-tools::debugging"]
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository = "https://github.com/gimli-rs/addr2line"
|
||||
|
||||
[profile.bench]
|
||||
codegen-units = 1
|
||||
debug = true
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
[[example]]
|
||||
name = "addr2line"
|
||||
required-features = ["default"]
|
||||
|
||||
[[test]]
|
||||
name = "output_equivalence"
|
||||
harness = false
|
||||
required-features = ["default"]
|
||||
|
||||
[[test]]
|
||||
name = "correctness"
|
||||
required-features = ["default"]
|
||||
|
||||
[[test]]
|
||||
name = "parse"
|
||||
required-features = ["std-object"]
|
||||
|
||||
[dependencies.alloc]
|
||||
version = "1.0.0"
|
||||
optional = true
|
||||
package = "rustc-std-workspace-alloc"
|
||||
|
||||
[dependencies.compiler_builtins]
|
||||
version = "0.1.2"
|
||||
optional = true
|
||||
|
||||
[dependencies.core]
|
||||
version = "1.0.0"
|
||||
optional = true
|
||||
package = "rustc-std-workspace-core"
|
||||
|
||||
[dependencies.cpp_demangle]
|
||||
version = "0.4"
|
||||
features = ["alloc"]
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.fallible-iterator]
|
||||
version = "0.3.0"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.gimli]
|
||||
version = "0.28.0"
|
||||
features = ["read"]
|
||||
default-features = false
|
||||
|
||||
[dependencies.memmap2]
|
||||
version = "0.5.5"
|
||||
optional = true
|
||||
|
||||
[dependencies.object]
|
||||
version = "0.32.0"
|
||||
features = ["read"]
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.rustc-demangle]
|
||||
version = "0.1"
|
||||
optional = true
|
||||
|
||||
[dependencies.smallvec]
|
||||
version = "1"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies.backtrace]
|
||||
version = "0.3.13"
|
||||
|
||||
[dev-dependencies.clap]
|
||||
version = "4.3.21"
|
||||
features = ["wrap_help"]
|
||||
|
||||
[dev-dependencies.findshlibs]
|
||||
version = "0.10"
|
||||
|
||||
[dev-dependencies.libtest-mimic]
|
||||
version = "0.6.1"
|
||||
|
||||
[dev-dependencies.typed-arena]
|
||||
version = "2"
|
||||
|
||||
[features]
|
||||
default = [
|
||||
"rustc-demangle",
|
||||
"cpp_demangle",
|
||||
"std-object",
|
||||
"fallible-iterator",
|
||||
"smallvec",
|
||||
"memmap2",
|
||||
]
|
||||
rustc-dep-of-std = [
|
||||
"core",
|
||||
"alloc",
|
||||
"compiler_builtins",
|
||||
"gimli/rustc-dep-of-std",
|
||||
]
|
||||
std = ["gimli/std"]
|
||||
std-object = [
|
||||
"std",
|
||||
"object",
|
||||
"object/std",
|
||||
"object/compression",
|
||||
"gimli/endian-reader",
|
||||
]
|
||||
201
vendor/addr2line/LICENSE-APACHE
vendored
201
vendor/addr2line/LICENSE-APACHE
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
25
vendor/addr2line/LICENSE-MIT
vendored
25
vendor/addr2line/LICENSE-MIT
vendored
@@ -1,25 +0,0 @@
|
||||
Copyright (c) 2016-2018 The gimli Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
48
vendor/addr2line/README.md
vendored
48
vendor/addr2line/README.md
vendored
@@ -1,48 +0,0 @@
|
||||
# addr2line
|
||||
|
||||
[](https://crates.io/crates/addr2line)
|
||||
[](https://docs.rs/addr2line)
|
||||
[](https://coveralls.io/github/gimli-rs/addr2line?branch=master)
|
||||
|
||||
A cross-platform library for retrieving per-address debug information
|
||||
from files with DWARF debug information.
|
||||
|
||||
`addr2line` uses [`gimli`](https://github.com/gimli-rs/gimli) to parse
|
||||
the debug information, and exposes an interface for finding
|
||||
the source file, line number, and wrapping function for instruction
|
||||
addresses within the target program. These lookups can either be
|
||||
performed programmatically through `Context::find_location` and
|
||||
`Context::find_frames`, or via the included example binary,
|
||||
`addr2line` (named and modelled after the equivalent utility from
|
||||
[GNU binutils](https://sourceware.org/binutils/docs/binutils/addr2line.html)).
|
||||
|
||||
# Quickstart
|
||||
- Add the [`addr2line` crate](https://crates.io/crates/addr2line) to your `Cargo.toml`
|
||||
- Load the file and parse it with [`addr2line::object::read::File::parse`](https://docs.rs/object/*/object/read/struct.File.html#method.parse)
|
||||
- Pass the parsed file to [`addr2line::Context::new` ](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.new)
|
||||
- Use [`addr2line::Context::find_location`](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.find_location)
|
||||
or [`addr2line::Context::find_frames`](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.find_frames)
|
||||
to look up debug information for an address
|
||||
|
||||
# Performance
|
||||
|
||||
`addr2line` optimizes for speed over memory by caching parsed information.
|
||||
The DWARF information is parsed lazily where possible.
|
||||
|
||||
The library aims to perform similarly to equivalent existing tools such
|
||||
as `addr2line` from binutils, `eu-addr2line` from elfutils, and
|
||||
`llvm-symbolize` from the llvm project, and in the past some benchmarking
|
||||
was done that indicates a comparable performance.
|
||||
|
||||
## License
|
||||
|
||||
Licensed under either of
|
||||
|
||||
* Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
||||
* MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
|
||||
dual licensed as above, without any additional terms or conditions.
|
||||
23
vendor/addr2line/bench.plot.r
vendored
23
vendor/addr2line/bench.plot.r
vendored
@@ -1,23 +0,0 @@
|
||||
v <- read.table(file("stdin"))
|
||||
t <- data.frame(prog=v[,1], funcs=(v[,2]=="func"), time=v[,3], mem=v[,4], stringsAsFactors=FALSE)
|
||||
|
||||
t$prog <- as.character(t$prog)
|
||||
t$prog[t$prog == "master"] <- "gimli-rs/addr2line"
|
||||
t$funcs[t$funcs == TRUE] <- "With functions"
|
||||
t$funcs[t$funcs == FALSE] <- "File/line only"
|
||||
t$mem = t$mem / 1024.0
|
||||
|
||||
library(ggplot2)
|
||||
p <- ggplot(data=t, aes(x=prog, y=time, fill=prog))
|
||||
p <- p + geom_bar(stat = "identity")
|
||||
p <- p + facet_wrap(~ funcs)
|
||||
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
|
||||
p <- p + ylab("time (s)") + ggtitle("addr2line runtime")
|
||||
ggsave('time.png',plot=p,width=10,height=6)
|
||||
|
||||
p <- ggplot(data=t, aes(x=prog, y=mem, fill=prog))
|
||||
p <- p + geom_bar(stat = "identity")
|
||||
p <- p + facet_wrap(~ funcs)
|
||||
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
|
||||
p <- p + ylab("memory (kB)") + ggtitle("addr2line memory usage")
|
||||
ggsave('memory.png',plot=p,width=10,height=6)
|
||||
112
vendor/addr2line/benchmark.sh
vendored
112
vendor/addr2line/benchmark.sh
vendored
@@ -1,112 +0,0 @@
|
||||
#!/bin/bash
|
||||
if [[ $# -le 1 ]]; then
|
||||
echo "Usage: $0 <executable> [<addresses>] REFS..."
|
||||
exit 1
|
||||
fi
|
||||
target="$1"
|
||||
shift
|
||||
|
||||
addresses=""
|
||||
if [[ -e "$1" ]]; then
|
||||
addresses="$1"
|
||||
shift
|
||||
fi
|
||||
|
||||
# path to "us"
|
||||
# readlink -f, but more portable:
|
||||
dirname=$(perl -e 'use Cwd "abs_path";print abs_path(shift)' "$(dirname "$0")")
|
||||
|
||||
# https://stackoverflow.com/a/2358432/472927
|
||||
{
|
||||
# compile all refs
|
||||
pushd "$dirname" > /dev/null
|
||||
# if the user has some local changes, preserve them
|
||||
nstashed=$(git stash list | wc -l)
|
||||
echo "==> Stashing any local modifications"
|
||||
git stash --keep-index > /dev/null
|
||||
popstash() {
|
||||
# https://stackoverflow.com/q/24520791/472927
|
||||
if [[ "$(git stash list | wc -l)" -ne "$nstashed" ]]; then
|
||||
echo "==> Restoring stashed state"
|
||||
git stash pop > /dev/null
|
||||
fi
|
||||
}
|
||||
# if the user has added stuff to the index, abort
|
||||
if ! git diff-index --quiet HEAD --; then
|
||||
echo "Refusing to overwrite outstanding git changes"
|
||||
popstash
|
||||
exit 2
|
||||
fi
|
||||
current=$(git symbolic-ref --short HEAD)
|
||||
for ref in "$@"; do
|
||||
echo "==> Compiling $ref"
|
||||
git checkout -q "$ref"
|
||||
commit=$(git rev-parse HEAD)
|
||||
fn="target/release/addr2line-$commit"
|
||||
if [[ ! -e "$fn" ]]; then
|
||||
cargo build --release --example addr2line
|
||||
cp target/release/examples/addr2line "$fn"
|
||||
fi
|
||||
if [[ "$ref" != "$commit" ]]; then
|
||||
ln -sfn "addr2line-$commit" target/release/addr2line-"$ref"
|
||||
fi
|
||||
done
|
||||
git checkout -q "$current"
|
||||
popstash
|
||||
popd > /dev/null
|
||||
|
||||
# get us some addresses to look up
|
||||
if [[ -z "$addresses" ]]; then
|
||||
echo "==> Looking for benchmarking addresses (this may take a while)"
|
||||
addresses=$(mktemp tmp.XXXXXXXXXX)
|
||||
objdump -C -x --disassemble -l "$target" \
|
||||
| grep -P '0[048]:' \
|
||||
| awk '{print $1}' \
|
||||
| sed 's/:$//' \
|
||||
> "$addresses"
|
||||
echo " -> Addresses stored in $addresses; you should re-use it next time"
|
||||
fi
|
||||
|
||||
run() {
|
||||
func="$1"
|
||||
name="$2"
|
||||
cmd="$3"
|
||||
args="$4"
|
||||
printf "%s\t%s\t" "$name" "$func"
|
||||
if [[ "$cmd" =~ llvm-symbolizer ]]; then
|
||||
/usr/bin/time -f '%e\t%M' "$cmd" $args -obj="$target" < "$addresses" 2>&1 >/dev/null
|
||||
else
|
||||
/usr/bin/time -f '%e\t%M' "$cmd" $args -e "$target" < "$addresses" 2>&1 >/dev/null
|
||||
fi
|
||||
}
|
||||
|
||||
# run without functions
|
||||
log1=$(mktemp tmp.XXXXXXXXXX)
|
||||
echo "==> Benchmarking"
|
||||
run nofunc binutils addr2line >> "$log1"
|
||||
#run nofunc elfutils eu-addr2line >> "$log1"
|
||||
run nofunc llvm-sym llvm-symbolizer -functions=none >> "$log1"
|
||||
for ref in "$@"; do
|
||||
run nofunc "$ref" "$dirname/target/release/addr2line-$ref" >> "$log1"
|
||||
done
|
||||
cat "$log1" | column -t
|
||||
|
||||
# run with functions
|
||||
log2=$(mktemp tmp.XXXXXXXXXX)
|
||||
echo "==> Benchmarking with -f"
|
||||
run func binutils addr2line "-f -i" >> "$log2"
|
||||
#run func elfutils eu-addr2line "-f -i" >> "$log2"
|
||||
run func llvm-sym llvm-symbolizer "-functions=linkage -demangle=0" >> "$log2"
|
||||
for ref in "$@"; do
|
||||
run func "$ref" "$dirname/target/release/addr2line-$ref" "-f -i" >> "$log2"
|
||||
done
|
||||
cat "$log2" | column -t
|
||||
cat "$log2" >> "$log1"; rm "$log2"
|
||||
|
||||
echo "==> Plotting"
|
||||
Rscript --no-readline --no-restore --no-save "$dirname/bench.plot.r" < "$log1"
|
||||
|
||||
echo "==> Cleaning up"
|
||||
rm "$log1"
|
||||
exit 0
|
||||
}
|
||||
5
vendor/addr2line/coverage.sh
vendored
5
vendor/addr2line/coverage.sh
vendored
@@ -1,5 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Run tarpaulin and pycobertura to generate coverage.html.
|
||||
|
||||
cargo tarpaulin --skip-clean --out Xml
|
||||
pycobertura show --format html --output coverage.html cobertura.xml
|
||||
317
vendor/addr2line/examples/addr2line.rs
vendored
317
vendor/addr2line/examples/addr2line.rs
vendored
@@ -1,317 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, Lines, StdinLock, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use clap::{Arg, ArgAction, Command};
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use object::{Object, ObjectSection, SymbolMap, SymbolMapName};
|
||||
use typed_arena::Arena;
|
||||
|
||||
use addr2line::{Context, Location};
|
||||
|
||||
fn parse_uint_from_hex_string(string: &str) -> Option<u64> {
|
||||
if string.len() > 2 && string.starts_with("0x") {
|
||||
u64::from_str_radix(&string[2..], 16).ok()
|
||||
} else {
|
||||
u64::from_str_radix(string, 16).ok()
|
||||
}
|
||||
}
|
||||
|
||||
enum Addrs<'a> {
|
||||
Args(clap::parser::ValuesRef<'a, String>),
|
||||
Stdin(Lines<StdinLock<'a>>),
|
||||
}
|
||||
|
||||
impl<'a> Iterator for Addrs<'a> {
|
||||
type Item = Option<u64>;
|
||||
|
||||
fn next(&mut self) -> Option<Option<u64>> {
|
||||
let text = match *self {
|
||||
Addrs::Args(ref mut vals) => vals.next().map(Cow::from),
|
||||
Addrs::Stdin(ref mut lines) => lines.next().map(Result::unwrap).map(Cow::from),
|
||||
};
|
||||
text.as_ref()
|
||||
.map(Cow::as_ref)
|
||||
.map(parse_uint_from_hex_string)
|
||||
}
|
||||
}
|
||||
|
||||
fn print_loc(loc: Option<&Location<'_>>, basenames: bool, llvm: bool) {
|
||||
if let Some(loc) = loc {
|
||||
if let Some(ref file) = loc.file.as_ref() {
|
||||
let path = if basenames {
|
||||
Path::new(Path::new(file).file_name().unwrap())
|
||||
} else {
|
||||
Path::new(file)
|
||||
};
|
||||
print!("{}:", path.display());
|
||||
} else {
|
||||
print!("??:");
|
||||
}
|
||||
if llvm {
|
||||
print!("{}:{}", loc.line.unwrap_or(0), loc.column.unwrap_or(0));
|
||||
} else if let Some(line) = loc.line {
|
||||
print!("{}", line);
|
||||
} else {
|
||||
print!("?");
|
||||
}
|
||||
println!();
|
||||
} else if llvm {
|
||||
println!("??:0:0");
|
||||
} else {
|
||||
println!("??:0");
|
||||
}
|
||||
}
|
||||
|
||||
fn print_function(name: Option<&str>, language: Option<gimli::DwLang>, demangle: bool) {
|
||||
if let Some(name) = name {
|
||||
if demangle {
|
||||
print!("{}", addr2line::demangle_auto(Cow::from(name), language));
|
||||
} else {
|
||||
print!("{}", name);
|
||||
}
|
||||
} else {
|
||||
print!("??");
|
||||
}
|
||||
}
|
||||
|
||||
fn load_file_section<'input, 'arena, Endian: gimli::Endianity>(
|
||||
id: gimli::SectionId,
|
||||
file: &object::File<'input>,
|
||||
endian: Endian,
|
||||
arena_data: &'arena Arena<Cow<'input, [u8]>>,
|
||||
) -> Result<gimli::EndianSlice<'arena, Endian>, ()> {
|
||||
// TODO: Unify with dwarfdump.rs in gimli.
|
||||
let name = id.name();
|
||||
match file.section_by_name(name) {
|
||||
Some(section) => match section.uncompressed_data().unwrap() {
|
||||
Cow::Borrowed(b) => Ok(gimli::EndianSlice::new(b, endian)),
|
||||
Cow::Owned(b) => Ok(gimli::EndianSlice::new(arena_data.alloc(b.into()), endian)),
|
||||
},
|
||||
None => Ok(gimli::EndianSlice::new(&[][..], endian)),
|
||||
}
|
||||
}
|
||||
|
||||
fn find_name_from_symbols<'a>(
|
||||
symbols: &'a SymbolMap<SymbolMapName<'_>>,
|
||||
probe: u64,
|
||||
) -> Option<&'a str> {
|
||||
symbols.get(probe).map(|x| x.name())
|
||||
}
|
||||
|
||||
struct Options<'a> {
|
||||
do_functions: bool,
|
||||
do_inlines: bool,
|
||||
pretty: bool,
|
||||
print_addrs: bool,
|
||||
basenames: bool,
|
||||
demangle: bool,
|
||||
llvm: bool,
|
||||
exe: &'a PathBuf,
|
||||
sup: Option<&'a PathBuf>,
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let matches = Command::new("addr2line")
|
||||
.version(env!("CARGO_PKG_VERSION"))
|
||||
.about("A fast addr2line Rust port")
|
||||
.max_term_width(100)
|
||||
.args(&[
|
||||
Arg::new("exe")
|
||||
.short('e')
|
||||
.long("exe")
|
||||
.value_name("filename")
|
||||
.value_parser(clap::value_parser!(PathBuf))
|
||||
.help(
|
||||
"Specify the name of the executable for which addresses should be translated.",
|
||||
)
|
||||
.required(true),
|
||||
Arg::new("sup")
|
||||
.long("sup")
|
||||
.value_name("filename")
|
||||
.value_parser(clap::value_parser!(PathBuf))
|
||||
.help("Path to supplementary object file."),
|
||||
Arg::new("functions")
|
||||
.short('f')
|
||||
.long("functions")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help("Display function names as well as file and line number information."),
|
||||
Arg::new("pretty").short('p').long("pretty-print")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help(
|
||||
"Make the output more human friendly: each location are printed on one line.",
|
||||
),
|
||||
Arg::new("inlines").short('i').long("inlines")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help(
|
||||
"If the address belongs to a function that was inlined, the source information for \
|
||||
all enclosing scopes back to the first non-inlined function will also be printed.",
|
||||
),
|
||||
Arg::new("addresses").short('a').long("addresses")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help(
|
||||
"Display the address before the function name, file and line number information.",
|
||||
),
|
||||
Arg::new("basenames")
|
||||
.short('s')
|
||||
.long("basenames")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help("Display only the base of each file name."),
|
||||
Arg::new("demangle").short('C').long("demangle")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help(
|
||||
"Demangle function names. \
|
||||
Specifying a specific demangling style (like GNU addr2line) is not supported. \
|
||||
(TODO)"
|
||||
),
|
||||
Arg::new("llvm")
|
||||
.long("llvm")
|
||||
.action(ArgAction::SetTrue)
|
||||
.help("Display output in the same format as llvm-symbolizer."),
|
||||
Arg::new("addrs")
|
||||
.action(ArgAction::Append)
|
||||
.help("Addresses to use instead of reading from stdin."),
|
||||
])
|
||||
.get_matches();
|
||||
|
||||
let arena_data = Arena::new();
|
||||
|
||||
let opts = Options {
|
||||
do_functions: matches.get_flag("functions"),
|
||||
do_inlines: matches.get_flag("inlines"),
|
||||
pretty: matches.get_flag("pretty"),
|
||||
print_addrs: matches.get_flag("addresses"),
|
||||
basenames: matches.get_flag("basenames"),
|
||||
demangle: matches.get_flag("demangle"),
|
||||
llvm: matches.get_flag("llvm"),
|
||||
exe: matches.get_one::<PathBuf>("exe").unwrap(),
|
||||
sup: matches.get_one::<PathBuf>("sup"),
|
||||
};
|
||||
|
||||
let file = File::open(opts.exe).unwrap();
|
||||
let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
|
||||
let object = &object::File::parse(&*map).unwrap();
|
||||
|
||||
let endian = if object.is_little_endian() {
|
||||
gimli::RunTimeEndian::Little
|
||||
} else {
|
||||
gimli::RunTimeEndian::Big
|
||||
};
|
||||
|
||||
let mut load_section = |id: gimli::SectionId| -> Result<_, _> {
|
||||
load_file_section(id, object, endian, &arena_data)
|
||||
};
|
||||
|
||||
let sup_map;
|
||||
let sup_object = if let Some(sup_path) = opts.sup {
|
||||
let sup_file = File::open(sup_path).unwrap();
|
||||
sup_map = unsafe { memmap2::Mmap::map(&sup_file).unwrap() };
|
||||
Some(object::File::parse(&*sup_map).unwrap())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let symbols = object.symbol_map();
|
||||
let mut dwarf = gimli::Dwarf::load(&mut load_section).unwrap();
|
||||
if let Some(ref sup_object) = sup_object {
|
||||
let mut load_sup_section = |id: gimli::SectionId| -> Result<_, _> {
|
||||
load_file_section(id, sup_object, endian, &arena_data)
|
||||
};
|
||||
dwarf.load_sup(&mut load_sup_section).unwrap();
|
||||
}
|
||||
|
||||
let mut split_dwarf_loader = addr2line::builtin_split_dwarf_loader::SplitDwarfLoader::new(
|
||||
|data, endian| {
|
||||
gimli::EndianSlice::new(arena_data.alloc(Cow::Owned(data.into_owned())), endian)
|
||||
},
|
||||
Some(opts.exe.clone()),
|
||||
);
|
||||
let ctx = Context::from_dwarf(dwarf).unwrap();
|
||||
|
||||
let stdin = std::io::stdin();
|
||||
let addrs = matches
|
||||
.get_many::<String>("addrs")
|
||||
.map(Addrs::Args)
|
||||
.unwrap_or_else(|| Addrs::Stdin(stdin.lock().lines()));
|
||||
|
||||
for probe in addrs {
|
||||
if opts.print_addrs {
|
||||
let addr = probe.unwrap_or(0);
|
||||
if opts.llvm {
|
||||
print!("0x{:x}", addr);
|
||||
} else {
|
||||
print!("0x{:016x}", addr);
|
||||
}
|
||||
if opts.pretty {
|
||||
print!(": ");
|
||||
} else {
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
if opts.do_functions || opts.do_inlines {
|
||||
let mut printed_anything = false;
|
||||
if let Some(probe) = probe {
|
||||
let frames = ctx.find_frames(probe);
|
||||
let frames = split_dwarf_loader.run(frames).unwrap();
|
||||
let mut frames = frames.enumerate();
|
||||
while let Some((i, frame)) = frames.next().unwrap() {
|
||||
if opts.pretty && i != 0 {
|
||||
print!(" (inlined by) ");
|
||||
}
|
||||
|
||||
if opts.do_functions {
|
||||
if let Some(func) = frame.function {
|
||||
print_function(
|
||||
func.raw_name().ok().as_ref().map(AsRef::as_ref),
|
||||
func.language,
|
||||
opts.demangle,
|
||||
);
|
||||
} else {
|
||||
let name = find_name_from_symbols(&symbols, probe);
|
||||
print_function(name, None, opts.demangle);
|
||||
}
|
||||
|
||||
if opts.pretty {
|
||||
print!(" at ");
|
||||
} else {
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
print_loc(frame.location.as_ref(), opts.basenames, opts.llvm);
|
||||
|
||||
printed_anything = true;
|
||||
|
||||
if !opts.do_inlines {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !printed_anything {
|
||||
if opts.do_functions {
|
||||
let name = probe.and_then(|probe| find_name_from_symbols(&symbols, probe));
|
||||
print_function(name, None, opts.demangle);
|
||||
|
||||
if opts.pretty {
|
||||
print!(" at ");
|
||||
} else {
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
print_loc(None, opts.basenames, opts.llvm);
|
||||
}
|
||||
} else {
|
||||
let loc = probe.and_then(|probe| ctx.find_location(probe).unwrap());
|
||||
print_loc(loc.as_ref(), opts.basenames, opts.llvm);
|
||||
}
|
||||
|
||||
if opts.llvm {
|
||||
println!();
|
||||
}
|
||||
std::io::stdout().flush().unwrap();
|
||||
}
|
||||
}
|
||||
1
vendor/addr2line/rustfmt.toml
vendored
1
vendor/addr2line/rustfmt.toml
vendored
@@ -1 +0,0 @@
|
||||
|
||||
164
vendor/addr2line/src/builtin_split_dwarf_loader.rs
vendored
164
vendor/addr2line/src/builtin_split_dwarf_loader.rs
vendored
@@ -1,164 +0,0 @@
|
||||
use alloc::borrow::Cow;
|
||||
use alloc::sync::Arc;
|
||||
use std::fs::File;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use object::Object;
|
||||
|
||||
use crate::{LookupContinuation, LookupResult};
|
||||
|
||||
#[cfg(unix)]
|
||||
fn convert_path<R: gimli::Reader<Endian = gimli::RunTimeEndian>>(
|
||||
r: &R,
|
||||
) -> Result<PathBuf, gimli::Error> {
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
let bytes = r.to_slice()?;
|
||||
let s = OsStr::from_bytes(&bytes);
|
||||
Ok(PathBuf::from(s))
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn convert_path<R: gimli::Reader<Endian = gimli::RunTimeEndian>>(
|
||||
r: &R,
|
||||
) -> Result<PathBuf, gimli::Error> {
|
||||
let bytes = r.to_slice()?;
|
||||
let s = std::str::from_utf8(&bytes).map_err(|_| gimli::Error::BadUtf8)?;
|
||||
Ok(PathBuf::from(s))
|
||||
}
|
||||
|
||||
fn load_section<'data: 'file, 'file, O, R, F>(
|
||||
id: gimli::SectionId,
|
||||
file: &'file O,
|
||||
endian: R::Endian,
|
||||
loader: &mut F,
|
||||
) -> Result<R, gimli::Error>
|
||||
where
|
||||
O: object::Object<'data, 'file>,
|
||||
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
|
||||
F: FnMut(Cow<'data, [u8]>, R::Endian) -> R,
|
||||
{
|
||||
use object::ObjectSection;
|
||||
|
||||
let data = id
|
||||
.dwo_name()
|
||||
.and_then(|dwo_name| {
|
||||
file.section_by_name(dwo_name)
|
||||
.and_then(|section| section.uncompressed_data().ok())
|
||||
})
|
||||
.unwrap_or(Cow::Borrowed(&[]));
|
||||
Ok(loader(data, endian))
|
||||
}
|
||||
|
||||
/// A simple builtin split DWARF loader.
|
||||
pub struct SplitDwarfLoader<R, F>
|
||||
where
|
||||
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
|
||||
F: FnMut(Cow<'_, [u8]>, R::Endian) -> R,
|
||||
{
|
||||
loader: F,
|
||||
dwarf_package: Option<gimli::DwarfPackage<R>>,
|
||||
}
|
||||
|
||||
impl<R, F> SplitDwarfLoader<R, F>
|
||||
where
|
||||
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
|
||||
F: FnMut(Cow<'_, [u8]>, R::Endian) -> R,
|
||||
{
|
||||
fn load_dwarf_package(loader: &mut F, path: Option<PathBuf>) -> Option<gimli::DwarfPackage<R>> {
|
||||
let mut path = path.map(Ok).unwrap_or_else(std::env::current_exe).ok()?;
|
||||
let dwp_extension = path
|
||||
.extension()
|
||||
.map(|previous_extension| {
|
||||
let mut previous_extension = previous_extension.to_os_string();
|
||||
previous_extension.push(".dwp");
|
||||
previous_extension
|
||||
})
|
||||
.unwrap_or_else(|| "dwp".into());
|
||||
path.set_extension(dwp_extension);
|
||||
let file = File::open(&path).ok()?;
|
||||
let map = unsafe { memmap2::Mmap::map(&file).ok()? };
|
||||
let dwp = object::File::parse(&*map).ok()?;
|
||||
|
||||
let endian = if dwp.is_little_endian() {
|
||||
gimli::RunTimeEndian::Little
|
||||
} else {
|
||||
gimli::RunTimeEndian::Big
|
||||
};
|
||||
|
||||
let empty = loader(Cow::Borrowed(&[]), endian);
|
||||
gimli::DwarfPackage::load(
|
||||
|section_id| load_section(section_id, &dwp, endian, loader),
|
||||
empty,
|
||||
)
|
||||
.ok()
|
||||
}
|
||||
|
||||
/// Create a new split DWARF loader.
|
||||
pub fn new(mut loader: F, path: Option<PathBuf>) -> SplitDwarfLoader<R, F> {
|
||||
let dwarf_package = SplitDwarfLoader::load_dwarf_package(&mut loader, path);
|
||||
SplitDwarfLoader {
|
||||
loader,
|
||||
dwarf_package,
|
||||
}
|
||||
}
|
||||
|
||||
/// Run the provided `LookupResult` to completion, loading any necessary
|
||||
/// split DWARF along the way.
|
||||
pub fn run<L>(&mut self, mut l: LookupResult<L>) -> L::Output
|
||||
where
|
||||
L: LookupContinuation<Buf = R>,
|
||||
{
|
||||
loop {
|
||||
let (load, continuation) = match l {
|
||||
LookupResult::Output(output) => break output,
|
||||
LookupResult::Load { load, continuation } => (load, continuation),
|
||||
};
|
||||
|
||||
let mut r: Option<Arc<gimli::Dwarf<_>>> = None;
|
||||
if let Some(dwp) = self.dwarf_package.as_ref() {
|
||||
if let Ok(Some(cu)) = dwp.find_cu(load.dwo_id, &load.parent) {
|
||||
r = Some(Arc::new(cu));
|
||||
}
|
||||
}
|
||||
|
||||
if r.is_none() {
|
||||
let mut path = PathBuf::new();
|
||||
if let Some(p) = load.comp_dir.as_ref() {
|
||||
if let Ok(p) = convert_path(p) {
|
||||
path.push(p);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(p) = load.path.as_ref() {
|
||||
if let Ok(p) = convert_path(p) {
|
||||
path.push(p);
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(file) = File::open(&path) {
|
||||
if let Ok(map) = unsafe { memmap2::Mmap::map(&file) } {
|
||||
if let Ok(file) = object::File::parse(&*map) {
|
||||
let endian = if file.is_little_endian() {
|
||||
gimli::RunTimeEndian::Little
|
||||
} else {
|
||||
gimli::RunTimeEndian::Big
|
||||
};
|
||||
|
||||
r = gimli::Dwarf::load(|id| {
|
||||
load_section(id, &file, endian, &mut self.loader)
|
||||
})
|
||||
.ok()
|
||||
.map(|mut dwo_dwarf| {
|
||||
dwo_dwarf.make_dwo(&load.parent);
|
||||
Arc::new(dwo_dwarf)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
l = continuation.resume(r);
|
||||
}
|
||||
}
|
||||
}
|
||||
555
vendor/addr2line/src/function.rs
vendored
555
vendor/addr2line/src/function.rs
vendored
@@ -1,555 +0,0 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::vec::Vec;
|
||||
use core::cmp::Ordering;
|
||||
use core::iter;
|
||||
|
||||
use crate::lazy::LazyCell;
|
||||
use crate::maybe_small;
|
||||
use crate::{Context, DebugFile, Error, RangeAttributes};
|
||||
|
||||
pub(crate) struct Functions<R: gimli::Reader> {
|
||||
/// List of all `DW_TAG_subprogram` details in the unit.
|
||||
pub(crate) functions: Box<
|
||||
[(
|
||||
gimli::UnitOffset<R::Offset>,
|
||||
LazyCell<Result<Function<R>, Error>>,
|
||||
)],
|
||||
>,
|
||||
/// List of `DW_TAG_subprogram` address ranges in the unit.
|
||||
pub(crate) addresses: Box<[FunctionAddress]>,
|
||||
}
|
||||
|
||||
/// A single address range for a function.
|
||||
///
|
||||
/// It is possible for a function to have multiple address ranges; this
|
||||
/// is handled by having multiple `FunctionAddress` entries with the same
|
||||
/// `function` field.
|
||||
pub(crate) struct FunctionAddress {
|
||||
range: gimli::Range,
|
||||
/// An index into `Functions::functions`.
|
||||
pub(crate) function: usize,
|
||||
}
|
||||
|
||||
pub(crate) struct Function<R: gimli::Reader> {
|
||||
pub(crate) dw_die_offset: gimli::UnitOffset<R::Offset>,
|
||||
pub(crate) name: Option<R>,
|
||||
/// List of all `DW_TAG_inlined_subroutine` details in this function.
|
||||
inlined_functions: Box<[InlinedFunction<R>]>,
|
||||
/// List of `DW_TAG_inlined_subroutine` address ranges in this function.
|
||||
inlined_addresses: Box<[InlinedFunctionAddress]>,
|
||||
}
|
||||
|
||||
pub(crate) struct InlinedFunctionAddress {
|
||||
range: gimli::Range,
|
||||
call_depth: usize,
|
||||
/// An index into `Function::inlined_functions`.
|
||||
function: usize,
|
||||
}
|
||||
|
||||
pub(crate) struct InlinedFunction<R: gimli::Reader> {
|
||||
pub(crate) dw_die_offset: gimli::UnitOffset<R::Offset>,
|
||||
pub(crate) name: Option<R>,
|
||||
pub(crate) call_file: Option<u64>,
|
||||
pub(crate) call_line: u32,
|
||||
pub(crate) call_column: u32,
|
||||
}
|
||||
|
||||
impl<R: gimli::Reader> Functions<R> {
|
||||
pub(crate) fn parse(
|
||||
unit: &gimli::Unit<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
) -> Result<Functions<R>, Error> {
|
||||
let mut functions = Vec::new();
|
||||
let mut addresses = Vec::new();
|
||||
let mut entries = unit.entries_raw(None)?;
|
||||
while !entries.is_empty() {
|
||||
let dw_die_offset = entries.next_offset();
|
||||
if let Some(abbrev) = entries.read_abbreviation()? {
|
||||
if abbrev.tag() == gimli::DW_TAG_subprogram {
|
||||
let mut ranges = RangeAttributes::default();
|
||||
for spec in abbrev.attributes() {
|
||||
match entries.read_attribute(*spec) {
|
||||
Ok(ref attr) => {
|
||||
match attr.name() {
|
||||
gimli::DW_AT_low_pc => match attr.value() {
|
||||
gimli::AttributeValue::Addr(val) => {
|
||||
ranges.low_pc = Some(val)
|
||||
}
|
||||
gimli::AttributeValue::DebugAddrIndex(index) => {
|
||||
ranges.low_pc = Some(sections.address(unit, index)?);
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
gimli::DW_AT_high_pc => match attr.value() {
|
||||
gimli::AttributeValue::Addr(val) => {
|
||||
ranges.high_pc = Some(val)
|
||||
}
|
||||
gimli::AttributeValue::DebugAddrIndex(index) => {
|
||||
ranges.high_pc = Some(sections.address(unit, index)?);
|
||||
}
|
||||
gimli::AttributeValue::Udata(val) => {
|
||||
ranges.size = Some(val)
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
gimli::DW_AT_ranges => {
|
||||
ranges.ranges_offset =
|
||||
sections.attr_ranges_offset(unit, attr.value())?;
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
let function_index = functions.len();
|
||||
if ranges.for_each_range(sections, unit, |range| {
|
||||
addresses.push(FunctionAddress {
|
||||
range,
|
||||
function: function_index,
|
||||
});
|
||||
})? {
|
||||
functions.push((dw_die_offset, LazyCell::new()));
|
||||
}
|
||||
} else {
|
||||
entries.skip_attributes(abbrev.attributes())?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The binary search requires the addresses to be sorted.
|
||||
//
|
||||
// It also requires them to be non-overlapping. In practice, overlapping
|
||||
// function ranges are unlikely, so we don't try to handle that yet.
|
||||
//
|
||||
// It's possible for multiple functions to have the same address range if the
|
||||
// compiler can detect and remove functions with identical code. In that case
|
||||
// we'll nondeterministically return one of them.
|
||||
addresses.sort_by_key(|x| x.range.begin);
|
||||
|
||||
Ok(Functions {
|
||||
functions: functions.into_boxed_slice(),
|
||||
addresses: addresses.into_boxed_slice(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn find_address(&self, probe: u64) -> Option<usize> {
|
||||
self.addresses
|
||||
.binary_search_by(|address| {
|
||||
if probe < address.range.begin {
|
||||
Ordering::Greater
|
||||
} else if probe >= address.range.end {
|
||||
Ordering::Less
|
||||
} else {
|
||||
Ordering::Equal
|
||||
}
|
||||
})
|
||||
.ok()
|
||||
}
|
||||
|
||||
pub(crate) fn parse_inlined_functions(
|
||||
&self,
|
||||
file: DebugFile,
|
||||
unit: &gimli::Unit<R>,
|
||||
ctx: &Context<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
) -> Result<(), Error> {
|
||||
for function in &*self.functions {
|
||||
function
|
||||
.1
|
||||
.borrow_with(|| Function::parse(function.0, file, unit, ctx, sections))
|
||||
.as_ref()
|
||||
.map_err(Error::clone)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: gimli::Reader> Function<R> {
|
||||
pub(crate) fn parse(
|
||||
dw_die_offset: gimli::UnitOffset<R::Offset>,
|
||||
file: DebugFile,
|
||||
unit: &gimli::Unit<R>,
|
||||
ctx: &Context<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
) -> Result<Self, Error> {
|
||||
let mut entries = unit.entries_raw(Some(dw_die_offset))?;
|
||||
let depth = entries.next_depth();
|
||||
let abbrev = entries.read_abbreviation()?.unwrap();
|
||||
debug_assert_eq!(abbrev.tag(), gimli::DW_TAG_subprogram);
|
||||
|
||||
let mut name = None;
|
||||
for spec in abbrev.attributes() {
|
||||
match entries.read_attribute(*spec) {
|
||||
Ok(ref attr) => {
|
||||
match attr.name() {
|
||||
gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => {
|
||||
if let Ok(val) = sections.attr_string(unit, attr.value()) {
|
||||
name = Some(val);
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_name => {
|
||||
if name.is_none() {
|
||||
name = sections.attr_string(unit, attr.value()).ok();
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
|
||||
if name.is_none() {
|
||||
name = name_attr(attr.value(), file, unit, ctx, sections, 16)?;
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
let mut inlined_functions = Vec::new();
|
||||
let mut inlined_addresses = Vec::new();
|
||||
Function::parse_children(
|
||||
&mut entries,
|
||||
depth,
|
||||
file,
|
||||
unit,
|
||||
ctx,
|
||||
sections,
|
||||
&mut inlined_functions,
|
||||
&mut inlined_addresses,
|
||||
0,
|
||||
)?;
|
||||
|
||||
// Sort ranges in "breadth-first traversal order", i.e. first by call_depth
|
||||
// and then by range.begin. This allows finding the range containing an
|
||||
// address at a certain depth using binary search.
|
||||
// Note: Using DFS order, i.e. ordering by range.begin first and then by
|
||||
// call_depth, would not work! Consider the two examples
|
||||
// "[0..10 at depth 0], [0..2 at depth 1], [6..8 at depth 1]" and
|
||||
// "[0..5 at depth 0], [0..2 at depth 1], [5..10 at depth 0], [6..8 at depth 1]".
|
||||
// In this example, if you want to look up address 7 at depth 0, and you
|
||||
// encounter [0..2 at depth 1], are you before or after the target range?
|
||||
// You don't know.
|
||||
inlined_addresses.sort_by(|r1, r2| {
|
||||
if r1.call_depth < r2.call_depth {
|
||||
Ordering::Less
|
||||
} else if r1.call_depth > r2.call_depth {
|
||||
Ordering::Greater
|
||||
} else if r1.range.begin < r2.range.begin {
|
||||
Ordering::Less
|
||||
} else if r1.range.begin > r2.range.begin {
|
||||
Ordering::Greater
|
||||
} else {
|
||||
Ordering::Equal
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Function {
|
||||
dw_die_offset,
|
||||
name,
|
||||
inlined_functions: inlined_functions.into_boxed_slice(),
|
||||
inlined_addresses: inlined_addresses.into_boxed_slice(),
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_children(
|
||||
entries: &mut gimli::EntriesRaw<'_, '_, R>,
|
||||
depth: isize,
|
||||
file: DebugFile,
|
||||
unit: &gimli::Unit<R>,
|
||||
ctx: &Context<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
inlined_functions: &mut Vec<InlinedFunction<R>>,
|
||||
inlined_addresses: &mut Vec<InlinedFunctionAddress>,
|
||||
inlined_depth: usize,
|
||||
) -> Result<(), Error> {
|
||||
loop {
|
||||
let dw_die_offset = entries.next_offset();
|
||||
let next_depth = entries.next_depth();
|
||||
if next_depth <= depth {
|
||||
return Ok(());
|
||||
}
|
||||
if let Some(abbrev) = entries.read_abbreviation()? {
|
||||
match abbrev.tag() {
|
||||
gimli::DW_TAG_subprogram => {
|
||||
Function::skip(entries, abbrev, next_depth)?;
|
||||
}
|
||||
gimli::DW_TAG_inlined_subroutine => {
|
||||
InlinedFunction::parse(
|
||||
dw_die_offset,
|
||||
entries,
|
||||
abbrev,
|
||||
next_depth,
|
||||
file,
|
||||
unit,
|
||||
ctx,
|
||||
sections,
|
||||
inlined_functions,
|
||||
inlined_addresses,
|
||||
inlined_depth,
|
||||
)?;
|
||||
}
|
||||
_ => {
|
||||
entries.skip_attributes(abbrev.attributes())?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn skip(
|
||||
entries: &mut gimli::EntriesRaw<'_, '_, R>,
|
||||
abbrev: &gimli::Abbreviation,
|
||||
depth: isize,
|
||||
) -> Result<(), Error> {
|
||||
// TODO: use DW_AT_sibling
|
||||
entries.skip_attributes(abbrev.attributes())?;
|
||||
while entries.next_depth() > depth {
|
||||
if let Some(abbrev) = entries.read_abbreviation()? {
|
||||
entries.skip_attributes(abbrev.attributes())?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Build the list of inlined functions that contain `probe`.
|
||||
pub(crate) fn find_inlined_functions(
|
||||
&self,
|
||||
probe: u64,
|
||||
) -> iter::Rev<maybe_small::IntoIter<&InlinedFunction<R>>> {
|
||||
// `inlined_functions` is ordered from outside to inside.
|
||||
let mut inlined_functions = maybe_small::Vec::new();
|
||||
let mut inlined_addresses = &self.inlined_addresses[..];
|
||||
loop {
|
||||
let current_depth = inlined_functions.len();
|
||||
// Look up (probe, current_depth) in inline_ranges.
|
||||
// `inlined_addresses` is sorted in "breadth-first traversal order", i.e.
|
||||
// by `call_depth` first, and then by `range.begin`. See the comment at
|
||||
// the sort call for more information about why.
|
||||
let search = inlined_addresses.binary_search_by(|range| {
|
||||
if range.call_depth > current_depth {
|
||||
Ordering::Greater
|
||||
} else if range.call_depth < current_depth {
|
||||
Ordering::Less
|
||||
} else if range.range.begin > probe {
|
||||
Ordering::Greater
|
||||
} else if range.range.end <= probe {
|
||||
Ordering::Less
|
||||
} else {
|
||||
Ordering::Equal
|
||||
}
|
||||
});
|
||||
if let Ok(index) = search {
|
||||
let function_index = inlined_addresses[index].function;
|
||||
inlined_functions.push(&self.inlined_functions[function_index]);
|
||||
inlined_addresses = &inlined_addresses[index + 1..];
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
inlined_functions.into_iter().rev()
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: gimli::Reader> InlinedFunction<R> {
|
||||
fn parse(
|
||||
dw_die_offset: gimli::UnitOffset<R::Offset>,
|
||||
entries: &mut gimli::EntriesRaw<'_, '_, R>,
|
||||
abbrev: &gimli::Abbreviation,
|
||||
depth: isize,
|
||||
file: DebugFile,
|
||||
unit: &gimli::Unit<R>,
|
||||
ctx: &Context<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
inlined_functions: &mut Vec<InlinedFunction<R>>,
|
||||
inlined_addresses: &mut Vec<InlinedFunctionAddress>,
|
||||
inlined_depth: usize,
|
||||
) -> Result<(), Error> {
|
||||
let mut ranges = RangeAttributes::default();
|
||||
let mut name = None;
|
||||
let mut call_file = None;
|
||||
let mut call_line = 0;
|
||||
let mut call_column = 0;
|
||||
for spec in abbrev.attributes() {
|
||||
match entries.read_attribute(*spec) {
|
||||
Ok(ref attr) => match attr.name() {
|
||||
gimli::DW_AT_low_pc => match attr.value() {
|
||||
gimli::AttributeValue::Addr(val) => ranges.low_pc = Some(val),
|
||||
gimli::AttributeValue::DebugAddrIndex(index) => {
|
||||
ranges.low_pc = Some(sections.address(unit, index)?);
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
gimli::DW_AT_high_pc => match attr.value() {
|
||||
gimli::AttributeValue::Addr(val) => ranges.high_pc = Some(val),
|
||||
gimli::AttributeValue::DebugAddrIndex(index) => {
|
||||
ranges.high_pc = Some(sections.address(unit, index)?);
|
||||
}
|
||||
gimli::AttributeValue::Udata(val) => ranges.size = Some(val),
|
||||
_ => {}
|
||||
},
|
||||
gimli::DW_AT_ranges => {
|
||||
ranges.ranges_offset = sections.attr_ranges_offset(unit, attr.value())?;
|
||||
}
|
||||
gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => {
|
||||
if let Ok(val) = sections.attr_string(unit, attr.value()) {
|
||||
name = Some(val);
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_name => {
|
||||
if name.is_none() {
|
||||
name = sections.attr_string(unit, attr.value()).ok();
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
|
||||
if name.is_none() {
|
||||
name = name_attr(attr.value(), file, unit, ctx, sections, 16)?;
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_call_file => {
|
||||
// There is a spec issue [1] with how DW_AT_call_file is specified in DWARF 5.
|
||||
// Before, a file index of 0 would indicate no source file, however in
|
||||
// DWARF 5 this could be a valid index into the file table.
|
||||
//
|
||||
// Implementations such as LLVM generates a file index of 0 when DWARF 5 is
|
||||
// used.
|
||||
//
|
||||
// Thus, if we see a version of 5 or later, treat a file index of 0 as such.
|
||||
// [1]: http://wiki.dwarfstd.org/index.php?title=DWARF5_Line_Table_File_Numbers
|
||||
if let gimli::AttributeValue::FileIndex(fi) = attr.value() {
|
||||
if fi > 0 || unit.header.version() >= 5 {
|
||||
call_file = Some(fi);
|
||||
}
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_call_line => {
|
||||
call_line = attr.udata_value().unwrap_or(0) as u32;
|
||||
}
|
||||
gimli::DW_AT_call_column => {
|
||||
call_column = attr.udata_value().unwrap_or(0) as u32;
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
let function_index = inlined_functions.len();
|
||||
inlined_functions.push(InlinedFunction {
|
||||
dw_die_offset,
|
||||
name,
|
||||
call_file,
|
||||
call_line,
|
||||
call_column,
|
||||
});
|
||||
|
||||
ranges.for_each_range(sections, unit, |range| {
|
||||
inlined_addresses.push(InlinedFunctionAddress {
|
||||
range,
|
||||
call_depth: inlined_depth,
|
||||
function: function_index,
|
||||
});
|
||||
})?;
|
||||
|
||||
Function::parse_children(
|
||||
entries,
|
||||
depth,
|
||||
file,
|
||||
unit,
|
||||
ctx,
|
||||
sections,
|
||||
inlined_functions,
|
||||
inlined_addresses,
|
||||
inlined_depth + 1,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn name_attr<R>(
|
||||
attr: gimli::AttributeValue<R>,
|
||||
mut file: DebugFile,
|
||||
unit: &gimli::Unit<R>,
|
||||
ctx: &Context<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
recursion_limit: usize,
|
||||
) -> Result<Option<R>, Error>
|
||||
where
|
||||
R: gimli::Reader,
|
||||
{
|
||||
if recursion_limit == 0 {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
match attr {
|
||||
gimli::AttributeValue::UnitRef(offset) => {
|
||||
name_entry(file, unit, offset, ctx, sections, recursion_limit)
|
||||
}
|
||||
gimli::AttributeValue::DebugInfoRef(dr) => {
|
||||
let (unit, offset) = ctx.find_unit(dr, file)?;
|
||||
name_entry(file, unit, offset, ctx, sections, recursion_limit)
|
||||
}
|
||||
gimli::AttributeValue::DebugInfoRefSup(dr) => {
|
||||
if let Some(sup_sections) = sections.sup.as_ref() {
|
||||
file = DebugFile::Supplementary;
|
||||
let (unit, offset) = ctx.find_unit(dr, file)?;
|
||||
name_entry(file, unit, offset, ctx, sup_sections, recursion_limit)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn name_entry<R>(
|
||||
file: DebugFile,
|
||||
unit: &gimli::Unit<R>,
|
||||
offset: gimli::UnitOffset<R::Offset>,
|
||||
ctx: &Context<R>,
|
||||
sections: &gimli::Dwarf<R>,
|
||||
recursion_limit: usize,
|
||||
) -> Result<Option<R>, Error>
|
||||
where
|
||||
R: gimli::Reader,
|
||||
{
|
||||
let mut entries = unit.entries_raw(Some(offset))?;
|
||||
let abbrev = if let Some(abbrev) = entries.read_abbreviation()? {
|
||||
abbrev
|
||||
} else {
|
||||
return Err(gimli::Error::NoEntryAtGivenOffset);
|
||||
};
|
||||
|
||||
let mut name = None;
|
||||
let mut next = None;
|
||||
for spec in abbrev.attributes() {
|
||||
match entries.read_attribute(*spec) {
|
||||
Ok(ref attr) => match attr.name() {
|
||||
gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => {
|
||||
if let Ok(val) = sections.attr_string(unit, attr.value()) {
|
||||
return Ok(Some(val));
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_name => {
|
||||
if let Ok(val) = sections.attr_string(unit, attr.value()) {
|
||||
name = Some(val);
|
||||
}
|
||||
}
|
||||
gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
|
||||
next = Some(attr.value());
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
if name.is_some() {
|
||||
return Ok(name);
|
||||
}
|
||||
|
||||
if let Some(next) = next {
|
||||
return name_attr(next, file, unit, ctx, sections, recursion_limit - 1);
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
31
vendor/addr2line/src/lazy.rs
vendored
31
vendor/addr2line/src/lazy.rs
vendored
@@ -1,31 +0,0 @@
|
||||
use core::cell::UnsafeCell;
|
||||
|
||||
pub struct LazyCell<T> {
|
||||
contents: UnsafeCell<Option<T>>,
|
||||
}
|
||||
impl<T> LazyCell<T> {
|
||||
pub fn new() -> LazyCell<T> {
|
||||
LazyCell {
|
||||
contents: UnsafeCell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn borrow(&self) -> Option<&T> {
|
||||
unsafe { &*self.contents.get() }.as_ref()
|
||||
}
|
||||
|
||||
pub fn borrow_with(&self, closure: impl FnOnce() -> T) -> &T {
|
||||
// First check if we're already initialized...
|
||||
let ptr = self.contents.get();
|
||||
if let Some(val) = unsafe { &*ptr } {
|
||||
return val;
|
||||
}
|
||||
// Note that while we're executing `closure` our `borrow_with` may
|
||||
// be called recursively. This means we need to check again after
|
||||
// the closure has executed. For that we use the `get_or_insert`
|
||||
// method which will only perform mutation if we aren't already
|
||||
// `Some`.
|
||||
let val = closure();
|
||||
unsafe { (*ptr).get_or_insert(val) }
|
||||
}
|
||||
}
|
||||
1729
vendor/addr2line/src/lib.rs
vendored
1729
vendor/addr2line/src/lib.rs
vendored
File diff suppressed because it is too large
Load Diff
126
vendor/addr2line/tests/correctness.rs
vendored
126
vendor/addr2line/tests/correctness.rs
vendored
@@ -1,126 +0,0 @@
|
||||
use addr2line::Context;
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use findshlibs::{IterationControl, SharedLibrary, TargetSharedLibrary};
|
||||
use object::Object;
|
||||
use std::borrow::Cow;
|
||||
use std::fs::File;
|
||||
use std::sync::Arc;
|
||||
|
||||
fn find_debuginfo() -> memmap2::Mmap {
|
||||
let path = std::env::current_exe().unwrap();
|
||||
let file = File::open(&path).unwrap();
|
||||
let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
|
||||
let file = &object::File::parse(&*map).unwrap();
|
||||
if let Ok(uuid) = file.mach_uuid() {
|
||||
for candidate in path.parent().unwrap().read_dir().unwrap() {
|
||||
let path = candidate.unwrap().path();
|
||||
if !path.to_str().unwrap().ends_with(".dSYM") {
|
||||
continue;
|
||||
}
|
||||
for candidate in path.join("Contents/Resources/DWARF").read_dir().unwrap() {
|
||||
let path = candidate.unwrap().path();
|
||||
let file = File::open(&path).unwrap();
|
||||
let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
|
||||
let file = &object::File::parse(&*map).unwrap();
|
||||
if file.mach_uuid().unwrap() == uuid {
|
||||
return map;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return map;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn correctness() {
|
||||
let map = find_debuginfo();
|
||||
let file = &object::File::parse(&*map).unwrap();
|
||||
let module_base = file.relative_address_base();
|
||||
|
||||
let endian = if file.is_little_endian() {
|
||||
gimli::RunTimeEndian::Little
|
||||
} else {
|
||||
gimli::RunTimeEndian::Big
|
||||
};
|
||||
|
||||
fn load_section<'data: 'file, 'file, O, Endian>(
|
||||
id: gimli::SectionId,
|
||||
file: &'file O,
|
||||
endian: Endian,
|
||||
) -> Result<gimli::EndianArcSlice<Endian>, gimli::Error>
|
||||
where
|
||||
O: object::Object<'data, 'file>,
|
||||
Endian: gimli::Endianity,
|
||||
{
|
||||
use object::ObjectSection;
|
||||
|
||||
let data = file
|
||||
.section_by_name(id.name())
|
||||
.and_then(|section| section.uncompressed_data().ok())
|
||||
.unwrap_or(Cow::Borrowed(&[]));
|
||||
Ok(gimli::EndianArcSlice::new(Arc::from(&*data), endian))
|
||||
}
|
||||
|
||||
let dwarf = gimli::Dwarf::load(|id| load_section(id, file, endian)).unwrap();
|
||||
let ctx = Context::from_dwarf(dwarf).unwrap();
|
||||
let mut split_dwarf_loader = addr2line::builtin_split_dwarf_loader::SplitDwarfLoader::new(
|
||||
|data, endian| gimli::EndianArcSlice::new(Arc::from(&*data), endian),
|
||||
None,
|
||||
);
|
||||
|
||||
let mut bias = None;
|
||||
TargetSharedLibrary::each(|lib| {
|
||||
bias = Some((lib.virtual_memory_bias().0 as u64).wrapping_sub(module_base));
|
||||
IterationControl::Break
|
||||
});
|
||||
|
||||
#[allow(unused_mut)]
|
||||
let mut test = |sym: u64, expected_prefix: &str| {
|
||||
let ip = sym.wrapping_sub(bias.unwrap());
|
||||
|
||||
let frames = ctx.find_frames(ip);
|
||||
let frames = split_dwarf_loader.run(frames).unwrap();
|
||||
let frame = frames.last().unwrap().unwrap();
|
||||
let name = frame.function.as_ref().unwrap().demangle().unwrap();
|
||||
// Old rust versions generate DWARF with wrong linkage name,
|
||||
// so only check the start.
|
||||
if !name.starts_with(expected_prefix) {
|
||||
panic!("incorrect name '{}', expected {:?}", name, expected_prefix);
|
||||
}
|
||||
};
|
||||
|
||||
test(test_function as u64, "correctness::test_function");
|
||||
test(
|
||||
small::test_function as u64,
|
||||
"correctness::small::test_function",
|
||||
);
|
||||
test(auxiliary::foo as u64, "auxiliary::foo");
|
||||
}
|
||||
|
||||
mod small {
|
||||
pub fn test_function() {
|
||||
println!("y");
|
||||
}
|
||||
}
|
||||
|
||||
fn test_function() {
|
||||
println!("x");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn zero_function() {
|
||||
let map = find_debuginfo();
|
||||
let file = &object::File::parse(&*map).unwrap();
|
||||
let ctx = Context::new(file).unwrap();
|
||||
for probe in 0..10 {
|
||||
assert!(
|
||||
ctx.find_frames(probe)
|
||||
.skip_all_loads()
|
||||
.unwrap()
|
||||
.count()
|
||||
.unwrap()
|
||||
< 10
|
||||
);
|
||||
}
|
||||
}
|
||||
135
vendor/addr2line/tests/output_equivalence.rs
vendored
135
vendor/addr2line/tests/output_equivalence.rs
vendored
@@ -1,135 +0,0 @@
|
||||
use std::env;
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
|
||||
use backtrace::Backtrace;
|
||||
use findshlibs::{IterationControl, SharedLibrary, TargetSharedLibrary};
|
||||
use libtest_mimic::{Arguments, Failed, Trial};
|
||||
|
||||
#[inline(never)]
|
||||
fn make_trace() -> Vec<String> {
|
||||
fn foo() -> Backtrace {
|
||||
bar()
|
||||
}
|
||||
#[inline(never)]
|
||||
fn bar() -> Backtrace {
|
||||
baz()
|
||||
}
|
||||
#[inline(always)]
|
||||
fn baz() -> Backtrace {
|
||||
Backtrace::new_unresolved()
|
||||
}
|
||||
|
||||
let mut base_addr = None;
|
||||
TargetSharedLibrary::each(|lib| {
|
||||
base_addr = Some(lib.virtual_memory_bias().0 as isize);
|
||||
IterationControl::Break
|
||||
});
|
||||
let addrfix = -base_addr.unwrap();
|
||||
|
||||
let trace = foo();
|
||||
trace
|
||||
.frames()
|
||||
.iter()
|
||||
.take(5)
|
||||
.map(|x| format!("{:p}", (x.ip() as *const u8).wrapping_offset(addrfix)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_cmd<P: AsRef<OsStr>>(exe: P, me: &Path, flags: Option<&str>, trace: &str) -> String {
|
||||
let mut cmd = Command::new(exe);
|
||||
cmd.env("LC_ALL", "C"); // GNU addr2line is localized, we aren't
|
||||
cmd.env("RUST_BACKTRACE", "1"); // if a child crashes, we want to know why
|
||||
|
||||
if let Some(flags) = flags {
|
||||
cmd.arg(flags);
|
||||
}
|
||||
cmd.arg("--exe").arg(me).arg(trace);
|
||||
|
||||
let output = cmd.output().unwrap();
|
||||
|
||||
assert!(output.status.success());
|
||||
String::from_utf8(output.stdout).unwrap()
|
||||
}
|
||||
|
||||
fn run_test(flags: Option<&str>) -> Result<(), Failed> {
|
||||
let me = env::current_exe().unwrap();
|
||||
let mut exe = me.clone();
|
||||
assert!(exe.pop());
|
||||
if exe.file_name().unwrap().to_str().unwrap() == "deps" {
|
||||
assert!(exe.pop());
|
||||
}
|
||||
exe.push("examples");
|
||||
exe.push("addr2line");
|
||||
|
||||
assert!(exe.is_file());
|
||||
|
||||
let trace = make_trace();
|
||||
|
||||
// HACK: GNU addr2line has a bug where looking up multiple addresses can cause the second
|
||||
// lookup to fail. Workaround by doing one address at a time.
|
||||
for addr in &trace {
|
||||
let theirs = run_cmd("addr2line", &me, flags, addr);
|
||||
let ours = run_cmd(&exe, &me, flags, addr);
|
||||
|
||||
// HACK: GNU addr2line does not tidy up paths properly, causing double slashes to be printed.
|
||||
// We consider our behavior to be correct, so we fix their output to match ours.
|
||||
let theirs = theirs.replace("//", "/");
|
||||
|
||||
assert!(
|
||||
theirs == ours,
|
||||
"Output not equivalent:
|
||||
|
||||
$ addr2line {0} --exe {1} {2}
|
||||
{4}
|
||||
$ {3} {0} --exe {1} {2}
|
||||
{5}
|
||||
|
||||
|
||||
",
|
||||
flags.unwrap_or(""),
|
||||
me.display(),
|
||||
trace.join(" "),
|
||||
exe.display(),
|
||||
theirs,
|
||||
ours
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
static FLAGS: &str = "aipsf";
|
||||
|
||||
fn make_tests() -> Vec<Trial> {
|
||||
(0..(1 << FLAGS.len()))
|
||||
.map(|bits| {
|
||||
if bits == 0 {
|
||||
None
|
||||
} else {
|
||||
let mut param = String::new();
|
||||
param.push('-');
|
||||
for (i, flag) in FLAGS.chars().enumerate() {
|
||||
if (bits & (1 << i)) != 0 {
|
||||
param.push(flag);
|
||||
}
|
||||
}
|
||||
Some(param)
|
||||
}
|
||||
})
|
||||
.map(|param| {
|
||||
Trial::test(
|
||||
format!("addr2line {}", param.as_ref().map_or("", String::as_str)),
|
||||
move || run_test(param.as_ref().map(String::as_str)),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn main() {
|
||||
if !cfg!(target_os = "linux") {
|
||||
return;
|
||||
}
|
||||
let args = Arguments::from_args();
|
||||
libtest_mimic::run(&args, make_tests()).exit();
|
||||
}
|
||||
114
vendor/addr2line/tests/parse.rs
vendored
114
vendor/addr2line/tests/parse.rs
vendored
@@ -1,114 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::path::{self, PathBuf};
|
||||
|
||||
use object::Object;
|
||||
|
||||
fn release_fixture_path() -> PathBuf {
|
||||
if let Ok(p) = env::var("ADDR2LINE_FIXTURE_PATH") {
|
||||
return p.into();
|
||||
}
|
||||
|
||||
let mut path = PathBuf::new();
|
||||
if let Ok(dir) = env::var("CARGO_MANIFEST_DIR") {
|
||||
path.push(dir);
|
||||
}
|
||||
path.push("fixtures");
|
||||
path.push("addr2line-release");
|
||||
path
|
||||
}
|
||||
|
||||
fn with_file<F: FnOnce(&object::File<'_>)>(target: &path::Path, f: F) {
|
||||
let file = File::open(target).unwrap();
|
||||
let map = unsafe { memmap2::Mmap::map(&file).unwrap() };
|
||||
let file = object::File::parse(&*map).unwrap();
|
||||
f(&file)
|
||||
}
|
||||
|
||||
fn dwarf_load<'a>(object: &object::File<'a>) -> gimli::Dwarf<Cow<'a, [u8]>> {
|
||||
let load_section = |id: gimli::SectionId| -> Result<Cow<'a, [u8]>, gimli::Error> {
|
||||
use object::ObjectSection;
|
||||
|
||||
let data = object
|
||||
.section_by_name(id.name())
|
||||
.and_then(|section| section.data().ok())
|
||||
.unwrap_or(&[][..]);
|
||||
Ok(Cow::Borrowed(data))
|
||||
};
|
||||
gimli::Dwarf::load(&load_section).unwrap()
|
||||
}
|
||||
|
||||
fn dwarf_borrow<'a>(
|
||||
dwarf: &'a gimli::Dwarf<Cow<'_, [u8]>>,
|
||||
) -> gimli::Dwarf<gimli::EndianSlice<'a, gimli::LittleEndian>> {
|
||||
let borrow_section: &dyn for<'b> Fn(
|
||||
&'b Cow<'_, [u8]>,
|
||||
) -> gimli::EndianSlice<'b, gimli::LittleEndian> =
|
||||
&|section| gimli::EndianSlice::new(section, gimli::LittleEndian);
|
||||
dwarf.borrow(&borrow_section)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_base_rc() {
|
||||
let target = release_fixture_path();
|
||||
|
||||
with_file(&target, |file| {
|
||||
addr2line::ObjectContext::new(file).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_base_slice() {
|
||||
let target = release_fixture_path();
|
||||
|
||||
with_file(&target, |file| {
|
||||
let dwarf = dwarf_load(file);
|
||||
let dwarf = dwarf_borrow(&dwarf);
|
||||
addr2line::Context::from_dwarf(dwarf).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_lines_rc() {
|
||||
let target = release_fixture_path();
|
||||
|
||||
with_file(&target, |file| {
|
||||
let context = addr2line::ObjectContext::new(file).unwrap();
|
||||
context.parse_lines().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_lines_slice() {
|
||||
let target = release_fixture_path();
|
||||
|
||||
with_file(&target, |file| {
|
||||
let dwarf = dwarf_load(file);
|
||||
let dwarf = dwarf_borrow(&dwarf);
|
||||
let context = addr2line::Context::from_dwarf(dwarf).unwrap();
|
||||
context.parse_lines().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_functions_rc() {
|
||||
let target = release_fixture_path();
|
||||
|
||||
with_file(&target, |file| {
|
||||
let context = addr2line::ObjectContext::new(file).unwrap();
|
||||
context.parse_functions().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_functions_slice() {
|
||||
let target = release_fixture_path();
|
||||
|
||||
with_file(&target, |file| {
|
||||
let dwarf = dwarf_load(file);
|
||||
let dwarf = dwarf_borrow(&dwarf);
|
||||
let context = addr2line::Context::from_dwarf(dwarf).unwrap();
|
||||
context.parse_functions().unwrap();
|
||||
});
|
||||
}
|
||||
1
vendor/adler/.cargo-checksum.json
vendored
1
vendor/adler/.cargo-checksum.json
vendored
@@ -1 +0,0 @@
|
||||
{"files":{"CHANGELOG.md":"737088e45fdf27fe2cfedce163332d8ce08c58fd86ca287de2de34c0fbaf63e7","Cargo.toml":"f410869f0f1a5697f65a8a77be03da7aeecc0be26e7cf3a1feb1acaa4f518770","LICENSE-0BSD":"861399f8c21c042b110517e76dc6b63a2b334276c8cf17412fc3c8908ca8dc17","LICENSE-APACHE":"8ada45cd9f843acf64e4722ae262c622a2b3b3007c7310ef36ac1061a30f6adb","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"308c50cdb42b9573743068158339570b45ca3f895015ca3b87ba983edb0a21e6","RELEASE_PROCESS.md":"a86cd10fc70f167f8d00e9e4ce0c6b4ebdfa1865058390dffd1e0ad4d3e68d9d","benches/bench.rs":"c07ce370e3680c602e415f8d1ec4e543ea2163ab22a09b6b82d93e8a30adca82","src/algo.rs":"b664b131f724a809591394a10b9023f40ab5963e32a83fa3163c2668e59c8b66","src/lib.rs":"b55ba9c629b30360d08168b2ca0c96275432856a539737a105a6d6ae6bf7e88f"},"package":"f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"}
|
||||
63
vendor/adler/CHANGELOG.md
vendored
63
vendor/adler/CHANGELOG.md
vendored
@@ -1,63 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
## Unreleased
|
||||
|
||||
No changes.
|
||||
|
||||
## [1.0.2 - 2021-02-26](https://github.com/jonas-schievink/adler/releases/tag/v1.0.2)
|
||||
|
||||
- Fix doctest on big-endian systems ([#9]).
|
||||
|
||||
[#9]: https://github.com/jonas-schievink/adler/pull/9
|
||||
|
||||
## [1.0.1 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.1)
|
||||
|
||||
### Fixes
|
||||
|
||||
- Fix documentation on docs.rs.
|
||||
|
||||
## [1.0.0 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.0)
|
||||
|
||||
### Fixes
|
||||
|
||||
- Fix `cargo test --no-default-features` ([#5]).
|
||||
|
||||
### Improvements
|
||||
|
||||
- Extended and clarified documentation.
|
||||
- Added more rustdoc examples.
|
||||
- Extended CI to test the crate with `--no-default-features`.
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- `adler32_reader` now takes its generic argument by value instead of as a `&mut`.
|
||||
- Renamed `adler32_reader` to `adler32`.
|
||||
|
||||
## [0.2.3 - 2020-07-11](https://github.com/jonas-schievink/adler/releases/tag/v0.2.3)
|
||||
|
||||
- Process 4 Bytes at a time, improving performance by up to 50% ([#2]).
|
||||
|
||||
## [0.2.2 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.2)
|
||||
|
||||
- Bump MSRV to 1.31.0.
|
||||
|
||||
## [0.2.1 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.1)
|
||||
|
||||
- Add a few `#[inline]` annotations to small functions.
|
||||
- Fix CI badge.
|
||||
- Allow integration into libstd.
|
||||
|
||||
## [0.2.0 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.0)
|
||||
|
||||
- Support `#![no_std]` when using `default-features = false`.
|
||||
- Improve performance by around 7x.
|
||||
- Support Rust 1.8.0.
|
||||
- Improve API naming.
|
||||
|
||||
## [0.1.0 - 2020-06-26](https://github.com/jonas-schievink/adler/releases/tag/v0.1.0)
|
||||
|
||||
Initial release.
|
||||
|
||||
|
||||
[#2]: https://github.com/jonas-schievink/adler/pull/2
|
||||
[#5]: https://github.com/jonas-schievink/adler/pull/5
|
||||
64
vendor/adler/Cargo.toml
vendored
64
vendor/adler/Cargo.toml
vendored
@@ -1,64 +0,0 @@
|
||||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "adler"
|
||||
version = "1.0.2"
|
||||
authors = ["Jonas Schievink <jonasschievink@gmail.com>"]
|
||||
description = "A simple clean-room implementation of the Adler-32 checksum"
|
||||
documentation = "https://docs.rs/adler/"
|
||||
readme = "README.md"
|
||||
keywords = ["checksum", "integrity", "hash", "adler32", "zlib"]
|
||||
categories = ["algorithms"]
|
||||
license = "0BSD OR MIT OR Apache-2.0"
|
||||
repository = "https://github.com/jonas-schievink/adler.git"
|
||||
[package.metadata.docs.rs]
|
||||
rustdoc-args = ["--cfg=docsrs"]
|
||||
|
||||
[package.metadata.release]
|
||||
no-dev-version = true
|
||||
pre-release-commit-message = "Release {{version}}"
|
||||
tag-message = "{{version}}"
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
file = "CHANGELOG.md"
|
||||
replace = "## Unreleased\n\nNo changes.\n\n## [{{version}} - {{date}}](https://github.com/jonas-schievink/adler/releases/tag/v{{version}})\n"
|
||||
search = "## Unreleased\n"
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
file = "README.md"
|
||||
replace = "adler = \"{{version}}\""
|
||||
search = "adler = \"[a-z0-9\\\\.-]+\""
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
file = "src/lib.rs"
|
||||
replace = "https://docs.rs/adler/{{version}}"
|
||||
search = "https://docs.rs/adler/[a-z0-9\\.-]+"
|
||||
|
||||
[[bench]]
|
||||
name = "bench"
|
||||
harness = false
|
||||
[dependencies.compiler_builtins]
|
||||
version = "0.1.2"
|
||||
optional = true
|
||||
|
||||
[dependencies.core]
|
||||
version = "1.0.0"
|
||||
optional = true
|
||||
package = "rustc-std-workspace-core"
|
||||
[dev-dependencies.criterion]
|
||||
version = "0.3.2"
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
rustc-dep-of-std = ["core", "compiler_builtins"]
|
||||
std = []
|
||||
12
vendor/adler/LICENSE-0BSD
vendored
12
vendor/adler/LICENSE-0BSD
vendored
@@ -1,12 +0,0 @@
|
||||
Copyright (C) Jonas Schievink <jonasschievink@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for
|
||||
any purpose with or without fee is hereby granted.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
|
||||
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
201
vendor/adler/LICENSE-APACHE
vendored
201
vendor/adler/LICENSE-APACHE
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
23
vendor/adler/LICENSE-MIT
vendored
23
vendor/adler/LICENSE-MIT
vendored
@@ -1,23 +0,0 @@
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
39
vendor/adler/README.md
vendored
39
vendor/adler/README.md
vendored
@@ -1,39 +0,0 @@
|
||||
# Adler-32 checksums for Rust
|
||||
|
||||
[](https://crates.io/crates/adler)
|
||||
[](https://docs.rs/adler/)
|
||||

|
||||
|
||||
This crate provides a simple implementation of the Adler-32 checksum, used in
|
||||
the zlib compression format.
|
||||
|
||||
Please refer to the [changelog](CHANGELOG.md) to see what changed in the last
|
||||
releases.
|
||||
|
||||
## Features
|
||||
|
||||
- Permissively licensed (0BSD) clean-room implementation.
|
||||
- Zero dependencies.
|
||||
- Zero `unsafe`.
|
||||
- Decent performance (3-4 GB/s).
|
||||
- Supports `#![no_std]` (with `default-features = false`).
|
||||
|
||||
## Usage
|
||||
|
||||
Add an entry to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
adler = "1.0.2"
|
||||
```
|
||||
|
||||
Check the [API Documentation](https://docs.rs/adler/) for how to use the
|
||||
crate's functionality.
|
||||
|
||||
## Rust version support
|
||||
|
||||
Currently, this crate supports all Rust versions starting at Rust 1.31.0.
|
||||
|
||||
Bumping the Minimum Supported Rust Version (MSRV) is *not* considered a breaking
|
||||
change, but will not be done without good reasons. The latest 3 stable Rust
|
||||
versions will always be supported no matter what.
|
||||
13
vendor/adler/RELEASE_PROCESS.md
vendored
13
vendor/adler/RELEASE_PROCESS.md
vendored
@@ -1,13 +0,0 @@
|
||||
# What to do to publish a new release
|
||||
|
||||
1. Ensure all notable changes are in the changelog under "Unreleased".
|
||||
|
||||
2. Execute `cargo release <level>` to bump version(s), tag and publish
|
||||
everything. External subcommand, must be installed with `cargo install
|
||||
cargo-release`.
|
||||
|
||||
`<level>` can be one of `major|minor|patch`. If this is the first release
|
||||
(`0.1.0`), use `minor`, since the version starts out as `0.0.0`.
|
||||
|
||||
3. Go to the GitHub releases, edit the just-pushed tag. Copy the release notes
|
||||
from the changelog.
|
||||
109
vendor/adler/benches/bench.rs
vendored
109
vendor/adler/benches/bench.rs
vendored
@@ -1,109 +0,0 @@
|
||||
extern crate adler;
|
||||
extern crate criterion;
|
||||
|
||||
use adler::{adler32_slice, Adler32};
|
||||
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
|
||||
|
||||
fn simple(c: &mut Criterion) {
|
||||
{
|
||||
const SIZE: usize = 100;
|
||||
|
||||
let mut group = c.benchmark_group("simple-100b");
|
||||
group.throughput(Throughput::Bytes(SIZE as u64));
|
||||
group.bench_function("zeroes-100", |bencher| {
|
||||
bencher.iter(|| {
|
||||
adler32_slice(&[0; SIZE]);
|
||||
});
|
||||
});
|
||||
group.bench_function("ones-100", |bencher| {
|
||||
bencher.iter(|| {
|
||||
adler32_slice(&[0xff; SIZE]);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
const SIZE: usize = 1024;
|
||||
|
||||
let mut group = c.benchmark_group("simple-1k");
|
||||
group.throughput(Throughput::Bytes(SIZE as u64));
|
||||
|
||||
group.bench_function("zeroes-1k", |bencher| {
|
||||
bencher.iter(|| {
|
||||
adler32_slice(&[0; SIZE]);
|
||||
});
|
||||
});
|
||||
|
||||
group.bench_function("ones-1k", |bencher| {
|
||||
bencher.iter(|| {
|
||||
adler32_slice(&[0xff; SIZE]);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
const SIZE: usize = 1024 * 1024;
|
||||
|
||||
let mut group = c.benchmark_group("simple-1m");
|
||||
group.throughput(Throughput::Bytes(SIZE as u64));
|
||||
group.bench_function("zeroes-1m", |bencher| {
|
||||
bencher.iter(|| {
|
||||
adler32_slice(&[0; SIZE]);
|
||||
});
|
||||
});
|
||||
|
||||
group.bench_function("ones-1m", |bencher| {
|
||||
bencher.iter(|| {
|
||||
adler32_slice(&[0xff; SIZE]);
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn chunked(c: &mut Criterion) {
|
||||
const SIZE: usize = 16 * 1024 * 1024;
|
||||
|
||||
let data = vec![0xAB; SIZE];
|
||||
|
||||
let mut group = c.benchmark_group("chunked-16m");
|
||||
group.throughput(Throughput::Bytes(SIZE as u64));
|
||||
group.bench_function("5552", |bencher| {
|
||||
bencher.iter(|| {
|
||||
let mut h = Adler32::new();
|
||||
for chunk in data.chunks(5552) {
|
||||
h.write_slice(chunk);
|
||||
}
|
||||
h.checksum()
|
||||
});
|
||||
});
|
||||
group.bench_function("8k", |bencher| {
|
||||
bencher.iter(|| {
|
||||
let mut h = Adler32::new();
|
||||
for chunk in data.chunks(8 * 1024) {
|
||||
h.write_slice(chunk);
|
||||
}
|
||||
h.checksum()
|
||||
});
|
||||
});
|
||||
group.bench_function("64k", |bencher| {
|
||||
bencher.iter(|| {
|
||||
let mut h = Adler32::new();
|
||||
for chunk in data.chunks(64 * 1024) {
|
||||
h.write_slice(chunk);
|
||||
}
|
||||
h.checksum()
|
||||
});
|
||||
});
|
||||
group.bench_function("1m", |bencher| {
|
||||
bencher.iter(|| {
|
||||
let mut h = Adler32::new();
|
||||
for chunk in data.chunks(1024 * 1024) {
|
||||
h.write_slice(chunk);
|
||||
}
|
||||
h.checksum()
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, simple, chunked);
|
||||
criterion_main!(benches);
|
||||
146
vendor/adler/src/algo.rs
vendored
146
vendor/adler/src/algo.rs
vendored
@@ -1,146 +0,0 @@
|
||||
use crate::Adler32;
|
||||
use std::ops::{AddAssign, MulAssign, RemAssign};
|
||||
|
||||
impl Adler32 {
|
||||
pub(crate) fn compute(&mut self, bytes: &[u8]) {
|
||||
// The basic algorithm is, for every byte:
|
||||
// a = (a + byte) % MOD
|
||||
// b = (b + a) % MOD
|
||||
// where MOD = 65521.
|
||||
//
|
||||
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
|
||||
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
|
||||
// - We use 32-bit arithmetic in this function.
|
||||
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
|
||||
// operation.
|
||||
//
|
||||
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
|
||||
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
|
||||
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
|
||||
// for the previous values of a and b, as well as treat every input Byte as being 255:
|
||||
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
|
||||
// Or in other words:
|
||||
// b_inc = n*65520 + n(n+1)/2*255
|
||||
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
|
||||
// 2^32-65521 = n*65520 + n(n+1)/2*255
|
||||
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
|
||||
//
|
||||
// On top of the optimization outlined above, the algorithm can also be parallelized with a
|
||||
// bit more work:
|
||||
//
|
||||
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
|
||||
//
|
||||
// If we fix some value k<N and rewrite indices 1, ..., N as
|
||||
//
|
||||
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
|
||||
//
|
||||
// then we can express a and b in terms of sums of smaller sequences kb and ka:
|
||||
//
|
||||
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
|
||||
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
|
||||
//
|
||||
// a = ka(1) + ka(2) + ... + ka(k) + 1
|
||||
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
|
||||
//
|
||||
// We use this insight to unroll the main loop and process k=4 bytes at a time.
|
||||
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
|
||||
// stem from increased pipeline parallelism rather than auto-vectorization.
|
||||
//
|
||||
// This technique is described in-depth (here:)[https://software.intel.com/content/www/us/\
|
||||
// en/develop/articles/fast-computation-of-fletcher-checksums.html]
|
||||
|
||||
const MOD: u32 = 65521;
|
||||
const CHUNK_SIZE: usize = 5552 * 4;
|
||||
|
||||
let mut a = u32::from(self.a);
|
||||
let mut b = u32::from(self.b);
|
||||
let mut a_vec = U32X4([0; 4]);
|
||||
let mut b_vec = a_vec;
|
||||
|
||||
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
|
||||
|
||||
// iterate over 4 bytes at a time
|
||||
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
|
||||
let remainder_chunk = chunk_iter.remainder();
|
||||
for chunk in chunk_iter {
|
||||
for byte_vec in chunk.chunks_exact(4) {
|
||||
let val = U32X4::from(byte_vec);
|
||||
a_vec += val;
|
||||
b_vec += a_vec;
|
||||
}
|
||||
b += CHUNK_SIZE as u32 * a;
|
||||
a_vec %= MOD;
|
||||
b_vec %= MOD;
|
||||
b %= MOD;
|
||||
}
|
||||
// special-case the final chunk because it may be shorter than the rest
|
||||
for byte_vec in remainder_chunk.chunks_exact(4) {
|
||||
let val = U32X4::from(byte_vec);
|
||||
a_vec += val;
|
||||
b_vec += a_vec;
|
||||
}
|
||||
b += remainder_chunk.len() as u32 * a;
|
||||
a_vec %= MOD;
|
||||
b_vec %= MOD;
|
||||
b %= MOD;
|
||||
|
||||
// combine the sub-sum results into the main sum
|
||||
b_vec *= 4;
|
||||
b_vec.0[1] += MOD - a_vec.0[1];
|
||||
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
|
||||
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
|
||||
for &av in a_vec.0.iter() {
|
||||
a += av;
|
||||
}
|
||||
for &bv in b_vec.0.iter() {
|
||||
b += bv;
|
||||
}
|
||||
|
||||
// iterate over the remaining few bytes in serial
|
||||
for &byte in remainder.iter() {
|
||||
a += u32::from(byte);
|
||||
b += a;
|
||||
}
|
||||
|
||||
self.a = (a % MOD) as u16;
|
||||
self.b = (b % MOD) as u16;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct U32X4([u32; 4]);
|
||||
|
||||
impl U32X4 {
|
||||
fn from(bytes: &[u8]) -> Self {
|
||||
U32X4([
|
||||
u32::from(bytes[0]),
|
||||
u32::from(bytes[1]),
|
||||
u32::from(bytes[2]),
|
||||
u32::from(bytes[3]),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
impl AddAssign<Self> for U32X4 {
|
||||
fn add_assign(&mut self, other: Self) {
|
||||
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
|
||||
*s += o;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RemAssign<u32> for U32X4 {
|
||||
fn rem_assign(&mut self, quotient: u32) {
|
||||
for s in self.0.iter_mut() {
|
||||
*s %= quotient;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MulAssign<u32> for U32X4 {
|
||||
fn mul_assign(&mut self, rhs: u32) {
|
||||
for s in self.0.iter_mut() {
|
||||
*s *= rhs;
|
||||
}
|
||||
}
|
||||
}
|
||||
287
vendor/adler/src/lib.rs
vendored
287
vendor/adler/src/lib.rs
vendored
@@ -1,287 +0,0 @@
|
||||
//! Adler-32 checksum implementation.
|
||||
//!
|
||||
//! This implementation features:
|
||||
//!
|
||||
//! - Permissively licensed (0BSD) clean-room implementation.
|
||||
//! - Zero dependencies.
|
||||
//! - Zero `unsafe`.
|
||||
//! - Decent performance (3-4 GB/s).
|
||||
//! - `#![no_std]` support (with `default-features = false`).
|
||||
|
||||
#![doc(html_root_url = "https://docs.rs/adler/1.0.2")]
|
||||
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
|
||||
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![warn(missing_debug_implementations)]
|
||||
#![forbid(unsafe_code)]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
#[cfg(not(feature = "std"))]
|
||||
extern crate core as std;
|
||||
|
||||
mod algo;
|
||||
|
||||
use std::hash::Hasher;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
use std::io::{self, BufRead};
|
||||
|
||||
/// Adler-32 checksum calculator.
|
||||
///
|
||||
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
|
||||
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
|
||||
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
|
||||
///
|
||||
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
|
||||
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
|
||||
/// that is not recommended (while every checksum is a hash function, they are not necessarily a
|
||||
/// good one).
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Basic, piecewise checksum calculation:
|
||||
///
|
||||
/// ```
|
||||
/// use adler::Adler32;
|
||||
///
|
||||
/// let mut adler = Adler32::new();
|
||||
///
|
||||
/// adler.write_slice(&[0, 1, 2]);
|
||||
/// adler.write_slice(&[3, 4, 5]);
|
||||
///
|
||||
/// assert_eq!(adler.checksum(), 0x00290010);
|
||||
/// ```
|
||||
///
|
||||
/// Using `Hash` to process structures:
|
||||
///
|
||||
/// ```
|
||||
/// use std::hash::Hash;
|
||||
/// use adler::Adler32;
|
||||
///
|
||||
/// #[derive(Hash)]
|
||||
/// struct Data {
|
||||
/// byte: u8,
|
||||
/// word: u16,
|
||||
/// big: u64,
|
||||
/// }
|
||||
///
|
||||
/// let mut adler = Adler32::new();
|
||||
///
|
||||
/// let data = Data { byte: 0x1F, word: 0xABCD, big: !0 };
|
||||
/// data.hash(&mut adler);
|
||||
///
|
||||
/// // hash value depends on architecture endianness
|
||||
/// if cfg!(target_endian = "little") {
|
||||
/// assert_eq!(adler.checksum(), 0x33410990);
|
||||
/// }
|
||||
/// if cfg!(target_endian = "big") {
|
||||
/// assert_eq!(adler.checksum(), 0x331F0990);
|
||||
/// }
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
/// [`new`]: #method.new
|
||||
/// [`from_checksum`]: #method.from_checksum
|
||||
/// [`checksum`]: #method.checksum
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct Adler32 {
|
||||
a: u16,
|
||||
b: u16,
|
||||
}
|
||||
|
||||
impl Adler32 {
|
||||
/// Creates a new Adler-32 instance with default state.
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
|
||||
///
|
||||
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
|
||||
/// around.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// # use adler::Adler32;
|
||||
/// let parts = [
|
||||
/// "rust",
|
||||
/// "acean",
|
||||
/// ];
|
||||
/// let whole = adler::adler32_slice(b"rustacean");
|
||||
///
|
||||
/// let mut sum = Adler32::new();
|
||||
/// sum.write_slice(parts[0].as_bytes());
|
||||
/// let partial = sum.checksum();
|
||||
///
|
||||
/// // ...later
|
||||
///
|
||||
/// let mut sum = Adler32::from_checksum(partial);
|
||||
/// sum.write_slice(parts[1].as_bytes());
|
||||
/// assert_eq!(sum.checksum(), whole);
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn from_checksum(sum: u32) -> Self {
|
||||
Adler32 {
|
||||
a: sum as u16,
|
||||
b: (sum >> 16) as u16,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the calculated checksum at this point in time.
|
||||
#[inline]
|
||||
pub fn checksum(&self) -> u32 {
|
||||
(u32::from(self.b) << 16) | u32::from(self.a)
|
||||
}
|
||||
|
||||
/// Adds `bytes` to the checksum calculation.
|
||||
///
|
||||
/// If efficiency matters, this should be called with Byte slices that contain at least a few
|
||||
/// thousand Bytes.
|
||||
pub fn write_slice(&mut self, bytes: &[u8]) {
|
||||
self.compute(bytes);
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Adler32 {
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
Adler32 { a: 1, b: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for Adler32 {
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
u64::from(self.checksum())
|
||||
}
|
||||
|
||||
fn write(&mut self, bytes: &[u8]) {
|
||||
self.write_slice(bytes);
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculates the Adler-32 checksum of a byte slice.
|
||||
///
|
||||
/// This is a convenience function around the [`Adler32`] type.
|
||||
///
|
||||
/// [`Adler32`]: struct.Adler32.html
|
||||
pub fn adler32_slice(data: &[u8]) -> u32 {
|
||||
let mut h = Adler32::new();
|
||||
h.write_slice(data);
|
||||
h.checksum()
|
||||
}
|
||||
|
||||
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
|
||||
///
|
||||
/// The passed `BufRead` implementor will be read until it reaches EOF (or until it reports an
|
||||
/// error).
|
||||
///
|
||||
/// If you only have a `Read` implementor, you can wrap it in `std::io::BufReader` before calling
|
||||
/// this function.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Any error returned by the reader are bubbled up by this function.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// # fn run() -> Result<(), Box<dyn std::error::Error>> {
|
||||
/// use adler::adler32;
|
||||
///
|
||||
/// use std::fs::File;
|
||||
/// use std::io::BufReader;
|
||||
///
|
||||
/// let file = File::open("input.txt")?;
|
||||
/// let mut file = BufReader::new(file);
|
||||
///
|
||||
/// adler32(&mut file)?;
|
||||
/// # Ok(()) }
|
||||
/// # fn main() { run().unwrap() }
|
||||
/// ```
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
|
||||
pub fn adler32<R: BufRead>(mut reader: R) -> io::Result<u32> {
|
||||
let mut h = Adler32::new();
|
||||
loop {
|
||||
let len = {
|
||||
let buf = reader.fill_buf()?;
|
||||
if buf.is_empty() {
|
||||
return Ok(h.checksum());
|
||||
}
|
||||
|
||||
h.write_slice(buf);
|
||||
buf.len()
|
||||
};
|
||||
reader.consume(len);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn zeroes() {
|
||||
assert_eq!(adler32_slice(&[]), 1);
|
||||
assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
|
||||
assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
|
||||
assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
|
||||
assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
|
||||
assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ones() {
|
||||
assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
|
||||
assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mixed() {
|
||||
assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
|
||||
assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
|
||||
|
||||
assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
|
||||
}
|
||||
|
||||
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
|
||||
#[test]
|
||||
fn wiki() {
|
||||
assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resume() {
|
||||
let mut adler = Adler32::new();
|
||||
adler.write_slice(&[0xff; 1024]);
|
||||
let partial = adler.checksum();
|
||||
assert_eq!(partial, 0x79a6fc2e); // from above
|
||||
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
|
||||
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
|
||||
|
||||
// Make sure that we can resume computing from the partial checksum via `from_checksum`.
|
||||
let mut adler = Adler32::from_checksum(partial);
|
||||
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
|
||||
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[test]
|
||||
fn bufread() {
|
||||
use std::io::BufReader;
|
||||
fn test(data: &[u8], checksum: u32) {
|
||||
// `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
|
||||
let mut buf = BufReader::new(data);
|
||||
let real_sum = adler32(&mut buf).unwrap();
|
||||
assert_eq!(checksum, real_sum);
|
||||
}
|
||||
|
||||
test(&[], 1);
|
||||
test(&[0; 1024], 0x04000001);
|
||||
test(&[0; 1024 * 1024], 0x00f00001);
|
||||
test(&[0xA5; 1024 * 1024], 0xd5009ab1);
|
||||
}
|
||||
}
|
||||
1
vendor/anstream/.cargo-checksum.json
vendored
1
vendor/anstream/.cargo-checksum.json
vendored
@@ -1 +0,0 @@
|
||||
{"files":{"Cargo.lock":"e89078a9d7e89f125bea210c74fd30ef1167c208b9b240baa3fe76ec1170f6ec","Cargo.toml":"38deb1bfcca1eaef87c409274c63f9b25df94f6faaebc74061fa7ef1e4f078f1","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"6efb0476a1cc085077ed49357026d8c173bf33017278ef440f222fb9cbcb66e6","README.md":"b230c2257d0c7a49b9bd97f2fa73abedcdc055757b5cedd2b0eb1a7a448ff461","benches/stream.rs":"7e666c4f4b79ddb5237361ed25264a966ee241192fbb2c1baea3006e3e0326b4","benches/strip.rs":"9603bd5ca1ae4661c2ccab50315dbfdec0c661ac2624262172bbd8f5d0bd87c9","benches/wincon.rs":"680e86933c008b242a3286c5149c33d3c086426eb99fe134b6e79f7578f96663","examples/dump-stream.rs":"54b2bce2409fc1a1f00dbdcab7abbbb6cde447fa20b5c829d1b17ce2e15eefd1","examples/query-stream.rs":"16f38843083174fbefa974a5aa38a5f3ffa51bd6e6db3dc1d91164462219399e","src/adapter/mod.rs":"baf4237ea0b18df63609e49d93572ca27c2202a4cbec0220adb5a7e815c7d8ed","src/adapter/strip.rs":"010972f96708c56da9bced98287f134ce43a4f6459c22c1697abdc4fd6f82d00","src/adapter/wincon.rs":"07d75878ca9edcef4f473a5ff6113b40aab681dcbcd1ae9de1ec895332f7cc2a","src/auto.rs":"71c249ab6b0af64c3946817ea9f1719d4b789128c244611a05075b1e13413007","src/buffer.rs":"83e7088b50dd3e2941c06a417d9eef75fda45311a2912ba94f480ec98d6f0183","src/fmt.rs":"cc11b005c4559843bd908a57958a13c8d0922fae6aff5261f3583c90e60da73c","src/lib.rs":"649b86b187835e0e33baaaf2242c5f331b7dff133fae8fc419c52b7add797c57","src/macros.rs":"a26ababe32a39732d0aade9674f6e5e267bd26c6ea06603ff9e61e80681195e0","src/stream.rs":"cbe8f61fba4c3c60934339c8bda5d1ff43320f57cdc4ed409aa173945a941b3d","src/strip.rs":"56e6516283b6c0dfa72a8e0e6679da8424295f50a3e56c44281e76de6aa0344b","src/wincon.rs":"fe5aff7bfd80b14c9a6b07143079d59b81831293ad766b845e46fad2e1459c9a"},"package":"d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6"}
|
||||
1094
vendor/anstream/Cargo.lock
generated
vendored
1094
vendor/anstream/Cargo.lock
generated
vendored
File diff suppressed because it is too large
Load Diff
144
vendor/anstream/Cargo.toml
vendored
144
vendor/anstream/Cargo.toml
vendored
@@ -1,144 +0,0 @@
|
||||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2021"
|
||||
rust-version = "1.70.0"
|
||||
name = "anstream"
|
||||
version = "0.6.5"
|
||||
include = [
|
||||
"build.rs",
|
||||
"src/**/*",
|
||||
"Cargo.toml",
|
||||
"Cargo.lock",
|
||||
"LICENSE*",
|
||||
"README.md",
|
||||
"benches/**/*",
|
||||
"examples/**/*",
|
||||
]
|
||||
description = "A simple cross platform library for writing colored text to a terminal."
|
||||
homepage = "https://github.com/rust-cli/anstyle"
|
||||
readme = "README.md"
|
||||
keywords = [
|
||||
"ansi",
|
||||
"terminal",
|
||||
"color",
|
||||
"strip",
|
||||
"wincon",
|
||||
]
|
||||
categories = ["command-line-interface"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/rust-cli/anstyle.git"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
cargo-args = [
|
||||
"-Zunstable-options",
|
||||
"-Zrustdoc-scrape-examples",
|
||||
]
|
||||
rustdoc-args = [
|
||||
"--cfg",
|
||||
"docsrs",
|
||||
]
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
file = "CHANGELOG.md"
|
||||
min = 1
|
||||
replace = "{{version}}"
|
||||
search = "Unreleased"
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
exactly = 1
|
||||
file = "CHANGELOG.md"
|
||||
replace = "...{{tag_name}}"
|
||||
search = '\.\.\.HEAD'
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
file = "CHANGELOG.md"
|
||||
min = 1
|
||||
replace = "{{date}}"
|
||||
search = "ReleaseDate"
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
exactly = 1
|
||||
file = "CHANGELOG.md"
|
||||
replace = """
|
||||
<!-- next-header -->
|
||||
## [Unreleased] - ReleaseDate
|
||||
"""
|
||||
search = "<!-- next-header -->"
|
||||
|
||||
[[package.metadata.release.pre-release-replacements]]
|
||||
exactly = 1
|
||||
file = "CHANGELOG.md"
|
||||
replace = """
|
||||
<!-- next-url -->
|
||||
[Unreleased]: https://github.com/rust-cli/anstyle/compare/{{tag_name}}...HEAD"""
|
||||
search = "<!-- next-url -->"
|
||||
|
||||
[[bench]]
|
||||
name = "strip"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "wincon"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "stream"
|
||||
harness = false
|
||||
|
||||
[dependencies.anstyle]
|
||||
version = "1.0.0"
|
||||
|
||||
[dependencies.anstyle-parse]
|
||||
version = "0.2.0"
|
||||
|
||||
[dependencies.anstyle-query]
|
||||
version = "1.0.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.colorchoice]
|
||||
version = "1.0.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.utf8parse]
|
||||
version = "0.2.1"
|
||||
|
||||
[dev-dependencies.criterion]
|
||||
version = "0.5.1"
|
||||
|
||||
[dev-dependencies.lexopt]
|
||||
version = "0.3.0"
|
||||
|
||||
[dev-dependencies.owo-colors]
|
||||
version = "3.5.0"
|
||||
|
||||
[dev-dependencies.proptest]
|
||||
version = "1.4.0"
|
||||
|
||||
[dev-dependencies.strip-ansi-escapes]
|
||||
version = "0.2.0"
|
||||
|
||||
[features]
|
||||
auto = [
|
||||
"dep:anstyle-query",
|
||||
"dep:colorchoice",
|
||||
]
|
||||
default = [
|
||||
"auto",
|
||||
"wincon",
|
||||
]
|
||||
test = []
|
||||
wincon = ["dep:anstyle-wincon"]
|
||||
|
||||
[target."cfg(windows)".dependencies.anstyle-wincon]
|
||||
version = "3.0.1"
|
||||
optional = true
|
||||
202
vendor/anstream/LICENSE-APACHE
vendored
202
vendor/anstream/LICENSE-APACHE
vendored
@@ -1,202 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
19
vendor/anstream/LICENSE-MIT
vendored
19
vendor/anstream/LICENSE-MIT
vendored
@@ -1,19 +0,0 @@
|
||||
Copyright (c) Individual contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
34
vendor/anstream/README.md
vendored
34
vendor/anstream/README.md
vendored
@@ -1,34 +0,0 @@
|
||||
# anstream
|
||||
|
||||
> A simple cross platform library for writing colored text to a terminal.
|
||||
|
||||
*A portmanteau of "ansi stream"*
|
||||
|
||||
[][Documentation]
|
||||

|
||||
[](https://crates.io/crates/anstream)
|
||||
|
||||
Specialized `stdout` and `stderr` that accept ANSI escape codes and adapt them
|
||||
based on the terminal's capabilities.
|
||||
|
||||
`anstream::adapter::strip_str` may also be of interest on its own for low
|
||||
overhead stripping of ANSI escape codes.
|
||||
|
||||
## License
|
||||
|
||||
Licensed under either of
|
||||
|
||||
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally
|
||||
submitted for inclusion in the work by you, as defined in the Apache-2.0
|
||||
license, shall be dual licensed as above, without any additional terms or
|
||||
conditions.
|
||||
|
||||
[Crates.io]: https://crates.io/crates/anstream
|
||||
[Documentation]: https://docs.rs/anstream
|
||||
81
vendor/anstream/benches/stream.rs
vendored
81
vendor/anstream/benches/stream.rs
vendored
@@ -1,81 +0,0 @@
|
||||
use std::io::Write as _;
|
||||
|
||||
use criterion::{black_box, Criterion};
|
||||
|
||||
fn stream(c: &mut Criterion) {
|
||||
for (name, content) in [
|
||||
("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
|
||||
("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
|
||||
("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
|
||||
(
|
||||
"state_changes",
|
||||
&b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
|
||||
),
|
||||
] {
|
||||
let mut group = c.benchmark_group(name);
|
||||
group.bench_function("nop", |b| {
|
||||
b.iter(|| {
|
||||
let buffer = Vec::with_capacity(content.len());
|
||||
let mut stream = buffer;
|
||||
|
||||
stream.write_all(content).unwrap();
|
||||
|
||||
black_box(stream)
|
||||
})
|
||||
});
|
||||
group.bench_function("StripStream", |b| {
|
||||
b.iter(|| {
|
||||
let buffer = Vec::with_capacity(content.len());
|
||||
let mut stream = anstream::StripStream::new(buffer);
|
||||
|
||||
stream.write_all(content).unwrap();
|
||||
|
||||
black_box(stream)
|
||||
})
|
||||
});
|
||||
#[cfg(all(windows, feature = "wincon"))]
|
||||
group.bench_function("WinconStream", |b| {
|
||||
b.iter(|| {
|
||||
let buffer = Vec::with_capacity(content.len());
|
||||
let mut stream = anstream::WinconStream::new(buffer);
|
||||
|
||||
stream.write_all(content).unwrap();
|
||||
|
||||
black_box(stream)
|
||||
})
|
||||
});
|
||||
group.bench_function("AutoStream::always_ansi", |b| {
|
||||
b.iter(|| {
|
||||
let buffer = Vec::with_capacity(content.len());
|
||||
let mut stream = anstream::AutoStream::always_ansi(buffer);
|
||||
|
||||
stream.write_all(content).unwrap();
|
||||
|
||||
black_box(stream)
|
||||
})
|
||||
});
|
||||
group.bench_function("AutoStream::always", |b| {
|
||||
b.iter(|| {
|
||||
let buffer = Vec::with_capacity(content.len());
|
||||
let mut stream = anstream::AutoStream::always(buffer);
|
||||
|
||||
stream.write_all(content).unwrap();
|
||||
|
||||
black_box(stream)
|
||||
})
|
||||
});
|
||||
group.bench_function("AutoStream::never", |b| {
|
||||
b.iter(|| {
|
||||
let buffer = Vec::with_capacity(content.len());
|
||||
let mut stream = anstream::AutoStream::never(buffer);
|
||||
|
||||
stream.write_all(content).unwrap();
|
||||
|
||||
black_box(stream)
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
criterion::criterion_group!(benches, stream);
|
||||
criterion::criterion_main!(benches);
|
||||
102
vendor/anstream/benches/strip.rs
vendored
102
vendor/anstream/benches/strip.rs
vendored
@@ -1,102 +0,0 @@
|
||||
use criterion::{black_box, Criterion};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Strip(String);
|
||||
impl Strip {
|
||||
fn with_capacity(capacity: usize) -> Self {
|
||||
Self(String::with_capacity(capacity))
|
||||
}
|
||||
}
|
||||
impl anstyle_parse::Perform for Strip {
|
||||
fn print(&mut self, c: char) {
|
||||
self.0.push(c);
|
||||
}
|
||||
|
||||
fn execute(&mut self, byte: u8) {
|
||||
if byte.is_ascii_whitespace() {
|
||||
self.0.push(byte as char);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn strip(c: &mut Criterion) {
|
||||
for (name, content) in [
|
||||
("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
|
||||
("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
|
||||
("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
|
||||
(
|
||||
"state_changes",
|
||||
&b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
|
||||
),
|
||||
] {
|
||||
// Make sure the comparison is fair
|
||||
if let Ok(content) = std::str::from_utf8(content) {
|
||||
let mut stripped = Strip::with_capacity(content.len());
|
||||
let mut parser = anstyle_parse::Parser::<anstyle_parse::DefaultCharAccumulator>::new();
|
||||
for byte in content.as_bytes() {
|
||||
parser.advance(&mut stripped, *byte);
|
||||
}
|
||||
assert_eq!(
|
||||
stripped.0,
|
||||
anstream::adapter::strip_str(content).to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
stripped.0,
|
||||
String::from_utf8(anstream::adapter::strip_bytes(content.as_bytes()).into_vec())
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
let mut group = c.benchmark_group(name);
|
||||
group.bench_function("advance_strip", |b| {
|
||||
b.iter(|| {
|
||||
let mut stripped = Strip::with_capacity(content.len());
|
||||
let mut parser =
|
||||
anstyle_parse::Parser::<anstyle_parse::DefaultCharAccumulator>::new();
|
||||
|
||||
for byte in content {
|
||||
parser.advance(&mut stripped, *byte);
|
||||
}
|
||||
|
||||
black_box(stripped.0)
|
||||
})
|
||||
});
|
||||
group.bench_function("strip_ansi_escapes", |b| {
|
||||
b.iter(|| {
|
||||
let stripped = strip_ansi_escapes::strip(content);
|
||||
|
||||
black_box(stripped)
|
||||
})
|
||||
});
|
||||
if let Ok(content) = std::str::from_utf8(content) {
|
||||
group.bench_function("strip_str", |b| {
|
||||
b.iter(|| {
|
||||
let stripped = anstream::adapter::strip_str(content).to_string();
|
||||
|
||||
black_box(stripped)
|
||||
})
|
||||
});
|
||||
group.bench_function("StripStr", |b| {
|
||||
b.iter(|| {
|
||||
let mut stripped = String::with_capacity(content.len());
|
||||
let mut state = anstream::adapter::StripStr::new();
|
||||
for printable in state.strip_next(content) {
|
||||
stripped.push_str(printable);
|
||||
}
|
||||
|
||||
black_box(stripped)
|
||||
})
|
||||
});
|
||||
}
|
||||
group.bench_function("strip_bytes", |b| {
|
||||
b.iter(|| {
|
||||
let stripped = anstream::adapter::strip_bytes(content).into_vec();
|
||||
|
||||
black_box(stripped)
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
criterion::criterion_group!(benches, strip);
|
||||
criterion::criterion_main!(benches);
|
||||
26
vendor/anstream/benches/wincon.rs
vendored
26
vendor/anstream/benches/wincon.rs
vendored
@@ -1,26 +0,0 @@
|
||||
use criterion::{black_box, Criterion};
|
||||
|
||||
fn wincon(c: &mut Criterion) {
|
||||
for (name, content) in [
|
||||
("demo.vte", &include_bytes!("../tests/demo.vte")[..]),
|
||||
("rg_help.vte", &include_bytes!("../tests/rg_help.vte")[..]),
|
||||
("rg_linus.vte", &include_bytes!("../tests/rg_linus.vte")[..]),
|
||||
(
|
||||
"state_changes",
|
||||
&b"\x1b]2;X\x1b\\ \x1b[0m \x1bP0@\x1b\\"[..],
|
||||
),
|
||||
] {
|
||||
let mut group = c.benchmark_group(name);
|
||||
group.bench_function("wincon_bytes", |b| {
|
||||
b.iter(|| {
|
||||
let mut state = anstream::adapter::WinconBytes::new();
|
||||
let stripped = state.extract_next(content).collect::<Vec<_>>();
|
||||
|
||||
black_box(stripped)
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
criterion::criterion_group!(benches, wincon);
|
||||
criterion::criterion_main!(benches);
|
||||
128
vendor/anstream/examples/dump-stream.rs
vendored
128
vendor/anstream/examples/dump-stream.rs
vendored
@@ -1,128 +0,0 @@
|
||||
use std::io::Write;
|
||||
|
||||
fn main() -> Result<(), lexopt::Error> {
|
||||
let args = Args::parse()?;
|
||||
let stdout = anstream::stdout();
|
||||
let mut stdout = stdout.lock();
|
||||
|
||||
for fixed in 0..16 {
|
||||
let style = style(fixed, args.layer, args.effects);
|
||||
let _ = print_number(&mut stdout, fixed, style);
|
||||
if fixed == 7 || fixed == 15 {
|
||||
let _ = writeln!(&mut stdout);
|
||||
}
|
||||
}
|
||||
|
||||
for r in 0..6 {
|
||||
let _ = writeln!(stdout);
|
||||
for g in 0..6 {
|
||||
for b in 0..6 {
|
||||
let fixed = r * 36 + g * 6 + b + 16;
|
||||
let style = style(fixed, args.layer, args.effects);
|
||||
let _ = print_number(&mut stdout, fixed, style);
|
||||
}
|
||||
let _ = writeln!(stdout);
|
||||
}
|
||||
}
|
||||
|
||||
for c in 0..24 {
|
||||
if 0 == c % 8 {
|
||||
let _ = writeln!(stdout);
|
||||
}
|
||||
let fixed = 232 + c;
|
||||
let style = style(fixed, args.layer, args.effects);
|
||||
let _ = print_number(&mut stdout, fixed, style);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn style(fixed: u8, layer: Layer, effects: anstyle::Effects) -> anstyle::Style {
|
||||
let color = anstyle::Ansi256Color(fixed).into();
|
||||
(match layer {
|
||||
Layer::Fg => anstyle::Style::new().fg_color(Some(color)),
|
||||
Layer::Bg => anstyle::Style::new().bg_color(Some(color)),
|
||||
Layer::Underline => anstyle::Style::new().underline_color(Some(color)),
|
||||
}) | effects
|
||||
}
|
||||
|
||||
fn print_number(stdout: &mut impl Write, fixed: u8, style: anstyle::Style) -> std::io::Result<()> {
|
||||
write!(
|
||||
stdout,
|
||||
"{}{:>4}{}",
|
||||
style.render(),
|
||||
fixed,
|
||||
anstyle::Reset.render()
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct Args {
|
||||
effects: anstyle::Effects,
|
||||
layer: Layer,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Default)]
|
||||
enum Layer {
|
||||
#[default]
|
||||
Fg,
|
||||
Bg,
|
||||
Underline,
|
||||
}
|
||||
|
||||
impl Args {
|
||||
fn parse() -> Result<Self, lexopt::Error> {
|
||||
use lexopt::prelude::*;
|
||||
|
||||
let mut res = Args::default();
|
||||
|
||||
let mut args = lexopt::Parser::from_env();
|
||||
while let Some(arg) = args.next()? {
|
||||
match arg {
|
||||
Long("layer") => {
|
||||
res.layer = args.value()?.parse_with(|s| match s {
|
||||
"fg" => Ok(Layer::Fg),
|
||||
"bg" => Ok(Layer::Bg),
|
||||
"underline" => Ok(Layer::Underline),
|
||||
_ => Err("expected values fg, bg, underline"),
|
||||
})?;
|
||||
}
|
||||
Long("effect") => {
|
||||
const EFFECTS: [(&str, anstyle::Effects); 12] = [
|
||||
("bold", anstyle::Effects::BOLD),
|
||||
("dimmed", anstyle::Effects::DIMMED),
|
||||
("italic", anstyle::Effects::ITALIC),
|
||||
("underline", anstyle::Effects::UNDERLINE),
|
||||
("double_underline", anstyle::Effects::DOUBLE_UNDERLINE),
|
||||
("curly_underline", anstyle::Effects::CURLY_UNDERLINE),
|
||||
("dotted_underline", anstyle::Effects::DOTTED_UNDERLINE),
|
||||
("dashed_underline", anstyle::Effects::DASHED_UNDERLINE),
|
||||
("blink", anstyle::Effects::BLINK),
|
||||
("invert", anstyle::Effects::INVERT),
|
||||
("hidden", anstyle::Effects::HIDDEN),
|
||||
("strikethrough", anstyle::Effects::STRIKETHROUGH),
|
||||
];
|
||||
let effect = args.value()?.parse_with(|s| {
|
||||
EFFECTS
|
||||
.into_iter()
|
||||
.find(|(name, _)| *name == s)
|
||||
.map(|(_, effect)| effect)
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"expected one of {}",
|
||||
EFFECTS
|
||||
.into_iter()
|
||||
.map(|(n, _)| n)
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
)
|
||||
})
|
||||
})?;
|
||||
res.effects = res.effects.insert(effect);
|
||||
}
|
||||
_ => return Err(arg.unexpected()),
|
||||
}
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
20
vendor/anstream/examples/query-stream.rs
vendored
20
vendor/anstream/examples/query-stream.rs
vendored
@@ -1,20 +0,0 @@
|
||||
fn main() {
|
||||
println!("stdout:");
|
||||
println!(
|
||||
" choice: {:?}",
|
||||
anstream::AutoStream::choice(&std::io::stdout())
|
||||
);
|
||||
println!(
|
||||
" choice: {:?}",
|
||||
anstream::AutoStream::auto(std::io::stdout()).current_choice()
|
||||
);
|
||||
println!("stderr:");
|
||||
println!(
|
||||
" choice: {:?}",
|
||||
anstream::AutoStream::choice(&std::io::stderr())
|
||||
);
|
||||
println!(
|
||||
" choice: {:?}",
|
||||
anstream::AutoStream::auto(std::io::stderr()).current_choice()
|
||||
);
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user