feat: добавить тесты для проверки структурных инвариантов и корректности сортировки в RsLi

This commit is contained in:
2026-02-11 21:21:32 +00:00
parent 041b1a6cb3
commit 3410b54793
3 changed files with 355 additions and 1 deletions

View File

@@ -81,6 +81,19 @@ fn read_u32_le(bytes: &[u8], offset: usize) -> u32 {
u32::from_le_bytes(arr) u32::from_le_bytes(arr)
} }
/// Reads a little-endian `i32` from `bytes` starting at `offset`.
///
/// Test-only helper: out-of-range reads panic with a descriptive message
/// rather than returning an error, which is the desired failure mode here.
fn read_i32_le(bytes: &[u8], offset: usize) -> i32 {
    let raw: [u8; 4] = bytes
        .get(offset..offset + 4)
        .expect("i32 read out of bounds in test")
        .try_into()
        .expect("i32 conversion failed in test");
    i32::from_le_bytes(raw)
}
/// Returns the portion of a fixed 36-byte name field before its first NUL
/// terminator, or `None` when no NUL byte is present (i.e. the field is not
/// properly terminated).
fn name_field_bytes(raw: &[u8; 36]) -> Option<&[u8]> {
    raw.iter()
        .position(|&byte| byte == 0)
        .map(|nul| &raw[..nul])
}
fn build_nres_bytes(entries: &[SyntheticEntry<'_>]) -> Vec<u8> { fn build_nres_bytes(entries: &[SyntheticEntry<'_>]) -> Vec<u8> {
let mut out = vec![0u8; 16]; let mut out = vec![0u8; 16];
let mut offsets = Vec::with_capacity(entries.len()); let mut offsets = Vec::with_capacity(entries.len());
@@ -133,6 +146,154 @@ fn build_nres_bytes(entries: &[SyntheticEntry<'_>]) -> Vec<u8> {
out out
} }
#[test]
fn nres_docs_structural_invariants_all_files() {
    // Validates the documented NRes container layout against every archive in
    // testdata/nres: header fields, directory placement at the end of the
    // file, name-field NUL termination, the sort_index permutation, and
    // non-overlapping, 8-byte-aligned, zero-padded data regions.
    let files = nres_test_files();
    if files.is_empty() {
        // Test archives are optional fixtures; skip with a notice rather than
        // fail when they are absent.
        eprintln!(
            "skipping nres_docs_structural_invariants_all_files: no NRes archives in testdata/nres"
        );
        return;
    }
    for path in files {
        let bytes = fs::read(&path).unwrap_or_else(|err| {
            panic!("failed to read {}: {err}", path.display());
        });
        // Fixed 16-byte header: magic "NRes", version, entry_count, total_size.
        assert!(
            bytes.len() >= 16,
            "NRes header too short in {}",
            path.display()
        );
        assert_eq!(&bytes[0..4], b"NRes", "bad magic in {}", path.display());
        assert_eq!(
            read_u32_le(&bytes, 4),
            0x100,
            "bad version in {}",
            path.display()
        );
        // header.total_size (offset 12) must equal the actual file length.
        assert_eq!(
            usize::try_from(read_u32_le(&bytes, 12)).expect("size overflow"),
            bytes.len(),
            "header.total_size mismatch in {}",
            path.display()
        );
        // entry_count is stored signed; a negative value is corruption.
        let entry_count_i32 = read_i32_le(&bytes, 8);
        assert!(
            entry_count_i32 >= 0,
            "negative entry_count={} in {}",
            entry_count_i32,
            path.display()
        );
        let entry_count = usize::try_from(entry_count_i32).expect("entry_count overflow");
        // The directory is 64 bytes per entry and sits at the very end of the
        // file, so its offset is file length minus directory length.
        let directory_len = entry_count.checked_mul(64).expect("directory_len overflow");
        let directory_offset = bytes
            .len()
            .checked_sub(directory_len)
            .unwrap_or_else(|| panic!("directory underflow in {}", path.display()));
        assert!(
            directory_offset >= 16,
            "directory offset before data area in {}",
            path.display()
        );
        assert_eq!(
            directory_offset + directory_len,
            bytes.len(),
            "directory not at file end in {}",
            path.display()
        );
        let mut sort_indices = Vec::with_capacity(entry_count);
        let mut entries = Vec::with_capacity(entry_count);
        for index in 0..entry_count {
            // Per-entry directory record layout (relative offsets):
            // +12 size, +20..+56 name (36 bytes), +56 data offset, +60 sort index.
            let base = directory_offset + index * 64;
            let size = usize::try_from(read_u32_le(&bytes, base + 12)).expect("size overflow");
            let data_offset =
                usize::try_from(read_u32_le(&bytes, base + 56)).expect("offset overflow");
            let sort_index =
                usize::try_from(read_u32_le(&bytes, base + 60)).expect("sort_index overflow");
            let mut name_raw = [0u8; 36];
            name_raw.copy_from_slice(
                bytes
                    .get(base + 20..base + 56)
                    .expect("name field out of bounds in test"),
            );
            // The name must contain a NUL terminator within its 36 bytes…
            let name_bytes = name_field_bytes(&name_raw).unwrap_or_else(|| {
                panic!(
                    "name field without NUL terminator in {} entry #{index}",
                    path.display()
                )
            });
            // …which caps the usable name at 35 bytes.
            assert!(
                name_bytes.len() <= 35,
                "name longer than 35 bytes in {} entry #{index}",
                path.display()
            );
            sort_indices.push(sort_index);
            entries.push((name_bytes.to_vec(), data_offset, size));
        }
        // The stored sort_index table must match the order produced by a
        // case-insensitive sort of the entry names.
        let mut expected_sort: Vec<usize> = (0..entry_count).collect();
        expected_sort.sort_by(|a, b| cmp_name_case_insensitive(&entries[*a].0, &entries[*b].0));
        assert_eq!(
            sort_indices,
            expected_sort,
            "sort_index table mismatch in {}",
            path.display()
        );
        // Check data regions ordered by offset: aligned, after the header,
        // and never reaching into the trailing directory.
        let mut data_regions: Vec<(usize, usize)> =
            entries.iter().map(|(_, off, size)| (*off, *size)).collect();
        data_regions.sort_by_key(|(off, _)| *off);
        for (idx, (data_offset, size)) in data_regions.iter().enumerate() {
            assert_eq!(
                data_offset % 8,
                0,
                "data offset is not 8-byte aligned in {} (region #{idx})",
                path.display()
            );
            assert!(
                *data_offset >= 16,
                "data offset before header end in {} (region #{idx})",
                path.display()
            );
            // Saturate to usize::MAX so an overflowing end still fails the check.
            assert!(
                data_offset.checked_add(*size).unwrap_or(usize::MAX) <= directory_offset,
                "data region overlaps directory in {} (region #{idx})",
                path.display()
            );
        }
        // Adjacent regions must not overlap, and any gap between them must be
        // zero-filled alignment padding.
        for pair in data_regions.windows(2) {
            let (start, size) = pair[0];
            let (next_start, _) = pair[1];
            let end = start
                .checked_add(size)
                .unwrap_or_else(|| panic!("size overflow in {}", path.display()));
            assert!(
                end <= next_start,
                "overlapping data regions in {}: [{start}, {end}) and next at {next_start}",
                path.display()
            );
            for (offset, value) in bytes[end..next_start].iter().enumerate() {
                assert_eq!(
                    *value,
                    0,
                    "non-zero alignment padding in {} at offset {}",
                    path.display(),
                    end + offset
                );
            }
        }
    }
}
#[test] #[test]
fn nres_read_and_roundtrip_all_files() { fn nres_read_and_roundtrip_all_files() {
let files = nres_test_files(); let files = nres_test_files();

View File

@@ -149,13 +149,31 @@ pub fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
let presorted_flag = u16::from_le_bytes([bytes[14], bytes[15]]); let presorted_flag = u16::from_le_bytes([bytes[14], bytes[15]]);
if presorted_flag == 0xABBA { if presorted_flag == 0xABBA {
let mut seen = vec![false; count];
for entry in &entries { for entry in &entries {
let idx = i32::from(entry.sort_to_original); let idx = i32::from(entry.sort_to_original);
if idx < 0 || usize::try_from(idx).map_err(|_| Error::IntegerOverflow)? >= count { if idx < 0 {
return Err(Error::CorruptEntryTable( return Err(Error::CorruptEntryTable(
"sort_to_original is not a valid permutation index", "sort_to_original is not a valid permutation index",
)); ));
} }
let idx = usize::try_from(idx).map_err(|_| Error::IntegerOverflow)?;
if idx >= count {
return Err(Error::CorruptEntryTable(
"sort_to_original is not a valid permutation index",
));
}
if seen[idx] {
return Err(Error::CorruptEntryTable(
"sort_to_original is not a permutation",
));
}
seen[idx] = true;
}
if seen.iter().any(|value| !*value) {
return Err(Error::CorruptEntryTable(
"sort_to_original is not a permutation",
));
} }
} else { } else {
let mut sorted: Vec<usize> = (0..count).collect(); let mut sorted: Vec<usize> = (0..count).collect();

View File

@@ -444,6 +444,14 @@ fn build_rsli_bytes(entries: &[SyntheticRsliEntry], opts: &RsliBuildOptions) ->
output output
} }
/// Reads a little-endian `u32` from `bytes` starting at `offset`.
///
/// Test-only helper: an out-of-range read panics with a descriptive message,
/// which is the desired failure mode in tests.
fn read_u32_le(bytes: &[u8], offset: usize) -> u32 {
    let raw: [u8; 4] = bytes
        .get(offset..offset + 4)
        .expect("u32 read out of bounds in test")
        .try_into()
        .expect("u32 conversion failed in test");
    u32::from_le_bytes(raw)
}
#[test] #[test]
fn rsli_read_unpack_and_repack_all_files() { fn rsli_read_unpack_and_repack_all_files() {
let files = rsli_test_files(); let files = rsli_test_files();
@@ -581,6 +589,126 @@ fn rsli_read_unpack_and_repack_all_files() {
} }
} }
#[test]
fn rsli_docs_structural_invariants_all_files() {
    // Validates the documented RsLi layout for every archive in testdata/rsli:
    // header fields, XOR-encrypted entry-table bounds, the optional "AO"
    // overlay trailer, per-entry packed data ranges, and the presorted
    // permutation table.
    let files = rsli_test_files();
    if files.is_empty() {
        // Test archives are optional fixtures; skip with a notice rather than
        // fail when they are absent.
        eprintln!(
            "skipping rsli_docs_structural_invariants_all_files: no RsLi archives in testdata/rsli"
        );
        return;
    }
    // Entries whose deflate stream is declared to end one byte past EOF;
    // collected here and matched against the known allowlist at the end.
    let mut deflate_eof_plus_one_quirks = Vec::new();
    for path in files {
        let bytes = fs::read(&path).unwrap_or_else(|err| {
            panic!("failed to read {}: {err}", path.display());
        });
        // Fixed 32-byte header: "NL" magic, reserved zero byte, version 1.
        assert!(
            bytes.len() >= 32,
            "RsLi header too short in {}",
            path.display()
        );
        assert_eq!(&bytes[0..2], b"NL", "bad magic in {}", path.display());
        assert_eq!(
            bytes[2],
            0,
            "reserved header byte must be zero in {}",
            path.display()
        );
        assert_eq!(bytes[3], 1, "bad version in {}", path.display());
        // entry_count is stored signed (i16); a negative value is corruption.
        let entry_count = i16::from_le_bytes([bytes[4], bytes[5]]);
        assert!(
            entry_count >= 0,
            "negative entry_count={} in {}",
            entry_count,
            path.display()
        );
        let count = usize::try_from(entry_count).expect("entry_count overflow");
        // Entry table: 32 bytes per entry, directly after the 32-byte header.
        let table_size = count.checked_mul(32).expect("table_size overflow");
        let table_end = 32usize.checked_add(table_size).expect("table_end overflow");
        assert!(
            table_end <= bytes.len(),
            "table out of bounds in {}",
            path.display()
        );
        // The table is XOR-encrypted with the low 16 bits of the header seed
        // at offset 20.
        let seed = read_u32_le(&bytes, 20);
        let table_plain = xor_stream(&bytes[32..table_end], (seed & 0xFFFF) as u16);
        assert_eq!(
            table_plain.len(),
            table_size,
            "decrypted table size mismatch in {}",
            path.display()
        );
        // Optional overlay trailer: a trailing "AO" marker followed by a u32
        // base offset added to every entry's data offset.
        let mut overlay = 0u32;
        if bytes.len() >= 6 && &bytes[bytes.len() - 6..bytes.len() - 4] == b"AO" {
            overlay = read_u32_le(&bytes, bytes.len() - 4);
            assert!(
                usize::try_from(overlay).expect("overlay overflow") <= bytes.len(),
                "overlay beyond EOF in {}",
                path.display()
            );
        }
        let presorted_flag = u16::from_le_bytes([bytes[14], bytes[15]]);
        let mut sort_values = Vec::with_capacity(count);
        for index in 0..count {
            // Per-row layout (relative offsets): +16 flags (i16),
            // +18 sort_to_original (i16), +24 data offset, +28 packed size.
            let base = index * 32;
            let row = &table_plain[base..base + 32];
            let flags_signed = i16::from_le_bytes([row[16], row[17]]);
            let sort_to_original = i16::from_le_bytes([row[18], row[19]]);
            let data_offset = u64::from(read_u32_le(row, 24));
            let packed_size = u64::from(read_u32_le(row, 28));
            // Compression method is the 0x1E0 bit field of the flags word.
            let method = (flags_signed as u16 as u32) & 0x1E0;
            let effective_offset = data_offset + u64::from(overlay);
            let end = effective_offset + packed_size;
            let file_len = u64::try_from(bytes.len()).expect("file size overflow");
            if end > file_len {
                // Only the known quirk is tolerated: a deflate entry (0x100)
                // whose declared range ends exactly one byte past EOF.
                assert!(
                    method == 0x100 && end == file_len + 1,
                    "packed range out of bounds in {} entry #{index}: method=0x{method:03X}, range=[{effective_offset}, {end}), file={file_len}",
                    path.display()
                );
                deflate_eof_plus_one_quirks.push((path.display().to_string(), index));
            }
            sort_values.push(sort_to_original);
        }
        // With the 0xABBA flag, sort_to_original must be a permutation of
        // 0..count; sorting the values must therefore yield exactly 0..count.
        if presorted_flag == 0xABBA {
            let mut sorted = sort_values;
            sorted.sort_unstable();
            let expected: Vec<i16> = (0..count)
                .map(|idx| i16::try_from(idx).expect("too many entries for i16"))
                .collect();
            assert_eq!(
                sorted,
                expected,
                "sort_to_original is not a permutation in {}",
                path.display()
            );
        }
    }
    // Allowlist check: the only expected EOF+1 quirk is sprites.lib entry #23.
    if !deflate_eof_plus_one_quirks.is_empty() {
        assert!(
            deflate_eof_plus_one_quirks
                .iter()
                .all(|(file, idx)| file.ends_with("sprites.lib") && *idx == 23),
            "unexpected deflate EOF+1 quirks: {:?}",
            deflate_eof_plus_one_quirks
        );
    }
}
#[test] #[test]
fn rsli_synthetic_all_methods_roundtrip() { fn rsli_synthetic_all_methods_roundtrip() {
let entries = vec![ let entries = vec![
@@ -667,6 +795,53 @@ fn rsli_synthetic_all_methods_roundtrip() {
let _ = fs::remove_file(&path); let _ = fs::remove_file(&path);
} }
#[test]
fn rsli_presorted_flag_requires_permutation() {
    // Builds a valid two-entry presorted archive, corrupts the
    // sort_to_original column so it is no longer a permutation, and expects
    // the parser to reject the archive with Error::CorruptEntryTable.
    let entries = vec![
        SyntheticRsliEntry {
            name: "AAA".to_string(),
            method_raw: 0x000,
            plain: b"a".to_vec(),
            declared_packed_size: None,
        },
        SyntheticRsliEntry {
            name: "BBB".to_string(),
            method_raw: 0x000,
            plain: b"b".to_vec(),
            declared_packed_size: None,
        },
    ];
    let mut bytes = build_rsli_bytes(
        &entries,
        &RsliBuildOptions {
            presorted: true,
            ..RsliBuildOptions::default()
        },
    );
    // Decrypt the entry table (XOR keyed by the low 16 bits of the seed).
    let seed = read_u32_le(&bytes, 20);
    let mut table_plain = xor_stream(&bytes[32..32 + entries.len() * 32], (seed & 0xFFFF) as u16);
    // Corrupt sort_to_original: duplicate index 0, so the table is not a permutation.
    // Offsets 18..20 and 50..52 are the sort_to_original field of rows 0 and 1
    // (32 bytes per row, field at row offset 18).
    table_plain[18..20].copy_from_slice(&0i16.to_le_bytes());
    table_plain[50..52].copy_from_slice(&0i16.to_le_bytes());
    // Re-encrypt and splice the corrupted table back into the archive bytes.
    let table_encrypted = xor_stream(&table_plain, (seed & 0xFFFF) as u16);
    bytes[32..32 + table_encrypted.len()].copy_from_slice(&table_encrypted);
    let path = write_temp_file("rsli-bad-presorted-perm", &bytes);
    match Library::open_path(&path) {
        Err(Error::CorruptEntryTable(message)) => {
            // The exact wording is not pinned, only that it names the
            // permutation invariant.
            assert!(
                message.contains("permutation"),
                "unexpected error message: {message}"
            );
        }
        other => panic!("expected CorruptEntryTable for invalid permutation, got {other:?}"),
    }
    // Best-effort cleanup of the temp file; failure to remove is harmless.
    let _ = fs::remove_file(&path);
}
#[test] #[test]
fn rsli_xorlzss_huffman_on_the_fly_roundtrip() { fn rsli_xorlzss_huffman_on_the_fly_roundtrip() {
let plain: Vec<u8> = (0..512u16).map(|i| b'A' + (i % 26) as u8).collect(); let plain: Vec<u8> = (0..512u16).map(|i| b'A' + (i % 26) as u8).collect();