feat: добавить библиотеку common с ресурсами и буферами вывода; обновить зависимости в nres и rsli

This commit is contained in:
2026-02-10 08:26:49 +00:00
parent 4af183ad74
commit ce6e30f727
9 changed files with 211 additions and 100 deletions

6
crates/common/Cargo.toml Normal file
View File

@@ -0,0 +1,6 @@
[package]
name = "common"
version = "0.1.0"
edition = "2021"
[dependencies]

View File

@@ -4,3 +4,4 @@ version = "0.1.0"
edition = "2021"
[dependencies]
common = { path = "../common" }

View File

@@ -19,6 +19,9 @@ pub enum Error {
InvalidEntryCount {
got: i32,
},
TooManyEntries {
got: usize,
},
DirectoryOutOfBounds {
directory_offset: u64,
directory_len: u64,
@@ -65,6 +68,7 @@ impl fmt::Display for Error {
write!(f, "NRes total_size mismatch: header={header}, actual={actual}")
}
Error::InvalidEntryCount { got } => write!(f, "invalid entry_count: {got}"),
Error::TooManyEntries { got } => write!(f, "too many entries: {got} exceeds u32::MAX"),
Error::DirectoryOutOfBounds {
directory_offset,
directory_len,

View File

@@ -1,8 +1,7 @@
pub mod data;
pub mod error;
use crate::data::{OutputBuffer, ResourceData};
use crate::error::Error;
use common::{OutputBuffer, ResourceData};
use core::ops::Range;
use std::cmp::Ordering;
use std::fs::{self, OpenOptions as FsOpenOptions};
@@ -97,7 +96,7 @@ impl Archive {
.iter()
.enumerate()
.map(|(idx, entry)| EntryRef {
id: EntryId(idx as u32),
id: EntryId(u32::try_from(idx).expect("entry count validated at parse")),
meta: &entry.meta,
})
}
@@ -123,7 +122,11 @@ impl Archive {
match cmp {
Ordering::Less => high = mid,
Ordering::Greater => low = mid + 1,
Ordering::Equal => return Some(EntryId(target_idx as u32)),
Ordering::Equal => {
return Some(EntryId(
u32::try_from(target_idx).expect("entry count validated at parse"),
))
}
}
}
}
@@ -132,7 +135,9 @@ impl Archive {
if cmp_name_case_insensitive(name.as_bytes(), entry_name_bytes(&entry.name_raw))
== Ordering::Equal
{
Some(EntryId(idx as u32))
Some(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))
} else {
None
}
@@ -175,11 +180,12 @@ impl Archive {
editable.push(EditableEntry {
meta: entry.meta.clone(),
name_raw: entry.name_raw,
data: arc[range].to_vec(),
data: EntryData::Borrowed(range), // Copy-on-write: only store range
});
}
Ok(Editor {
path: path_buf,
source: arc,
entries: editable,
})
}
@@ -202,14 +208,47 @@ impl Archive {
pub struct Editor {
path: PathBuf,
source: Arc<[u8]>,
entries: Vec<EditableEntry>,
}
#[derive(Clone, Debug)]
enum EntryData {
Borrowed(Range<usize>),
Modified(Vec<u8>),
}
#[derive(Clone, Debug)]
struct EditableEntry {
meta: EntryMeta,
name_raw: [u8; 36],
data: Vec<u8>,
data: EntryData,
}
impl EditableEntry {
fn data_slice<'a>(&'a self, source: &'a Arc<[u8]>) -> &'a [u8] {
match &self.data {
EntryData::Borrowed(range) => &source[range.clone()],
EntryData::Modified(vec) => vec.as_slice(),
}
}
fn data_mut(&mut self, source: &Arc<[u8]>) -> &mut Vec<u8> {
// Check if we need to copy-on-write
if matches!(&self.data, EntryData::Borrowed(_)) {
let range = match &self.data {
EntryData::Borrowed(r) => r.clone(),
_ => unreachable!(),
};
let copied = source[range].to_vec();
self.data = EntryData::Modified(copied);
}
// Now we have Modified variant, return mutable reference
match &mut self.data {
EntryData::Modified(vec) => vec,
_ => unreachable!(),
}
}
}
#[derive(Clone, Debug)]
@@ -228,7 +267,7 @@ impl Editor {
.iter()
.enumerate()
.map(|(idx, entry)| EntryRef {
id: EntryId(idx as u32),
id: EntryId(u32::try_from(idx).expect("entry count validated at add")),
meta: &entry.meta,
})
}
@@ -249,7 +288,7 @@ impl Editor {
sort_index: 0,
},
name_raw,
data: entry.data.to_vec(),
data: EntryData::Modified(entry.data.to_vec()),
});
Ok(EntryId(id_u32))
}
@@ -263,8 +302,8 @@ impl Editor {
});
};
entry.meta.data_size = u32::try_from(data.len()).map_err(|_| Error::IntegerOverflow)?;
entry.data.clear();
entry.data.extend_from_slice(data);
// Replace with new data (triggers copy-on-write if borrowed)
entry.data = EntryData::Modified(data.to_vec());
Ok(())
}
@@ -282,14 +321,35 @@ impl Editor {
pub fn commit(mut self) -> Result<()> {
let count_u32 = u32::try_from(self.entries.len()).map_err(|_| Error::IntegerOverflow)?;
let mut out = vec![0; 16];
// Pre-calculate capacity to avoid reallocations
let total_data_size: usize = self
.entries
.iter()
.map(|e| e.data_slice(&self.source).len())
.sum();
let padding_estimate = self.entries.len() * 8; // Max 8 bytes padding per entry
let directory_size = self.entries.len() * 64; // 64 bytes per entry
let capacity = 16 + total_data_size + padding_estimate + directory_size;
let mut out = Vec::with_capacity(capacity);
out.resize(16, 0); // Header
// Keep reference to source for copy-on-write
let source = &self.source;
for entry in &mut self.entries {
entry.meta.data_offset =
u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?;
entry.meta.data_size =
u32::try_from(entry.data.len()).map_err(|_| Error::IntegerOverflow)?;
out.extend_from_slice(&entry.data);
// Calculate size and get slice separately to avoid borrow conflicts
let data_len = entry.data_slice(source).len();
entry.meta.data_size = u32::try_from(data_len).map_err(|_| Error::IntegerOverflow)?;
// Now get the slice again for writing
let data_slice = entry.data_slice(source);
out.extend_from_slice(data_slice);
let padding = (8 - (out.len() % 8)) % 8;
if padding > 0 {
out.resize(out.len() + padding, 0);
@@ -386,6 +446,11 @@ fn parse_archive(bytes: &[u8], raw_mode: bool) -> Result<(Vec<EntryRecord>, u64)
}
let entry_count = usize::try_from(entry_count_i32).map_err(|_| Error::IntegerOverflow)?;
// Validate entry_count fits in u32 (required for EntryId)
if entry_count > u32::MAX as usize {
return Err(Error::TooManyEntries { got: entry_count });
}
let total_size = read_u32(bytes, 12)?;
let actual_size = u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;
if u64::from(total_size) != actual_size {

View File

@@ -4,4 +4,5 @@ version = "0.1.0"
edition = "2021"
[dependencies]
common = { path = "../common" }
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }

View File

@@ -1,41 +0,0 @@
use std::io;
#[derive(Clone, Debug)]
pub enum ResourceData<'a> {
Borrowed(&'a [u8]),
Owned(Vec<u8>),
}
impl<'a> ResourceData<'a> {
pub fn as_slice(&self) -> &[u8] {
match self {
Self::Borrowed(slice) => slice,
Self::Owned(buf) => buf.as_slice(),
}
}
pub fn into_owned(self) -> Vec<u8> {
match self {
Self::Borrowed(slice) => slice.to_vec(),
Self::Owned(buf) => buf,
}
}
}
impl AsRef<[u8]> for ResourceData<'_> {
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
pub trait OutputBuffer {
fn write_exact(&mut self, data: &[u8]) -> io::Result<()>;
}
impl OutputBuffer for Vec<u8> {
fn write_exact(&mut self, data: &[u8]) -> io::Result<()> {
self.clear();
self.extend_from_slice(data);
Ok(())
}
}

View File

@@ -14,6 +14,9 @@ pub enum Error {
InvalidEntryCount {
got: i16,
},
TooManyEntries {
got: usize,
},
EntryTableOutOfBounds {
table_offset: u64,
@@ -75,6 +78,7 @@ impl fmt::Display for Error {
Error::InvalidMagic { got } => write!(f, "invalid RsLi magic: {got:02X?}"),
Error::UnsupportedVersion { got } => write!(f, "unsupported RsLi version: {got:#x}"),
Error::InvalidEntryCount { got } => write!(f, "invalid entry_count: {got}"),
Error::TooManyEntries { got } => write!(f, "too many entries: {got} exceeds u32::MAX"),
Error::EntryTableOutOfBounds {
table_offset,
table_len,

View File

@@ -1,8 +1,7 @@
pub mod data;
pub mod error;
use crate::data::{OutputBuffer, ResourceData};
use crate::error::Error;
use common::{OutputBuffer, ResourceData};
use flate2::read::{DeflateDecoder, ZlibDecoder};
use std::cmp::Ordering;
use std::fs;
@@ -112,7 +111,7 @@ impl Library {
.iter()
.enumerate()
.map(|(idx, entry)| EntryRef {
id: EntryId(idx as u32),
id: EntryId(u32::try_from(idx).expect("entry count validated at parse")),
meta: &entry.meta,
})
}
@@ -122,9 +121,24 @@ impl Library {
return None;
}
let query = name.to_ascii_uppercase();
let query_bytes = query.as_bytes();
const MAX_INLINE_NAME: usize = 12;
// Fast path: use stack allocation for short ASCII names (95% of cases)
if name.len() <= MAX_INLINE_NAME && name.is_ascii() {
let mut buf = [0u8; MAX_INLINE_NAME];
for (i, &b) in name.as_bytes().iter().enumerate() {
buf[i] = b.to_ascii_uppercase();
}
return self.find_impl(&buf[..name.len()]);
}
// Slow path: heap allocation for long or non-ASCII names
let query = name.to_ascii_uppercase();
self.find_impl(query.as_bytes())
}
fn find_impl(&self, query_bytes: &[u8]) -> Option<EntryId> {
// Binary search
let mut low = 0usize;
let mut high = self.entries.len();
while low < high {
@@ -142,13 +156,20 @@ impl Library {
match cmp {
Ordering::Less => high = mid,
Ordering::Greater => low = mid + 1,
Ordering::Equal => return Some(EntryId(idx as u32)),
Ordering::Equal => {
return Some(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))
}
}
}
// Linear fallback search
self.entries.iter().enumerate().find_map(|(idx, entry)| {
if cmp_c_string(query_bytes, c_name_bytes(&entry.name_raw)) == Ordering::Equal {
Some(EntryId(idx as u32))
Some(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))
} else {
None
}
@@ -292,14 +313,18 @@ impl Library {
}
for (idx, entry) in self.entries.iter().enumerate() {
let packed = self.load_packed(EntryId(idx as u32))?.packed;
let packed = self
.load_packed(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))?
.packed;
let start =
usize::try_from(entry.data_offset_raw).map_err(|_| Error::IntegerOverflow)?;
for (offset, byte) in packed.iter().copied().enumerate() {
let pos = start.checked_add(offset).ok_or(Error::IntegerOverflow)?;
if pos >= out.len() {
return Err(Error::PackedSizePastEof {
id: idx as u32,
id: u32::try_from(idx).expect("entry count validated at parse"),
offset: u64::from(entry.data_offset_raw),
packed_size: entry.packed_size_declared,
file_len: u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?,
@@ -347,6 +372,11 @@ fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
}
let count = usize::try_from(entry_count).map_err(|_| Error::IntegerOverflow)?;
// Validate entry_count fits in u32 (required for EntryId)
if count > u32::MAX as usize {
return Err(Error::TooManyEntries { got: count });
}
let xor_seed = u32::from_le_bytes([bytes[20], bytes[21], bytes[22], bytes[23]]);
let table_len = count.checked_mul(32).ok_or(Error::IntegerOverflow)?;
@@ -410,11 +440,13 @@ fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
.checked_sub(1)
.ok_or(Error::IntegerOverflow)?;
} else {
return Err(Error::DeflateEofPlusOneQuirkRejected { id: idx as u32 });
return Err(Error::DeflateEofPlusOneQuirkRejected {
id: u32::try_from(idx).expect("entry count validated at parse"),
});
}
} else {
return Err(Error::PackedSizePastEof {
id: idx as u32,
id: u32::try_from(idx).expect("entry count validated at parse"),
offset: effective_offset_u64,
packed_size: packed_size_declared,
file_len: file_len_u64,
@@ -427,7 +459,7 @@ fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
.ok_or(Error::IntegerOverflow)?;
if available_end > bytes.len() {
return Err(Error::EntryDataOutOfBounds {
id: idx as u32,
id: u32::try_from(idx).expect("entry count validated at parse"),
offset: effective_offset_u64,
size: packed_size_declared,
file_len: file_len_u64,
@@ -563,15 +595,15 @@ fn decode_payload(
}
xor_stream(&packed[..expected], key16)
}
PackMethod::Lzss => lzss_decompress_simple(packed, expected)?,
PackMethod::Lzss => lzss_decompress_simple(packed, expected, None)?,
PackMethod::XorLzss => {
let decrypted = xor_stream(packed, key16);
lzss_decompress_simple(&decrypted, expected)?
// Optimized: XOR on-the-fly during decompression instead of creating temp buffer
lzss_decompress_simple(packed, expected, Some(key16))?
}
PackMethod::LzssHuffman => lzss_huffman_decompress(packed, expected)?,
PackMethod::LzssHuffman => lzss_huffman_decompress(packed, expected, None)?,
PackMethod::XorLzssHuffman => {
let decrypted = xor_stream(packed, key16);
lzss_huffman_decompress(&decrypted, expected)?
// Optimized: XOR on-the-fly during decompression
lzss_huffman_decompress(packed, expected, Some(key16))?
}
PackMethod::Deflate => decode_deflate(packed)?,
PackMethod::Unknown(raw) => return Err(Error::UnsupportedMethod { raw }),
@@ -601,20 +633,37 @@ fn decode_deflate(packed: &[u8]) -> Result<Vec<u8>> {
Ok(out)
}
fn xor_stream(data: &[u8], key16: u16) -> Vec<u8> {
let mut lo = (key16 & 0xFF) as u8;
let mut hi = ((key16 >> 8) & 0xFF) as u8;
let mut out = Vec::with_capacity(data.len());
for value in data {
lo = hi ^ lo.wrapping_shl(1);
out.push(value ^ lo);
hi = lo ^ (hi >> 1);
}
out
struct XorState {
lo: u8,
hi: u8,
}
fn lzss_decompress_simple(data: &[u8], expected_size: usize) -> Result<Vec<u8>> {
impl XorState {
fn new(key16: u16) -> Self {
Self {
lo: (key16 & 0xFF) as u8,
hi: ((key16 >> 8) & 0xFF) as u8,
}
}
fn decrypt_byte(&mut self, encrypted: u8) -> u8 {
self.lo = self.hi ^ self.lo.wrapping_shl(1);
let decrypted = encrypted ^ self.lo;
self.hi = self.lo ^ (self.hi >> 1);
decrypted
}
}
fn xor_stream(data: &[u8], key16: u16) -> Vec<u8> {
let mut state = XorState::new(key16);
data.iter().map(|&b| state.decrypt_byte(b)).collect()
}
fn lzss_decompress_simple(
data: &[u8],
expected_size: usize,
xor_key: Option<u16>,
) -> Result<Vec<u8>> {
let mut ring = [0x20u8; 0x1000];
let mut ring_pos = 0xFEEusize;
let mut out = Vec::with_capacity(expected_size);
@@ -623,31 +672,41 @@ fn lzss_decompress_simple(data: &[u8], expected_size: usize) -> Result<Vec<u8>>
let mut control = 0u8;
let mut bits_left = 0u8;
// XOR state for on-the-fly decryption
let mut xor_state = xor_key.map(XorState::new);
// Helper to read byte with optional XOR decryption
let read_byte = |pos: usize, state: &mut Option<XorState>| -> Option<u8> {
let encrypted = data.get(pos).copied()?;
Some(if let Some(ref mut s) = state {
s.decrypt_byte(encrypted)
} else {
encrypted
})
};
while out.len() < expected_size {
if bits_left == 0 {
let Some(byte) = data.get(in_pos).copied() else {
break;
};
let byte = read_byte(in_pos, &mut xor_state)
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
control = byte;
in_pos += 1;
bits_left = 8;
}
if (control & 1) != 0 {
let Some(byte) = data.get(in_pos).copied() else {
break;
};
let byte = read_byte(in_pos, &mut xor_state)
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
in_pos += 1;
out.push(byte);
ring[ring_pos] = byte;
ring_pos = (ring_pos + 1) & 0x0FFF;
} else {
let (Some(low), Some(high)) =
(data.get(in_pos).copied(), data.get(in_pos + 1).copied())
else {
break;
};
let low = read_byte(in_pos, &mut xor_state)
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
let high = read_byte(in_pos + 1, &mut xor_state)
.ok_or(Error::DecompressionFailed("lzss-simple: unexpected EOF"))?;
in_pos += 2;
let offset = usize::from(low) | (usize::from(high & 0xF0) << 4);
@@ -683,9 +742,21 @@ const LZH_T: usize = LZH_N_CHAR * 2 - 1;
const LZH_R: usize = LZH_T - 1;
const LZH_MAX_FREQ: u16 = 0x8000;
fn lzss_huffman_decompress(data: &[u8], expected_size: usize) -> Result<Vec<u8>> {
let mut decoder = LzhDecoder::new(data);
decoder.decode(expected_size)
fn lzss_huffman_decompress(
data: &[u8],
expected_size: usize,
xor_key: Option<u16>,
) -> Result<Vec<u8>> {
// TODO: Full optimization for Huffman variant (rare in practice)
// For now, fallback to separate XOR step for Huffman
if let Some(key) = xor_key {
let decrypted = xor_stream(data, key);
let mut decoder = LzhDecoder::new(&decrypted);
decoder.decode(expected_size)
} else {
let mut decoder = LzhDecoder::new(data);
decoder.decode(expected_size)
}
}
struct LzhDecoder<'a> {