feat(render-core): add default UV scale and refactor UV mapping logic
Some checks failed
Test / Lint (push) Failing after 1m12s
Test / Test (push) Has been skipped
Test / Render parity (push) Has been skipped

- Introduced a constant `DEFAULT_UV_SCALE` for UV scaling.
- Refactored UV mapping in `build_render_mesh` to use the new constant.
- Simplified `compute_bounds` functions by extracting common logic into `compute_bounds_impl`.

test(render-core): add tests for rendering with empty and multi-node models

- Added tests to verify behavior when building render meshes from models with no slots and multiple nodes.
- Ensured UV scaling is correctly applied in tests.

feat(render-demo): add FOV argument and improve error handling

- Added a `--fov` command-line argument to set the field of view.
- Enhanced error messages for texture resolution failures.
- Updated MVP computation to use the new FOV parameter.

fix(rsli): improve error handling in LZH decompression

- Added checks to prevent out-of-bounds access in LZH decoding logic.

refactor(texm): streamline texture parsing and decoding tests

- Created a helper function `build_texm_payload` for constructing test payloads.
- Added tests for various texture formats including RGB565, RGB556, ARGB4444, and Luminance Alpha.
- Improved error handling for invalid TEXM headers and mip bounds.
This commit is contained in:
2026-02-19 09:46:23 +00:00
parent 0d7ae6a017
commit efab61a45c
17 changed files with 800 additions and 299 deletions

View File

@@ -1,6 +1,7 @@
use core::fmt;
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
Nres(nres::error::Error),
MissingResource {

View File

@@ -164,6 +164,8 @@ pub fn parse_model_payload(payload: &[u8]) -> Result<Model> {
let positions = parse_positions(&res3.bytes)?;
let indices = parse_u16_array(&res6.bytes, "Res6")?;
let batches = parse_batches(&res13.bytes)?;
validate_slot_batch_ranges(&slots, batches.len())?;
validate_batch_index_ranges(&batches, indices.len())?;
let normals = match res4 {
Some(raw) => Some(parse_i8x4_array(&raw.bytes, "Res4")?),
@@ -192,6 +194,40 @@ pub fn parse_model_payload(payload: &[u8]) -> Result<Model> {
})
}
/// Verifies that every slot's `[batch_start, batch_start + batch_count)` range
/// fits inside the parsed batch table.
///
/// Returns `Error::IntegerOverflow` if the range end cannot be computed and
/// `Error::IndexOutOfBounds` (label `"Res2.batch_range"`) if it exceeds
/// `batch_count`.
fn validate_slot_batch_ranges(slots: &[Slot], batch_count: usize) -> Result<()> {
    slots.iter().try_for_each(|slot| {
        let first = usize::from(slot.batch_start);
        let past_end = first
            .checked_add(usize::from(slot.batch_count))
            .ok_or(Error::IntegerOverflow)?;
        if past_end > batch_count {
            return Err(Error::IndexOutOfBounds {
                label: "Res2.batch_range",
                index: past_end,
                limit: batch_count,
            });
        }
        Ok(())
    })
}
fn validate_batch_index_ranges(batches: &[Batch], index_count: usize) -> Result<()> {
for batch in batches {
let start = usize::try_from(batch.index_start).map_err(|_| Error::IntegerOverflow)?;
let end = start
.checked_add(usize::from(batch.index_count))
.ok_or(Error::IntegerOverflow)?;
if end > index_count {
return Err(Error::IndexOutOfBounds {
label: "Res13.index_range",
index: end,
limit: index_count,
});
}
}
Ok(())
}
fn parse_positions(data: &[u8]) -> Result<Vec<[f32; 3]>> {
if !data.len().is_multiple_of(12) {
return Err(Error::InvalidResourceSize {

View File

@@ -39,6 +39,166 @@ fn is_msh_name(name: &str) -> bool {
name.to_ascii_lowercase().ends_with(".msh")
}
/// One resource record for synthetic nested-NRes archives built in these
/// tests (see `build_nested_nres`).
#[derive(Clone)]
struct SyntheticEntry {
    kind: u32,     // resource type tag (one of the RES*_ constants)
    name: String,  // directory name; must encode to at most 35 bytes
    attr1: u32,    // directory attribute word 1 (tests use 1)
    attr2: u32,    // directory attribute word 2 (tests use 0)
    attr3: u32,    // appears to be the element stride/size — values mirror resource strides in tests
    data: Vec<u8>, // raw resource payload bytes
}
/// Assembles a nested NRes archive from `entries`: a 16-byte header, all
/// resource payloads (each 8-byte aligned), then one 64-byte directory
/// record per entry. The header's total-size field is patched at the end.
fn build_nested_nres(entries: &[SyntheticEntry]) -> Vec<u8> {
    // Byte offset of the total_size field inside the header.
    const TOTAL_SIZE_AT: usize = 12;

    let mut out = Vec::new();
    out.extend_from_slice(b"NRes");
    out.extend_from_slice(&0x100u32.to_le_bytes());
    out.extend_from_slice(
        &u32::try_from(entries.len())
            .expect("entry count overflow in test")
            .to_le_bytes(),
    );
    out.extend_from_slice(&0u32.to_le_bytes()); // total_size placeholder, patched below

    // Emit each payload, recording where it starts; pad to 8-byte alignment.
    let mut offsets = Vec::with_capacity(entries.len());
    for entry in entries {
        offsets.push(u32::try_from(out.len()).expect("offset overflow in test"));
        out.extend_from_slice(&entry.data);
        while !out.len().is_multiple_of(8) {
            out.push(0);
        }
    }

    // Directory records: kind, attr1, attr2, size, attr3, 36-byte name,
    // payload offset, sort index.
    for (index, entry) in entries.iter().enumerate() {
        out.extend_from_slice(&entry.kind.to_le_bytes());
        out.extend_from_slice(&entry.attr1.to_le_bytes());
        out.extend_from_slice(&entry.attr2.to_le_bytes());
        out.extend_from_slice(
            &u32::try_from(entry.data.len())
                .expect("size overflow in test")
                .to_le_bytes(),
        );
        out.extend_from_slice(&entry.attr3.to_le_bytes());
        let mut name_field = [0u8; 36];
        let name = entry.name.as_bytes();
        assert!(name.len() <= 35, "name too long for synthetic test");
        name_field[..name.len()].copy_from_slice(name);
        out.extend_from_slice(&name_field);
        out.extend_from_slice(&offsets[index].to_le_bytes());
        out.extend_from_slice(&(index as u32).to_le_bytes());
    }

    let total = u32::try_from(out.len()).expect("size overflow in test");
    out[TOTAL_SIZE_AT..TOTAL_SIZE_AT + 4].copy_from_slice(&total.to_le_bytes());
    out
}
/// Convenience constructor for a `SyntheticEntry` with the default
/// attribute words used throughout these tests (attr1 = 1, attr2 = 0).
fn synthetic_entry(kind: u32, name: &str, attr3: u32, data: Vec<u8>) -> SyntheticEntry {
    SyntheticEntry {
        kind,
        name: name.to_owned(),
        attr1: 1,
        attr2: 0,
        attr3,
        data,
    }
}
/// Builds a Res1 node table in the 38-byte-stride layout.
///
/// Each record carries 15 little-endian `u16` slot references starting at
/// byte 8, all initialized to `u16::MAX` (unused). When `node0_slot00` is
/// given, node 0's first slot reference is set to that value.
fn res1_stride38_nodes(node_count: usize, node0_slot00: Option<u16>) -> Vec<u8> {
    let mut table = vec![0u8; node_count.saturating_mul(38)];
    for record in table.chunks_exact_mut(38) {
        for slot_ref in record[8..38].chunks_exact_mut(2) {
            slot_ref.copy_from_slice(&u16::MAX.to_le_bytes());
        }
    }
    if let Some(slot) = node0_slot00 {
        table[8..10].copy_from_slice(&slot.to_le_bytes());
    }
    table
}
/// Builds a Res1 node table in the 24-byte-stride layout; these records
/// carry no slot references, so all-zero bytes suffice.
fn res1_stride24_nodes(node_count: usize) -> Vec<u8> {
    let len = node_count.saturating_mul(24);
    std::iter::repeat(0u8).take(len).collect()
}
/// Builds a Res2 payload holding one 68-byte slot record at offset 0x8C.
/// tri_start/tri_count stay zero; the batch range comes from the arguments.
fn res2_single_slot(batch_start: u16, batch_count: u16) -> Vec<u8> {
    const SLOT_BASE: usize = 0x8C;
    let mut payload = vec![0u8; SLOT_BASE + 68];
    let fields: [(usize, u16); 4] = [
        (0, 0),           // tri_start
        (2, 0),           // tri_count
        (4, batch_start), // batch_start
        (6, batch_count), // batch_count
    ];
    for (offset, value) in fields {
        payload[SLOT_BASE + offset..SLOT_BASE + offset + 2]
            .copy_from_slice(&value.to_le_bytes());
    }
    payload
}
/// Builds a Res3 payload: three [f32; 3] vertex positions forming a unit
/// right triangle in the XY plane, serialized little-endian.
fn res3_triangle_positions() -> Vec<u8> {
    let vertices: [[f32; 3]; 3] = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]];
    let mut out = Vec::with_capacity(36);
    for vertex in vertices {
        for component in vertex {
            out.extend_from_slice(&component.to_le_bytes());
        }
    }
    out
}
/// Builds a Res4 payload: one packed 4-byte normal record
/// (bytes 127, 0, 128, 0).
fn res4_normals() -> Vec<u8> {
    [127u8, 0, 128, 0].to_vec()
}
/// Builds a Res5 payload: one packed UV pair (1024, -1024) as
/// little-endian `i16` values.
fn res5_uv0() -> Vec<u8> {
    let mut out = Vec::with_capacity(4);
    out.extend_from_slice(&1024i16.to_le_bytes());
    out.extend_from_slice(&(-1024i16).to_le_bytes());
    out
}
/// Builds a Res6 payload: indices 0, 1, 2 as little-endian `u16` values.
fn res6_triangle_indices() -> Vec<u8> {
    (0u16..3).flat_map(|index| index.to_le_bytes()).collect()
}
/// Builds a Res13 payload: one 20-byte batch record with the given index
/// window. All other fields (including bytes 0..4 and base_vertex at
/// bytes 16..20) are left zero.
fn res13_single_batch(index_start: u32, index_count: u16) -> Vec<u8> {
    let mut record = vec![0u8; 20];
    record[8..10].copy_from_slice(&index_count.to_le_bytes());
    record[10..14].copy_from_slice(&index_start.to_le_bytes());
    record
}
/// Builds a Res10 payload: for each present name, a little-endian `u32`
/// byte length, the name bytes, then a NUL terminator; absent names emit
/// only a zero length marker.
fn res10_names(names: &[Option<&str>]) -> Vec<u8> {
    let mut blob = Vec::new();
    for entry in names {
        if let Some(text) = entry {
            let raw = text.as_bytes();
            let len = u32::try_from(raw.len()).expect("name size overflow in test");
            blob.extend_from_slice(&len.to_le_bytes());
            blob.extend_from_slice(raw);
            blob.push(0); // trailing NUL
        } else {
            blob.extend_from_slice(&0u32.to_le_bytes());
        }
    }
    blob
}
/// The minimal resource set a model payload must carry: node table, slots,
/// positions, indices, and batches — one triangle, one slot, one batch.
fn base_synthetic_entries() -> Vec<SyntheticEntry> {
    let mut entries = Vec::with_capacity(5);
    entries.push(synthetic_entry(RES1_NODE_TABLE, "Res1", 38, res1_stride38_nodes(1, Some(0))));
    entries.push(synthetic_entry(RES2_SLOTS, "Res2", 68, res2_single_slot(0, 1)));
    entries.push(synthetic_entry(RES3_POSITIONS, "Res3", 12, res3_triangle_positions()));
    entries.push(synthetic_entry(RES6_INDICES, "Res6", 2, res6_triangle_indices()));
    entries.push(synthetic_entry(RES13_BATCHES, "Res13", 20, res13_single_batch(0, 3)));
    entries
}
#[test]
fn parse_all_game_msh_models() {
let archives = nres_test_files();
@@ -137,156 +297,7 @@ fn parse_all_game_msh_models() {
#[test]
fn parse_minimal_synthetic_model() {
// Nested NRes with required resources only.
let mut payload = Vec::new();
payload.extend_from_slice(b"NRes");
payload.extend_from_slice(&0x100u32.to_le_bytes());
payload.extend_from_slice(&5u32.to_le_bytes()); // entry_count
payload.extend_from_slice(&0u32.to_le_bytes()); // total_size placeholder
let mut resource_offsets = Vec::new();
let mut resource_sizes = Vec::new();
let mut resource_types = Vec::new();
let mut resource_attr3 = Vec::new();
let mut resource_names = Vec::new();
let add_resource = |payload: &mut Vec<u8>,
offsets: &mut Vec<u32>,
sizes: &mut Vec<u32>,
types: &mut Vec<u32>,
attr3: &mut Vec<u32>,
names: &mut Vec<String>,
kind: u32,
name: &str,
data: &[u8],
attr3_val: u32| {
offsets.push(u32::try_from(payload.len()).expect("offset overflow"));
payload.extend_from_slice(data);
while !payload.len().is_multiple_of(8) {
payload.push(0);
}
sizes.push(u32::try_from(data.len()).expect("size overflow"));
types.push(kind);
attr3.push(attr3_val);
names.push(name.to_string());
};
let node = {
let mut b = vec![0u8; 38];
// slot[0][0] = 0
b[8..10].copy_from_slice(&0u16.to_le_bytes());
for i in 1..15 {
let off = 8 + i * 2;
b[off..off + 2].copy_from_slice(&u16::MAX.to_le_bytes());
}
b
};
let mut res2 = vec![0u8; 0x8C + 68];
res2[0x8C..0x8C + 2].copy_from_slice(&0u16.to_le_bytes()); // tri_start
res2[0x8C + 2..0x8C + 4].copy_from_slice(&0u16.to_le_bytes()); // tri_count
res2[0x8C + 4..0x8C + 6].copy_from_slice(&0u16.to_le_bytes()); // batch_start
res2[0x8C + 6..0x8C + 8].copy_from_slice(&1u16.to_le_bytes()); // batch_count
let positions = [0f32, 0f32, 0f32, 1f32, 0f32, 0f32, 0f32, 1f32, 0f32]
.iter()
.flat_map(|v| v.to_le_bytes())
.collect::<Vec<_>>();
let indices = [0u16, 1, 2]
.iter()
.flat_map(|v| v.to_le_bytes())
.collect::<Vec<_>>();
let batch = {
let mut b = vec![0u8; 20];
b[0..2].copy_from_slice(&0u16.to_le_bytes());
b[2..4].copy_from_slice(&0u16.to_le_bytes());
b[8..10].copy_from_slice(&3u16.to_le_bytes()); // index_count
b[10..14].copy_from_slice(&0u32.to_le_bytes()); // index_start
b[16..20].copy_from_slice(&0u32.to_le_bytes()); // base_vertex
b
};
add_resource(
&mut payload,
&mut resource_offsets,
&mut resource_sizes,
&mut resource_types,
&mut resource_attr3,
&mut resource_names,
RES1_NODE_TABLE,
"Res1",
&node,
38,
);
add_resource(
&mut payload,
&mut resource_offsets,
&mut resource_sizes,
&mut resource_types,
&mut resource_attr3,
&mut resource_names,
RES2_SLOTS,
"Res2",
&res2,
68,
);
add_resource(
&mut payload,
&mut resource_offsets,
&mut resource_sizes,
&mut resource_types,
&mut resource_attr3,
&mut resource_names,
RES3_POSITIONS,
"Res3",
&positions,
12,
);
add_resource(
&mut payload,
&mut resource_offsets,
&mut resource_sizes,
&mut resource_types,
&mut resource_attr3,
&mut resource_names,
RES6_INDICES,
"Res6",
&indices,
2,
);
add_resource(
&mut payload,
&mut resource_offsets,
&mut resource_sizes,
&mut resource_types,
&mut resource_attr3,
&mut resource_names,
RES13_BATCHES,
"Res13",
&batch,
20,
);
let directory_offset = payload.len();
for i in 0..resource_types.len() {
payload.extend_from_slice(&resource_types[i].to_le_bytes());
payload.extend_from_slice(&1u32.to_le_bytes()); // attr1
payload.extend_from_slice(&0u32.to_le_bytes()); // attr2
payload.extend_from_slice(&resource_sizes[i].to_le_bytes());
payload.extend_from_slice(&resource_attr3[i].to_le_bytes());
let mut name_raw = [0u8; 36];
let bytes = resource_names[i].as_bytes();
name_raw[..bytes.len()].copy_from_slice(bytes);
payload.extend_from_slice(&name_raw);
payload.extend_from_slice(&resource_offsets[i].to_le_bytes());
payload.extend_from_slice(&(i as u32).to_le_bytes()); // sort index
}
let total_size = u32::try_from(payload.len()).expect("size overflow");
payload[12..16].copy_from_slice(&total_size.to_le_bytes());
assert_eq!(
directory_offset + resource_types.len() * 64,
payload.len(),
"synthetic nested NRes layout invalid"
);
let payload = build_nested_nres(&base_synthetic_entries());
let model = parse_model_payload(&payload).expect("failed to parse synthetic model");
assert_eq!(model.node_count, 1);
assert_eq!(model.positions.len(), 3);
@@ -294,3 +305,117 @@ fn parse_minimal_synthetic_model() {
assert_eq!(model.batches.len(), 1);
assert_eq!(model.slot_index(0, 0, 0), Some(0));
}
#[test]
fn parse_synthetic_stride24_variant() {
    // Swap the node table for the 24-byte-stride layout, which carries no
    // slot references — slot lookups must therefore return None.
    let mut entries = base_synthetic_entries();
    entries[0] = synthetic_entry(RES1_NODE_TABLE, "Res1", 24, res1_stride24_nodes(1));
    let model = parse_model_payload(&build_nested_nres(&entries))
        .expect("failed to parse stride24 model");
    assert_eq!(model.node_stride, 24);
    assert_eq!(model.node_count, 1);
    assert_eq!(model.slot_index(0, 0, 0), None);
}
#[test]
fn parse_synthetic_model_with_optional_res4_res5_res10() {
    // Two-node model carrying every optional resource: normals (Res4),
    // UVs (Res5), and node names (Res10, with a gap for node 1).
    let mut entries = base_synthetic_entries();
    entries[0] = synthetic_entry(RES1_NODE_TABLE, "Res1", 38, res1_stride38_nodes(2, Some(0)));
    entries.extend([
        synthetic_entry(RES4_NORMALS, "Res4", 4, res4_normals()),
        synthetic_entry(RES5_UV0, "Res5", 4, res5_uv0()),
        synthetic_entry(RES10_NAMES, "Res10", 1, res10_names(&[Some("Hull"), None])),
    ]);
    let model = parse_model_payload(&build_nested_nres(&entries))
        .expect("failed to parse model with optional data");
    assert_eq!(model.node_count, 2);
    assert_eq!(model.normals.as_ref().map(Vec::len), Some(1));
    assert_eq!(model.uv0.as_ref().map(Vec::len), Some(1));
    assert_eq!(model.node_names, Some(vec![Some("Hull".to_string()), None]));
}
#[test]
fn parse_fails_when_required_resource_missing() {
    // Dropping the mandatory batch table must surface MissingResource.
    let entries: Vec<_> = base_synthetic_entries()
        .into_iter()
        .filter(|entry| entry.kind != RES13_BATCHES)
        .collect();
    assert!(matches!(
        parse_model_payload(&build_nested_nres(&entries)),
        Err(Error::MissingResource {
            kind: RES13_BATCHES,
            label: "Res13"
        })
    ));
}
#[test]
fn parse_fails_for_invalid_res2_size() {
    // A Res2 blob one byte short of the slot area (0x8C) must be rejected.
    let mut entries = base_synthetic_entries();
    entries[1] = synthetic_entry(RES2_SLOTS, "Res2", 68, vec![0u8; 0x8B]);
    let result = parse_model_payload(&build_nested_nres(&entries));
    assert!(matches!(result, Err(Error::InvalidRes2Size { .. })));
}
#[test]
fn parse_fails_for_unsupported_node_stride() {
    // A stride of 30 matches neither known node layout (24 or 38 bytes
    // elsewhere in these tests) and must be rejected with the stride echoed.
    let mut entries = base_synthetic_entries();
    entries[0] = synthetic_entry(RES1_NODE_TABLE, "Res1", 30, vec![0u8; 30]);
    let result = parse_model_payload(&build_nested_nres(&entries));
    assert!(matches!(
        result,
        Err(Error::UnsupportedNodeStride { stride: 30 })
    ));
}
#[test]
fn parse_fails_for_invalid_optional_resource_size() {
    // Three bytes cannot form whole 4-byte normal records, so the optional
    // Res4 resource must fail size validation.
    let mut entries = base_synthetic_entries();
    entries.push(synthetic_entry(RES4_NORMALS, "Res4", 4, vec![1, 2, 3]));
    let result = parse_model_payload(&build_nested_nres(&entries));
    assert!(matches!(
        result,
        Err(Error::InvalidResourceSize { label: "Res4", .. })
    ));
}
#[test]
fn parse_fails_for_slot_batch_range_out_of_bounds() {
    // The slot claims two batches while the table holds only one.
    let mut entries = base_synthetic_entries();
    entries[1] = synthetic_entry(RES2_SLOTS, "Res2", 68, res2_single_slot(0, 2));
    let result = parse_model_payload(&build_nested_nres(&entries));
    assert!(matches!(
        result,
        Err(Error::IndexOutOfBounds {
            label: "Res2.batch_range",
            ..
        })
    ));
}
#[test]
fn parse_fails_for_batch_index_range_out_of_bounds() {
    // Batch window [1, 4) overruns the three available indices.
    let mut entries = base_synthetic_entries();
    entries[4] = synthetic_entry(RES13_BATCHES, "Res13", 20, res13_single_batch(1, 3));
    let result = parse_model_payload(&build_nested_nres(&entries));
    assert!(matches!(
        result,
        Err(Error::IndexOutOfBounds {
            label: "Res13.index_range",
            ..
        })
    ));
}

View File

@@ -92,13 +92,13 @@ impl Archive {
}
pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
self.entries
.iter()
.enumerate()
.map(|(idx, entry)| EntryRef {
id: EntryId(u32::try_from(idx).expect("entry count validated at parse")),
self.entries.iter().enumerate().filter_map(|(idx, entry)| {
let id = u32::try_from(idx).ok()?;
Some(EntryRef {
id: EntryId(id),
meta: &entry.meta,
})
})
}
pub fn find(&self, name: &str) -> Option<EntryId> {
@@ -125,9 +125,8 @@ impl Archive {
Ordering::Less => high = mid,
Ordering::Greater => low = mid + 1,
Ordering::Equal => {
return Some(EntryId(
u32::try_from(target_idx).expect("entry count validated at parse"),
))
let id = u32::try_from(target_idx).ok()?;
return Some(EntryId(id));
}
}
}
@@ -137,9 +136,8 @@ impl Archive {
if cmp_name_case_insensitive(name.as_bytes(), entry_name_bytes(&entry.name_raw))
== Ordering::Equal
{
Some(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))
let id = u32::try_from(idx).ok()?;
Some(EntryId(id))
} else {
None
}
@@ -197,7 +195,7 @@ impl Archive {
let Some(entry) = self.entries.get(idx) else {
return Err(Error::EntryIdOutOfRange {
id: id.0,
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
entry_count: saturating_u32_len(self.entries.len()),
});
};
checked_range(
@@ -248,13 +246,13 @@ pub struct NewEntry<'a> {
impl Editor {
pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
self.entries
.iter()
.enumerate()
.map(|(idx, entry)| EntryRef {
id: EntryId(u32::try_from(idx).expect("entry count validated at add")),
self.entries.iter().enumerate().filter_map(|(idx, entry)| {
let id = u32::try_from(idx).ok()?;
Some(EntryRef {
id: EntryId(id),
meta: &entry.meta,
})
})
}
pub fn add(&mut self, entry: NewEntry<'_>) -> Result<EntryId> {
@@ -283,7 +281,7 @@ impl Editor {
let Some(entry) = self.entries.get_mut(idx) else {
return Err(Error::EntryIdOutOfRange {
id: id.0,
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
entry_count: saturating_u32_len(self.entries.len()),
});
};
entry.meta.data_size = u32::try_from(data.len()).map_err(|_| Error::IntegerOverflow)?;
@@ -297,7 +295,7 @@ impl Editor {
if idx >= self.entries.len() {
return Err(Error::EntryIdOutOfRange {
id: id.0,
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
entry_count: saturating_u32_len(self.entries.len()),
});
}
self.entries.remove(idx);
@@ -350,6 +348,8 @@ impl Editor {
});
for (idx, entry) in self.entries.iter_mut().enumerate() {
// sort_index stores the original-entry index at sorted position `idx`.
// This mirrors the format emitted by the retail assets and test fixtures.
entry.meta.sort_index =
u32::try_from(sort_order[idx]).map_err(|_| Error::IntegerOverflow)?;
}
@@ -599,8 +599,12 @@ fn ascii_lower(value: u8) -> u8 {
}
}
/// Clamps a `usize` length into `u32`, saturating at `u32::MAX` when the
/// value does not fit.
fn saturating_u32_len(len: usize) -> u32 {
    match u32::try_from(len) {
        Ok(value) => value,
        Err(_) => u32::MAX,
    }
}
fn prefetch_pages(bytes: &[u8]) {
use std::sync::atomic::{compiler_fence, Ordering};
use std::hint::black_box;
let mut cursor = 0usize;
let mut sink = 0u8;
@@ -608,8 +612,7 @@ fn prefetch_pages(bytes: &[u8]) {
sink ^= bytes[cursor];
cursor = cursor.saturating_add(4096);
}
compiler_fence(Ordering::SeqCst);
let _ = sink;
black_box(sink);
}
fn write_atomic(path: &Path, content: &[u8]) -> Result<()> {
@@ -675,7 +678,8 @@ fn replace_file_atomically(src: &Path, dst: &Path) -> std::io::Result<()> {
let src_wide: Vec<u16> = src.as_os_str().encode_wide().chain(iter::once(0)).collect();
let dst_wide: Vec<u16> = dst.as_os_str().encode_wide().chain(iter::once(0)).collect();
// Replace destination in one OS call, avoiding remove+rename gaps on Windows.
// SAFETY: pointers reference NUL-terminated UTF-16 buffers that stay alive
// for the duration of the call; flags and argument contract match WinAPI.
let ok = unsafe {
MoveFileExW(
src_wide.as_ptr(),

View File

@@ -5,4 +5,6 @@ edition = "2021"
[dependencies]
msh-core = { path = "../msh-core" }
[dev-dependencies]
nres = { path = "../nres" }

View File

@@ -1,5 +1,7 @@
use msh_core::Model;
pub const DEFAULT_UV_SCALE: f32 = 1024.0;
#[derive(Clone, Debug)]
pub struct RenderVertex {
pub position: [f32; 3],
@@ -58,7 +60,12 @@ pub fn build_render_mesh(model: &Model, lod: usize, group: usize) -> RenderMesh
let uv = uv0
.and_then(|uvs| uvs.get(final_idx))
.copied()
.map(|packed| [packed[0] as f32 / 1024.0, packed[1] as f32 / 1024.0])
.map(|packed| {
[
packed[0] as f32 / DEFAULT_UV_SCALE,
packed[1] as f32 / DEFAULT_UV_SCALE,
]
})
.unwrap_or([0.0, 0.0]);
vertices.push(RenderVertex {
position: *pos,
@@ -76,38 +83,28 @@ pub fn build_render_mesh(model: &Model, lod: usize, group: usize) -> RenderMesh
}
pub fn compute_bounds(vertices: &[[f32; 3]]) -> Option<([f32; 3], [f32; 3])> {
let mut iter = vertices.iter();
let first = iter.next()?;
let mut min_v = *first;
let mut max_v = *first;
for v in iter {
for i in 0..3 {
if v[i] < min_v[i] {
min_v[i] = v[i];
}
if v[i] > max_v[i] {
max_v[i] = v[i];
}
}
}
Some((min_v, max_v))
compute_bounds_impl(vertices.iter().copied())
}
pub fn compute_bounds_for_mesh(vertices: &[RenderVertex]) -> Option<([f32; 3], [f32; 3])> {
let mut iter = vertices.iter();
let first = iter.next()?;
let mut min_v = first.position;
let mut max_v = first.position;
compute_bounds_impl(vertices.iter().map(|v| v.position))
}
for v in iter {
fn compute_bounds_impl<I>(mut positions: I) -> Option<([f32; 3], [f32; 3])>
where
I: Iterator<Item = [f32; 3]>,
{
let first = positions.next()?;
let mut min_v = first;
let mut max_v = first;
for pos in positions {
for i in 0..3 {
if v.position[i] < min_v[i] {
min_v[i] = v.position[i];
if pos[i] < min_v[i] {
min_v[i] = pos[i];
}
if v.position[i] > max_v[i] {
max_v[i] = v.position[i];
if pos[i] > max_v[i] {
max_v[i] = pos[i];
}
}
}

View File

@@ -129,3 +129,105 @@ fn compute_bounds_for_mesh_handles_empty_and_non_empty() {
assert_eq!(bounds.0, [-2.0, -1.0, 0.5]);
assert_eq!(bounds.1, [1.0, 5.0, 9.0]);
}
/// Builds a raw 38-byte-stride node table with one record per element of
/// `slot_ids`. All 15 u16 slot references (starting at byte 8 of each
/// record) are set to `u16::MAX` (unused); a `Some(id)` wires that node's
/// first slot reference to `id`.
fn nodes_with_slot_refs(slot_ids: &[Option<u16>]) -> Vec<u8> {
    const STRIDE: usize = 38;
    let mut raw = vec![0u8; slot_ids.len().saturating_mul(STRIDE)];
    for (record, slot_id) in raw.chunks_exact_mut(STRIDE).zip(slot_ids.iter().copied()) {
        for cell in record[8..STRIDE].chunks_exact_mut(2) {
            cell.copy_from_slice(&u16::MAX.to_le_bytes());
        }
        if let Some(slot_id) = slot_id {
            record[8..10].copy_from_slice(&slot_id.to_le_bytes());
        }
    }
    raw
}
/// Builds a test `Slot` covering `batch_count` batches starting at
/// `batch_start`; triangle range and all bounding data are zeroed.
fn slot(batch_start: u16, batch_count: u16) -> msh_core::Slot {
    msh_core::Slot {
        batch_start,
        batch_count,
        tri_start: 0,
        tri_count: 0,
        aabb_min: [0.0; 3],
        aabb_max: [0.0; 3],
        sphere_center: [0.0; 3],
        sphere_radius: 0.0,
        opaque: [0; 5],
    }
}
/// Builds a test `Batch` with the given index window and base vertex;
/// flags, material, and opaque fields are zeroed.
fn batch(index_start: u32, index_count: u16, base_vertex: u32) -> msh_core::Batch {
    msh_core::Batch {
        index_start,
        index_count,
        base_vertex,
        batch_flags: 0,
        material_index: 0,
        opaque4: 0,
        opaque6: 0,
        opaque14: 0,
    }
}
#[test]
fn build_render_mesh_handles_empty_slot_model() {
    // A single node with no slot reference and no geometry must produce an
    // empty mesh rather than panic.
    let model = msh_core::Model {
        node_stride: 38,
        node_count: 1,
        nodes_raw: nodes_with_slot_refs(&[None]),
        slots: Vec::new(),
        positions: vec![[0.0, 0.0, 0.0]],
        normals: None,
        uv0: None,
        indices: Vec::new(),
        batches: Vec::new(),
        node_names: None,
    };
    let mesh = build_render_mesh(&model, 0, 0);
    assert_eq!(mesh.vertices.len(), 0);
    assert_eq!(mesh.batch_count, 0);
    assert_eq!(mesh.triangle_count(), 0);
}
#[test]
fn build_render_mesh_supports_multi_node_and_uv_scaling() {
    // Two nodes, each referencing its own slot and batch. The second batch
    // reads vertices 3..6 via base_vertex = 3 while reusing indices 0,1,2.
    let model = msh_core::Model {
        node_stride: 38,
        node_count: 2,
        nodes_raw: nodes_with_slot_refs(&[Some(0), Some(1)]),
        slots: vec![slot(0, 1), slot(1, 1)],
        positions: vec![
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0],
            [2.0, 0.0, 0.0],
            [3.0, 0.0, 0.0],
            [2.0, 1.0, 0.0],
        ],
        normals: None,
        // Packed i16 UVs; build_render_mesh divides by DEFAULT_UV_SCALE
        // (1024), so e.g. (1024, -1024) becomes (1.0, -1.0).
        uv0: Some(vec![
            [1024, -1024],
            [512, 256],
            [0, 0],
            [1024, 1024],
            [2048, 1024],
            [1024, 0],
        ]),
        indices: vec![0, 1, 2, 0, 1, 2],
        batches: vec![batch(0, 3, 0), batch(3, 3, 3)],
        node_names: None,
    };
    let mesh = build_render_mesh(&model, 0, 0);
    // Both batches contribute: 2 * 3 indices = 2 triangles, 6 vertices.
    assert_eq!(mesh.batch_count, 2);
    assert_eq!(mesh.vertices.len(), 6);
    assert_eq!(mesh.triangle_count(), 2);
    // UVs scaled by 1/1024 as asserted below.
    assert_eq!(mesh.vertices[0].uv0, [1.0, -1.0]);
    assert_eq!(mesh.vertices[1].uv0, [0.5, 0.25]);
    assert_eq!(mesh.vertices[2].uv0, [0.0, 0.0]);
    assert_eq!(mesh.vertices[3].uv0, [1.0, 1.0]);
}

View File

@@ -8,6 +8,7 @@ default = []
demo = ["dep:sdl2", "dep:glow", "dep:image"]
[dependencies]
encoding_rs = "0.8"
msh-core = { path = "../msh-core" }
nres = { path = "../nres" }
render-core = { path = "../render-core" }

View File

@@ -1,5 +1,7 @@
use encoding_rs::WINDOWS_1251;
use msh_core::{parse_model_payload, Model};
use nres::{Archive, EntryRef};
use std::fmt;
use std::path::{Path, PathBuf};
use texm::{decode_mip_rgba8, parse_texm};
@@ -22,6 +24,37 @@ pub enum Error {
InvalidMaterial(String),
}
impl fmt::Display for Error {
    // Human-readable, single-line messages. Wrapped variants (Nres, Msh,
    // Texm, Io) defer to the inner error's Display; leaf variants append
    // the offending name or reason.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Nres(err) => write!(f, "{err}"),
            Self::Msh(err) => write!(f, "{err}"),
            Self::Texm(err) => write!(f, "{err}"),
            Self::Io(err) => write!(f, "{err}"),
            Self::NoMshEntries => write!(f, "archive does not contain .msh entries"),
            Self::ModelNotFound(name) => write!(f, "model not found: {name}"),
            Self::NoTexmEntries => write!(f, "archive does not contain Texm entries"),
            Self::TextureNotFound(name) => write!(f, "texture not found: {name}"),
            Self::MaterialNotFound(name) => write!(f, "material not found: {name}"),
            Self::WearNotFound(name) => write!(f, "wear entry not found: {name}"),
            Self::InvalidWear(reason) => write!(f, "invalid WEAR payload: {reason}"),
            Self::InvalidMaterial(reason) => write!(f, "invalid MAT0 payload: {reason}"),
        }
    }
}
impl std::error::Error for Error {
    // Expose the wrapped lower-level error for variants that carry one so
    // error-chain walkers (e.g. anyhow) can report the root cause; the
    // string-based variants have no further source.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::Nres(err) => Some(err),
            Self::Msh(err) => Some(err),
            Self::Texm(err) => Some(err),
            Self::Io(err) => Some(err),
            _ => None,
        }
    }
}
impl From<nres::error::Error> for Error {
fn from(value: nres::error::Error) -> Self {
Self::Nres(value)
@@ -280,7 +313,7 @@ fn find_material_entry_with_fallback<'a>(
}
fn parse_wear_material_names(payload: &[u8]) -> Result<Vec<String>> {
let text = String::from_utf8_lossy(payload).replace('\r', "");
let text = decode_cp1251(payload).replace('\r', "");
let mut lines = text.lines();
let Some(first) = lines.next() else {
return Err(Error::InvalidWear(String::from("WEAR payload is empty")));
@@ -360,9 +393,7 @@ fn parse_primary_texture_name_from_mat0(payload: &[u8], attr2: u32) -> Result<Op
.iter()
.position(|&b| b == 0)
.unwrap_or(name_raw.len());
let name = String::from_utf8_lossy(&name_raw[..name_end])
.trim()
.to_string();
let name = decode_cp1251(&name_raw[..name_end]).trim().to_string();
if !name.is_empty() {
return Ok(Some(name));
}
@@ -371,6 +402,11 @@ fn parse_primary_texture_name_from_mat0(payload: &[u8], attr2: u32) -> Result<Op
Ok(None)
}
/// Decodes Windows-1251 (CP1251) encoded bytes into an owned UTF-8
/// `String`, ignoring the encoder/had-errors outputs of the decode.
fn decode_cp1251(bytes: &[u8]) -> String {
    WINDOWS_1251.decode(bytes).0.into_owned()
}
fn load_texture_from_archive_by_name(archive: &Archive, name: &str) -> Result<LoadedTexture> {
let Some(id) = archive.find(name) else {
return Err(Error::TextureNotFound(name.to_string()));
@@ -524,4 +560,45 @@ mod tests {
assert!(texture.width > 0 && texture.height > 0);
assert!(!texture.rgba8.is_empty());
}
#[test]
fn parse_wear_material_names_parses_counted_lines() {
    // CRLF-terminated WEAR body: a count line followed by "<index> <name>" rows.
    let payload = b"2\r\n0 MAT_A\r\n1 MAT_B\r\n";
    let materials =
        parse_wear_material_names(payload).expect("failed to parse valid WEAR payload");
    assert_eq!(materials, ["MAT_A", "MAT_B"]);
}
#[test]
fn parse_wear_material_names_rejects_invalid_payload() {
    // The declared count (2) exceeds the rows actually present (1).
    let result = parse_wear_material_names(b"2\n0 ONLY_ONE\n");
    assert!(matches!(result, Err(Error::InvalidWear(_))));
}
#[test]
fn parse_primary_texture_name_from_mat0_respects_attr2_layout() {
    // Fixture layout: 4-byte header, 10 extra bytes implied by attr2=4,
    // then one 34-byte phase record with the name field 18 bytes in.
    let mut payload = vec![0u8; 4 + 10 + 34];
    payload[0..2].copy_from_slice(&1u16.to_le_bytes()); // phase_count
    // attr2=4 adds 10 bytes before phase table
    let name = b"TEX_MAIN";
    payload[4 + 10 + 18..4 + 10 + 18 + name.len()].copy_from_slice(name);
    let parsed = parse_primary_texture_name_from_mat0(&payload, 4)
        .expect("failed to parse MAT0 payload with attr2=4");
    assert_eq!(parsed, Some("TEX_MAIN".to_string()));
}
#[test]
fn parse_primary_texture_name_from_mat0_decodes_cp1251_bytes() {
    // The name field must be decoded as Windows-1251, not UTF-8: a lone
    // 0xC0 byte is Cyrillic 'А' in CP1251 but invalid as UTF-8.
    let mut payload = vec![0u8; 4 + 34];
    payload[0..2].copy_from_slice(&1u16.to_le_bytes()); // phase_count
    payload[4 + 18] = 0xC0; // 'А' in CP1251
    let parsed =
        parse_primary_texture_name_from_mat0(&payload, 0).expect("failed to parse MAT0");
    assert_eq!(parsed, Some("А".to_string()));
}
}

View File

@@ -11,6 +11,7 @@ struct Args {
group: usize,
width: u32,
height: u32,
fov_deg: f32,
capture: Option<PathBuf>,
angle: Option<f32>,
spin_rate: f32,
@@ -32,6 +33,7 @@ fn parse_args() -> Result<Args, String> {
let mut group = 0usize;
let mut width = 1280u32;
let mut height = 720u32;
let mut fov_deg = 60.0f32;
let mut capture = None;
let mut angle = None;
let mut spin_rate = 0.35f32;
@@ -94,6 +96,17 @@ fn parse_args() -> Result<Args, String> {
return Err(String::from("--height must be > 0"));
}
}
"--fov" => {
let value = it
.next()
.ok_or_else(|| String::from("missing value for --fov"))?;
fov_deg = value
.parse::<f32>()
.map_err(|_| String::from("invalid --fov value"))?;
if !(1.0..=179.0).contains(&fov_deg) {
return Err(String::from("--fov must be in range [1, 179]"));
}
}
"--capture" => {
let value = it
.next()
@@ -163,6 +176,7 @@ fn parse_args() -> Result<Args, String> {
group,
width,
height,
fov_deg,
capture,
angle,
spin_rate,
@@ -176,7 +190,7 @@ fn parse_args() -> Result<Args, String> {
fn print_help() {
eprintln!(
"parkan-render-demo --archive <path> [--model <name.msh>] [--lod N] [--group N] [--width W] [--height H]"
"parkan-render-demo --archive <path> [--model <name.msh>] [--lod N] [--group N] [--width W] [--height H] [--fov DEG]"
);
eprintln!(" [--capture <out.png>] [--angle RAD] [--spin-rate RAD_PER_SEC]");
eprintln!(" [--texture <name>] [--texture-archive <path>] [--material-archive <path>] [--wear <name.wea>] [--no-texture]");
@@ -202,7 +216,7 @@ fn run(args: Args) -> Result<(), String> {
let loaded_model = load_model_with_name_from_archive(&args.archive, args.model.as_deref())
.map_err(|err| {
format!(
"failed to load model from archive {}: {err:?}",
"failed to load model from archive {}: {err}",
args.archive.display()
)
})?;
@@ -289,6 +303,7 @@ fn run(args: Args) -> Result<(), String> {
vertex_data.push(vertex.uv0[0]);
vertex_data.push(vertex.uv0[1]);
}
let vertex_bytes = f32_slice_to_ne_bytes(&vertex_data);
let gl = unsafe {
glow::Context::from_loader_function(|name| video.gl_get_proc_address(name) as *const _)
@@ -306,11 +321,7 @@ fn run(args: Args) -> Result<(), String> {
let vbo = unsafe { gl.create_buffer().map_err(|e| e.to_string())? };
unsafe {
gl.bind_buffer(glow::ARRAY_BUFFER, Some(vbo));
gl.buffer_data_u8_slice(
glow::ARRAY_BUFFER,
cast_slice_u8(&vertex_data),
glow::STATIC_DRAW,
);
gl.buffer_data_u8_slice(glow::ARRAY_BUFFER, &vertex_bytes, glow::STATIC_DRAW);
gl.bind_buffer(glow::ARRAY_BUFFER, None);
}
@@ -388,11 +399,9 @@ fn resolve_texture(args: &Args, model_name: &str) -> Result<Option<LoadedTexture
|| args.material_archive.is_some()
|| args.wear.is_some()
{
Err(format!("failed to resolve texture: {err:?}"))
Err(format!("failed to resolve texture: {err}"))
} else {
eprintln!(
"warning: auto texture resolve failed ({err:?}), fallback to solid color"
);
eprintln!("warning: auto texture resolve failed ({err}), fallback to solid color");
Ok(None)
}
}
@@ -451,7 +460,14 @@ fn run_capture(
capture_path: &Path,
) -> Result<(), String> {
let angle = args.angle.unwrap_or(0.0);
let mvp = compute_mvp(args.width, args.height, center, camera_distance, angle);
let mvp = compute_mvp(
args.width,
args.height,
args.fov_deg,
center,
camera_distance,
angle,
);
unsafe {
draw_frame(
gl,
@@ -515,7 +531,7 @@ fn run_interactive(
let angle = args
.angle
.unwrap_or(start.elapsed().as_secs_f32() * args.spin_rate);
let mvp = compute_mvp(w, h, center, camera_distance, angle);
let mvp = compute_mvp(w, h, args.fov_deg, center, camera_distance, angle);
unsafe {
draw_frame(
@@ -543,12 +559,13 @@ fn run_interactive(
fn compute_mvp(
width: u32,
height: u32,
fov_deg: f32,
center: [f32; 3],
camera_distance: f32,
angle_rad: f32,
) -> [f32; 16] {
let aspect = (width as f32 / (height.max(1) as f32)).max(0.01);
let proj = mat4_perspective(60.0_f32.to_radians(), aspect, 0.01, camera_distance * 10.0);
let proj = mat4_perspective(fov_deg.to_radians(), aspect, 0.01, camera_distance * 10.0);
let view = mat4_translation(0.0, 0.0, -camera_distance);
let center_shift = mat4_translation(-center[0], -center[1], -center[2]);
let rot = mat4_rotation_y(angle_rad);
@@ -733,8 +750,12 @@ void main() {
Ok(program)
}
fn cast_slice_u8<T>(slice: &[T]) -> &[u8] {
unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, std::mem::size_of_val(slice)) }
/// Serializes a slice of `f32` values into their native-endian byte
/// representation (4 bytes per value, in order).
fn f32_slice_to_ne_bytes(slice: &[f32]) -> Vec<u8> {
    slice
        .iter()
        .flat_map(|value| value.to_ne_bytes())
        .collect()
}
fn mat4_identity() -> [f32; 16] {

View File

@@ -135,7 +135,12 @@ impl<'a> LzhDecoder<'a> {
let mut node = self.son[LZH_R];
while node < LZH_T {
let bit = usize::from(self.bit_reader.read_bit()?);
node = self.son[node + bit];
let branch = node
.checked_add(bit)
.ok_or(Error::DecompressionFailed("lzss-huffman tree overflow"))?;
node = *self.son.get(branch).ok_or(Error::DecompressionFailed(
"lzss-huffman tree out of bounds",
))?;
}
let c = node - LZH_T;

View File

@@ -111,13 +111,13 @@ impl Library {
}
pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
self.entries
.iter()
.enumerate()
.map(|(idx, entry)| EntryRef {
id: EntryId(u32::try_from(idx).expect("entry count validated at parse")),
self.entries.iter().enumerate().filter_map(|(idx, entry)| {
let id = u32::try_from(idx).ok()?;
Some(EntryRef {
id: EntryId(id),
meta: &entry.meta,
})
})
}
pub fn find(&self, name: &str) -> Option<EntryId> {
@@ -161,9 +161,8 @@ impl Library {
Ordering::Less => high = mid,
Ordering::Greater => low = mid + 1,
Ordering::Equal => {
return Some(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))
let id = u32::try_from(idx).ok()?;
return Some(EntryId(id));
}
}
}
@@ -171,9 +170,8 @@ impl Library {
// Linear fallback search
self.entries.iter().enumerate().find_map(|(idx, entry)| {
if cmp_c_string(query_bytes, c_name_bytes(&entry.name_raw)) == Ordering::Equal {
Some(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))
let id = u32::try_from(idx).ok()?;
Some(EntryId(id))
} else {
None
}
@@ -251,7 +249,7 @@ impl Library {
.get(idx)
.ok_or_else(|| Error::EntryIdOutOfRange {
id: id.0,
entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
entry_count: saturating_u32_len(self.entries.len()),
})
}
@@ -317,18 +315,15 @@ impl Library {
}
for (idx, entry) in self.entries.iter().enumerate() {
let packed = self
.load_packed(EntryId(
u32::try_from(idx).expect("entry count validated at parse"),
))?
.packed;
let id = u32::try_from(idx).map_err(|_| Error::IntegerOverflow)?;
let packed = self.load_packed(EntryId(id))?.packed;
let start =
usize::try_from(entry.data_offset_raw).map_err(|_| Error::IntegerOverflow)?;
for (offset, byte) in packed.iter().copied().enumerate() {
let pos = start.checked_add(offset).ok_or(Error::IntegerOverflow)?;
if pos >= out.len() {
return Err(Error::PackedSizePastEof {
id: u32::try_from(idx).expect("entry count validated at parse"),
id,
offset: u64::from(entry.data_offset_raw),
packed_size: entry.packed_size_declared,
file_len: u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?,
@@ -407,5 +402,9 @@ fn needs_xor_key(method: PackMethod) -> bool {
)
}
/// Clamps a `usize` length into `u32`, saturating at `u32::MAX` on overflow.
fn saturating_u32_len(len: usize) -> u32 {
    match u32::try_from(len) {
        Ok(value) => value,
        Err(_) => u32::MAX,
    }
}
#[cfg(test)]
mod tests;

View File

@@ -100,12 +100,12 @@ pub fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
.ok_or(Error::IntegerOverflow)?;
} else {
return Err(Error::DeflateEofPlusOneQuirkRejected {
id: u32::try_from(idx).expect("entry count validated at parse"),
id: u32::try_from(idx).map_err(|_| Error::IntegerOverflow)?,
});
}
} else {
return Err(Error::PackedSizePastEof {
id: u32::try_from(idx).expect("entry count validated at parse"),
id: u32::try_from(idx).map_err(|_| Error::IntegerOverflow)?,
offset: effective_offset_u64,
packed_size: packed_size_declared,
file_len: file_len_u64,
@@ -118,7 +118,7 @@ pub fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
.ok_or(Error::IntegerOverflow)?;
if available_end > bytes.len() {
return Err(Error::EntryDataOutOfBounds {
id: u32::try_from(idx).expect("entry count validated at parse"),
id: u32::try_from(idx).map_err(|_| Error::IntegerOverflow)?,
offset: effective_offset_u64,
size: packed_size_declared,
file_len: file_len_u64,

View File

@@ -3,5 +3,5 @@ name = "texm"
version = "0.1.0"
edition = "2021"
[dependencies]
[dev-dependencies]
nres = { path = "../nres" }

View File

@@ -1,6 +1,7 @@
use core::fmt;
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
HeaderTooSmall {
size: usize,

View File

@@ -36,6 +36,7 @@ impl PixelFormat {
match self {
Self::Indexed8 => 1,
Self::Rgb565 | Self::Rgb556 | Self::Argb4444 | Self::LuminanceAlpha88 => 2,
// Parkan stores format 888 as 32-bit RGBX in texture payloads.
Self::Rgb888 | Self::Argb8888 => 4,
}
}
@@ -173,14 +174,8 @@ pub fn parse_texm(payload: &[u8]) -> Result<Texture> {
offset: level_offset,
size: level_size,
});
w = w.max(1) >> 1;
h = h.max(1) >> 1;
if w == 0 {
w = 1;
}
if h == 0 {
h = 1;
}
w = (w >> 1).max(1);
h = (h >> 1).max(1);
}
let page_rects = parse_page_tail(payload, offset)?;
@@ -240,7 +235,8 @@ pub fn decode_mip_rgba8(texture: &Texture, payload: &[u8], mip_index: usize) ->
break;
}
let poff = usize::from(index).saturating_mul(4);
if poff + 3 >= palette.len() {
// Keep this form to accept the last palette item (index 255).
if poff + 4 > palette.len() {
continue;
}
let out = i.saturating_mul(4);

View File

@@ -35,6 +35,36 @@ fn nres_test_files() -> Vec<PathBuf> {
.collect()
}
/// Builds a synthetic TEXM payload for tests.
///
/// Layout: eight little-endian `u32` header words (magic, width, height,
/// mip count, flags4 = 0, flags5, unk6 = 0, format), then an optional
/// 1024-byte palette, then each mip level's raw bytes in order.
///
/// # Panics
///
/// Panics when `mip_levels.len()` does not fit in `u32`.
fn build_texm_payload(
    width: u32,
    height: u32,
    format_raw: u32,
    flags5: u32,
    palette: Option<[u8; 1024]>,
    mip_levels: &[&[u8]],
) -> Vec<u8> {
    let mip_count =
        u32::try_from(mip_levels.len()).expect("mip level count overflow in test");
    let mut payload = Vec::new();
    payload.extend_from_slice(&TEXM_MAGIC.to_le_bytes());
    // Remaining header words, in on-disk order; flags4 and unk6 stay zero.
    for word in [width, height, mip_count, 0, flags5, 0, format_raw] {
        payload.extend_from_slice(&word.to_le_bytes());
    }
    if let Some(palette) = palette {
        payload.extend_from_slice(&palette);
    }
    for level in mip_levels {
        payload.extend_from_slice(level);
    }
    payload
}
#[test]
fn texm_parse_all_game_textures() {
let archives = nres_test_files();
@@ -97,16 +127,7 @@ fn texm_parse_all_game_textures() {
#[test]
fn texm_parse_minimal_argb8888_no_page() {
let mut payload = Vec::new();
payload.extend_from_slice(&TEXM_MAGIC.to_le_bytes());
payload.extend_from_slice(&1u32.to_le_bytes()); // width
payload.extend_from_slice(&1u32.to_le_bytes()); // height
payload.extend_from_slice(&1u32.to_le_bytes()); // mip_count
payload.extend_from_slice(&0u32.to_le_bytes()); // flags4
payload.extend_from_slice(&0u32.to_le_bytes()); // flags5
payload.extend_from_slice(&0u32.to_le_bytes()); // unk6
payload.extend_from_slice(&8888u32.to_le_bytes()); // format
payload.extend_from_slice(&[1, 2, 3, 4]); // one pixel
let payload = build_texm_payload(1, 1, 8888, 0, None, &[&[1, 2, 3, 4]]);
let parsed = parse_texm(&payload).expect("failed to parse minimal texm");
assert_eq!(parsed.header.width, 1);
@@ -117,17 +138,7 @@ fn texm_parse_minimal_argb8888_no_page() {
#[test]
fn texm_decode_minimal_argb8888_no_page() {
let mut payload = Vec::new();
payload.extend_from_slice(&TEXM_MAGIC.to_le_bytes());
payload.extend_from_slice(&1u32.to_le_bytes()); // width
payload.extend_from_slice(&1u32.to_le_bytes()); // height
payload.extend_from_slice(&1u32.to_le_bytes()); // mip_count
payload.extend_from_slice(&0u32.to_le_bytes()); // flags4
payload.extend_from_slice(&0u32.to_le_bytes()); // flags5
payload.extend_from_slice(&0u32.to_le_bytes()); // unk6
payload.extend_from_slice(&8888u32.to_le_bytes()); // format
payload.extend_from_slice(&[0x40, 0x11, 0x22, 0x33]); // A,R,G,B in little-endian order
let payload = build_texm_payload(1, 1, 8888, 0, None, &[&[0x40, 0x11, 0x22, 0x33]]);
let parsed = parse_texm(&payload).expect("failed to parse minimal texm");
let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode mip");
assert_eq!(decoded.width, 1);
@@ -135,19 +146,55 @@ fn texm_decode_minimal_argb8888_no_page() {
assert_eq!(decoded.rgba8, vec![0x11, 0x22, 0x33, 0x40]);
}
#[test]
fn texm_decode_rgb565() {
    // 0xFFE0 packs r=31, g=63, b=0 in the 5-6-5 layout; both maxed
    // channels must expand to 255.
    let texel = 0xFFE0u16.to_le_bytes();
    let payload = build_texm_payload(1, 1, 565, 0, None, &[&texel]);
    let parsed = parse_texm(&payload).expect("failed to parse rgb565 texm");
    let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode rgb565 texm");
    assert_eq!(decoded.rgba8, vec![255, 255, 0, 255]);
}
#[test]
fn texm_decode_rgb556() {
    // 0xF800 packs r=31 with g and b zero in the 5-5-6 layout.
    let texel = 0xF800u16.to_le_bytes();
    let payload = build_texm_payload(1, 1, 556, 0, None, &[&texel]);
    let parsed = parse_texm(&payload).expect("failed to parse rgb556 texm");
    let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode rgb556 texm");
    assert_eq!(decoded.rgba8, vec![255, 0, 0, 255]);
}
#[test]
fn texm_decode_argb4444() {
    // 0xF12E packs a=0xF, r=0x1, g=0x2, b=0xE; each nibble is doubled
    // into a full byte (0x1 -> 0x11 = 17, 0xE -> 0xEE = 238, ...).
    let texel = 0xF12Eu16.to_le_bytes();
    let payload = build_texm_payload(1, 1, 4444, 0, None, &[&texel]);
    let parsed = parse_texm(&payload).expect("failed to parse argb4444 texm");
    let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode argb4444 texm");
    assert_eq!(decoded.rgba8, vec![17, 34, 238, 255]);
}
#[test]
fn texm_decode_luminance_alpha88() {
    // 0x7F40: luminance byte 0x7F is replicated to R, G and B; alpha
    // byte 0x40 passes through unchanged.
    let texel = 0x7F40u16.to_le_bytes();
    let payload = build_texm_payload(1, 1, 88, 0, None, &[&texel]);
    let parsed = parse_texm(&payload).expect("failed to parse la88 texm");
    let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode la88 texm");
    assert_eq!(decoded.rgba8, vec![0x7F, 0x7F, 0x7F, 0x40]);
}
#[test]
fn texm_decode_rgb888x() {
    // Format 888 is stored as 32-bit RGBX: the fourth byte (0x99 here)
    // is discarded and alpha is forced to opaque.
    let texel = [0x11u8, 0x22, 0x33, 0x99];
    let payload = build_texm_payload(1, 1, 888, 0, None, &[&texel]);
    let parsed = parse_texm(&payload).expect("failed to parse rgb888 texm");
    let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode rgb888 texm");
    assert_eq!(decoded.rgba8, vec![0x11, 0x22, 0x33, 255]);
}
#[test]
fn texm_parse_indexed_with_page_chunk() {
let mut payload = Vec::new();
payload.extend_from_slice(&TEXM_MAGIC.to_le_bytes());
payload.extend_from_slice(&2u32.to_le_bytes()); // width
payload.extend_from_slice(&2u32.to_le_bytes()); // height
payload.extend_from_slice(&1u32.to_le_bytes()); // mip_count
payload.extend_from_slice(&0u32.to_le_bytes()); // flags4
payload.extend_from_slice(&0u32.to_le_bytes()); // flags5
payload.extend_from_slice(&0u32.to_le_bytes()); // unk6
payload.extend_from_slice(&0u32.to_le_bytes()); // format indexed8
payload.extend_from_slice(&[0u8; 1024]); // palette
payload.extend_from_slice(&[1, 2, 3, 4]); // pixels
let mut palette = [0u8; 1024];
palette[4..8].copy_from_slice(&[10, 20, 30, 255]);
let mut payload = build_texm_payload(2, 2, 0, 0, Some(palette), &[&[1, 1, 1, 1]]);
payload.extend_from_slice(&PAGE_MAGIC.to_le_bytes());
payload.extend_from_slice(&1u32.to_le_bytes()); // rect_count
payload.extend_from_slice(&0i16.to_le_bytes()); // x
@@ -170,26 +217,113 @@ fn texm_parse_indexed_with_page_chunk() {
}
#[test]
fn texm_decode_indexed_with_palette() {
let mut payload = Vec::new();
payload.extend_from_slice(&TEXM_MAGIC.to_le_bytes());
payload.extend_from_slice(&2u32.to_le_bytes()); // width
payload.extend_from_slice(&1u32.to_le_bytes()); // height
payload.extend_from_slice(&1u32.to_le_bytes()); // mip_count
payload.extend_from_slice(&0u32.to_le_bytes()); // flags4
payload.extend_from_slice(&0u32.to_le_bytes()); // flags5
payload.extend_from_slice(&0u32.to_le_bytes()); // unk6
payload.extend_from_slice(&0u32.to_le_bytes()); // format indexed8
fn texm_decode_indexed_with_palette_last_entry() {
let mut palette = [0u8; 1024];
palette[4..8].copy_from_slice(&[10, 20, 30, 255]); // index 1
palette[8..12].copy_from_slice(&[40, 50, 60, 200]); // index 2
payload.extend_from_slice(&palette);
payload.extend_from_slice(&[1u8, 2u8]); // two pixels
palette[1020..1024].copy_from_slice(&[1, 2, 3, 4]); // index 255 (last)
let payload = build_texm_payload(3, 1, 0, 0, Some(palette), &[&[1u8, 2u8, 255u8]]);
let parsed = parse_texm(&payload).expect("failed to parse indexed texm");
let decoded = decode_mip_rgba8(&parsed, &payload, 0).expect("failed to decode indexed texm");
assert_eq!(decoded.width, 2);
assert_eq!(decoded.width, 3);
assert_eq!(decoded.height, 1);
assert_eq!(decoded.rgba8, vec![10, 20, 30, 255, 40, 50, 60, 200]);
assert_eq!(
decoded.rgba8,
vec![10, 20, 30, 255, 40, 50, 60, 200, 1, 2, 3, 4]
);
}
#[test]
fn texm_parse_multi_mip_offsets() {
    // Three ARGB8888 (4 bytes/texel) levels: 4x2, 2x1, 1x1.
    let mip0 = [0x10u8; 32];
    let mip1 = [0x20u8; 8];
    let mip2 = [0x30u8; 4];
    let payload = build_texm_payload(4, 2, 8888, 0, None, &[&mip0, &mip1, &mip2]);
    let parsed = parse_texm(&payload).expect("failed to parse multi-mip texm");
    assert_eq!(parsed.header.mip_count, 3);
    assert_eq!(parsed.mip_levels.len(), 3);
    // Level data starts right after the 32-byte header; each level is
    // packed directly after the previous one, with dimensions halving
    // and clamping at 1.
    let expected = vec![
        MipLevel {
            width: 4,
            height: 2,
            offset: 32,
            size: 32,
        },
        MipLevel {
            width: 2,
            height: 1,
            offset: 64,
            size: 8,
        },
        MipLevel {
            width: 1,
            height: 1,
            offset: 72,
            size: 4,
        },
    ];
    assert_eq!(parsed.mip_levels, expected);
}
#[test]
fn texm_preserves_flags5_for_mip_skip_metadata() {
    // flags5 must round-trip through parsing unchanged.
    let flags5 = 0x0000_00A5;
    let payload = build_texm_payload(1, 1, 8888, flags5, None, &[&[0, 0, 0, 0]]);
    let parsed = parse_texm(&payload).expect("failed to parse texm");
    assert_eq!(parsed.header.flags5, flags5);
}
#[test]
fn texm_errors_for_invalid_header_values() {
    // Minimal otherwise-valid payload used as the corruption baseline.
    let valid = || build_texm_payload(1, 1, 8888, 0, None, &[&[0, 0, 0, 0]]);

    // Magic word zeroed out.
    let mut bad_magic = valid();
    bad_magic[0..4].copy_from_slice(&0u32.to_le_bytes());
    assert!(matches!(
        parse_texm(&bad_magic),
        Err(Error::InvalidMagic { .. })
    ));

    // Zero width must be rejected.
    let zero_dims = build_texm_payload(0, 1, 8888, 0, None, &[&[]]);
    assert!(matches!(
        parse_texm(&zero_dims),
        Err(Error::InvalidDimensions { .. })
    ));

    // Mip count word (bytes 12..16) patched to zero.
    let mut bad_mips = valid();
    bad_mips[12..16].copy_from_slice(&0u32.to_le_bytes());
    assert!(matches!(
        parse_texm(&bad_mips),
        Err(Error::InvalidMipCount { .. })
    ));

    // Format code nobody recognizes.
    let bad_format = build_texm_payload(1, 1, 12345, 0, None, &[&[0, 0, 0, 0]]);
    assert!(matches!(
        parse_texm(&bad_format),
        Err(Error::UnknownFormat { .. })
    ));
}
#[test]
fn texm_errors_for_page_chunk_and_mip_bounds() {
    // A single stray trailing byte cannot form a valid PAGE chunk.
    let mut bad_page = build_texm_payload(1, 1, 8888, 0, None, &[&[0, 0, 0, 0]]);
    bad_page.extend_from_slice(b"X");
    assert!(matches!(
        parse_texm(&bad_page),
        Err(Error::InvalidPageSize { .. })
    ));

    let payload = build_texm_payload(1, 1, 8888, 0, None, &[&[1, 2, 3, 4]]);
    let parsed = parse_texm(&payload).expect("failed to parse valid texm");

    // Mip index far past the parsed count must be rejected.
    assert!(matches!(
        decode_mip_rgba8(&parsed, &payload, 7),
        Err(Error::MipIndexOutOfRange { .. })
    ));

    // Truncating the payload below the mip's end must fail at decode time.
    let truncated = &payload[..payload.len() - 1];
    assert!(matches!(
        decode_mip_rgba8(&parsed, truncated, 0),
        Err(Error::MipDataOutOfBounds { .. })
    ));
}