Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
This commit is contained in:
379
vendor/exr/src/block/chunk.rs
vendored
Normal file
379
vendor/exr/src/block/chunk.rs
vendored
Normal file
@@ -0,0 +1,379 @@
|
||||
|
||||
//! Read and write already compressed pixel data blocks.
|
||||
//! Does not include the process of compression and decompression.
|
||||
|
||||
use crate::meta::attribute::{IntegerBounds};
|
||||
|
||||
/// A generic block of pixel information.
|
||||
/// Contains pixel data and an index to the corresponding header.
|
||||
/// All pixel data in a file is split into a list of chunks.
|
||||
/// Also contains positioning information that locates this
|
||||
/// data block in the referenced layer.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Chunk {
|
||||
|
||||
/// The index of the layer that the block belongs to.
|
||||
/// This is required as the pixel data can appear in any order in a file.
|
||||
// PDF says u64, but source code seems to be i32
|
||||
pub layer_index: usize,
|
||||
|
||||
/// The compressed pixel contents.
|
||||
pub compressed_block: CompressedBlock,
|
||||
}
|
||||
|
||||
/// The raw, possibly compressed pixel data of a file.
|
||||
/// Each layer in a file can have a different type.
|
||||
/// Also contains positioning information that locates this
|
||||
/// data block in the corresponding layer.
|
||||
/// Exists inside a `Chunk`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum CompressedBlock {
|
||||
|
||||
/// Scan line blocks of flat data.
|
||||
ScanLine(CompressedScanLineBlock),
|
||||
|
||||
/// Tiles of flat data.
|
||||
Tile(CompressedTileBlock),
|
||||
|
||||
/// Scan line blocks of deep data.
|
||||
DeepScanLine(CompressedDeepScanLineBlock),
|
||||
|
||||
/// Tiles of deep data.
|
||||
DeepTile(CompressedDeepTileBlock),
|
||||
}
|
||||
|
||||
/// A `Block` of possibly compressed flat scan lines.
/// Corresponds to the type attribute `scanlineimage`.
#[derive(Clone, Debug)]
pub struct CompressedScanLineBlock {

    /// The pixel space y coordinate of the first (top) scan line in this block.
    /// The topmost scan line block of the image is aligned with the top edge of the data window.
    pub y_coordinate: i32,

    /// The possibly compressed bytes of one or more scan lines.
    /// How many scan lines are bundled into one block depends on the compression method.
    /// For each line in the block, for each channel, the row values are contiguous.
    pub compressed_pixels: Vec<u8>,
}
|
||||
|
||||
/// This `Block` is a tile of flat (non-deep) data.
|
||||
/// Corresponds to type attribute `tiledimage`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CompressedTileBlock {
|
||||
|
||||
/// The tile location.
|
||||
pub coordinates: TileCoordinates,
|
||||
|
||||
/// One or more scan lines may be stored together as a scan line block.
|
||||
/// The number of scan lines per block depends on how the pixel data are compressed.
|
||||
/// For each line in the tile, for each channel, the row values are contiguous.
|
||||
pub compressed_pixels: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Indicates the position and resolution level of a `TileBlock` or `DeepTileBlock`.
|
||||
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
|
||||
pub struct TileCoordinates {
|
||||
|
||||
/// Index of the tile, not pixel position.
|
||||
pub tile_index: Vec2<usize>,
|
||||
|
||||
/// Index of the Mip/Rip level.
|
||||
pub level_index: Vec2<usize>,
|
||||
}
|
||||
|
||||
/// A `Block` consisting of one or more deep scan lines.
/// Corresponds to the type attribute `deepscanline`.
#[derive(Clone, Debug)]
pub struct CompressedDeepScanLineBlock {

    /// The pixel space y coordinate of the first (top) scan line in this block.
    /// The topmost scan line block of the image is aligned with the top edge of the data window.
    pub y_coordinate: i32,

    /// Count of samples.
    pub decompressed_sample_data_size: usize,

    /// The pixel offset table: one integer per pixel column within the data window.
    /// Each entry stores the total number of samples required for that pixel
    /// together with all pixels to the left of it.
    pub compressed_pixel_offset_table: Vec<i8>,

    /// The possibly compressed bytes of one or more scan lines.
    /// How many scan lines are bundled into one block depends on the compression method.
    /// For each line in the block, for each channel, the row values are contiguous.
    pub compressed_sample_data: Vec<u8>,
}
|
||||
|
||||
/// This `Block` is a tile of deep data.
|
||||
/// Corresponds to type attribute `deeptile`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CompressedDeepTileBlock {
|
||||
|
||||
/// The tile location.
|
||||
pub coordinates: TileCoordinates,
|
||||
|
||||
/// Count of samples.
|
||||
pub decompressed_sample_data_size: usize,
|
||||
|
||||
/// The pixel offset table is a list of integers, one for each pixel column within the data window.
|
||||
/// Each entry in the table indicates the total number of samples required
|
||||
/// to store the pixel in it as well as all pixels to the left of it.
|
||||
pub compressed_pixel_offset_table: Vec<i8>,
|
||||
|
||||
/// One or more scan lines may be stored together as a scan line block.
|
||||
/// The number of scan lines per block depends on how the pixel data are compressed.
|
||||
/// For each line in the tile, for each channel, the row values are contiguous.
|
||||
pub compressed_sample_data: Vec<u8>,
|
||||
}
|
||||
|
||||
|
||||
use crate::io::*;
|
||||
|
||||
impl TileCoordinates {
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
|
||||
i32::write(usize_to_i32(self.tile_index.x()), write)?;
|
||||
i32::write(usize_to_i32(self.tile_index.y()), write)?;
|
||||
i32::write(usize_to_i32(self.level_index.x()), write)?;
|
||||
i32::write(usize_to_i32(self.level_index.y()), write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read(read: &mut impl Read) -> Result<Self> {
|
||||
let tile_x = i32::read(read)?;
|
||||
let tile_y = i32::read(read)?;
|
||||
|
||||
let level_x = i32::read(read)?;
|
||||
let level_y = i32::read(read)?;
|
||||
|
||||
if level_x > 31 || level_y > 31 {
|
||||
// there can be at most 31 levels, because the largest level would have a size of 2^31,
|
||||
// which exceeds the maximum 32-bit integer value.
|
||||
return Err(Error::invalid("level index exceeding integer maximum"));
|
||||
}
|
||||
|
||||
Ok(TileCoordinates {
|
||||
tile_index: Vec2(tile_x, tile_y).to_usize("tile coordinate index")?,
|
||||
level_index: Vec2(level_x, level_y).to_usize("tile coordinate level")?
|
||||
})
|
||||
}
|
||||
|
||||
/// The indices which can be used to index into the arrays of a data window.
|
||||
/// These coordinates are only valid inside the corresponding one header.
|
||||
/// Will start at 0 and always be positive.
|
||||
pub fn to_data_indices(&self, tile_size: Vec2<usize>, max: Vec2<usize>) -> Result<IntegerBounds> {
|
||||
let x = self.tile_index.x() * tile_size.width();
|
||||
let y = self.tile_index.y() * tile_size.height();
|
||||
|
||||
if x >= max.x() || y >= max.y() {
|
||||
Err(Error::invalid("tile index"))
|
||||
}
|
||||
else {
|
||||
Ok(IntegerBounds {
|
||||
position: Vec2(usize_to_i32(x), usize_to_i32(y)),
|
||||
size: Vec2(
|
||||
calculate_block_size(max.x(), tile_size.width(), x)?,
|
||||
calculate_block_size(max.y(), tile_size.height(), y)?,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Absolute coordinates inside the global 2D space of a file, may be negative.
|
||||
pub fn to_absolute_indices(&self, tile_size: Vec2<usize>, data_window: IntegerBounds) -> Result<IntegerBounds> {
|
||||
let data = self.to_data_indices(tile_size, data_window.size)?;
|
||||
Ok(data.with_origin(data_window.position))
|
||||
}
|
||||
|
||||
/// Returns if this is the original resolution or a smaller copy.
|
||||
pub fn is_largest_resolution_level(&self) -> bool {
|
||||
self.level_index == Vec2(0, 0)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
use crate::meta::{MetaData, BlockDescription, calculate_block_size};
|
||||
|
||||
impl CompressedScanLineBlock {
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
|
||||
debug_assert_ne!(self.compressed_pixels.len(), 0, "empty blocks should not be put in the file bug");
|
||||
|
||||
i32::write(self.y_coordinate, write)?;
|
||||
u8::write_i32_sized_slice(write, &self.compressed_pixels)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read(read: &mut impl Read, max_block_byte_size: usize) -> Result<Self> {
|
||||
let y_coordinate = i32::read(read)?;
|
||||
let compressed_pixels = u8::read_i32_sized_vec(read, max_block_byte_size, Some(max_block_byte_size), "scan line block sample count")?;
|
||||
Ok(CompressedScanLineBlock { y_coordinate, compressed_pixels })
|
||||
}
|
||||
}
|
||||
|
||||
impl CompressedTileBlock {
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
|
||||
debug_assert_ne!(self.compressed_pixels.len(), 0, "empty blocks should not be put in the file bug");
|
||||
|
||||
self.coordinates.write(write)?;
|
||||
u8::write_i32_sized_slice(write, &self.compressed_pixels)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read(read: &mut impl Read, max_block_byte_size: usize) -> Result<Self> {
|
||||
let coordinates = TileCoordinates::read(read)?;
|
||||
let compressed_pixels = u8::read_i32_sized_vec(read, max_block_byte_size, Some(max_block_byte_size), "tile block sample count")?;
|
||||
Ok(CompressedTileBlock { coordinates, compressed_pixels })
|
||||
}
|
||||
}
|
||||
|
||||
impl CompressedDeepScanLineBlock {
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
|
||||
debug_assert_ne!(self.compressed_sample_data.len(), 0, "empty blocks should not be put in the file bug");
|
||||
|
||||
i32::write(self.y_coordinate, write)?;
|
||||
u64::write(self.compressed_pixel_offset_table.len() as u64, write)?;
|
||||
u64::write(self.compressed_sample_data.len() as u64, write)?; // TODO just guessed
|
||||
u64::write(self.decompressed_sample_data_size as u64, write)?;
|
||||
i8::write_slice(write, &self.compressed_pixel_offset_table)?;
|
||||
u8::write_slice(write, &self.compressed_sample_data)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read(read: &mut impl Read, max_block_byte_size: usize) -> Result<Self> {
|
||||
let y_coordinate = i32::read(read)?;
|
||||
let compressed_pixel_offset_table_size = u64_to_usize(u64::read(read)?);
|
||||
let compressed_sample_data_size = u64_to_usize(u64::read(read)?);
|
||||
let decompressed_sample_data_size = u64_to_usize(u64::read(read)?);
|
||||
|
||||
// doc said i32, try u8
|
||||
let compressed_pixel_offset_table = i8::read_vec(
|
||||
read, compressed_pixel_offset_table_size,
|
||||
6 * u16::MAX as usize, Some(max_block_byte_size),
|
||||
"deep scan line block table size"
|
||||
)?;
|
||||
|
||||
let compressed_sample_data = u8::read_vec(
|
||||
read, compressed_sample_data_size,
|
||||
6 * u16::MAX as usize, Some(max_block_byte_size),
|
||||
"deep scan line block sample count"
|
||||
)?;
|
||||
|
||||
Ok(CompressedDeepScanLineBlock {
|
||||
y_coordinate,
|
||||
decompressed_sample_data_size,
|
||||
compressed_pixel_offset_table,
|
||||
compressed_sample_data,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl CompressedDeepTileBlock {
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
|
||||
debug_assert_ne!(self.compressed_sample_data.len(), 0, "empty blocks should not be put in the file bug");
|
||||
|
||||
self.coordinates.write(write)?;
|
||||
u64::write(self.compressed_pixel_offset_table.len() as u64, write)?;
|
||||
u64::write(self.compressed_sample_data.len() as u64, write)?; // TODO just guessed
|
||||
u64::write(self.decompressed_sample_data_size as u64, write)?;
|
||||
i8::write_slice(write, &self.compressed_pixel_offset_table)?;
|
||||
u8::write_slice(write, &self.compressed_sample_data)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read(read: &mut impl Read, hard_max_block_byte_size: usize) -> Result<Self> {
|
||||
let coordinates = TileCoordinates::read(read)?;
|
||||
let compressed_pixel_offset_table_size = u64_to_usize(u64::read(read)?);
|
||||
let compressed_sample_data_size = u64_to_usize(u64::read(read)?); // TODO u64 just guessed
|
||||
let decompressed_sample_data_size = u64_to_usize(u64::read(read)?);
|
||||
|
||||
let compressed_pixel_offset_table = i8::read_vec(
|
||||
read, compressed_pixel_offset_table_size,
|
||||
6 * u16::MAX as usize, Some(hard_max_block_byte_size),
|
||||
"deep tile block table size"
|
||||
)?;
|
||||
|
||||
let compressed_sample_data = u8::read_vec(
|
||||
read, compressed_sample_data_size,
|
||||
6 * u16::MAX as usize, Some(hard_max_block_byte_size),
|
||||
"deep tile block sample count"
|
||||
)?;
|
||||
|
||||
Ok(CompressedDeepTileBlock {
|
||||
coordinates,
|
||||
decompressed_sample_data_size,
|
||||
compressed_pixel_offset_table,
|
||||
compressed_sample_data,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
use crate::error::{UnitResult, Result, Error, u64_to_usize, usize_to_i32, i32_to_usize};
|
||||
use crate::math::Vec2;
|
||||
|
||||
/// Validation of chunks is done while reading and writing the actual data. (For example in exr::full_image)
|
||||
impl Chunk {
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write(&self, write: &mut impl Write, header_count: usize) -> UnitResult {
|
||||
debug_assert!(self.layer_index < header_count, "layer index bug"); // validation is done in full_image or simple_image
|
||||
|
||||
if header_count != 1 { usize_to_i32(self.layer_index).write(write)?; }
|
||||
else { assert_eq!(self.layer_index, 0, "invalid header index for single layer file"); }
|
||||
|
||||
match self.compressed_block {
|
||||
CompressedBlock::ScanLine (ref value) => value.write(write),
|
||||
CompressedBlock::Tile (ref value) => value.write(write),
|
||||
CompressedBlock::DeepScanLine (ref value) => value.write(write),
|
||||
CompressedBlock::DeepTile (ref value) => value.write(write),
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read(read: &mut impl Read, meta_data: &MetaData) -> Result<Self> {
|
||||
let layer_number = i32_to_usize(
|
||||
if meta_data.requirements.is_multilayer() { i32::read(read)? } // documentation says u64, but is i32
|
||||
else { 0_i32 }, // reference the first header for single-layer images
|
||||
"chunk data part number"
|
||||
)?;
|
||||
|
||||
if layer_number >= meta_data.headers.len() {
|
||||
return Err(Error::invalid("chunk data part number"));
|
||||
}
|
||||
|
||||
let header = &meta_data.headers[layer_number];
|
||||
let max_block_byte_size = header.max_block_byte_size();
|
||||
|
||||
let chunk = Chunk {
|
||||
layer_index: layer_number,
|
||||
compressed_block: match header.blocks {
|
||||
// flat data
|
||||
BlockDescription::ScanLines if !header.deep => CompressedBlock::ScanLine(CompressedScanLineBlock::read(read, max_block_byte_size)?),
|
||||
BlockDescription::Tiles(_) if !header.deep => CompressedBlock::Tile(CompressedTileBlock::read(read, max_block_byte_size)?),
|
||||
|
||||
// deep data
|
||||
BlockDescription::ScanLines => CompressedBlock::DeepScanLine(CompressedDeepScanLineBlock::read(read, max_block_byte_size)?),
|
||||
BlockDescription::Tiles(_) => CompressedBlock::DeepTile(CompressedDeepTileBlock::read(read, max_block_byte_size)?),
|
||||
},
|
||||
};
|
||||
|
||||
Ok(chunk)
|
||||
}
|
||||
}
|
||||
|
||||
197
vendor/exr/src/block/lines.rs
vendored
Normal file
197
vendor/exr/src/block/lines.rs
vendored
Normal file
@@ -0,0 +1,197 @@
|
||||
//! Extract lines from a block of pixel bytes.
|
||||
|
||||
use crate::math::*;
|
||||
use std::io::{Cursor};
|
||||
use crate::error::{Result, UnitResult};
|
||||
use smallvec::SmallVec;
|
||||
use std::ops::Range;
|
||||
use crate::block::{BlockIndex};
|
||||
use crate::meta::attribute::ChannelList;
|
||||
|
||||
|
||||
/// A single line of pixels.
|
||||
/// Use [LineRef] or [LineRefMut] for easier type names.
|
||||
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
|
||||
pub struct LineSlice<T> {
|
||||
|
||||
// TODO also store enum SampleType, as it would always be matched in every place it is used
|
||||
|
||||
/// Where this line is located inside the image.
|
||||
pub location: LineIndex,
|
||||
|
||||
/// The raw bytes of the pixel line, either `&[u8]` or `&mut [u8]`.
|
||||
/// Must be re-interpreted as slice of f16, f32, or u32,
|
||||
/// according to the channel data type.
|
||||
pub value: T,
|
||||
}
|
||||
|
||||
|
||||
/// An reference to a single line of pixels.
|
||||
/// May go across the whole image or just a tile section of it.
|
||||
///
|
||||
/// This line contains an immutable slice that all samples will be read from.
|
||||
pub type LineRef<'s> = LineSlice<&'s [u8]>;
|
||||
|
||||
/// A reference to a single mutable line of pixels.
|
||||
/// May go across the whole image or just a tile section of it.
|
||||
///
|
||||
/// This line contains a mutable slice that all samples will be written to.
|
||||
pub type LineRefMut<'s> = LineSlice<&'s mut [u8]>;
|
||||
|
||||
|
||||
/// Specifies where a row of pixels lies inside an image.
|
||||
/// This is a globally unique identifier which includes
|
||||
/// the layer, channel index, and pixel location.
|
||||
#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)]
|
||||
pub struct LineIndex {
|
||||
|
||||
/// Index of the layer.
|
||||
pub layer: usize,
|
||||
|
||||
/// The channel index of the layer.
|
||||
pub channel: usize,
|
||||
|
||||
/// Index of the mip or rip level in the image.
|
||||
pub level: Vec2<usize>,
|
||||
|
||||
/// Position of the most left pixel of the row.
|
||||
pub position: Vec2<usize>,
|
||||
|
||||
/// The width of the line; the number of samples in this row,
|
||||
/// that is, the number of f16, f32, or u32 values.
|
||||
pub sample_count: usize,
|
||||
}
|
||||
|
||||
|
||||
impl LineIndex {
|
||||
|
||||
/// Iterates the lines of this block index in interleaved fashion:
|
||||
/// For each line in this block, this iterator steps once through each channel.
|
||||
/// This is how lines are stored in a pixel data block.
|
||||
///
|
||||
/// Does not check whether `self.layer_index`, `self.level`, `self.size` and `self.position` are valid indices.__
|
||||
// TODO be sure this cannot produce incorrect data, as this is not further checked but only handled with panics
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn lines_in_block(block: BlockIndex, channels: &ChannelList) -> impl Iterator<Item=(Range<usize>, LineIndex)> {
|
||||
struct LineIter {
|
||||
layer: usize, level: Vec2<usize>, width: usize,
|
||||
end_y: usize, x: usize, channel_sizes: SmallVec<[usize; 8]>,
|
||||
byte: usize, channel: usize, y: usize,
|
||||
}
|
||||
|
||||
// FIXME what about sub sampling??
|
||||
|
||||
impl Iterator for LineIter {
|
||||
type Item = (Range<usize>, LineIndex);
|
||||
// TODO size hint?
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.y < self.end_y {
|
||||
|
||||
// compute return value before incrementing
|
||||
let byte_len = self.channel_sizes[self.channel];
|
||||
let return_value = (
|
||||
(self.byte .. self.byte + byte_len),
|
||||
LineIndex {
|
||||
channel: self.channel,
|
||||
layer: self.layer,
|
||||
level: self.level,
|
||||
position: Vec2(self.x, self.y),
|
||||
sample_count: self.width,
|
||||
}
|
||||
);
|
||||
|
||||
{ // increment indices
|
||||
self.byte += byte_len;
|
||||
self.channel += 1;
|
||||
|
||||
if self.channel == self.channel_sizes.len() {
|
||||
self.channel = 0;
|
||||
self.y += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Some(return_value)
|
||||
}
|
||||
|
||||
else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let channel_line_sizes: SmallVec<[usize; 8]> = channels.list.iter()
|
||||
.map(move |channel| block.pixel_size.0 * channel.sample_type.bytes_per_sample()) // FIXME is it fewer samples per tile or just fewer tiles for sampled images???
|
||||
.collect();
|
||||
|
||||
LineIter {
|
||||
layer: block.layer,
|
||||
level: block.level,
|
||||
width: block.pixel_size.0,
|
||||
x: block.pixel_position.0,
|
||||
end_y: block.pixel_position.y() + block.pixel_size.height(),
|
||||
channel_sizes: channel_line_sizes,
|
||||
|
||||
byte: 0,
|
||||
channel: 0,
|
||||
y: block.pixel_position.y()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
impl<'s> LineRefMut<'s> {
|
||||
|
||||
/// Writes the samples (f16, f32, u32 values) into this line value reference.
|
||||
/// Use `write_samples` if there is not slice available.
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn write_samples_from_slice<T: crate::io::Data>(self, slice: &[T]) -> UnitResult {
|
||||
debug_assert_eq!(slice.len(), self.location.sample_count, "slice size does not match the line width");
|
||||
debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");
|
||||
|
||||
T::write_slice(&mut Cursor::new(self.value), slice)
|
||||
}
|
||||
|
||||
/// Iterate over all samples in this line, from left to right.
|
||||
/// The supplied `get_line` function returns the sample value
|
||||
/// for a given sample index within the line,
|
||||
/// which starts at zero for each individual line.
|
||||
/// Use `write_samples_from_slice` if you already have a slice of samples.
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn write_samples<T: crate::io::Data>(self, mut get_sample: impl FnMut(usize) -> T) -> UnitResult {
|
||||
debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");
|
||||
|
||||
let mut write = Cursor::new(self.value);
|
||||
|
||||
for index in 0..self.location.sample_count {
|
||||
T::write(get_sample(index), &mut write)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl LineRef<'_> {
|
||||
|
||||
/// Read the samples (f16, f32, u32 values) from this line value reference.
|
||||
/// Use `read_samples` if there is not slice available.
|
||||
pub fn read_samples_into_slice<T: crate::io::Data>(self, slice: &mut [T]) -> UnitResult {
|
||||
debug_assert_eq!(slice.len(), self.location.sample_count, "slice size does not match the line width");
|
||||
debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");
|
||||
|
||||
T::read_slice(&mut Cursor::new(self.value), slice)
|
||||
}
|
||||
|
||||
/// Iterate over all samples in this line, from left to right.
|
||||
/// Use `read_sample_into_slice` if you already have a slice of samples.
|
||||
pub fn read_samples<T: crate::io::Data>(&self) -> impl Iterator<Item = Result<T>> + '_ {
|
||||
debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");
|
||||
|
||||
let mut read = self.value.clone(); // FIXME deep data
|
||||
(0..self.location.sample_count).map(move |_| T::read(&mut read))
|
||||
}
|
||||
}
|
||||
257
vendor/exr/src/block/mod.rs
vendored
Normal file
257
vendor/exr/src/block/mod.rs
vendored
Normal file
@@ -0,0 +1,257 @@
|
||||
//! This is the low-level interface for the raw blocks of an image.
|
||||
//! See `exr::image` module for a high-level interface.
|
||||
//!
|
||||
//! Handle compressed and uncompressed pixel byte blocks. Includes compression and decompression,
|
||||
//! and reading a complete image into blocks.
|
||||
//!
|
||||
//! Start with the `block::read(...)`
|
||||
//! and `block::write(...)` functions.
|
||||
|
||||
|
||||
pub mod writer;
|
||||
pub mod reader;
|
||||
|
||||
pub mod lines;
|
||||
pub mod samples;
|
||||
pub mod chunk;
|
||||
|
||||
|
||||
use std::io::{Read, Seek, Write};
|
||||
use crate::error::{Result, UnitResult, Error, usize_to_i32};
|
||||
use crate::meta::{Headers, MetaData, BlockDescription};
|
||||
use crate::math::Vec2;
|
||||
use crate::compression::ByteVec;
|
||||
use crate::block::chunk::{CompressedBlock, CompressedTileBlock, CompressedScanLineBlock, Chunk, TileCoordinates};
|
||||
use crate::meta::header::Header;
|
||||
use crate::block::lines::{LineIndex, LineRef, LineSlice, LineRefMut};
|
||||
use crate::meta::attribute::ChannelList;
|
||||
|
||||
|
||||
/// Specifies where a block of pixel data should be placed in the actual image.
|
||||
/// This is a globally unique identifier which
|
||||
/// includes the layer, level index, and pixel location.
|
||||
#[derive(Clone, Copy, Eq, Hash, PartialEq, Debug)]
|
||||
pub struct BlockIndex {
|
||||
|
||||
/// Index of the layer.
|
||||
pub layer: usize,
|
||||
|
||||
/// Index of the top left pixel from the block within the data window.
|
||||
pub pixel_position: Vec2<usize>,
|
||||
|
||||
/// Number of pixels in this block, extending to the right and downwards.
|
||||
/// Stays the same across all resolution levels.
|
||||
pub pixel_size: Vec2<usize>,
|
||||
|
||||
/// Index of the mip or rip level in the image.
|
||||
pub level: Vec2<usize>,
|
||||
}
|
||||
|
||||
/// Contains a block of pixel data and where that data should be placed in the actual image.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub struct UncompressedBlock {
|
||||
|
||||
/// Location of the data inside the image.
|
||||
pub index: BlockIndex,
|
||||
|
||||
/// Uncompressed pixel values of the whole block.
|
||||
/// One or more scan lines may be stored together as a scan line block.
|
||||
/// This byte vector contains all pixel rows, one after another.
|
||||
/// For each line in the tile, for each channel, the row values are contiguous.
|
||||
/// Stores all samples of the first channel, then all samples of the second channel, and so on.
|
||||
pub data: ByteVec,
|
||||
}
|
||||
|
||||
/// Immediately reads the meta data from the file.
|
||||
/// Then, returns a reader that can be used to read all pixel blocks.
|
||||
/// From the reader, you can pull each compressed chunk from the file.
|
||||
/// Alternatively, you can create a decompressor, and pull the uncompressed data from it.
|
||||
/// The reader is assumed to be buffered.
|
||||
pub fn read<R: Read + Seek>(buffered_read: R, pedantic: bool) -> Result<self::reader::Reader<R>> {
|
||||
self::reader::Reader::read_from_buffered(buffered_read, pedantic)
|
||||
}
|
||||
|
||||
/// Immediately writes the meta data to the file.
|
||||
/// Then, calls a closure with a writer that can be used to write all pixel blocks.
|
||||
/// In the closure, you can push compressed chunks directly into the writer.
|
||||
/// Alternatively, you can create a compressor, wrapping the writer, and push the uncompressed data to it.
|
||||
/// The writer is assumed to be buffered.
|
||||
pub fn write<W: Write + Seek>(
|
||||
buffered_write: W, headers: Headers, compatibility_checks: bool,
|
||||
write_chunks: impl FnOnce(MetaData, &mut self::writer::ChunkWriter<W>) -> UnitResult
|
||||
) -> UnitResult {
|
||||
self::writer::write_chunks_with(buffered_write, headers, compatibility_checks, write_chunks)
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/// This iterator tells you the block indices of all blocks that must be in the image.
|
||||
/// The order of the blocks depends on the `LineOrder` attribute
|
||||
/// (unspecified line order is treated the same as increasing line order).
|
||||
/// The blocks written to the file must be exactly in this order,
|
||||
/// except for when the `LineOrder` is unspecified.
|
||||
/// The index represents the block index, in increasing line order, within the header.
|
||||
pub fn enumerate_ordered_header_block_indices(headers: &[Header]) -> impl '_ + Iterator<Item=(usize, BlockIndex)> {
|
||||
headers.iter().enumerate().flat_map(|(layer_index, header)|{
|
||||
header.enumerate_ordered_blocks().map(move |(index_in_header, tile)|{
|
||||
let data_indices = header.get_absolute_block_pixel_coordinates(tile.location).expect("tile coordinate bug");
|
||||
|
||||
let block = BlockIndex {
|
||||
layer: layer_index,
|
||||
level: tile.location.level_index,
|
||||
pixel_position: data_indices.position.to_usize("data indices start").expect("data index bug"),
|
||||
pixel_size: data_indices.size,
|
||||
};
|
||||
|
||||
(index_in_header, block)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
impl UncompressedBlock {
|
||||
|
||||
/// Decompress the possibly compressed chunk and returns an `UncompressedBlock`.
|
||||
// for uncompressed data, the ByteVec in the chunk is moved all the way
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn decompress_chunk(chunk: Chunk, meta_data: &MetaData, pedantic: bool) -> Result<Self> {
|
||||
let header: &Header = meta_data.headers.get(chunk.layer_index)
|
||||
.ok_or(Error::invalid("chunk layer index"))?;
|
||||
|
||||
let tile_data_indices = header.get_block_data_indices(&chunk.compressed_block)?;
|
||||
let absolute_indices = header.get_absolute_block_pixel_coordinates(tile_data_indices)?;
|
||||
|
||||
absolute_indices.validate(Some(header.layer_size))?;
|
||||
|
||||
match chunk.compressed_block {
|
||||
CompressedBlock::Tile(CompressedTileBlock { compressed_pixels, .. }) |
|
||||
CompressedBlock::ScanLine(CompressedScanLineBlock { compressed_pixels, .. }) => {
|
||||
Ok(UncompressedBlock {
|
||||
data: header.compression.decompress_image_section(header, compressed_pixels, absolute_indices, pedantic)?,
|
||||
index: BlockIndex {
|
||||
layer: chunk.layer_index,
|
||||
pixel_position: absolute_indices.position.to_usize("data indices start")?,
|
||||
level: tile_data_indices.level_index,
|
||||
pixel_size: absolute_indices.size,
|
||||
}
|
||||
})
|
||||
},
|
||||
|
||||
_ => return Err(Error::unsupported("deep data not supported yet"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Consume this block by compressing it, returning a `Chunk`.
|
||||
// for uncompressed data, the ByteVec in the chunk is moved all the way
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn compress_to_chunk(self, headers: &[Header]) -> Result<Chunk> {
|
||||
let UncompressedBlock { data, index } = self;
|
||||
|
||||
let header: &Header = headers.get(index.layer)
|
||||
.expect("block layer index bug");
|
||||
|
||||
let expected_byte_size = header.channels.bytes_per_pixel * self.index.pixel_size.area(); // TODO sampling??
|
||||
if expected_byte_size != data.len() {
|
||||
panic!("get_line byte size should be {} but was {}", expected_byte_size, data.len());
|
||||
}
|
||||
|
||||
let tile_coordinates = TileCoordinates {
|
||||
// FIXME this calculation should not be made here but elsewhere instead (in meta::header?)
|
||||
tile_index: index.pixel_position / header.max_block_pixel_size(), // TODO sampling??
|
||||
level_index: index.level,
|
||||
};
|
||||
|
||||
let absolute_indices = header.get_absolute_block_pixel_coordinates(tile_coordinates)?;
|
||||
absolute_indices.validate(Some(header.layer_size))?;
|
||||
|
||||
if !header.compression.may_loose_data() { debug_assert_eq!(
|
||||
&header.compression.decompress_image_section(
|
||||
header,
|
||||
header.compression.compress_image_section(header, data.clone(), absolute_indices)?,
|
||||
absolute_indices,
|
||||
true
|
||||
).unwrap(),
|
||||
&data,
|
||||
"compression method not round trippin'"
|
||||
); }
|
||||
|
||||
let compressed_data = header.compression.compress_image_section(header, data, absolute_indices)?;
|
||||
|
||||
Ok(Chunk {
|
||||
layer_index: index.layer,
|
||||
compressed_block : match header.blocks {
|
||||
BlockDescription::ScanLines => CompressedBlock::ScanLine(CompressedScanLineBlock {
|
||||
compressed_pixels: compressed_data,
|
||||
|
||||
// FIXME this calculation should not be made here but elsewhere instead (in meta::header?)
|
||||
y_coordinate: usize_to_i32(index.pixel_position.y()) + header.own_attributes.layer_position.y(), // TODO sampling??
|
||||
}),
|
||||
|
||||
BlockDescription::Tiles(_) => CompressedBlock::Tile(CompressedTileBlock {
|
||||
compressed_pixels: compressed_data,
|
||||
coordinates: tile_coordinates,
|
||||
}),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterate all the lines in this block.
|
||||
/// Each line contains the all samples for one of the channels.
|
||||
pub fn lines(&self, channels: &ChannelList) -> impl Iterator<Item=LineRef<'_>> {
|
||||
LineIndex::lines_in_block(self.index, channels)
|
||||
.map(move |(bytes, line)| LineSlice { location: line, value: &self.data[bytes] })
|
||||
}
|
||||
|
||||
/* TODO pub fn lines_mut<'s>(&'s mut self, header: &Header) -> impl 's + Iterator<Item=LineRefMut<'s>> {
|
||||
LineIndex::lines_in_block(self.index, &header.channels)
|
||||
.map(move |(bytes, line)| LineSlice { location: line, value: &mut self.data[bytes] })
|
||||
}*/
|
||||
|
||||
/*// TODO make iterator
|
||||
/// Call a closure for each line of samples in this uncompressed block.
|
||||
pub fn for_lines(
|
||||
&self, header: &Header,
|
||||
mut accept_line: impl FnMut(LineRef<'_>) -> UnitResult
|
||||
) -> UnitResult {
|
||||
for (bytes, line) in LineIndex::lines_in_block(self.index, &header.channels) {
|
||||
let line_ref = LineSlice { location: line, value: &self.data[bytes] };
|
||||
accept_line(line_ref)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}*/
|
||||
|
||||
// TODO from iterator??
|
||||
/// Create an uncompressed block byte vector by requesting one line of samples after another.
|
||||
pub fn collect_block_data_from_lines(
|
||||
channels: &ChannelList, block_index: BlockIndex,
|
||||
mut extract_line: impl FnMut(LineRefMut<'_>)
|
||||
) -> Vec<u8>
|
||||
{
|
||||
let byte_count = block_index.pixel_size.area() * channels.bytes_per_pixel;
|
||||
let mut block_bytes = vec![0_u8; byte_count];
|
||||
|
||||
for (byte_range, line_index) in LineIndex::lines_in_block(block_index, channels) {
|
||||
extract_line(LineRefMut { // TODO subsampling
|
||||
value: &mut block_bytes[byte_range],
|
||||
location: line_index,
|
||||
});
|
||||
}
|
||||
|
||||
block_bytes
|
||||
}
|
||||
|
||||
/// Create an uncompressed block by requesting one line of samples after another.
|
||||
pub fn from_lines(
|
||||
channels: &ChannelList, block_index: BlockIndex,
|
||||
extract_line: impl FnMut(LineRefMut<'_>)
|
||||
) -> Self {
|
||||
Self {
|
||||
index: block_index,
|
||||
data: Self::collect_block_data_from_lines(channels, block_index, extract_line)
|
||||
}
|
||||
}
|
||||
}
|
||||
527
vendor/exr/src/block/reader.rs
vendored
Normal file
527
vendor/exr/src/block/reader.rs
vendored
Normal file
@@ -0,0 +1,527 @@
|
||||
//! Composable structures to handle reading an image.
|
||||
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt::Debug;
|
||||
use std::io::{Read, Seek};
|
||||
use rayon_core::{ThreadPool, ThreadPoolBuildError};
|
||||
|
||||
use smallvec::alloc::sync::Arc;
|
||||
|
||||
use crate::block::{BlockIndex, UncompressedBlock};
|
||||
use crate::block::chunk::{Chunk, TileCoordinates};
|
||||
use crate::compression::Compression;
|
||||
use crate::error::{Error, Result, u64_to_usize, UnitResult};
|
||||
use crate::io::{PeekRead, Tracking};
|
||||
use crate::meta::{MetaData, OffsetTables};
|
||||
use crate::meta::header::Header;
|
||||
|
||||
/// Decode the meta data from a byte source, keeping the source ready for further reading.
/// Continue decoding the remaining bytes by calling `filtered_chunks` or `all_chunks`.
#[derive(Debug)]
pub struct Reader<R> {
    // the meta data, already decoded from the start of the byte source
    meta_data: MetaData,
    // the byte source, positioned directly after the meta data
    remaining_reader: PeekRead<Tracking<R>>, // TODO does R need to be Seek or is Tracking enough?
}
|
||||
|
||||
impl<R: Read + Seek> Reader<R> {

    /// Start the reading process.
    /// Immediately decodes the meta data into an internal field.
    /// Access it via `meta_data()`.
    pub fn read_from_buffered(read: R, pedantic: bool) -> Result<Self> {
        let mut remaining_reader = PeekRead::new(Tracking::new(read));
        let meta_data = MetaData::read_validated_from_buffered_peekable(&mut remaining_reader, pedantic)?;
        Ok(Self { meta_data, remaining_reader })
    }

    // must not be mutable, as reading the file later on relies on the meta data
    /// The decoded exr meta data from the file.
    pub fn meta_data(&self) -> &MetaData { &self.meta_data }

    /// The decoded exr headers from the file.
    pub fn headers(&self) -> &[Header] { &self.meta_data.headers }

    /// Obtain the meta data ownership.
    pub fn into_meta_data(self) -> MetaData { self.meta_data }

    /// Prepare to read all the chunks from the file.
    /// Does not decode the chunks now, but returns a decoder.
    /// Reading all chunks reduces seeking the file, but some chunks might be read without being used.
    pub fn all_chunks(mut self, pedantic: bool) -> Result<AllChunksReader<R>> {
        let total_chunk_count = {
            if pedantic {
                // read and validate the offset tables so corrupt tables fail early
                let offset_tables = MetaData::read_offset_tables(&mut self.remaining_reader, &self.meta_data.headers)?;
                validate_offset_tables(self.meta_data.headers.as_slice(), &offset_tables, self.remaining_reader.byte_position())?;
                offset_tables.iter().map(|table| table.len()).sum()
            }
            else {
                // skip over the tables, only inferring the chunk count from the headers
                usize::try_from(MetaData::skip_offset_tables(&mut self.remaining_reader, &self.meta_data.headers)?)
                    .expect("too large chunk count for this machine")
            }
        };

        Ok(AllChunksReader {
            meta_data: self.meta_data,
            remaining_chunks: 0 .. total_chunk_count,
            remaining_bytes: self.remaining_reader,
            pedantic
        })
    }

    /// Prepare to read some of the chunks from the file.
    /// Does not decode the chunks now, but returns a decoder.
    /// Reading only some chunks may involve seeking the file, potentially skipping many bytes.
    // TODO tile indices add no new information to block index??
    pub fn filter_chunks(mut self, pedantic: bool, mut filter: impl FnMut(&MetaData, TileCoordinates, BlockIndex) -> bool) -> Result<FilteredChunksReader<R>> {
        let offset_tables = MetaData::read_offset_tables(&mut self.remaining_reader, &self.meta_data.headers)?;

        // TODO regardless of pedantic, if invalid, read all chunks instead, and filter after reading each chunk?
        if pedantic {
            validate_offset_tables(
                self.meta_data.headers.as_slice(), &offset_tables,
                self.remaining_reader.byte_position()
            )?;
        }

        let mut filtered_offsets = Vec::with_capacity(
            (self.meta_data.headers.len() * 32).min(2*2048)
        );

        // TODO detect whether the filter actually would skip chunks, and avoid sorting etc when no filtering is applied

        for (header_index, header) in self.meta_data.headers.iter().enumerate() { // offset tables are stored same order as headers
            for (block_index, tile) in header.blocks_increasing_y_order().enumerate() { // in increasing_y order
                let data_indices = header.get_absolute_block_pixel_coordinates(tile.location)?;

                let block = BlockIndex {
                    layer: header_index,
                    level: tile.location.level_index,
                    pixel_position: data_indices.position.to_usize("data indices start")?,
                    pixel_size: data_indices.size,
                };

                if filter(&self.meta_data, tile.location, block) {
                    filtered_offsets.push(offset_tables[header_index][block_index]) // safe indexing from `enumerate()`
                }
            };
        }

        filtered_offsets.sort_unstable(); // enables reading continuously if possible (already sorted where line order increasing)

        if pedantic {
            // table is sorted. if any two neighbours are equal, we have duplicates. this is invalid.
            if filtered_offsets.windows(2).any(|pair| pair[0] == pair[1]) {
                return Err(Error::invalid("chunk offset table"))
            }
        }

        Ok(FilteredChunksReader {
            meta_data: self.meta_data,
            expected_filtered_chunk_count: filtered_offsets.len(),
            remaining_filtered_chunk_indices: filtered_offsets.into_iter(),
            remaining_bytes: self.remaining_reader
        })
    }
}
|
||||
|
||||
|
||||
fn validate_offset_tables(headers: &[Header], offset_tables: &OffsetTables, chunks_start_byte: usize) -> UnitResult {
|
||||
let max_pixel_bytes: usize = headers.iter() // when compressed, chunks are smaller, but never larger than max
|
||||
.map(|header| header.max_pixel_file_bytes())
|
||||
.sum();
|
||||
|
||||
// check that each offset is within the bounds
|
||||
let end_byte = chunks_start_byte + max_pixel_bytes;
|
||||
let is_invalid = offset_tables.iter().flatten().map(|&u64| u64_to_usize(u64))
|
||||
.any(|chunk_start| chunk_start < chunks_start_byte || chunk_start > end_byte);
|
||||
|
||||
if is_invalid { Err(Error::invalid("offset table")) }
|
||||
else { Ok(()) }
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/// Decode the desired chunks and skip the unimportant chunks in the file.
/// The decoded chunks can be decompressed by calling
/// `decompress_parallel`, `decompress_sequential`, or `sequential_decompressor` or `parallel_decompressor`.
/// Call `on_progress` to have a callback with each block.
/// Also contains the image meta data.
#[derive(Debug)]
pub struct FilteredChunksReader<R> {
    // the decoded meta data of the file
    meta_data: MetaData,
    // how many chunks passed the filter; the iterator yields exactly this many
    expected_filtered_chunk_count: usize,
    // sorted absolute byte offsets of the chunks that passed the filter
    remaining_filtered_chunk_indices: std::vec::IntoIter<u64>,
    // the byte source, positioned after the offset tables
    remaining_bytes: PeekRead<Tracking<R>>,
}
|
||||
|
||||
/// Decode all chunks in the file without seeking.
/// The decoded chunks can be decompressed by calling
/// `decompress_parallel`, `decompress_sequential`, or `sequential_decompressor` or `parallel_decompressor`.
/// Call `on_progress` to have a callback with each block.
/// Also contains the image meta data.
#[derive(Debug)]
pub struct AllChunksReader<R> {
    // the decoded meta data of the file
    meta_data: MetaData,
    // counts down the chunks that still have to be read
    remaining_chunks: std::ops::Range<usize>,
    // the byte source, positioned at the first chunk
    remaining_bytes: PeekRead<Tracking<R>>,
    // when true, trailing bytes after the last chunk are reported as an error
    pedantic: bool,
}
|
||||
|
||||
/// Decode chunks in the file without seeking.
/// Calls the supplied closure for each chunk.
/// The decoded chunks can be decompressed by calling
/// `decompress_parallel`, `decompress_sequential`, or `sequential_decompressor`.
/// Also contains the image meta data.
#[derive(Debug)]
pub struct OnProgressChunksReader<R, F> {
    // the wrapped reader that actually produces the chunks
    chunks_reader: R,
    // how many chunks have been yielded so far, for the progress ratio
    decoded_chunks: usize,
    // progress callback, invoked with a value in `0.0 ..= 1.0`
    callback: F,
}
|
||||
|
||||
/// Decode chunks in the file.
/// The decoded chunks can be decompressed by calling
/// `decompress_parallel`, `decompress_sequential`, or `sequential_decompressor`.
/// Call `on_progress` to have a callback with each block.
/// Also contains the image meta data.
pub trait ChunksReader: Sized + Iterator<Item=Result<Chunk>> + ExactSizeIterator {

    /// The decoded exr meta data from the file.
    fn meta_data(&self) -> &MetaData;

    /// The decoded exr headers from the file.
    fn headers(&self) -> &[Header] { &self.meta_data().headers }

    /// The number of chunks that this reader will return in total.
    /// Can be less than the total number of chunks in the file, if some chunks are skipped.
    fn expected_chunk_count(&self) -> usize;

    /// Read the next compressed chunk from the file.
    /// Equivalent to `.next()`, as this also is an iterator.
    /// Returns `None` if all chunks have been read.
    fn read_next_chunk(&mut self) -> Option<Result<Chunk>> { self.next() }

    /// Create a new reader that calls the provided progress
    /// callback for each chunk that is read from the file.
    /// If the file can be successfully decoded,
    /// the progress will always at least once include 0.0 at the start and 1.0 at the end.
    fn on_progress<F>(self, on_progress: F) -> OnProgressChunksReader<Self, F> where F: FnMut(f64) {
        OnProgressChunksReader { chunks_reader: self, callback: on_progress, decoded_chunks: 0 }
    }

    /// Decompress all blocks in the file, using multiple cpu cores, and call the supplied closure for each block.
    /// The order of the blocks is not deterministic.
    /// You can also use `parallel_decompressor` to obtain an iterator instead.
    /// Will fallback to sequential processing where threads are not available, or where it would not speed up the process.
    // FIXME try async + futures instead of rayon! Maybe even allows for external async decoding? (-> impl Stream<UncompressedBlock>)
    fn decompress_parallel(
        self, pedantic: bool,
        mut insert_block: impl FnMut(&MetaData, UncompressedBlock) -> UnitResult
    ) -> UnitResult
    {
        // fall back to sequential decompression when no thread pool can be used
        let mut decompressor = match self.parallel_decompressor(pedantic) {
            Err(old_self) => return old_self.decompress_sequential(pedantic, insert_block),
            Ok(decompressor) => decompressor,
        };

        while let Some(block) = decompressor.next() {
            insert_block(decompressor.meta_data(), block?)?;
        }

        debug_assert_eq!(decompressor.len(), 0, "compressed blocks left after decompressing all blocks");
        Ok(())
    }

    /// Return an iterator that decompresses the chunks with multiple threads.
    /// The order of the blocks is not deterministic.
    /// Use `ParallelBlockDecompressor::new` if you want to use your own thread pool.
    /// By default, this uses as many threads as there are CPUs.
    /// Returns the `self` if there is no need for parallel decompression.
    fn parallel_decompressor(self, pedantic: bool) -> std::result::Result<ParallelBlockDecompressor<Self>, Self> {
        ParallelBlockDecompressor::new(self, pedantic)
    }

    /// Decompress all blocks sequentially in this thread, calling the supplied closure for each block.
    /// You can alternatively use `sequential_decompressor` if you prefer an external iterator.
    fn decompress_sequential(
        self, pedantic: bool,
        mut insert_block: impl FnMut(&MetaData, UncompressedBlock) -> UnitResult
    ) -> UnitResult
    {
        let mut decompressor = self.sequential_decompressor(pedantic);
        while let Some(block) = decompressor.next() {
            insert_block(decompressor.meta_data(), block?)?;
        }

        debug_assert_eq!(decompressor.len(), 0, "compressed blocks left after decompressing all blocks");
        Ok(())
    }

    /// Prepare reading the chunks sequentially, only a single thread, but with less memory overhead.
    fn sequential_decompressor(self, pedantic: bool) -> SequentialBlockDecompressor<Self> {
        SequentialBlockDecompressor { remaining_chunks_reader: self, pedantic }
    }
}
|
||||
|
||||
// delegates straight to the wrapped reader; progress accounting lives in the Iterator impl
impl<R, F> ChunksReader for OnProgressChunksReader<R, F> where R: ChunksReader, F: FnMut(f64) {
    fn meta_data(&self) -> &MetaData { self.chunks_reader.meta_data() }
    fn expected_chunk_count(&self) -> usize { self.chunks_reader.expected_chunk_count() }
}
|
||||
|
||||
// exact length is forwarded from the inner reader via `size_hint`
impl<R, F> ExactSizeIterator for OnProgressChunksReader<R, F> where R: ChunksReader, F: FnMut(f64) {}
impl<R, F> Iterator for OnProgressChunksReader<R, F> where R: ChunksReader, F: FnMut(f64) {
    type Item = Result<Chunk>;

    fn next(&mut self) -> Option<Self::Item> {
        self.chunks_reader.next().map(|item|{
            {
                // report progress *before* counting this chunk,
                // so the first callback value is exactly 0.0
                let total_chunks = self.expected_chunk_count() as f64;
                let callback = &mut self.callback;
                callback(self.decoded_chunks as f64 / total_chunks);
            }

            self.decoded_chunks += 1;
            item
        })
        .or_else(||{
            debug_assert_eq!(
                self.decoded_chunks, self.expected_chunk_count(),
                "chunks reader finished but not all chunks are decompressed"
            );

            // the iterator is exhausted: emit the final 1.0 progress value
            let callback = &mut self.callback;
            callback(1.0);
            None
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.chunks_reader.size_hint()
    }
}
|
||||
|
||||
impl<R: Read + Seek> ChunksReader for AllChunksReader<R> {
    fn meta_data(&self) -> &MetaData { &self.meta_data }
    // the range end equals the total chunk count inferred from the meta data
    fn expected_chunk_count(&self) -> usize { self.remaining_chunks.end }
}
|
||||
|
||||
// exact length is provided by the remaining-chunk range via `size_hint`
impl<R: Read + Seek> ExactSizeIterator for AllChunksReader<R> {}
impl<R: Read + Seek> Iterator for AllChunksReader<R> {
    type Item = Result<Chunk>;

    fn next(&mut self) -> Option<Self::Item> {
        // read as many chunks as the file should contain (inferred from meta data)
        let next_chunk = self.remaining_chunks.next()
            .map(|_| Chunk::read(&mut self.remaining_bytes, &self.meta_data));

        // if no chunks are left, but some bytes remain, return error
        if self.pedantic && next_chunk.is_none() && self.remaining_bytes.peek_u8().is_ok() {
            return Some(Err(Error::invalid("end of file expected")));
        }

        next_chunk
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining_chunks.len(), Some(self.remaining_chunks.len()))
    }
}
|
||||
|
||||
impl<R: Read + Seek> ChunksReader for FilteredChunksReader<R> {
    fn meta_data(&self) -> &MetaData { &self.meta_data }
    // only the chunks that passed the filter are counted
    fn expected_chunk_count(&self) -> usize { self.expected_filtered_chunk_count }
}
|
||||
|
||||
// exact length is provided by the remaining offset iterator via `size_hint`
impl<R: Read + Seek> ExactSizeIterator for FilteredChunksReader<R> {}
impl<R: Read + Seek> Iterator for FilteredChunksReader<R> {
    type Item = Result<Chunk>;

    fn next(&mut self) -> Option<Self::Item> {
        // read as many chunks as we have desired chunk offsets
        self.remaining_filtered_chunk_indices.next().map(|next_chunk_location|{
            self.remaining_bytes.skip_to( // no-op for seek at current position, uses skip_bytes for small amounts
                usize::try_from(next_chunk_location)
                    .expect("too large chunk position for this machine")
            )?;

            let meta_data = &self.meta_data;
            Chunk::read(&mut self.remaining_bytes, meta_data)
        })

        // TODO remember last chunk index and then seek to index+size and check whether bytes are left?
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining_filtered_chunk_indices.len(), Some(self.remaining_filtered_chunk_indices.len()))
    }
}
|
||||
|
||||
/// Read all chunks from the file, decompressing each chunk immediately.
/// Implements iterator.
#[derive(Debug)]
pub struct SequentialBlockDecompressor<R: ChunksReader> {
    // the source of compressed chunks; also provides the meta data
    remaining_chunks_reader: R,
    // forwarded to the decompression routine for strict validation
    pedantic: bool,
}
|
||||
|
||||
impl<R: ChunksReader> SequentialBlockDecompressor<R> {

    /// The extracted meta data from the image file.
    pub fn meta_data(&self) -> &MetaData { self.remaining_chunks_reader.meta_data() }

    /// Read and then decompress a single block of pixels from the byte source.
    /// Returns `None` when the underlying chunk reader is exhausted.
    pub fn decompress_next_block(&mut self) -> Option<Result<UncompressedBlock>> {
        self.remaining_chunks_reader.read_next_chunk().map(|compressed_chunk|{
            // `?` propagates a chunk read error instead of attempting to decompress it
            UncompressedBlock::decompress_chunk(compressed_chunk?, &self.remaining_chunks_reader.meta_data(), self.pedantic)
        })
    }
}
|
||||
|
||||
/// Decompress the chunks in a file in parallel.
/// The first call to `next` will fill the thread pool with jobs,
/// starting to decompress the next few blocks.
/// These jobs will finish, even if you stop reading more blocks.
/// Implements iterator.
#[derive(Debug)]
pub struct ParallelBlockDecompressor<R: ChunksReader> {
    // the source of compressed chunks still to be submitted to the pool
    remaining_chunks: R,
    // worker threads send their decompression results through this channel
    sender: flume::Sender<Result<UncompressedBlock>>,
    // the consuming side of the channel, drained by `decompress_next_block`
    receiver: flume::Receiver<Result<UncompressedBlock>>,
    // number of jobs submitted to the pool whose results were not yet received
    currently_decompressing_count: usize,
    // upper bound of in-flight jobs, keeping each pool thread busy
    max_threads: usize,

    // cloned meta data shared with the worker closures
    shared_meta_data_ref: Arc<MetaData>,
    // forwarded to the decompression routine for strict validation
    pedantic: bool,

    pool: ThreadPool,
}
|
||||
|
||||
impl<R: ChunksReader> ParallelBlockDecompressor<R> {

    /// Create a new decompressor. Does not immediately spawn any tasks.
    /// Decompression starts after the first call to `next`.
    /// Returns the chunks if parallel decompression should not be used.
    /// Use `new_with_thread_pool` to customize the threadpool.
    pub fn new(chunks: R, pedantic: bool) -> std::result::Result<Self, R> {
        Self::new_with_thread_pool(chunks, pedantic, ||{
            rayon_core::ThreadPoolBuilder::new()
                .thread_name(|index| format!("OpenEXR Block Decompressor Thread #{}", index))
                .build()
        })
    }

    /// Create a new decompressor. Does not immediately spawn any tasks.
    /// Decompression starts after the first call to `next`.
    /// Returns the chunks if parallel decompression should not be used.
    pub fn new_with_thread_pool<CreatePool>(chunks: R, pedantic: bool, try_create_thread_pool: CreatePool)
        -> std::result::Result<Self, R>
        where CreatePool: FnOnce() -> std::result::Result<ThreadPool, ThreadPoolBuildError>
    {
        // if no compression is used in the file, don't use a threadpool
        if chunks.meta_data().headers.iter()
            .all(|head|head.compression == Compression::Uncompressed)
        {
            return Err(chunks);
        }

        // in case thread pool creation fails (for example on WASM currently),
        // we revert to sequential decompression
        let pool = match try_create_thread_pool() {
            Ok(pool) => pool,

            // TODO print warning?
            Err(_) => return Err(chunks),
        };

        let max_threads = pool.current_num_threads().max(1).min(chunks.len()) + 2; // ca one block for each thread at all times

        let (send, recv) = flume::unbounded(); // TODO bounded channel simplifies logic?

        Ok(Self {
            shared_meta_data_ref: Arc::new(chunks.meta_data().clone()),
            currently_decompressing_count: 0,
            remaining_chunks: chunks,
            sender: send,
            receiver: recv,
            pedantic,
            max_threads,

            pool,
        })
    }

    /// Fill the pool with decompression jobs. Returns the first job that finishes.
    pub fn decompress_next_block(&mut self) -> Option<Result<UncompressedBlock>> {

        // keep up to `max_threads` jobs in flight before blocking on a result
        while self.currently_decompressing_count < self.max_threads {
            let block = self.remaining_chunks.next();
            if let Some(block) = block {
                // a chunk read error is returned immediately, without spawning a job
                let block = match block {
                    Ok(block) => block,
                    Err(error) => return Some(Err(error))
                };

                let sender = self.sender.clone();
                let meta = self.shared_meta_data_ref.clone();
                let pedantic = self.pedantic;

                self.currently_decompressing_count += 1;

                self.pool.spawn(move || {
                    let decompressed_or_err = UncompressedBlock::decompress_chunk(
                        block, &meta, pedantic
                    );

                    // by now, decompressing could have failed in another thread.
                    // the error is then already handled, so we simply
                    // don't send the decompressed block and do nothing
                    let _ = sender.send(decompressed_or_err);
                });
            }
            else {
                // there are no chunks left to decompress
                break;
            }
        }

        if self.currently_decompressing_count > 0 {
            // blocks until any worker finishes; result order is therefore non-deterministic
            let next = self.receiver.recv()
                .expect("all decompressing senders hung up but more messages were expected");

            self.currently_decompressing_count -= 1;
            Some(next)
        }
        else {
            debug_assert!(self.receiver.try_recv().is_err(), "uncompressed chunks left in channel after decompressing all chunks"); // TODO not reliable
            debug_assert_eq!(self.len(), 0, "compressed chunks left after decompressing all chunks");
            None
        }
    }

    /// The extracted meta data of the image file.
    pub fn meta_data(&self) -> &MetaData { self.remaining_chunks.meta_data() }
}
|
||||
|
||||
// exact length is forwarded from the inner chunk reader via `size_hint`
impl<R: ChunksReader> ExactSizeIterator for SequentialBlockDecompressor<R> {}
impl<R: ChunksReader> Iterator for SequentialBlockDecompressor<R> {
    type Item = Result<UncompressedBlock>;
    fn next(&mut self) -> Option<Self::Item> { self.decompress_next_block() }
    fn size_hint(&self) -> (usize, Option<usize>) { self.remaining_chunks_reader.size_hint() }
}
|
||||
|
||||
impl<R: ChunksReader> ExactSizeIterator for ParallelBlockDecompressor<R> {}
impl<R: ChunksReader> Iterator for ParallelBlockDecompressor<R> {
    type Item = Result<UncompressedBlock>;
    fn next(&mut self) -> Option<Self::Item> { self.decompress_next_block() }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // count both unsubmitted chunks and jobs currently in flight
        let remaining = self.remaining_chunks.len() + self.currently_decompressing_count;
        (remaining, Some(remaining))
    }
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
248
vendor/exr/src/block/samples.rs
vendored
Normal file
248
vendor/exr/src/block/samples.rs
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
//! Extract pixel samples from a block of pixel bytes.
|
||||
|
||||
use crate::prelude::*;
|
||||
use half::prelude::HalfFloatSliceExt;
|
||||
|
||||
|
||||
/// A single red, green, blue, or alpha value.
#[derive(Copy, Clone, Debug)]
pub enum Sample {

    /// A 16-bit float sample.
    F16(f16),

    /// A 32-bit float sample.
    F32(f32),

    /// An unsigned integer sample.
    U32(u32)
}
|
||||
|
||||
impl Sample {

    /// Create a sample containing a 32-bit float.
    pub fn f32(f32: f32) -> Self { Sample::F32(f32) }

    /// Create a sample containing a 16-bit float.
    pub fn f16(f16: f16) -> Self { Sample::F16(f16) }

    /// Create a sample containing a 32-bit integer.
    pub fn u32(u32: u32) -> Self { Sample::U32(u32) }

    /// Convert the sample to an f16 value. This has lower precision than f32.
    /// Note: An f16 can only represent integers up to `2048` exactly;
    /// larger u32 values lose precision in this conversion.
    #[inline]
    pub fn to_f16(self) -> f16 {
        match self {
            Sample::F16(sample) => sample,
            Sample::F32(sample) => f16::from_f32(sample),
            Sample::U32(sample) => f16::from_f32(sample as f32),
        }
    }

    /// Convert the sample to an f32 value.
    /// Note: An f32 can only represent integers up to `16777216` (2^24) exactly;
    /// larger u32 values lose precision in this conversion.
    #[inline]
    pub fn to_f32(self) -> f32 {
        match self {
            Sample::F32(sample) => sample,
            Sample::F16(sample) => sample.to_f32(),
            Sample::U32(sample) => sample as f32,
        }
    }

    /// Convert the sample to a u32. Rounds floats to integers the same way that `3.1 as u32` does.
    #[inline]
    pub fn to_u32(self) -> u32 {
        match self {
            Sample::F16(sample) => sample.to_f32() as u32,
            Sample::F32(sample) => sample as u32,
            Sample::U32(sample) => sample,
        }
    }

    /// Is this value not a number?
    #[inline]
    pub fn is_nan(self) -> bool {
        match self {
            Sample::F16(value) => value.is_nan(),
            Sample::F32(value) => value.is_nan(),
            Sample::U32(_) => false,
        }
    }

    /// Is this value zero or negative zero?
    #[inline]
    pub fn is_zero(&self) -> bool {
        match *self {
            Sample::F16(value) => value == f16::ZERO || value == f16::NEG_ZERO,
            // `0.0 == -0.0` in IEEE float comparison, so this covers negative zero too
            Sample::F32(value) => value == 0.0,
            Sample::U32(value) => value == 0,
        }
    }
}
|
||||
|
||||
// NOTE(review): the right-hand side is converted into the *left* variant's type
// before comparing, so this relation is not guaranteed to be symmetric or
// transitive across variants with different precision — confirm this is intended.
impl PartialEq for Sample {
    fn eq(&self, other: &Self) -> bool {
        match *self {
            Sample::F16(num) => num == other.to_f16(),
            Sample::F32(num) => num == other.to_f32(),
            Sample::U32(num) => num == other.to_u32(),
        }
    }
}
|
||||
|
||||
// this is not recommended because it may hide whether a color is transparent or opaque and might be undesired for depth channels
impl Default for Sample {
    // defaults to an f32 zero sample
    fn default() -> Self { Sample::F32(0.0) }
}
|
||||
|
||||
// lossless wrapping conversions from the three native sample types
impl From<f16> for Sample { #[inline] fn from(f: f16) -> Self { Sample::F16(f) } }
impl From<f32> for Sample { #[inline] fn from(f: f32) -> Self { Sample::F32(f) } }
impl From<u32> for Sample { #[inline] fn from(f: u32) -> Self { Sample::U32(f) } }

// a missing value falls back to the inner type's default before conversion
impl<T> From<Option<T>> for Sample where T: Into<Sample> + Default {
    #[inline] fn from(num: Option<T>) -> Self { num.unwrap_or_default().into() }
}
|
||||
|
||||
|
||||
// unwrapping conversions; these may lose precision, see `to_f16`/`to_f32`/`to_u32`
impl From<Sample> for f16 { #[inline] fn from(s: Sample) -> Self { s.to_f16() } }
impl From<Sample> for f32 { #[inline] fn from(s: Sample) -> Self { s.to_f32() } }
impl From<Sample> for u32 { #[inline] fn from(s: Sample) -> Self { s.to_u32() } }
|
||||
|
||||
|
||||
/// Create an arbitrary sample type from one of the defined sample types.
/// Should be compiled to a no-op where the file contains the predicted sample type.
/// The slice functions should be optimized into a `memcpy` where there is no conversion needed.
pub trait FromNativeSample: Sized + Copy + Default + 'static {

    /// Create this sample from a f16, trying to represent the same numerical value
    fn from_f16(value: f16) -> Self;

    /// Create this sample from a f32, trying to represent the same numerical value
    fn from_f32(value: f32) -> Self;

    /// Create this sample from a u32, trying to represent the same numerical value
    fn from_u32(value: u32) -> Self;

    /// Convert all values from the slice into this type.
    /// This function exists to allow the compiler to perform a vectorization optimization.
    /// Note that this default implementation will **not** be vectorized by the compiler automatically.
    /// For maximum performance you will need to override this function and implement it via
    /// an explicit batched conversion such as [`convert_to_f32_slice`](https://docs.rs/half/2.3.1/half/slice/trait.HalfFloatSliceExt.html#tymethod.convert_to_f32_slice)
    ///
    /// # Panics
    /// Panics when the slices have different lengths.
    #[inline]
    fn from_f16s(from: &[f16], to: &mut [Self]) {
        assert_eq!(from.len(), to.len(), "slices must have the same length");
        for (from, to) in from.iter().zip(to.iter_mut()) {
            *to = Self::from_f16(*from);
        }
    }

    /// Convert all values from the slice into this type.
    /// This function exists to allow the compiler to perform a vectorization optimization.
    /// Note that this default implementation will be vectorized by the compiler automatically.
    ///
    /// # Panics
    /// Panics when the slices have different lengths.
    #[inline]
    fn from_f32s(from: &[f32], to: &mut [Self]) {
        assert_eq!(from.len(), to.len(), "slices must have the same length");
        for (from, to) in from.iter().zip(to.iter_mut()) {
            *to = Self::from_f32(*from);
        }
    }

    /// Convert all values from the slice into this type.
    /// This function exists to allow the compiler to perform a vectorization optimization.
    /// Note that this default implementation will be vectorized by the compiler automatically,
    /// provided that the CPU supports the necessary conversion instructions.
    /// For example, x86_64 lacks the instructions to convert `u32` to floats,
    /// so this will inevitably be slow on x86_64.
    ///
    /// # Panics
    /// Panics when the slices have different lengths.
    #[inline]
    fn from_u32s(from: &[u32], to: &mut [Self]) {
        assert_eq!(from.len(), to.len(), "slices must have the same length");
        for (from, to) in from.iter().zip(to.iter_mut()) {
            *to = Self::from_u32(*from);
        }
    }
}
|
||||
|
||||
// TODO haven't i implemented this exact behaviour already somewhere else in this library...??
|
||||
impl FromNativeSample for f32 {
|
||||
#[inline] fn from_f16(value: f16) -> Self { value.to_f32() }
|
||||
#[inline] fn from_f32(value: f32) -> Self { value }
|
||||
#[inline] fn from_u32(value: u32) -> Self { value as f32 }
|
||||
|
||||
// f16 is a custom type
|
||||
// so the compiler can not automatically vectorize the conversion
|
||||
// that's why we need to specialize this function
|
||||
#[inline]
|
||||
fn from_f16s(from: &[f16], to: &mut [Self]) {
|
||||
from.convert_to_f32_slice(to);
|
||||
}
|
||||
}
|
||||
|
||||
impl FromNativeSample for u32 {
|
||||
#[inline] fn from_f16(value: f16) -> Self { value.to_f32() as u32 }
|
||||
#[inline] fn from_f32(value: f32) -> Self { value as u32 }
|
||||
#[inline] fn from_u32(value: u32) -> Self { value }
|
||||
}
|
||||
|
||||
impl FromNativeSample for f16 {
    #[inline] fn from_f16(sample: f16) -> Self { sample }
    #[inline] fn from_f32(sample: f32) -> Self { f16::from_f32(sample) }
    #[inline] fn from_u32(sample: u32) -> Self { f16::from_f32(sample as f32) }

    // `f16` is a type from the `half` crate, so the compiler cannot
    // auto-vectorize a per-element conversion loop; delegate to the
    // crate's batched slice conversion instead.
    #[inline]
    fn from_f32s(from: &[f32], to: &mut [Self]) {
        to.convert_from_f32_slice(from)
    }
}
|
||||
|
||||
impl FromNativeSample for Sample {
|
||||
#[inline] fn from_f16(value: f16) -> Self { Self::from(value) }
|
||||
#[inline] fn from_f32(value: f32) -> Self { Self::from(value) }
|
||||
#[inline] fn from_u32(value: u32) -> Self { Self::from(value) }
|
||||
}
|
||||
|
||||
|
||||
/// Convert any type into one of the supported sample types.
/// Should be compiled to a no-op where the file contains the predicted sample type
pub trait IntoNativeSample: Copy + Default + Sync + 'static {

    /// Convert this sample to an f16, trying to represent the same numerical value.
    fn to_f16(&self) -> f16;

    /// Convert this sample to an f32, trying to represent the same numerical value.
    fn to_f32(&self) -> f32;

    /// Convert this sample to a u32, trying to represent the same numerical value.
    fn to_u32(&self) -> u32;
}
|
||||
|
||||
impl IntoNativeSample for f16 {
    // delegate to the `FromNativeSample` conversions of the target types,
    // so that both traits always agree on the conversion rules
    fn to_f16(&self) -> f16 {
        let value = *self;
        f16::from_f16(value)
    }

    fn to_f32(&self) -> f32 {
        let value = *self;
        f32::from_f16(value)
    }

    fn to_u32(&self) -> u32 {
        let value = *self;
        u32::from_f16(value)
    }
}
|
||||
|
||||
impl IntoNativeSample for f32 {
|
||||
fn to_f16(&self) -> f16 { f16::from_f32(*self) }
|
||||
fn to_f32(&self) -> f32 { f32::from_f32(*self) }
|
||||
fn to_u32(&self) -> u32 { u32::from_f32(*self) }
|
||||
}
|
||||
|
||||
impl IntoNativeSample for u32 {
|
||||
fn to_f16(&self) -> f16 { f16::from_u32(*self) }
|
||||
fn to_f32(&self) -> f32 { f32::from_u32(*self) }
|
||||
fn to_u32(&self) -> u32 { u32::from_u32(*self) }
|
||||
}
|
||||
|
||||
impl IntoNativeSample for Sample {
|
||||
fn to_f16(&self) -> f16 { Sample::to_f16(*self) }
|
||||
fn to_f32(&self) -> f32 { Sample::to_f32(*self) }
|
||||
fn to_u32(&self) -> u32 { Sample::to_u32(*self) }
|
||||
}
|
||||
|
||||
|
||||
|
||||
468
vendor/exr/src/block/writer.rs
vendored
Normal file
468
vendor/exr/src/block/writer.rs
vendored
Normal file
@@ -0,0 +1,468 @@
|
||||
//! Composable structures to handle writing an image.
|
||||
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::io::Seek;
|
||||
use std::iter::Peekable;
|
||||
use std::ops::Not;
|
||||
use rayon_core::{ThreadPool, ThreadPoolBuildError};
|
||||
|
||||
use smallvec::alloc::collections::BTreeMap;
|
||||
|
||||
use crate::block::UncompressedBlock;
|
||||
use crate::block::chunk::{Chunk};
|
||||
use crate::compression::Compression;
|
||||
use crate::error::{Error, Result, UnitResult, usize_to_u64};
|
||||
use crate::io::{Data, Tracking, Write};
|
||||
use crate::meta::{Headers, MetaData, OffsetTables};
|
||||
use crate::meta::attribute::LineOrder;
|
||||
|
||||
/// Write an exr file by writing one chunk after another in a closure.
|
||||
/// In the closure, you are provided a chunk writer, which should be used to write all the chunks.
|
||||
/// Assumes the your write destination is buffered.
|
||||
pub fn write_chunks_with<W: Write + Seek>(
|
||||
buffered_write: W, headers: Headers, pedantic: bool,
|
||||
write_chunks: impl FnOnce(MetaData, &mut ChunkWriter<W>) -> UnitResult
|
||||
) -> UnitResult {
|
||||
// this closure approach ensures that after writing all chunks, the file is always completed and checked and flushed
|
||||
let (meta, mut writer) = ChunkWriter::new_for_buffered(buffered_write, headers, pedantic)?;
|
||||
write_chunks(meta, &mut writer)?;
|
||||
writer.complete_meta_data()
|
||||
}
|
||||
|
||||
/// Can consume compressed pixel chunks, writing them a file.
|
||||
/// Use `sequential_blocks_compressor` or `parallel_blocks_compressor` to compress your data,
|
||||
/// or use `compress_all_blocks_sequential` or `compress_all_blocks_parallel`.
|
||||
/// Use `on_progress` to obtain a new writer
|
||||
/// that triggers a callback for each block.
|
||||
// #[must_use]
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub struct ChunkWriter<W> {
|
||||
header_count: usize,
|
||||
byte_writer: Tracking<W>,
|
||||
chunk_indices_byte_location: std::ops::Range<usize>,
|
||||
chunk_indices_increasing_y: OffsetTables,
|
||||
chunk_count: usize, // TODO compose?
|
||||
}
|
||||
|
||||
/// A new writer that triggers a callback
/// for each block written to the inner writer.
#[derive(Debug)]
#[must_use]
pub struct OnProgressChunkWriter<'w, W, F> {
    // the wrapped writer that actually writes the chunks
    chunk_writer: &'w mut W,

    // how many chunks have been written through this wrapper so far,
    // used to compute the progress ratio passed to the callback
    written_chunks: usize,

    // callback invoked with a progress value between 0.0 and 1.0 around every written chunk
    on_progress: F,
}
|
||||
|
||||
/// Write chunks to a byte destination.
|
||||
/// Then write each chunk with `writer.write_chunk(chunk)`.
|
||||
pub trait ChunksWriter: Sized {
|
||||
|
||||
/// The total number of chunks that the complete file will contain.
|
||||
fn total_chunks_count(&self) -> usize;
|
||||
|
||||
/// Any more calls will result in an error and have no effect.
|
||||
/// If writing results in an error, the file and the writer
|
||||
/// may remain in an invalid state and should not be used further.
|
||||
/// Errors when the chunk at this index was already written.
|
||||
fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult;
|
||||
|
||||
/// Obtain a new writer that calls the specified closure for each block that is written to this writer.
|
||||
fn on_progress<F>(&mut self, on_progress: F) -> OnProgressChunkWriter<'_, Self, F> where F: FnMut(f64) {
|
||||
OnProgressChunkWriter { chunk_writer: self, written_chunks: 0, on_progress }
|
||||
}
|
||||
|
||||
/// Obtain a new writer that can compress blocks to chunks, which are then passed to this writer.
|
||||
fn sequential_blocks_compressor<'w>(&'w mut self, meta: &'w MetaData) -> SequentialBlocksCompressor<'w, Self> {
|
||||
SequentialBlocksCompressor::new(meta, self)
|
||||
}
|
||||
|
||||
/// Obtain a new writer that can compress blocks to chunks on multiple threads, which are then passed to this writer.
|
||||
/// Returns none if the sequential compressor should be used instead (thread pool creation failure or too large performance overhead).
|
||||
fn parallel_blocks_compressor<'w>(&'w mut self, meta: &'w MetaData) -> Option<ParallelBlocksCompressor<'w, Self>> {
|
||||
ParallelBlocksCompressor::new(meta, self)
|
||||
}
|
||||
|
||||
/// Compresses all blocks to the file.
|
||||
/// The index of the block must be in increasing line order within the header.
|
||||
/// Obtain iterator with `MetaData::collect_ordered_blocks(...)` or similar methods.
|
||||
fn compress_all_blocks_sequential(mut self, meta: &MetaData, blocks: impl Iterator<Item=(usize, UncompressedBlock)>) -> UnitResult {
|
||||
let mut writer = self.sequential_blocks_compressor(meta);
|
||||
|
||||
// TODO check block order if line order is not unspecified!
|
||||
for (index_in_header_increasing_y, block) in blocks {
|
||||
writer.compress_block(index_in_header_increasing_y, block)?;
|
||||
}
|
||||
|
||||
// TODO debug_assert_eq!(self.is_complete());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Compresses all blocks to the file.
|
||||
/// The index of the block must be in increasing line order within the header.
|
||||
/// Obtain iterator with `MetaData::collect_ordered_blocks(...)` or similar methods.
|
||||
/// Will fallback to sequential processing where threads are not available, or where it would not speed up the process.
|
||||
fn compress_all_blocks_parallel(mut self, meta: &MetaData, blocks: impl Iterator<Item=(usize, UncompressedBlock)>) -> UnitResult {
|
||||
let mut parallel_writer = match self.parallel_blocks_compressor(meta) {
|
||||
None => return self.compress_all_blocks_sequential(meta, blocks),
|
||||
Some(writer) => writer,
|
||||
};
|
||||
|
||||
// TODO check block order if line order is not unspecified!
|
||||
for (index_in_header_increasing_y, block) in blocks {
|
||||
parallel_writer.add_block_to_compression_queue(index_in_header_increasing_y, block)?;
|
||||
}
|
||||
|
||||
// TODO debug_assert_eq!(self.is_complete());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<W> ChunksWriter for ChunkWriter<W> where W: Write + Seek {
|
||||
|
||||
/// The total number of chunks that the complete file will contain.
|
||||
fn total_chunks_count(&self) -> usize { self.chunk_count }
|
||||
|
||||
/// Any more calls will result in an error and have no effect.
|
||||
/// If writing results in an error, the file and the writer
|
||||
/// may remain in an invalid state and should not be used further.
|
||||
/// Errors when the chunk at this index was already written.
|
||||
fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult {
|
||||
let header_chunk_indices = &mut self.chunk_indices_increasing_y[chunk.layer_index];
|
||||
|
||||
if index_in_header_increasing_y >= header_chunk_indices.len() {
|
||||
return Err(Error::invalid("too large chunk index"));
|
||||
}
|
||||
|
||||
let chunk_index_slot = &mut header_chunk_indices[index_in_header_increasing_y];
|
||||
if *chunk_index_slot != 0 {
|
||||
return Err(Error::invalid(format!("chunk at index {} is already written", index_in_header_increasing_y)));
|
||||
}
|
||||
|
||||
*chunk_index_slot = usize_to_u64(self.byte_writer.byte_position());
|
||||
chunk.write(&mut self.byte_writer, self.header_count)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<W> ChunkWriter<W> where W: Write + Seek {
    // -- the following functions are private, because they must be called in a strict order --

    /// Writes the meta data and zeroed offset tables as a placeholder.
    /// Returns the parsed `MetaData` together with the writer,
    /// which is then ready to accept chunks via `write_chunk`.
    fn new_for_buffered(buffered_byte_writer: W, headers: Headers, pedantic: bool) -> Result<(MetaData, Self)> {
        let mut write = Tracking::new(buffered_byte_writer);
        let requirements = MetaData::write_validating_to_buffered(&mut write, headers.as_slice(), pedantic)?;

        // TODO: use increasing line order where possible, but this requires us to know whether we want to be parallel right now
        /*// if non-parallel compression, we always use increasing order anyways
        if !parallel || !has_compression {
            for header in &mut headers {
                if header.line_order == LineOrder::Unspecified {
                    header.line_order = LineOrder::Increasing;
                }
            }
        }*/

        // the offset tables hold one u64 entry per chunk, over all headers
        let offset_table_size: usize = headers.iter().map(|header| header.chunk_count).sum();

        let offset_table_start_byte = write.byte_position();
        let offset_table_end_byte = write.byte_position() + offset_table_size * u64::BYTE_SIZE;

        // skip offset tables, filling with 0, will be updated after the last chunk has been written
        // (a zero entry doubles as the "chunk not yet written" sentinel checked in `write_chunk`)
        write.seek_write_to(offset_table_end_byte)?;

        let header_count = headers.len();
        let chunk_indices_increasing_y = headers.iter()
            .map(|header| vec![0_u64; header.chunk_count]).collect();

        let meta_data = MetaData { requirements, headers };

        Ok((meta_data, ChunkWriter {
            header_count,
            byte_writer: write,
            chunk_count: offset_table_size,
            chunk_indices_byte_location: offset_table_start_byte .. offset_table_end_byte,
            chunk_indices_increasing_y,
        }))
    }

    /// Seek back to the meta data, write offset tables, and flush the byte writer.
    /// Leaves the writer seeked to the middle of the file.
    /// Errors if any chunk was never written (its offset entry is still zero).
    fn complete_meta_data(mut self) -> UnitResult {
        if self.chunk_indices_increasing_y.iter().flatten().any(|&index| index == 0) {
            return Err(Error::invalid("some chunks are not written yet"))
        }

        // write all offset tables
        debug_assert_ne!(self.byte_writer.byte_position(), self.chunk_indices_byte_location.end, "offset table has already been updated");
        self.byte_writer.seek_write_to(self.chunk_indices_byte_location.start)?;

        for table in self.chunk_indices_increasing_y {
            u64::write_slice(&mut self.byte_writer, table.as_slice())?;
        }

        self.byte_writer.flush()?; // make sure we catch all (possibly delayed) io errors before returning
        Ok(())
    }

}
|
||||
|
||||
|
||||
impl<'w, W, F> ChunksWriter for OnProgressChunkWriter<'w, W, F> where W: 'w + ChunksWriter, F: FnMut(f64) {
|
||||
fn total_chunks_count(&self) -> usize {
|
||||
self.chunk_writer.total_chunks_count()
|
||||
}
|
||||
|
||||
fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult {
|
||||
let total_chunks = self.total_chunks_count();
|
||||
let on_progress = &mut self.on_progress;
|
||||
|
||||
// guarantee on_progress being called with 0 once
|
||||
if self.written_chunks == 0 { on_progress(0.0); }
|
||||
|
||||
self.chunk_writer.write_chunk(index_in_header_increasing_y, chunk)?;
|
||||
|
||||
self.written_chunks += 1;
|
||||
|
||||
on_progress({
|
||||
// guarantee finishing with progress 1.0 for last block at least once, float division might slightly differ from 1.0
|
||||
if self.written_chunks == total_chunks { 1.0 }
|
||||
else { self.written_chunks as f64 / total_chunks as f64 }
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Write blocks that appear in any order and reorder them before writing.
#[derive(Debug)]
#[must_use]
pub struct SortedBlocksWriter<'w, W> {
    // the wrapped writer that receives the chunks in file order
    chunk_writer: &'w mut W,

    // chunks that arrived too early, keyed by their index in the file,
    // each stored as (y index within the header, chunk) until its turn comes
    pending_chunks: BTreeMap<usize, (usize, Chunk)>,

    // iterator over the file-order chunk indices that were not written yet;
    // its peeked head is the next index that may be written
    unwritten_chunk_indices: Peekable<std::ops::Range<usize>>,

    requires_sorting: bool, // using this instead of Option, because of borrowing
}
|
||||
|
||||
|
||||
impl<'w, W> SortedBlocksWriter<'w, W> where W: ChunksWriter {

    /// New sorting writer. If no header demands a specific line order,
    /// the writer simply passes chunks through to the inner writer unsorted.
    pub fn new(meta_data: &MetaData, chunk_writer: &'w mut W) -> SortedBlocksWriter<'w, W> {
        // sorting is only needed if at least one header requires a specific line order
        let requires_sorting = meta_data.headers.iter()
            .any(|header| header.line_order != LineOrder::Unspecified);

        let total_chunk_count = chunk_writer.total_chunks_count();

        SortedBlocksWriter {
            pending_chunks: BTreeMap::new(),
            unwritten_chunk_indices: (0 .. total_chunk_count).peekable(),
            requires_sorting,
            chunk_writer
        }
    }

    /// Write the chunk or stash it. In the closure, write all chunks that can be written now.
    pub fn write_or_stash_chunk(&mut self, chunk_index_in_file: usize, chunk_y_index: usize, chunk: Chunk) -> UnitResult {
        // without sorting, every chunk can be written immediately
        if self.requires_sorting.not() {
            return self.chunk_writer.write_chunk(chunk_y_index, chunk);
        }

        // write this chunk now if possible
        if self.unwritten_chunk_indices.peek() == Some(&chunk_index_in_file){
            self.chunk_writer.write_chunk(chunk_y_index, chunk)?;
            self.unwritten_chunk_indices.next().expect("peeked chunk index is missing");

            // write all pending blocks that are immediate successors of this block
            while let Some((next_chunk_y_index, next_chunk)) = self
                .unwritten_chunk_indices.peek().cloned()
                .and_then(|id| self.pending_chunks.remove(&id))
            {
                self.chunk_writer.write_chunk(next_chunk_y_index, next_chunk)?;
                self.unwritten_chunk_indices.next().expect("peeked chunk index is missing");
            }
        }

        else {
            // the argument block is not to be written now,
            // and all the pending blocks are not next up either,
            // so just stash this block
            self.pending_chunks.insert(chunk_index_in_file, (chunk_y_index, chunk));
        }

        Ok(())
    }

    /// Where the chunks will be written to.
    pub fn inner_chunks_writer(&self) -> &W {
        &self.chunk_writer
    }
}
|
||||
|
||||
|
||||
|
||||
/// Compress blocks to a chunk writer in this thread.
#[derive(Debug)]
#[must_use]
pub struct SequentialBlocksCompressor<'w, W> {
    // the file meta data, whose headers are needed to compress a block into a chunk
    meta: &'w MetaData,

    // the wrapped writer that receives the compressed chunks
    chunks_writer: &'w mut W,
}
|
||||
|
||||
impl<'w, W> SequentialBlocksCompressor<'w, W> where W: 'w + ChunksWriter {
|
||||
|
||||
/// New blocks writer.
|
||||
pub fn new(meta: &'w MetaData, chunks_writer: &'w mut W) -> Self { Self { meta, chunks_writer, } }
|
||||
|
||||
/// This is where the compressed blocks are written to.
|
||||
pub fn inner_chunks_writer(&'w self) -> &'w W { self.chunks_writer }
|
||||
|
||||
/// Compress a single block immediately. The index of the block must be in increasing line order.
|
||||
pub fn compress_block(&mut self, index_in_header_increasing_y: usize, block: UncompressedBlock) -> UnitResult {
|
||||
self.chunks_writer.write_chunk(
|
||||
index_in_header_increasing_y,
|
||||
block.compress_to_chunk(&self.meta.headers)?
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Compress blocks to a chunk writer with multiple threads.
#[derive(Debug)]
#[must_use]
pub struct ParallelBlocksCompressor<'w, W> {
    // the file meta data, cloned into every spawned compression task
    meta: &'w MetaData,

    // reorders chunks coming back from the worker threads into file order before writing
    sorted_writer: SortedBlocksWriter<'w, W>,

    // channel transporting compressed chunks (or a compression error) from the
    // worker threads back to this writer, as (file index, y index, chunk) triples
    sender: flume::Sender<Result<(usize, usize, Chunk)>>,
    receiver: flume::Receiver<Result<(usize, usize, Chunk)>>,
    pool: rayon_core::ThreadPool,

    // number of blocks currently queued or being compressed on the pool
    currently_compressing_count: usize,
    written_chunk_count: usize, // used to check for last chunk
    // upper bound for `currently_compressing_count` before we block and drain
    max_threads: usize,
    next_incoming_chunk_index: usize, // used to remember original chunk order
}
|
||||
|
||||
impl<'w, W> ParallelBlocksCompressor<'w, W> where W: 'w + ChunksWriter {

    /// New blocks writer. Returns none if sequential compression should be used.
    /// Use `new_with_thread_pool` to customize the threadpool.
    pub fn new(meta: &'w MetaData, chunks_writer: &'w mut W) -> Option<Self> {
        Self::new_with_thread_pool(meta, chunks_writer, ||{
            rayon_core::ThreadPoolBuilder::new()
                .thread_name(|index| format!("OpenEXR Block Compressor Thread #{}", index))
                .build()
        })
    }

    /// New blocks writer. Returns none if sequential compression should be used.
    pub fn new_with_thread_pool<CreatePool>(
        meta: &'w MetaData, chunks_writer: &'w mut W, try_create_thread_pool: CreatePool)
        -> Option<Self>
        where CreatePool: FnOnce() -> std::result::Result<ThreadPool, ThreadPoolBuildError>
    {
        // when nothing is compressed, parallelism would only add overhead
        if meta.headers.iter().all(|head|head.compression == Compression::Uncompressed) {
            return None;
        }

        // in case thread pool creation fails (for example on WASM currently),
        // we revert to sequential compression
        let pool = match try_create_thread_pool() {
            Ok(pool) => pool,

            // TODO print warning?
            Err(_) => return None,
        };

        let max_threads = pool.current_num_threads().max(1).min(chunks_writer.total_chunks_count()) + 2; // ca one block for each thread at all times
        let (send, recv) = flume::unbounded(); // TODO bounded channel simplifies logic?

        Some(Self {
            sorted_writer: SortedBlocksWriter::new(meta, chunks_writer),
            next_incoming_chunk_index: 0,
            currently_compressing_count: 0,
            written_chunk_count: 0,
            sender: send,
            receiver: recv,
            max_threads,
            pool,
            meta,
        })
    }

    /// This is where the compressed blocks are written to.
    pub fn inner_chunks_writer(&'w self) -> &'w W { self.sorted_writer.inner_chunks_writer() }

    // private, as may underflow counter in release mode
    fn write_next_queued_chunk(&mut self) -> UnitResult {
        debug_assert!(self.currently_compressing_count > 0, "cannot wait for chunks as there are none left");

        // blocks until a worker thread sends its result;
        // receiving can only fail if all senders were dropped, which cannot
        // happen while `self.sender` is alive, hence the expect
        let some_compressed_chunk = self.receiver.recv()
            .expect("cannot receive compressed block");

        self.currently_compressing_count -= 1;
        let (chunk_file_index, chunk_y_index, chunk) = some_compressed_chunk?;
        self.sorted_writer.write_or_stash_chunk(chunk_file_index, chunk_y_index, chunk)?;

        self.written_chunk_count += 1;
        Ok(())
    }

    /// Wait until all currently compressing chunks in the compressor have been written.
    pub fn write_all_queued_chunks(&mut self) -> UnitResult {
        while self.currently_compressing_count > 0 {
            self.write_next_queued_chunk()?;
        }

        debug_assert_eq!(self.currently_compressing_count, 0, "counter does not match block count");
        Ok(())
    }

    /// Add a single block to the compressor queue. The index of the block must be in increasing line order.
    /// When calling this function for the last block, this method waits until all the blocks have been written.
    /// This only works when you write as many blocks as the image expects, otherwise you can use `wait_for_all_remaining_chunks`.
    /// Waits for a block from the queue to be written, if the queue already has enough items.
    pub fn add_block_to_compression_queue(&mut self, index_in_header_increasing_y: usize, block: UncompressedBlock) -> UnitResult {

        // if pipe is full, block to wait for a slot to free up
        if self.currently_compressing_count >= self.max_threads {
            self.write_next_queued_chunk()?;
        }

        // add the argument chunk to the compression queue
        let index_in_file = self.next_incoming_chunk_index;
        let sender = self.sender.clone();
        let meta = self.meta.clone();

        self.pool.spawn(move ||{
            let compressed_or_err = block.compress_to_chunk(&meta.headers);

            // by now, decompressing could have failed in another thread.
            // the error is then already handled, so we simply
            // don't send the decompressed block and do nothing
            let _ = sender.send(compressed_or_err.map(move |compressed| (index_in_file, index_in_header_increasing_y, compressed)));
        });

        self.currently_compressing_count += 1;
        self.next_incoming_chunk_index += 1;

        // if this is the last chunk, wait for all chunks to complete before returning
        if self.written_chunk_count + self.currently_compressing_count == self.inner_chunks_writer().total_chunks_count() {
            self.write_all_queued_chunks()?;
            debug_assert_eq!(
                self.written_chunk_count, self.inner_chunks_writer().total_chunks_count(),
                "written chunk count mismatch"
            );
        }


        Ok(())
    }
}
|
||||
|
||||
|
||||
|
||||
989
vendor/exr/src/compression/b44/mod.rs
vendored
Normal file
989
vendor/exr/src/compression/b44/mod.rs
vendored
Normal file
@@ -0,0 +1,989 @@
|
||||
mod table;
|
||||
|
||||
use crate::compression::{mod_p, ByteVec};
|
||||
use crate::error::usize_to_i32;
|
||||
use crate::io::Data;
|
||||
use crate::meta::attribute::ChannelList;
|
||||
use crate::prelude::*;
|
||||
use std::cmp::min;
|
||||
use std::mem::size_of;
|
||||
use table::{EXP_TABLE, LOG_TABLE};
|
||||
use lebe::io::{ReadPrimitive, WriteEndian};
|
||||
|
||||
const BLOCK_SAMPLE_COUNT: usize = 4;
|
||||
|
||||
// As B44 compression is only use on f16 channels, we can have a conste for this value.
|
||||
const BLOCK_X_BYTE_COUNT: usize = BLOCK_SAMPLE_COUNT * size_of::<u16>();
|
||||
|
||||
#[inline]
|
||||
fn convert_from_linear(s: &mut [u16; 16]) {
|
||||
for v in s {
|
||||
*v = EXP_TABLE[*v as usize];
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn convert_to_linear(s: &mut [u16; 16]) {
|
||||
for v in s {
|
||||
*v = LOG_TABLE[*v as usize];
|
||||
}
|
||||
}
|
||||
|
||||
// Shifts `x` right by `shift` bits, rounding half-way cases to even
// (the extra doubled bit plus the parity bit `rounding` implement the tie-break).
#[inline]
fn shift_and_round(x: i32, shift: i32) -> i32 {
    let doubled = x << 1;
    let offset = (1 << shift) - 1;
    let total_shift = shift + 1;
    let rounding = (doubled >> total_shift) & 1;
    (doubled + offset + rounding) >> total_shift
}
|
||||
|
||||
/// Pack a block of 4 by 4 16-bit pixels (32 bytes, the array `s`) into either 14 or 3 bytes.
/// Returns the number of bytes written into `b` (3 or 14).
fn pack(s: [u16; 16], b: &mut [u8], optimize_flat_fields: bool, exact_max: bool) -> usize {

    let mut t = [0u16; 16];

    // Sign-flip transform: map NaN/Inf (exponent bits all set) to 0x8000,
    // negative values to their bitwise complement, and set the top bit on
    // non-negative values, so that the transformed values order monotonically.
    // `unpack14`/`unpack3` apply the inverse transform.
    for i in 0..16 {
        if (s[i] & 0x7c00) == 0x7c00 {
            t[i] = 0x8000;
        } else if (s[i] & 0x8000) != 0 {
            t[i] = !s[i];
        } else {
            t[i] = s[i] | 0x8000;
        }
    }

    let t_max = t.iter().max().unwrap();

    // Compute a set of running differences, r[0] ... r[14]:
    // Find a shift value such that after rounding off the
    // rightmost bits and shifting all differences are between
    // -32 and +31. Then bias the differences so that they
    // end up between 0 and 63.
    let mut shift = -1;
    let mut d = [0i32; 16];
    let mut r = [0i32; 15];
    let mut r_min: i32;
    let mut r_max: i32;

    const BIAS: i32 = 0x20;

    // try increasing shift amounts until every biased difference fits in 6 bits
    loop {
        shift += 1;

        // Compute absolute differences, d[0] ... d[15],
        // between t_max and t[0] ... t[15].
        //
        // Shift and round the absolute differences.
        d.iter_mut()
            .zip(&t)
            .for_each(|(d_v, t_v)| *d_v = shift_and_round((t_max - t_v).into(), shift));

        // Convert d[0] .. d[15] into running differences
        r[0] = d[0] - d[4] + BIAS;
        r[1] = d[4] - d[8] + BIAS;
        r[2] = d[8] - d[12] + BIAS;

        r[3] = d[0] - d[1] + BIAS;
        r[4] = d[4] - d[5] + BIAS;
        r[5] = d[8] - d[9] + BIAS;
        r[6] = d[12] - d[13] + BIAS;

        r[7] = d[1] - d[2] + BIAS;
        r[8] = d[5] - d[6] + BIAS;
        r[9] = d[9] - d[10] + BIAS;
        r[10] = d[13] - d[14] + BIAS;

        r[11] = d[2] - d[3] + BIAS;
        r[12] = d[6] - d[7] + BIAS;
        r[13] = d[10] - d[11] + BIAS;
        r[14] = d[14] - d[15] + BIAS;

        r_min = r[0];
        r_max = r[0];

        r.iter().copied().for_each(|v| {
            if r_min > v {
                r_min = v;
            }

            if r_max < v {
                r_max = v;
            }
        });

        // all differences fit into the 0..=0x3f range — the shift is good
        if !(r_min < 0 || r_max > 0x3f) {
            break;
        }
    }

    if r_min == BIAS && r_max == BIAS && optimize_flat_fields {
        // Special case - all pixels have the same value.
        // We encode this in 3 instead of 14 bytes by
        // storing the value 0xfc in the third output byte,
        // which cannot occur in the 14-byte encoding.
        b[0] = (t[0] >> 8) as u8;
        b[1] = t[0] as u8;
        b[2] = 0xfc;

        return 3;
    }

    if exact_max {
        // Adjust t[0] so that the pixel whose value is equal
        // to t_max gets represented as accurately as possible.
        t[0] = t_max - (d[0] << shift) as u16;
    }

    // Pack t[0], shift and r[0] ... r[14] into 14 bytes:
    // t[0] verbatim, then the 6-bit differences packed back-to-back.
    b[0] = (t[0] >> 8) as u8;
    b[1] = t[0] as u8;

    b[2] = ((shift << 2) | (r[0] >> 4)) as u8;
    b[3] = ((r[0] << 4) | (r[1] >> 2)) as u8;
    b[4] = ((r[1] << 6) | r[2]) as u8;

    b[5] = ((r[3] << 2) | (r[4] >> 4)) as u8;
    b[6] = ((r[4] << 4) | (r[5] >> 2)) as u8;
    b[7] = ((r[5] << 6) | r[6]) as u8;

    b[8] = ((r[7] << 2) | (r[8] >> 4)) as u8;
    b[9] = ((r[8] << 4) | (r[9] >> 2)) as u8;
    b[10] = ((r[9] << 6) | r[10]) as u8;

    b[11] = ((r[11] << 2) | (r[12] >> 4)) as u8;
    b[12] = ((r[12] << 4) | (r[13] >> 2)) as u8;
    b[13] = ((r[13] << 6) | r[14]) as u8;

    return 14;
}
|
||||
|
||||
// Tiny macro to simply get block array value as a u32.
// Used by the unpacking code below to avoid repeating the cast on every byte access.
macro_rules! b32 {
    ($b:expr, $i:expr) => {
        $b[$i] as u32
    };
}
|
||||
|
||||
// 0011 1111 — mask for one packed 6-bit difference
const SIX_BITS: u32 = 0x3f;

// Unpack a 14-byte block into 4 by 4 16-bit pixels.
fn unpack14(b: &[u8], s: &mut [u16; 16]) {
    debug_assert_eq!(b.len(), 14);
    // 0xfc in the third byte marks the 3-byte flat-field encoding, which is handled by `unpack3`
    debug_assert_ne!(b[2], 0xfc);

    // the first pixel is stored verbatim in the first two bytes
    s[0] = ((b32!(b, 0) << 8) | b32!(b, 1)) as u16;

    // the top 6 bits of byte 2 hold the shift amount; the bias re-centers
    // the 6-bit differences that were offset by 0x20 when packing
    let shift = b32!(b, 2) >> 2;
    let bias = 0x20 << shift;

    // s[4], s[8], s[12] each build on the previous value in steps of four
    s[4] = (s[0] as u32 + ((((b32!(b, 2) << 4) | (b32!(b, 3) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[8] = (s[4] as u32 + ((((b32!(b, 3) << 2) | (b32!(b, 4) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[12] = (s[8] as u32 + ((b32!(b, 4) & SIX_BITS) << shift) - bias) as u16;

    // the remaining pixels each build on their left neighbour in the 4x4 layout
    s[1] = (s[0] as u32 + ((b32!(b, 5) >> 2) << shift) - bias) as u16;
    s[5] = (s[4] as u32 + ((((b32!(b, 5) << 4) | (b32!(b, 6) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[9] = (s[8] as u32 + ((((b32!(b, 6) << 2) | (b32!(b, 7) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[13] = (s[12] as u32 + ((b32!(b, 7) & SIX_BITS) << shift) - bias) as u16;

    s[2] = (s[1] as u32 + ((b32!(b, 8) >> 2) << shift) - bias) as u16;
    s[6] = (s[5] as u32 + ((((b32!(b, 8) << 4) | (b32!(b, 9) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[10] = (s[9] as u32 + ((((b32!(b, 9) << 2) | (b32!(b, 10) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[14] = (s[13] as u32 + ((b32!(b, 10) & SIX_BITS) << shift) - bias) as u16;

    s[3] = (s[2] as u32 + ((b32!(b, 11) >> 2) << shift) - bias) as u16;
    s[7] = (s[6] as u32 + ((((b32!(b, 11) << 4) | (b32!(b, 12) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[11] = (s[10] as u32 + ((((b32!(b, 12) << 2) | (b32!(b, 13) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[15] = (s[14] as u32 + ((b32!(b, 13) & SIX_BITS) << shift) - bias) as u16;

    // undo the sign-flip transform that was applied when packing
    for i in 0..16 {
        if (s[i] & 0x8000) != 0 {
            s[i] &= 0x7fff;
        } else {
            s[i] = !s[i];
        }
    }
}
|
||||
|
||||
// Unpack a 3-byte block `b` into 4 by 4 identical 16-bit pixels in `s` array.
fn unpack3(b: &[u8], s: &mut [u16; 16]) {
    // this assertion panics for fuzzed images.
    // assuming this debug assertion is an overly strict check to catch potential compression errors.
    // disabling because it panics when fuzzed.
    // when commenting out, it simply works (maybe it should return an error instead?).
    // debug_assert_eq!(b[2], 0xfc);

    // reassemble the 16-bit value stored in the first two block bytes
    let raw = (((b[0] as u32) << 8) | (b[1] as u32)) as u16;

    // undo the sign-flip transform that was applied when packing
    let value = if (raw & 0x8000) != 0 { raw & 0x7fff } else { !raw };

    s.fill(value); // all 16 pixels share the same value
}
|
||||
|
||||
/// Per-channel bookkeeping shared by `compress` and `decompress`.
/// Locates a channel's samples inside the temporary channel-by-channel
/// buffer while pixel data is being (de)interleaved.
#[derive(Debug)]
struct ChannelData {
    /// First byte of this channel's region in the temporary buffer.
    tmp_start_index: usize,

    /// Cursor into this channel's region; starts equal to
    /// `tmp_start_index` and is advanced line by line during interleaving.
    tmp_end_index: usize,

    /// Resolution of this channel after subsampling, in samples.
    resolution: Vec2<usize>,

    /// Vertical sampling rate; scan lines are skipped when
    /// `y % y_sampling != 0`.
    y_sampling: usize,

    /// Sample type of the channel (only F16 is actually B44-compressed).
    sample_type: SampleType,

    /// Whether the channel data should pass through the linear/non-linear
    /// conversion before packing and after unpacking.
    quantize_linearly: bool,

    /// Number of samples per pixel (product of the sampling rates).
    samples_per_pixel: usize,
}
|
||||
|
||||
// TODO: Unsafe seems to be required to efficiently copy whole slice of u16 ot u8. For now, we use
|
||||
// a less efficient, yet safe, implementation.
|
||||
#[inline]
|
||||
fn memcpy_u16_to_u8(src: &[u16], mut dst: &mut [u8]) {
|
||||
use lebe::prelude::*;
|
||||
dst.write_as_native_endian(src).expect("byte copy error");
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn memcpy_u8_to_u16(mut src: &[u8], dst: &mut [u16]) {
|
||||
use lebe::prelude::*;
|
||||
src.read_from_native_endian_into(dst).expect("byte copy error");
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn cpy_u8(src: &[u16], src_i: usize, dst: &mut [u8], dst_i: usize, n: usize) {
|
||||
memcpy_u16_to_u8(&src[src_i..src_i + n], &mut dst[dst_i..dst_i + 2 * n]);
|
||||
}
|
||||
|
||||
/// Decompress a B44/B44A-compressed block of pixel data.
///
/// F16 channels are stored as packed 4x4 sample blocks (14 bytes for a
/// regular block, 3 bytes for a flat block); u32 and f32 channels are
/// stored uncompressed. The compressed stream is laid out channel by
/// channel, so after unpacking into a temporary buffer, the scan lines
/// of all channels are interleaved into the returned byte vector.
pub fn decompress(
    channels: &ChannelList,
    compressed: ByteVec,
    rectangle: IntegerBounds,
    expected_byte_size: usize,
    _pedantic: bool,
) -> Result<ByteVec> {
    debug_assert_eq!(
        expected_byte_size,
        rectangle.size.area() * channels.bytes_per_pixel,
        "expected byte size does not match header" // TODO compute instead of passing argument?
    );

    debug_assert!(!channels.list.is_empty(), "no channels found");

    // An empty input decompresses to an empty output.
    if compressed.is_empty() {
        return Ok(Vec::new());
    }

    // Extract channel information needed for decompression.
    let mut channel_data: Vec<ChannelData> = Vec::with_capacity(channels.list.len());
    let mut tmp_read_index = 0;

    for channel in channels.list.iter() {
        let channel = ChannelData {
            tmp_start_index: tmp_read_index,
            tmp_end_index: tmp_read_index,
            resolution: channel.subsampled_resolution(rectangle.size),
            y_sampling: channel.sampling.y(),
            sample_type: channel.sample_type,
            quantize_linearly: channel.quantize_linearly,
            samples_per_pixel: channel.sampling.area(),
        };

        // Each channel occupies a contiguous region of the temporary buffer,
        // placed directly after the previous channel's region.
        tmp_read_index += channel.resolution.area()
            * channel.samples_per_pixel
            * channel.sample_type.bytes_per_sample();

        channel_data.push(channel);
    }

    // Temporary buffer is used to decompress B44 data the way it is stored in the compressed
    // buffer (channel by channel). We interleave the final result later.
    let mut tmp = Vec::with_capacity(expected_byte_size);

    // Index in the compressed buffer.
    let mut in_i = 0usize;

    // Bytes of the compressed buffer not consumed yet; kept in sync with `in_i`.
    let mut remaining = compressed.len();

    for channel in &channel_data {

        debug_assert_eq!(remaining, compressed.len()-in_i);

        // Compute information for current channel.
        let sample_count = channel.resolution.area() * channel.samples_per_pixel;
        let byte_count = sample_count * channel.sample_type.bytes_per_sample();

        // Sample types that do not support B44 compression (u32 and f32) are raw copied.
        // In this branch, "compressed" array is actually raw, uncompressed data.
        if channel.sample_type != SampleType::F16 {

            debug_assert_eq!(channel.sample_type.bytes_per_sample(), 4);

            if remaining < byte_count {
                return Err(Error::invalid("not enough data"));
            }

            tmp.extend_from_slice(&compressed[in_i..(in_i + byte_count)]);

            in_i += byte_count;
            remaining -= byte_count;

            continue;
        }

        // HALF channel
        // The rest of the code assumes we are manipulating u16 (2 bytes) values.
        debug_assert_eq!(channel.sample_type, SampleType::F16);
        debug_assert_eq!(channel.sample_type.bytes_per_sample(), size_of::<u16>());

        // Grow the buffer to receive this channel's uncompressed data.
        tmp.resize(tmp.len() + byte_count, 0);

        let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
        let y_sample_count = channel.resolution.y() * channel.samples_per_pixel;

        let bytes_per_sample = size_of::<u16>();

        let x_byte_count = x_sample_count * bytes_per_sample;
        let cd_start = channel.tmp_start_index;

        for y in (0..y_sample_count).step_by(BLOCK_SAMPLE_COUNT) {
            // Compute index in output (decompressed) buffer. We have 4 rows, because we will
            // uncompress 4 by 4 data blocks.
            let mut row0 = cd_start + y * x_byte_count;
            let mut row1 = row0 + x_byte_count;
            let mut row2 = row1 + x_byte_count;
            let mut row3 = row2 + x_byte_count;

            // Move in pixel x line, 4 by 4.
            for x in (0..x_sample_count).step_by(BLOCK_SAMPLE_COUNT) {

                // Extract the 4 by 4 block of 16-bit floats from the compressed buffer.
                let mut s = [0u16; 16];

                if remaining < 3 {
                    return Err(Error::invalid("not enough data"));
                }

                // A third byte of `(13 << 2)` or more marks a 3-byte flat block
                // (all 16 pixels identical); otherwise this is a regular 14-byte block.
                if compressed[in_i + 2] >= (13 << 2) {
                    // NOTE(review): this check is redundant — `remaining >= 3`
                    // was already verified just above.
                    if remaining < 3 {
                        return Err(Error::invalid("not enough data"));
                    }

                    unpack3(&compressed[in_i..(in_i + 3)], &mut s);

                    in_i += 3;
                    remaining -= 3;
                } else {
                    if remaining < 14 {
                        return Err(Error::invalid("not enough data"));
                    }

                    unpack14(&compressed[in_i..(in_i + 14)], &mut s);

                    in_i += 14;
                    remaining -= 14;
                }

                if channel.quantize_linearly {
                    convert_to_linear(&mut s);
                }

                // Get resting samples from the line to copy in temp buffer (without going outside channel).
                let x_resting_sample_count = match x + 3 < x_sample_count {
                    true => BLOCK_SAMPLE_COUNT,
                    false => x_sample_count - x,
                };

                debug_assert!(x_resting_sample_count > 0);
                debug_assert!(x_resting_sample_count <= BLOCK_SAMPLE_COUNT);

                // Copy rows (without going outside channel).
                if y + 3 < y_sample_count {
                    // All four rows of the block fit inside the channel.
                    cpy_u8(&s, 0, &mut tmp, row0, x_resting_sample_count);
                    cpy_u8(&s, 4, &mut tmp, row1, x_resting_sample_count);
                    cpy_u8(&s, 8, &mut tmp, row2, x_resting_sample_count);
                    cpy_u8(&s, 12, &mut tmp, row3, x_resting_sample_count);
                } else {
                    // Bottom edge: only copy the rows that actually exist.
                    debug_assert!(y < y_sample_count);

                    cpy_u8(&s, 0, &mut tmp, row0, x_resting_sample_count);

                    if y + 1 < y_sample_count {
                        cpy_u8(&s, 4, &mut tmp, row1, x_resting_sample_count);
                    }

                    if y + 2 < y_sample_count {
                        cpy_u8(&s, 8, &mut tmp, row2, x_resting_sample_count);
                    }
                }

                // Update row's array index to 4 next pixels.
                row0 += BLOCK_X_BYTE_COUNT;
                row1 += BLOCK_X_BYTE_COUNT;
                row2 += BLOCK_X_BYTE_COUNT;
                row3 += BLOCK_X_BYTE_COUNT;
            }
        }
    }

    debug_assert_eq!(tmp.len(), expected_byte_size);

    // Interleave uncompressed channel data: walk the scan lines in order and
    // append one line of every (sampled) channel per y coordinate.
    let mut out = Vec::with_capacity(expected_byte_size);

    for y in rectangle.position.y()..rectangle.end().y() {
        for channel in &mut channel_data {
            // Skip channels that are vertically subsampled at this line.
            if mod_p(y, usize_to_i32(channel.y_sampling)) != 0 {
                continue;
            }

            // Find data location in temporary buffer.
            let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
            let bytes_per_line = x_sample_count * channel.sample_type.bytes_per_sample();
            let next_tmp_end_index = channel.tmp_end_index + bytes_per_line;
            let channel_bytes = &tmp[channel.tmp_end_index..next_tmp_end_index];

            channel.tmp_end_index = next_tmp_end_index;

            // TODO do not convert endianness for f16-only images
            // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
            // We can support uncompressed data in the machine's native format
            // if all image channels are of type HALF, and if the Xdr and the
            // native representations of a half have the same size.

            if channel.sample_type == SampleType::F16 {
                // TODO simplify this and make it memcpy on little endian systems
                // https://github.com/AcademySoftwareFoundation/openexr/blob/a03aca31fa1ce85d3f28627dbb3e5ded9494724a/src/lib/OpenEXR/ImfB44Compressor.cpp#L943
                for mut f16_bytes in channel_bytes.chunks(std::mem::size_of::<f16>()) {
                    let native_endian_f16_bits = u16::read_from_little_endian(&mut f16_bytes).expect("memory read failed");
                    out.write_as_native_endian(&native_endian_f16_bits).expect("memory write failed");
                }
            }
            else {
                u8::write_slice(&mut out, channel_bytes)
                    .expect("write to in-memory failed");
            }
        }
    }

    // After interleaving, each channel's cursor must have reached the start
    // of the next channel's region.
    for index in 1..channel_data.len() {
        debug_assert_eq!(
            channel_data[index - 1].tmp_end_index,
            channel_data[index].tmp_start_index
        );
    }

    debug_assert_eq!(out.len(), expected_byte_size);

    // TODO do not convert endianness for f16-only images
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    Ok(super::convert_little_endian_to_current(out, channels, rectangle))
}
|
||||
|
||||
/// Compress a block of pixel data with the B44/B44A scheme.
///
/// Only f16 channels are actually compressed: each 4x4 sample block
/// (padded at the right/bottom edges by repeating the last column/row)
/// is packed into 14 bytes, or 3 bytes for flat blocks when
/// `optimize_flat_fields` is set (the B44A variant). u32 and f32
/// channels are copied verbatim.
pub fn compress(
    channels: &ChannelList,
    uncompressed: ByteVec,
    rectangle: IntegerBounds,
    optimize_flat_fields: bool,
) -> Result<ByteVec> {
    if uncompressed.is_empty() {
        return Ok(Vec::new());
    }

    // TODO do not convert endianness for f16-only images
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    let uncompressed = super::convert_current_to_little_endian(uncompressed, channels, rectangle);
    let uncompressed = uncompressed.as_slice(); // TODO no alloc

    let mut channel_data = Vec::new();

    // Assign each channel a contiguous region of the temporary buffer.
    let mut tmp_end_index = 0;
    for channel in &channels.list {
        let number_samples = channel.subsampled_resolution(rectangle.size);

        let sample_count = channel.subsampled_resolution(rectangle.size).area();
        let byte_count = sample_count * channel.sample_type.bytes_per_sample();

        let channel = ChannelData {
            tmp_start_index: tmp_end_index,
            tmp_end_index,
            y_sampling: channel.sampling.y(),
            resolution: number_samples,
            sample_type: channel.sample_type,
            quantize_linearly: channel.quantize_linearly,
            samples_per_pixel: channel.sampling.area(),
        };

        tmp_end_index += byte_count;
        channel_data.push(channel);
    }

    let mut tmp = vec![0_u8; uncompressed.len()];

    debug_assert_eq!(tmp_end_index, tmp.len());

    let mut remaining_uncompressed_bytes = uncompressed;

    // De-interleave: split the scan-line-ordered input into one contiguous
    // region per channel inside `tmp`.
    for y in rectangle.position.y()..rectangle.end().y() {
        for channel in &mut channel_data {
            // Skip channels that are vertically subsampled at this line.
            if mod_p(y, usize_to_i32(channel.y_sampling)) != 0 {
                continue;
            }

            let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
            let bytes_per_line = x_sample_count * channel.sample_type.bytes_per_sample();
            let next_tmp_end_index = channel.tmp_end_index + bytes_per_line;
            let target = &mut tmp[channel.tmp_end_index..next_tmp_end_index];

            channel.tmp_end_index = next_tmp_end_index;

            // TODO do not convert endianness for f16-only images
            // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
            // We can support uncompressed data in the machine's native format
            // if all image channels are of type HALF, and if the Xdr and the
            // native representations of a half have the same size.

            if channel.sample_type == SampleType::F16 {

                // TODO simplify this and make it memcpy on little endian systems
                // https://github.com/AcademySoftwareFoundation/openexr/blob/a03aca31fa1ce85d3f28627dbb3e5ded9494724a/src/lib/OpenEXR/ImfB44Compressor.cpp#L640

                for mut out_f16_bytes in target.chunks_mut(2) {
                    let native_endian_f16_bits = u16::read_from_native_endian(&mut remaining_uncompressed_bytes).expect("memory read failed");
                    out_f16_bytes.write_as_little_endian(&native_endian_f16_bits).expect("memory write failed");
                }
            }
            else {
                u8::read_slice(&mut remaining_uncompressed_bytes, target)
                    .expect("in-memory read failed");
            }
        }
    }

    // Generate a whole buffer that we will crop to proper size once compression is done.
    // NOTE(review): for extreme aspect ratios (e.g. width 1), padded 4x4
    // blocks pack 8 input bytes into 14 output bytes, which could exceed
    // `uncompressed.len()`; the 2048-byte floor covers small images, but
    // this bound looks worth double-checking against upstream OpenEXR.
    let mut b44_compressed = vec![0; std::cmp::max(2048, uncompressed.len())];
    let mut b44_end = 0; // Buffer byte index for storing next compressed values.

    for channel in &channel_data {
        // U32 and F32 channels are raw copied.
        if channel.sample_type != SampleType::F16 {

            debug_assert_eq!(channel.sample_type.bytes_per_sample(), 4);

            // Raw byte copy.
            let slice = &tmp[channel.tmp_start_index..channel.tmp_end_index];
            slice.iter().copied().for_each(|b| {
                b44_compressed[b44_end] = b;
                b44_end += 1;
            });

            continue;
        }

        // HALF channel
        debug_assert_eq!(channel.sample_type, SampleType::F16);
        debug_assert_eq!(channel.sample_type.bytes_per_sample(), size_of::<u16>());

        let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
        let y_sample_count = channel.resolution.y() * channel.samples_per_pixel;

        let x_byte_count = x_sample_count * size_of::<u16>();
        let cd_start = channel.tmp_start_index;

        for y in (0..y_sample_count).step_by(BLOCK_SAMPLE_COUNT) {
            //
            // Copy the next 4x4 pixel block into array s.
            // If the width, cd.nx, or the height, cd.ny, of
            // the pixel data in _tmpBuffer is not divisible
            // by 4, then pad the data by repeating the
            // rightmost column and the bottom row.
            //

            // Compute row index in temp buffer.
            let mut row0 = cd_start + y * x_byte_count;
            let mut row1 = row0 + x_byte_count;
            let mut row2 = row1 + x_byte_count;
            let mut row3 = row2 + x_byte_count;

            // Bottom edge: repeat the last existing row into the missing ones.
            if y + 3 >= y_sample_count {
                if y + 1 >= y_sample_count {
                    row1 = row0;
                }

                if y + 2 >= y_sample_count {
                    row2 = row1;
                }

                row3 = row2;
            }

            for x in (0..x_sample_count).step_by(BLOCK_SAMPLE_COUNT) {
                let mut s = [0u16; 16];

                // Right edge: repeat the last existing column into the missing ones.
                if x + 3 >= x_sample_count {
                    let n = x_sample_count - x;

                    for i in 0..BLOCK_SAMPLE_COUNT {
                        // Clamp the column index so out-of-range samples
                        // repeat the rightmost real column.
                        let j = min(i, n - 1) * 2;

                        // TODO: Make [u8; 2] to u16 fast.
                        s[i + 0] = u16::from_ne_bytes([tmp[row0 + j], tmp[row0 + j + 1]]);
                        s[i + 4] = u16::from_ne_bytes([tmp[row1 + j], tmp[row1 + j + 1]]);
                        s[i + 8] = u16::from_ne_bytes([tmp[row2 + j], tmp[row2 + j + 1]]);
                        s[i + 12] = u16::from_ne_bytes([tmp[row3 + j], tmp[row3 + j + 1]]);
                    }
                } else {
                    memcpy_u8_to_u16(&tmp[row0..(row0 + BLOCK_X_BYTE_COUNT)], &mut s[0..4]);
                    memcpy_u8_to_u16(&tmp[row1..(row1 + BLOCK_X_BYTE_COUNT)], &mut s[4..8]);
                    memcpy_u8_to_u16(&tmp[row2..(row2 + BLOCK_X_BYTE_COUNT)], &mut s[8..12]);
                    memcpy_u8_to_u16(&tmp[row3..(row3 + BLOCK_X_BYTE_COUNT)], &mut s[12..16]);
                }

                // Move to next block.
                row0 += BLOCK_X_BYTE_COUNT;
                row1 += BLOCK_X_BYTE_COUNT;
                row2 += BLOCK_X_BYTE_COUNT;
                row3 += BLOCK_X_BYTE_COUNT;

                // Compress the contents of array `s` and append the results to the output buffer.
                if channel.quantize_linearly {
                    convert_from_linear(&mut s);
                }

                // `pack` returns the number of bytes actually written (3 or 14).
                b44_end += pack(
                    s,
                    &mut b44_compressed[b44_end..(b44_end + 14)],
                    optimize_flat_fields,
                    !channel.quantize_linearly,
                );
            }
        }
    }

    // Crop the over-allocated buffer down to the bytes actually written.
    b44_compressed.resize(b44_end, 0);

    Ok(b44_compressed)
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::compression::b44;
    use crate::compression::b44::{convert_from_linear, convert_to_linear};
    use crate::compression::ByteVec;
    use crate::image::validate_results::ValidateResult;
    use crate::meta::attribute::ChannelList;
    use crate::prelude::f16;
    use crate::prelude::*;

    /// A channel description of the given sample type with a default name,
    /// no linear quantization, and no subsampling.
    fn simple_channel(sample_type: SampleType) -> ChannelDescription {
        ChannelDescription {
            sample_type,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        }
    }

    /// A channel list of two identical channels of the given sample type.
    fn two_similar_channels(sample_type: SampleType) -> ChannelList {
        let channel = simple_channel(sample_type);
        ChannelList::new(smallvec![channel.clone(), channel])
    }

    /// A fairly large block with a non-trivial (partially negative) position.
    fn large_rectangle() -> IntegerBounds {
        IntegerBounds {
            position: Vec2(-30, 100),
            size: Vec2(322, 731),
        }
    }

    /// A block smaller than a single 4x4 B44 block.
    fn tiny_rectangle() -> IntegerBounds {
        IntegerBounds {
            position: Vec2(0, 0),
            size: Vec2(3, 2),
        }
    }

    #[test]
    fn test_convert_from_to_linear() {
        // Create two identical arrays with random floats.
        let mut s1 = [0u16; 16];

        for i in 0..16 {
            s1[i] = f16::from_f32(rand::random::<f32>()).to_bits();
        }

        let s2 = s1.clone();

        // Apply two reversible conversions.
        convert_from_linear(&mut s1);
        convert_to_linear(&mut s1);

        // And check the roundtrip stayed close to the original values.
        for (u1, u2) in s1.iter().zip(&s2) {
            let f1 = f16::from_bits(*u1).to_f64();
            let f2 = f16::from_bits(*u2).to_f64();
            assert!((f1 - f2).abs() < 0.01);
        }
    }

    /// Compress and decompress random pixel bytes for the given channels and
    /// bounds, returning `(original, compressed, decompressed)` byte vectors.
    fn test_roundtrip_noise_with(
        channels: ChannelList,
        rectangle: IntegerBounds,
    ) -> (ByteVec, ByteVec, ByteVec) {
        let byte_count = channels
            .list
            .iter()
            .map(|c| {
                c.subsampled_resolution(rectangle.size).area() * c.sample_type.bytes_per_sample()
            })
            .sum();

        assert!(byte_count > 0);

        let pixel_bytes: ByteVec = (0..byte_count).map(|_| rand::random()).collect();

        assert_eq!(pixel_bytes.len(), byte_count);

        let compressed = b44::compress(&channels, pixel_bytes.clone(), rectangle, true).unwrap();

        let decompressed =
            b44::decompress(&channels, compressed.clone(), rectangle, pixel_bytes.len(), true).unwrap();

        assert_eq!(decompressed.len(), pixel_bytes.len());

        (pixel_bytes, compressed, decompressed)
    }

    #[test]
    fn roundtrip_noise_f16() {
        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(two_similar_channels(SampleType::F16), large_rectangle());

        // On my tests, B44 give a size of 44.08% the original data (this assert implies enough
        // pixels to be relevant).
        assert_eq!(pixel_bytes.len(), 941528);
        assert_eq!(compressed.len(), 415044);
        assert_eq!(decompressed.len(), 941528);
    }

    #[test]
    fn roundtrip_noise_f16_tiny() {
        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(two_similar_channels(SampleType::F16), tiny_rectangle());

        // B44 being 4 by 4 block, compression is less efficient for tiny images.
        assert_eq!(pixel_bytes.len(), 24);
        assert_eq!(compressed.len(), 28);
        assert_eq!(decompressed.len(), 24);
    }

    #[test]
    fn roundtrip_noise_f32() {
        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(two_similar_channels(SampleType::F32), large_rectangle());

        // F32 channels are not B44-compressed, so the roundtrip is lossless.
        assert_eq!(pixel_bytes.len(), 1883056);
        assert_eq!(compressed.len(), 1883056);
        assert_eq!(decompressed.len(), 1883056);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_f32_tiny() {
        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(two_similar_channels(SampleType::F32), tiny_rectangle());

        assert_eq!(pixel_bytes.len(), 48);
        assert_eq!(compressed.len(), 48);
        assert_eq!(decompressed.len(), 48);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_u32() {
        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(two_similar_channels(SampleType::U32), large_rectangle());

        // U32 channels are not B44-compressed, so the roundtrip is lossless.
        assert_eq!(pixel_bytes.len(), 1883056);
        assert_eq!(compressed.len(), 1883056);
        assert_eq!(decompressed.len(), 1883056);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_u32_tiny() {
        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(two_similar_channels(SampleType::U32), tiny_rectangle());

        assert_eq!(pixel_bytes.len(), 48);
        assert_eq!(compressed.len(), 48);
        assert_eq!(decompressed.len(), 48);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_mix_f32_f16_u32() {
        let channels = ChannelList::new(smallvec![
            simple_channel(SampleType::F32),
            simple_channel(SampleType::F16),
            simple_channel(SampleType::U32),
        ]);

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, large_rectangle());

        assert_eq!(pixel_bytes.len(), 2353820);
        assert_eq!(compressed.len(), 2090578);
        assert_eq!(decompressed.len(), 2353820);
    }

    #[test]
    fn roundtrip_noise_mix_f32_f16_u32_tiny() {
        let channels = ChannelList::new(smallvec![
            simple_channel(SampleType::F32),
            simple_channel(SampleType::F16),
            simple_channel(SampleType::U32),
        ]);

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, tiny_rectangle());

        assert_eq!(pixel_bytes.len(), 60);
        assert_eq!(compressed.len(), 62);
        assert_eq!(decompressed.len(), 60);
    }

    #[test]
    fn border_on_multiview() {
        // This test is hard to reproduce, so we use the direct image.
        let path = "tests/images/valid/openexr/MultiView/Adjuster.exr";

        let read_image = read()
            .no_deep_data()
            .all_resolution_levels()
            .all_channels()
            .all_layers()
            .all_attributes()
            .non_parallel();

        let image = read_image.clone().from_file(path).unwrap();

        let mut tmp_bytes = Vec::new();
        image
            .write()
            .non_parallel()
            .to_buffered(std::io::Cursor::new(&mut tmp_bytes))
            .unwrap();

        let image2 = read_image
            .from_buffered(std::io::Cursor::new(tmp_bytes))
            .unwrap();

        image.assert_equals_result(&image2);
    }
}
|
||||
10
vendor/exr/src/compression/b44/table.rs
vendored
Normal file
10
vendor/exr/src/compression/b44/table.rs
vendored
Normal file
File diff suppressed because one or more lines are too long
666
vendor/exr/src/compression/mod.rs
vendored
Normal file
666
vendor/exr/src/compression/mod.rs
vendored
Normal file
@@ -0,0 +1,666 @@
|
||||
|
||||
//! Contains the compression attribute definition
|
||||
//! and methods to compress and decompress data.
|
||||
|
||||
|
||||
// private modules make non-breaking changes easier
|
||||
mod zip;
|
||||
mod rle;
|
||||
mod piz;
|
||||
mod pxr24;
|
||||
mod b44;
|
||||
|
||||
|
||||
use std::convert::TryInto;
|
||||
use std::mem::size_of;
|
||||
use half::f16;
|
||||
use crate::meta::attribute::{IntegerBounds, SampleType, ChannelList};
|
||||
use crate::error::{Result, Error, usize_to_i32};
|
||||
use crate::meta::header::Header;
|
||||
|
||||
|
||||
/// A byte vector.
|
||||
pub type ByteVec = Vec<u8>;
|
||||
|
||||
/// A byte slice.
|
||||
pub type Bytes<'s> = &'s [u8];
|
||||
|
||||
/// Specifies which compression method to use.
|
||||
/// Use uncompressed data for fastest loading and writing speeds.
|
||||
/// Use RLE compression for fast loading and writing with slight memory savings.
|
||||
/// Use ZIP compression for slow processing with large memory savings.
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum Compression {
|
||||
|
||||
/// Store uncompressed values.
|
||||
/// Produces large files that can be read and written very quickly.
|
||||
/// Consider using RLE instead, as it provides some compression with almost equivalent speed.
|
||||
Uncompressed,
|
||||
|
||||
/// Produces slightly smaller files
|
||||
/// that can still be read and written rather quickly.
|
||||
/// The compressed file size is usually between 60 and 75 percent of the uncompressed size.
|
||||
/// Works best for images with large flat areas, such as masks and abstract graphics.
|
||||
/// This compression method is lossless.
|
||||
RLE,
|
||||
|
||||
/// Uses ZIP compression to compress each line. Slowly produces small images
|
||||
/// which can be read with moderate speed. This compression method is lossless.
|
||||
/// Might be slightly faster but larger than `ZIP16`.
|
||||
ZIP1, // TODO ZIP { individual_lines: bool, compression_level: Option<u8> } // TODO specify zip compression level?
|
||||
|
||||
/// Uses ZIP compression to compress blocks of 16 lines. Slowly produces small images
|
||||
/// which can be read with moderate speed. This compression method is lossless.
|
||||
/// Might be slightly slower but smaller than `ZIP1`.
|
||||
ZIP16, // TODO collapse with ZIP1
|
||||
|
||||
/// PIZ compression works well for noisy and natural images. Works better with larger tiles.
|
||||
/// Only supported for flat images, but not for deep data.
|
||||
/// This compression method is lossless.
|
||||
// A wavelet transform is applied to the pixel data, and the result is Huffman-
|
||||
// encoded. This scheme tends to provide the best compression ratio for the types of
|
||||
// images that are typically processed at Industrial Light & Magic. Files are
|
||||
// compressed and decompressed at roughly the same speed. For photographic
|
||||
// images with film grain, the files are reduced to between 35 and 55 percent of their
|
||||
// uncompressed size.
|
||||
// PIZ compression works well for scan-line based files, and also for tiled files with
|
||||
// large tiles, but small tiles do not shrink much. (PIZ-compressed data start with a
|
||||
// relatively long header; if the input to the compressor is short, adding the header
|
||||
// tends to offset any size reduction of the input.)
|
||||
PIZ,
|
||||
|
||||
/// Like `ZIP1`, but reduces precision of `f32` images to `f24`.
|
||||
/// Therefore, this is lossless compression for `f16` and `u32` data, lossy compression for `f32` data.
|
||||
/// This compression method works well for depth
|
||||
/// buffers and similar images, where the possible range of values is very large, but
|
||||
/// where full 32-bit floating-point accuracy is not necessary. Rounding improves
|
||||
/// compression significantly by eliminating the pixels' 8 least significant bits, which
|
||||
/// tend to be very noisy, and therefore difficult to compress.
|
||||
/// This produces really small image files. Only supported for flat images, not for deep data.
|
||||
// After reducing 32-bit floating-point data to 24 bits by rounding (while leaving 16-bit
|
||||
// floating-point data unchanged), differences between horizontally adjacent pixels
|
||||
// are compressed with zlib, similar to ZIP. PXR24 compression preserves image
|
||||
// channels of type HALF and UINT exactly, but the relative error of FLOAT data
|
||||
// increases to about ???.
|
||||
PXR24, // TODO specify zip compression level?
|
||||
|
||||
/// This is a lossy compression method for f16 images.
|
||||
/// It's the predecessor of the `B44A` compression,
|
||||
/// which has improved compression rates for uniformly colored areas.
|
||||
/// You should probably use `B44A` instead of the plain `B44`.
|
||||
///
|
||||
/// Only supported for flat images, not for deep data.
|
||||
// lossy 4-by-4 pixel block compression,
|
||||
// flat fields are compressed more
|
||||
// Channels of type HALF are split into blocks of four by four pixels or 32 bytes. Each
|
||||
// block is then packed into 14 bytes, reducing the data to 44 percent of their
|
||||
// uncompressed size. When B44 compression is applied to RGB images in
|
||||
// combination with luminance/chroma encoding (see below), the size of the
|
||||
// compressed pixels is about 22 percent of the size of the original RGB data.
|
||||
// Channels of type UINT or FLOAT are not compressed.
|
||||
// Decoding is fast enough to allow real-time playback of B44-compressed OpenEXR
|
||||
// image sequences on commodity hardware.
|
||||
// The size of a B44-compressed file depends on the number of pixels in the image,
|
||||
// but not on the data in the pixels. All images with the same resolution and the same
|
||||
// set of channels have the same size. This can be advantageous for systems that
|
||||
// support real-time playback of image sequences; the predictable file size makes it
|
||||
// easier to allocate space on storage media efficiently.
|
||||
// B44 compression is only supported for flat images.
|
||||
B44, // TODO B44 { optimize_uniform_areas: bool }
|
||||
|
||||
/// This is a lossy compression method for f16 images.
|
||||
/// All f32 and u32 channels will be stored without compression.
|
||||
/// All the f16 pixels are divided into 4x4 blocks.
|
||||
/// Each block is then compressed as a whole.
|
||||
///
|
||||
/// The 32 bytes of a block will require only ~14 bytes after compression,
|
||||
/// independent of the actual pixel contents. With chroma subsampling,
|
||||
/// a block will be compressed to ~7 bytes.
|
||||
/// Uniformly colored blocks will be compressed to ~3 bytes.
|
||||
///
|
||||
/// The 512 bytes of an f32 block will not be compressed at all.
|
||||
///
|
||||
/// Should be fast enough for realtime playback.
|
||||
/// Only supported for flat images, not for deep data.
|
||||
B44A, // TODO collapse with B44
|
||||
|
||||
/// __This lossy compression is not yet supported by this implementation.__
|
||||
// lossy DCT based compression, in blocks
|
||||
// of 32 scanlines. More efficient for partial buffer access.
|
||||
DWAA(Option<f32>), // TODO does this have a default value? make this non optional? default Compression Level setting is 45.0
|
||||
|
||||
/// __This lossy compression is not yet supported by this implementation.__
|
||||
// lossy DCT based compression, in blocks
|
||||
// of 256 scanlines. More efficient space
|
||||
// wise and faster to decode full frames
|
||||
// than DWAA_COMPRESSION.
|
||||
DWAB(Option<f32>), // TODO collapse with B44. default Compression Level setting is 45.0
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Compression {
|
||||
fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(formatter, "{} compression", match self {
|
||||
Compression::Uncompressed => "no",
|
||||
Compression::RLE => "rle",
|
||||
Compression::ZIP1 => "zip line",
|
||||
Compression::ZIP16 => "zip block",
|
||||
Compression::B44 => "b44",
|
||||
Compression::B44A => "b44a",
|
||||
Compression::DWAA(_) => "dwaa",
|
||||
Compression::DWAB(_) => "dwab",
|
||||
Compression::PIZ => "piz",
|
||||
Compression::PXR24 => "pxr24",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
impl Compression {

    /// Compress the image section of bytes.
    ///
    /// Takes the raw native-endian pixel bytes of one block and returns the
    /// bytes to be written to the file. If the compressed result would be
    /// larger than the raw input, the raw (little-endian-converted) bytes are
    /// returned instead — readers detect this by comparing the stored size
    /// with the expected raw size (see `decompress_image_section`).
    pub fn compress_image_section(self, header: &Header, uncompressed_native_endian: ByteVec, pixel_section: IntegerBounds) -> Result<ByteVec> {
        let max_tile_size = header.max_block_pixel_size();

        // these are internal invariants, not user errors, hence assertions
        assert!(pixel_section.validate(Some(max_tile_size)).is_ok(), "decompress tile coordinate bug");
        if header.deep { assert!(self.supports_deep_data()) }

        use self::Compression::*;
        let compressed_little_endian = match self {
            Uncompressed => {
                // no compression step at all; only the endianness conversion is required
                return Ok(convert_current_to_little_endian(
                    uncompressed_native_endian, &header.channels, pixel_section
                ))
            },

            // we need to clone here, because we might have to fallback to the uncompressed data later (when compressed data is larger than raw data)
            ZIP16 => zip::compress_bytes(&header.channels, uncompressed_native_endian.clone(), pixel_section),
            ZIP1 => zip::compress_bytes(&header.channels, uncompressed_native_endian.clone(), pixel_section),
            RLE => rle::compress_bytes(&header.channels, uncompressed_native_endian.clone(), pixel_section),
            PIZ => piz::compress(&header.channels, uncompressed_native_endian.clone(), pixel_section),
            PXR24 => pxr24::compress(&header.channels, uncompressed_native_endian.clone(), pixel_section),
            B44 => b44::compress(&header.channels, uncompressed_native_endian.clone(), pixel_section, false),
            B44A => b44::compress(&header.channels, uncompressed_native_endian.clone(), pixel_section, true),
            _ => return Err(Error::unsupported(format!("yet unimplemented compression method: {}", self)))
        };

        // any failure inside a compressor is reported as an invalid-data error
        let compressed_little_endian = compressed_little_endian.map_err(|_|
            Error::invalid(format!("pixels cannot be compressed ({})", self))
        )?;

        // note: `self == Uncompressed` can never be true here, because the
        // `Uncompressed` match arm above returned early; only the size check matters
        if self == Uncompressed || compressed_little_endian.len() < uncompressed_native_endian.len() {
            // only write compressed if it actually is smaller than raw
            Ok(compressed_little_endian)
        }
        else {
            // if we do not use compression, manually convert uncompressed data
            Ok(convert_current_to_little_endian(uncompressed_native_endian, &header.channels, pixel_section))
        }
    }

    /// Decompress the image section of bytes.
    ///
    /// Inverse of `compress_image_section`. If the stored byte count equals
    /// the expected raw size, the block was written uncompressed (the writer's
    /// fallback) and only the endianness is converted. Returns an error when
    /// the decompressed size does not match the expected size.
    pub fn decompress_image_section(self, header: &Header, compressed: ByteVec, pixel_section: IntegerBounds, pedantic: bool) -> Result<ByteVec> {
        let max_tile_size = header.max_block_pixel_size();

        assert!(pixel_section.validate(Some(max_tile_size)).is_ok(), "decompress tile coordinate bug");
        if header.deep { assert!(self.supports_deep_data()) }

        let expected_byte_size = pixel_section.size.area() * header.channels.bytes_per_pixel; // FIXME this needs to account for subsampling anywhere

        // note: always true where self == Uncompressed
        if compressed.len() == expected_byte_size {
            // the compressed data was larger than the raw data, so the small raw data has been written
            Ok(convert_little_endian_to_current(compressed, &header.channels, pixel_section))
        }
        else {
            use self::Compression::*;
            let bytes = match self {
                Uncompressed => Ok(convert_little_endian_to_current(compressed, &header.channels, pixel_section)),
                ZIP16 => zip::decompress_bytes(&header.channels, compressed, pixel_section, expected_byte_size, pedantic),
                ZIP1 => zip::decompress_bytes(&header.channels, compressed, pixel_section, expected_byte_size, pedantic),
                RLE => rle::decompress_bytes(&header.channels, compressed, pixel_section, expected_byte_size, pedantic),
                PIZ => piz::decompress(&header.channels, compressed, pixel_section, expected_byte_size, pedantic),
                PXR24 => pxr24::decompress(&header.channels, compressed, pixel_section, expected_byte_size, pedantic),
                B44 | B44A => b44::decompress(&header.channels, compressed, pixel_section, expected_byte_size, pedantic),
                _ => return Err(Error::unsupported(format!("yet unimplemented compression method: {}", self)))
            };

            // map all errors to compression errors
            let bytes = bytes
                .map_err(|decompression_error| match decompression_error {
                    Error::NotSupported(message) =>
                        Error::unsupported(format!("yet unimplemented compression special case ({})", message)),

                    error => Error::invalid(format!(
                        "compressed {:?} data ({})",
                        self, error.to_string()
                    )),
                })?;

            if bytes.len() != expected_byte_size {
                Err(Error::invalid("decompressed data"))
            }

            else { Ok(bytes) }
        }
    }

    /// For scan line images and deep scan line images, one or more scan lines may be
    /// stored together as a scan line block. The number of scan lines per block
    /// depends on how the pixel data are compressed.
    pub fn scan_lines_per_block(self) -> usize {
        use self::Compression::*;
        match self {
            Uncompressed | RLE | ZIP1 => 1,
            ZIP16 | PXR24 => 16,
            PIZ | B44 | B44A | DWAA(_) => 32,
            DWAB(_) => 256,
        }
    }

    /// Deep data can only be compressed using RLE or ZIP compression.
    pub fn supports_deep_data(self) -> bool {
        use self::Compression::*;
        match self {
            Uncompressed | RLE | ZIP1 => true,
            _ => false,
        }
    }

    /// Most compression methods will reconstruct the exact pixel bytes,
    /// but some might throw away unimportant data for specific types of samples.
    pub fn is_lossless_for(self, sample_type: SampleType) -> bool {
        use self::Compression::*;
        match self {
            PXR24 => sample_type != SampleType::F32, // pxr reduces f32 to f24
            B44 | B44A => sample_type != SampleType::F16, // b44 only compresses f16 values, others are left uncompressed
            Uncompressed | RLE | ZIP1 | ZIP16 | PIZ => true,
            DWAB(_) | DWAA(_) => false,
        }
    }

    /// Most compression methods will reconstruct the exact pixel bytes,
    /// but some might throw away unimportant data in some cases.
    /// (Returns `true` for any method that is lossy for at least one sample type.)
    // NOTE(review): "loose" is a typo for "lose", but the name is public API and kept for compatibility
    pub fn may_loose_data(self) -> bool {
        use self::Compression::*;
        match self {
            Uncompressed | RLE | ZIP1 | ZIP16 | PIZ => false,
            PXR24 | B44 | B44A | DWAB(_) | DWAA(_) => true,
        }
    }

    /// Most compression methods will reconstruct the exact pixel bytes,
    /// but some might replace NaN with zeroes.
    pub fn supports_nan(self) -> bool {
        use self::Compression::*;
        match self {
            B44 | B44A | DWAB(_) | DWAA(_) => false, // TODO dwa might support it?
            _ => true
        }
    }

}
|
||||
|
||||
// see https://github.com/AcademySoftwareFoundation/openexr/blob/6a9f8af6e89547bcd370ae3cec2b12849eee0b54/OpenEXR/IlmImf/ImfMisc.cpp#L1456-L1541
|
||||
|
||||
#[allow(unused)] // allows the extra parameters to be unused
|
||||
fn convert_current_to_little_endian(mut bytes: ByteVec, channels: &ChannelList, rectangle: IntegerBounds) -> ByteVec {
|
||||
#[cfg(target = "big_endian")]
|
||||
reverse_block_endianness(&mut byte_vec, channels, rectangle);
|
||||
|
||||
bytes
|
||||
}
|
||||
|
||||
#[allow(unused)] // allows the extra parameters to be unused
|
||||
fn convert_little_endian_to_current(mut bytes: ByteVec, channels: &ChannelList, rectangle: IntegerBounds) -> ByteVec {
|
||||
#[cfg(target = "big_endian")]
|
||||
reverse_block_endianness(&mut bytes, channels, rectangle);
|
||||
|
||||
bytes
|
||||
}
|
||||
|
||||
#[allow(unused)] // unused when on little endian system
/// Reverse the byte order of every sample in one pixel block, in place.
///
/// The block layout is: for each scan line of `rectangle`, for each channel
/// (in channel-list order), a contiguous run of samples. Subsampled channels
/// skip lines where `y` is not a multiple of the channel's y-sampling.
/// The slice is consumed line-run by line-run; a `debug_assert` at the end
/// checks that the walk consumed exactly the whole block.
fn reverse_block_endianness(bytes: &mut [u8], channels: &ChannelList, rectangle: IntegerBounds){
    // shrinks towards the end of the block as each channel line is processed
    let mut remaining_bytes: &mut [u8] = bytes;

    for y in rectangle.position.y() .. rectangle.end().y() {
        for channel in &channels.list {
            // mod_p keeps the remainder non-negative, so this also works for negative y coordinates
            let line_is_subsampled = mod_p(y, usize_to_i32(channel.sampling.y())) != 0;
            if line_is_subsampled { continue; }

            let sample_count = rectangle.size.width() / channel.sampling.x();

            match channel.sample_type {
                SampleType::F16 => remaining_bytes = chomp_convert_n::<f16>(reverse_2_bytes, remaining_bytes, sample_count),
                SampleType::F32 => remaining_bytes = chomp_convert_n::<f32>(reverse_4_bytes, remaining_bytes, sample_count),
                SampleType::U32 => remaining_bytes = chomp_convert_n::<u32>(reverse_4_bytes, remaining_bytes, sample_count),
            }
        }
    }

    /// Byte-swap `count` values of type `T` at the front of `bytes`,
    /// returning the untouched rest of the slice.
    /// Panics (via `split_at_mut`) if the slice is too short.
    #[inline]
    fn chomp_convert_n<T>(convert_single_value: fn(&mut[u8]), mut bytes: &mut [u8], count: usize) -> &mut [u8] {
        let type_size = size_of::<T>();
        let (line_bytes, rest) = bytes.split_at_mut(count * type_size);
        let value_byte_chunks = line_bytes.chunks_exact_mut(type_size);

        for value_bytes in value_byte_chunks {
            convert_single_value(value_bytes);
        }

        rest
    }

    debug_assert!(remaining_bytes.is_empty(), "not all bytes were converted to little endian");
}
|
||||
|
||||
/// Swap the two bytes of a 16-bit value in place.
/// Panics if the slice is not exactly 2 bytes long.
#[inline]
fn reverse_2_bytes(bytes: &mut [u8]){
    // destructure into named bytes, then write them back swapped
    let [a, b]: [u8; 2] = (&*bytes).try_into().expect("invalid byte count");
    bytes[0] = b;
    bytes[1] = a;
}
|
||||
|
||||
/// Reverse the four bytes of a 32-bit value in place.
/// Panics if the slice is not exactly 4 bytes long.
#[inline]
fn reverse_4_bytes(bytes: &mut [u8]){
    // destructure into named bytes, then write them back in reverse order
    let [a, b, c, d]: [u8; 4] = (&*bytes).try_into().expect("invalid byte count");
    bytes[0] = d;
    bytes[1] = c;
    bytes[2] = b;
    bytes[3] = a;
}
|
||||
|
||||
/// Sign-aware integer division, matching OpenEXR's `divp()` helper:
/// chosen such that the companion remainder `x - y * div_p(x, y)`
/// always falls in `[0, |y|)`, for any combination of signs.
#[inline]
fn div_p (x: i32, y: i32) -> i32 {
    // dispatch on the sign combination of numerator and denominator
    match (x >= 0, y >= 0) {
        (true, true) => x / y,
        (true, false) => -(x / -y),
        (false, true) => -((y - 1 - x) / y),
        (false, false) => (-y - 1 - x) / -y,
    }
}
|
||||
|
||||
/// Remainder that pairs with `div_p`: unlike Rust's `%` operator,
/// the result is never negative (it lies in `[0, |y|)`), which is what
/// the subsampling line test in `reverse_block_endianness` relies on.
/// Matches OpenEXR's `modp()` helper.
#[inline]
fn mod_p(x: i32, y: i32) -> i32 {
    x - y * div_p(x, y)
}
|
||||
|
||||
/// A collection of functions used to prepare data for compression.
|
||||
mod optimize_bytes {
|
||||
|
||||
/// Integrate over all differences to the previous value in order to reconstruct sample values.
|
||||
pub fn differences_to_samples(buffer: &mut [u8]) {
|
||||
// The naive implementation is very simple:
|
||||
//
|
||||
// for index in 1..buffer.len() {
|
||||
// buffer[index] = (buffer[index - 1] as i32 + buffer[index] as i32 - 128) as u8;
|
||||
// }
|
||||
//
|
||||
// But we process elements in pairs to take advantage of instruction-level parallelism.
|
||||
// When computations within a pair do not depend on each other, they can be processed in parallel.
|
||||
// Since this function is responsible for a very large chunk of execution time,
|
||||
// this tweak alone improves decoding performance of RLE images by 20%.
|
||||
if let Some(first) = buffer.get(0) {
|
||||
let mut previous = *first as i16;
|
||||
for chunk in &mut buffer[1..].chunks_exact_mut(2) {
|
||||
// no bounds checks here due to indices and chunk size being constant
|
||||
let diff0 = chunk[0] as i16;
|
||||
let diff1 = chunk[1] as i16;
|
||||
// these two computations do not depend on each other, unlike in the naive version,
|
||||
// so they can be executed by the CPU in parallel via instruction-level parallelism
|
||||
let sample0 = (previous + diff0 - 128) as u8;
|
||||
let sample1 = (previous + diff0 + diff1 - 128 * 2) as u8;
|
||||
chunk[0] = sample0;
|
||||
chunk[1] = sample1;
|
||||
previous = sample1 as i16;
|
||||
}
|
||||
// handle the remaining element at the end not processed by the loop over pairs, if present
|
||||
for elem in &mut buffer[1..].chunks_exact_mut(2).into_remainder().iter_mut() {
|
||||
let sample = (previous + *elem as i16 - 128) as u8;
|
||||
*elem = sample;
|
||||
previous = sample as i16;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /// Derive over all values in order to produce differences to the previous value.
    ///
    /// Each element is replaced by `element - previous_element + 128`
    /// (wrapping in `u8`); the first byte is kept as the starting sample.
    /// This is the encoding-side inverse of `differences_to_samples`.
    pub fn samples_to_differences(buffer: &mut [u8]){
        // naive version:
        // for index in (1..buffer.len()).rev() {
        //    buffer[index] = (buffer[index] as i32 - buffer[index - 1] as i32 + 128) as u8;
        // }
        //
        // But we process elements in batches to take advantage of autovectorization.
        // If the target platform has no vector instructions (e.g. 32-bit ARM without `-C target-cpu=native`)
        // this will instead take advantage of instruction-level parallelism.
        if let Some(first) = buffer.get(0) {
            let mut previous = *first as i16;
            // Chunk size is 16 because we process bytes (8 bits),
            // and 8*16 = 128 bits is the size of a typical SIMD register.
            // Even WASM has 128-bit SIMD registers.
            for chunk in &mut buffer[1..].chunks_exact_mut(16) {
                // no bounds checks here due to indices and chunk size being constant
                let sample0 = chunk[0] as i16;
                let sample1 = chunk[1] as i16;
                let sample2 = chunk[2] as i16;
                let sample3 = chunk[3] as i16;
                let sample4 = chunk[4] as i16;
                let sample5 = chunk[5] as i16;
                let sample6 = chunk[6] as i16;
                let sample7 = chunk[7] as i16;
                let sample8 = chunk[8] as i16;
                let sample9 = chunk[9] as i16;
                let sample10 = chunk[10] as i16;
                let sample11 = chunk[11] as i16;
                let sample12 = chunk[12] as i16;
                let sample13 = chunk[13] as i16;
                let sample14 = chunk[14] as i16;
                let sample15 = chunk[15] as i16;
                // Unlike in decoding, the computations here are truly independent from
                // each other (each difference reads only the pre-saved samples above),
                // which enables the compiler to vectorize this loop.
                // Even on targets without vector instructions this wide batch
                // costs nothing extra, so there is no downside to going wide.
                chunk[0] = (sample0 - previous + 128) as u8;
                chunk[1] = (sample1 - sample0 + 128) as u8;
                chunk[2] = (sample2 - sample1 + 128) as u8;
                chunk[3] = (sample3 - sample2 + 128) as u8;
                chunk[4] = (sample4 - sample3 + 128) as u8;
                chunk[5] = (sample5 - sample4 + 128) as u8;
                chunk[6] = (sample6 - sample5 + 128) as u8;
                chunk[7] = (sample7 - sample6 + 128) as u8;
                chunk[8] = (sample8 - sample7 + 128) as u8;
                chunk[9] = (sample9 - sample8 + 128) as u8;
                chunk[10] = (sample10 - sample9 + 128) as u8;
                chunk[11] = (sample11 - sample10 + 128) as u8;
                chunk[12] = (sample12 - sample11 + 128) as u8;
                chunk[13] = (sample13 - sample12 + 128) as u8;
                chunk[14] = (sample14 - sample13 + 128) as u8;
                chunk[15] = (sample15 - sample14 + 128) as u8;
                previous = sample15;
            }
            // Handle the remaining elements at the end not processed by the loop over batches, if present.
            // This is what the iterator-based version of this function would look like without vectorization
            for elem in &mut buffer[1..].chunks_exact_mut(16).into_remainder().iter_mut() {
                let diff = (*elem as i16 - previous + 128) as u8;
                previous = *elem as i16;
                *elem = diff;
            }
        }
    }
|
||||
|
||||
    use std::cell::Cell;
    thread_local! {
        // A per-thread buffer reused between invocations of interleaving and deinterleaving.
        // Allocating memory is cheap, but zeroing or otherwise initializing it is not.
        // Doing it hundreds of times (once per block) would be expensive.
        // This optimization brings down the time spent in interleaving from 15% to 5%.
        // A `Cell` suffices (no `RefCell`) because the buffer is always `take`n out
        // before use and `set` back afterwards — see `with_reused_buffer`.
        static SCRATCH_SPACE: Cell<Vec<u8>> = Cell::new(Vec::new());
    }
|
||||
|
||||
    /// Run `func` with a scratch slice of exactly `length` bytes,
    /// reusing a thread-local buffer across calls to avoid repeated allocation.
    ///
    /// Note: when the buffer is reused, it may still contain bytes from a
    /// previous call — callers are expected to overwrite the slice completely
    /// (both current callers in this module do).
    fn with_reused_buffer<F>(length: usize, mut func: F) where F: FnMut(&mut [u8]) {
        SCRATCH_SPACE.with(|scratch_space| {
            // reuse a buffer if we've already initialized one;
            // `take` leaves an empty Vec in the Cell while we work
            let mut buffer = scratch_space.take();
            if buffer.len() < length {
                // Efficiently create a zeroed Vec by requesting zeroed memory from the OS.
                // This is slightly faster than a `memcpy()` plus `memset()` that would happen otherwise,
                // but is not a big deal either way since it's not a hot codepath.
                buffer = vec![0u8; length];
            }

            // call the function on exactly the requested prefix
            func(&mut buffer[..length]);

            // save the internal buffer for reuse
            scratch_space.set(buffer);
        });
    }
|
||||
|
||||
    /// Interleave the bytes such that the second half of the array is every other byte.
    ///
    /// In-place inverse of `separate_bytes_fragments`: the first half of the
    /// input supplies the even output positions, the second half the odd ones.
    pub fn interleave_byte_blocks(separated: &mut [u8]) {
        with_reused_buffer(separated.len(), |interleaved| {

            // Split the two halves that we are going to interleave.
            // (For odd lengths the first half gets the extra byte.)
            let (first_half, second_half) = separated.split_at((separated.len() + 1) / 2);
            // The first half can be 1 byte longer than the second if the length of the input is odd,
            // but the loop below only processes numbers in pairs.
            // To handle it, preserve the last element of the first slice, to be handled after the loop.
            let first_half_last = first_half.last();
            // Truncate the first half to match the length of the second one; more optimizer-friendly
            let first_half_iter = &first_half[..second_half.len()];

            // Main loop that performs the interleaving
            for ((first, second), interleaved) in first_half_iter.iter().zip(second_half.iter())
                .zip(interleaved.chunks_exact_mut(2)) {
                // The length of each chunk is known to be 2 at compile time,
                // and each index is also a constant.
                // This allows the compiler to remove the bounds checks.
                interleaved[0] = *first;
                interleaved[1] = *second;
            }

            // If the length of the slice was odd, restore the last element of the first half that we saved
            if interleaved.len() % 2 == 1 {
                if let Some(value) = first_half_last {
                    // we can unwrap() here because we just checked that the length is non-zero:
                    // `% 2 == 1` will fail for zero
                    *interleaved.last_mut().unwrap() = *value;
                }
            }

            // write out the results
            separated.copy_from_slice(&interleaved);
        });
    }
|
||||
|
||||
    /// Separate the bytes such that the second half contains every other byte.
    /// This performs deinterleaving - the inverse of interleaving:
    /// even input positions go to the first half, odd positions to the second.
    pub fn separate_bytes_fragments(source: &mut [u8]) {
        with_reused_buffer(source.len(), |separated| {

            // Split the scratch buffer into the two output halves.
            // (For odd lengths the first half gets the extra byte.)
            let (first_half, second_half) = separated.split_at_mut((source.len() + 1) / 2);
            // The first half can be 1 byte longer than the second if the length of the input is odd,
            // but the loop below only processes numbers in pairs.
            // To handle it, preserve the last element of the input, to be handled after the loop.
            let last = source.last();
            let first_half_iter = &mut first_half[..second_half.len()];

            // Main loop that performs the deinterleaving
            for ((first, second), interleaved) in first_half_iter.iter_mut().zip(second_half.iter_mut())
                .zip(source.chunks_exact(2)) {
                // The length of each chunk is known to be 2 at compile time,
                // and each index is also a constant.
                // This allows the compiler to remove the bounds checks.
                *first = interleaved[0];
                *second = interleaved[1];
            }

            // If the length of the slice was odd, restore the last element of the input that we saved
            if source.len() % 2 == 1 {
                if let Some(value) = last {
                    // we can unwrap() here because we just checked that the length is non-zero:
                    // `% 2 == 1` will fail for zero
                    *first_half.last_mut().unwrap() = *value;
                }
            }

            // write out the results
            source.copy_from_slice(&separated);
        });
    }
|
||||
|
||||
|
||||
    /// Round-trip tests for the byte-reordering helpers in this module.
    #[cfg(test)]
    pub mod test {

        // odd-length input deliberately exercises the remainder handling
        #[test]
        fn roundtrip_interleave(){
            let source = vec![ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ];
            let mut modified = source.clone();

            super::separate_bytes_fragments(&mut modified);
            super::interleave_byte_blocks(&mut modified);

            assert_eq!(source, modified);
        }

        // non-monotonic values exercise both positive and negative deltas
        #[test]
        fn roundtrip_derive(){
            let source = vec![ 0, 1, 2, 7, 4, 5, 6, 7, 13, 9, 10 ];
            let mut modified = source.clone();

            super::samples_to_differences(&mut modified);
            super::differences_to_samples(&mut modified);

            assert_eq!(source, modified);
        }

    }
|
||||
}
|
||||
|
||||
|
||||
/// Round-trip tests for the endianness conversion helpers.
#[cfg(test)]
pub mod test {
    use super::*;
    use crate::meta::attribute::ChannelDescription;
    use crate::block::samples::IntoNativeSample;

    // mixes f32 and f16 channels so both the 4-byte and 2-byte swap paths are exercised
    #[test]
    fn roundtrip_endianness_mixed_channels(){
        let a32 = ChannelDescription::new("A", SampleType::F32, true);
        let y16 = ChannelDescription::new("Y", SampleType::F16, true);
        let channels = ChannelList::new(smallvec![ a32, y16 ]);

        // two scan lines of a 2x2 block: per line, two f32 "A" samples followed by two f16 "Y" samples
        let data = vec![
            23582740683_f32.to_ne_bytes().as_slice(),
            35827420683_f32.to_ne_bytes().as_slice(),
            27406832358_f32.to_f16().to_ne_bytes().as_slice(),
            74062358283_f32.to_f16().to_ne_bytes().as_slice(),

            52582740683_f32.to_ne_bytes().as_slice(),
            45827420683_f32.to_ne_bytes().as_slice(),
            15406832358_f32.to_f16().to_ne_bytes().as_slice(),
            65062358283_f32.to_f16().to_ne_bytes().as_slice(),
        ].into_iter().flatten().map(|x| *x).collect();

        roundtrip_convert_endianness(
            data, &channels,
            IntegerBounds::from_dimensions((2, 2))
        );
    }

    // converting to little endian and back must reproduce the input exactly
    fn roundtrip_convert_endianness(
        current_endian: ByteVec, channels: &ChannelList, rectangle: IntegerBounds
    ){
        let little_endian = convert_current_to_little_endian(
            current_endian.clone(), channels, rectangle
        );

        let current_endian_decoded = convert_little_endian_to_current(
            little_endian.clone(), channels, rectangle
        );

        assert_eq!(current_endian, current_endian_decoded, "endianness conversion failed");
    }
}
|
||||
988
vendor/exr/src/compression/piz/huffman.rs
vendored
Normal file
988
vendor/exr/src/compression/piz/huffman.rs
vendored
Normal file
@@ -0,0 +1,988 @@
|
||||
//! 16-bit Huffman compression and decompression.
|
||||
//! Huffman compression and decompression routines written
|
||||
//! by Christian Rouet for his PIZ image file format.
|
||||
// see https://github.com/AcademySoftwareFoundation/openexr/blob/88246d991e0318c043e6f584f7493da08a31f9f8/OpenEXR/IlmImf/ImfHuf.cpp
|
||||
|
||||
use crate::math::RoundingMode;
|
||||
use crate::error::{Error, Result, UnitResult, u64_to_usize, u32_to_usize};
|
||||
use crate::io::Data;
|
||||
use std::{
|
||||
cmp::Ordering,
|
||||
collections::BinaryHeap,
|
||||
io::{Cursor, Read, Write},
|
||||
};
|
||||
use std::convert::TryFrom;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
|
||||
/// Decompress Huffman-coded 16-bit data.
///
/// The input starts with five `u32` header words
/// (min code index, max code index, packed table byte size, data bit count,
/// and a fifth word that is read but unused — presumably reserved in the
/// OpenEXR format; TODO confirm), followed by the packed code-length table
/// and the bit stream itself.
///
/// Returns an error when the header indices exceed the table size, when the
/// advertised bit count does not fit the remaining input, or when decoding
/// does not produce exactly `expected_size` values.
pub fn decompress(compressed: &[u8], expected_size: usize) -> Result<Vec<u16>> {
    let mut remaining_compressed = compressed;

    // fixed five-word header
    let min_code_index = usize::try_from(u32::read(&mut remaining_compressed)?)?;
    let max_code_index_32 = u32::read(&mut remaining_compressed)?;
    let _table_size = usize::try_from(u32::read(&mut remaining_compressed)?)?; // TODO check this and return Err?
    let bit_count = usize::try_from(u32::read(&mut remaining_compressed)?)?;
    let _skipped = u32::read(&mut remaining_compressed)?; // unused header word — purpose unclear, TODO confirm against OpenEXR

    // cannot fail: a u32 always fits a usize on supported targets
    let max_code_index = usize::try_from(max_code_index_32).unwrap();
    if min_code_index >= ENCODING_TABLE_SIZE || max_code_index >= ENCODING_TABLE_SIZE {
        return Err(Error::invalid(INVALID_TABLE_SIZE));
    }

    // coarse sanity check before parsing the table: the bit stream
    // cannot be longer than everything that is left of the input
    if RoundingMode::Up.divide(bit_count, 8) > remaining_compressed.len() {
        return Err(Error::invalid(NOT_ENOUGH_DATA));
    }

    let encoding_table = read_encoding_table(&mut remaining_compressed, min_code_index, max_code_index)?;
    // re-check now that the table bytes have been consumed
    if bit_count > 8 * remaining_compressed.len() { return Err(Error::invalid(INVALID_BIT_COUNT)); }

    let decoding_table = build_decoding_table(&encoding_table, min_code_index, max_code_index)?;

    let result = decode_with_tables(
        &encoding_table,
        &decoding_table,
        &remaining_compressed,
        i32::try_from(bit_count)?,
        max_code_index_32,
        expected_size,
    )?;

    Ok(result)
}
|
||||
|
||||
/// Compress 16-bit data with Huffman coding.
///
/// Produces the layout expected by `decompress`: a five-`u32` header
/// (min code index, max code index, packed table length in bytes, bit count,
/// and a zero word), the packed code-length table, and the bit stream.
/// The header is written last, once the table length and bit count are known,
/// by seeking back to the start of the buffer.
/// An empty input produces an empty output.
pub fn compress(uncompressed: &[u16]) -> Result<Vec<u8>> {
    if uncompressed.is_empty() { return Ok(vec![]); }

    let mut frequencies = count_frequencies(uncompressed);
    let (min_code_index, max_code_index) = build_encoding_table(&mut frequencies);

    let mut result = Cursor::new(Vec::with_capacity(uncompressed.len()));
    u32::write_slice(&mut result, &[0; 5])?; // we come back to these later after we know more about the compressed data

    // remember the byte offsets so the header can reference the section sizes
    let table_start = result.position();
    pack_encoding_table(
        &frequencies,
        min_code_index,
        max_code_index,
        &mut result,
    )?;

    let data_start = result.position();
    let bit_count = encode_with_frequencies(
        &frequencies,
        uncompressed,
        max_code_index,
        &mut result
    )?;

    // write meta data after this
    result.set_position(0);
    let table_length = data_start - table_start;

    u32::try_from(min_code_index)?.write(&mut result)?;
    u32::try_from(max_code_index)?.write(&mut result)?;
    u32::try_from(table_length)?.write(&mut result)?;
    u32::try_from(bit_count)?.write(&mut result)?;
    0_u32.write(&mut result)?;

    Ok(result.into_inner())
}
|
||||
|
||||
|
||||
const ENCODE_BITS: u64 = 16; // literal (value) bit length
const DECODE_BITS: u64 = 14; // decoding bit size (>= 8); codes up to this length resolve with one table lookup

// one entry per possible 16-bit symbol, plus one extra slot
// (used as the run-length code index)
const ENCODING_TABLE_SIZE: usize = ((1 << ENCODE_BITS) + 1) as usize;
const DECODING_TABLE_SIZE: usize = (1 << DECODE_BITS) as usize;
const DECODE_MASK: u64 = DECODING_TABLE_SIZE as u64 - 1;

// markers in the packed code-length table for runs of zero-length codes;
// values follow the OpenEXR huffman table format
const SHORT_ZEROCODE_RUN: u64 = 59;
const LONG_ZEROCODE_RUN: u64 = 63;
const SHORTEST_LONG_RUN: u64 = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const LONGEST_LONG_RUN: u64 = 255 + SHORTEST_LONG_RUN;
|
||||
|
||||
|
||||
/// One entry of the decoding table, indexed by the next `DECODE_BITS` bits
/// of the input stream.
#[derive(Clone, Debug, Eq, PartialEq)]
enum Code {
    /// No code maps to this table index.
    Empty,

    /// A code no longer than `DECODE_BITS`, fully resolved by this single lookup.
    Short(ShortCode),

    /// Candidate symbols for codes longer than `DECODE_BITS`, sharing this
    /// `DECODE_BITS`-bit prefix; resolved by trying each against the encoding table.
    Long(SmallVec<[u32; 2]>), // often 2, sometimes 4, rarely 8
}
|
||||
|
||||
/// A fully-resolved short code: the decoded symbol and the
/// number of input bits the code occupies.
#[derive(Clone, Debug, Eq, PartialEq)]
struct ShortCode {
    // the decoded symbol value
    value: u32,

    // bit length of the code (at most DECODE_BITS, hence u8 suffices)
    len: u8,
}
|
||||
|
||||
impl ShortCode {
    /// Bit length of the code, widened to `u64` for the bit-count arithmetic
    /// in `decode_with_tables`.
    #[inline] fn len(&self) -> u64 { u64::from(self.len) }
}
|
||||
|
||||
/// Decode (uncompress) n bits based on encoding & decoding tables:
/// bytes are shifted into `code_bits`; whenever at least `DECODE_BITS` bits
/// are buffered, the top bits select a decoding-table entry. Short codes
/// resolve immediately; long codes are disambiguated against the encoding
/// table. After the input is exhausted, the remaining buffered bits are
/// drained the same way. Fails unless exactly `expected_output_size` values
/// are produced.
fn decode_with_tables(
    encoding_table: &[u64],
    decoding_table: &[Code],
    mut input: &[u8],
    input_bit_count: i32,
    run_length_code: u32,
    expected_output_size: usize,
) -> Result<Vec<u16>>
{
    let mut output = Vec::with_capacity(expected_output_size);
    // bit accumulator: most recently read byte occupies the lowest bits
    let mut code_bits = 0_u64;
    let mut code_bit_count = 0_u64;

    while input.len() > 0 {
        read_byte(&mut code_bits, &mut code_bit_count, &mut input)?;

        // Access decoding table
        while code_bit_count >= DECODE_BITS {
            let code_index = (code_bits >> (code_bit_count - DECODE_BITS)) & DECODE_MASK;
            let code = &decoding_table[u64_to_usize(code_index)];

            // Get short code
            if let Code::Short(code) = code {
                code_bit_count -= code.len();

                read_code_into_vec(
                    code.value,
                    run_length_code,
                    &mut code_bits,
                    &mut code_bit_count,
                    &mut input,
                    &mut output,
                    expected_output_size,
                )?;
            }
            else if let Code::Long(ref long_codes) = code {
                debug_assert_ne!(long_codes.len(), 0);

                // try each candidate symbol that shares this prefix until one
                // matches the buffered bits exactly
                let long_code = long_codes.iter()
                    .filter_map(|&long_code|{
                        let encoded_long_code = encoding_table[u32_to_usize(long_code)];
                        let length = length(encoded_long_code);

                        // pull in more bytes until the candidate's full length is buffered
                        while code_bit_count < length && input.len() > 0 {
                            let err = read_byte(&mut code_bits, &mut code_bit_count, &mut input);
                            if let Err(err) = err { return Some(Err(err)); }
                        }

                        if code_bit_count >= length {
                            let required_code = (code_bits >> (code_bit_count - length)) & ((1 << length) - 1);

                            if self::code(encoded_long_code) == required_code {
                                code_bit_count -= length;
                                return Some(Ok(long_code));
                            }
                        }

                        None

                    })
                    .next()
                    .ok_or(Error::invalid(INVALID_CODE))?;

                read_code_into_vec(
                    long_code?,
                    run_length_code,
                    &mut code_bits,
                    &mut code_bit_count,
                    &mut input,
                    &mut output,
                    expected_output_size,
                )?;
            }
            else {
                return Err(Error::invalid(INVALID_CODE));
            }
        }
    }

    // discard the padding bits that rounded the stream up to whole bytes
    let count = u64::try_from((8 - input_bit_count) & 7)?;
    code_bits >>= count;
    code_bit_count -= count;

    // drain the bits still buffered after the input is exhausted;
    // only short codes can occur here
    while code_bit_count > 0 {
        let index = (code_bits << (DECODE_BITS - code_bit_count)) & DECODE_MASK;
        let code = &decoding_table[u64_to_usize(index)];

        if let Code::Short(short_code) = code {
            if short_code.len() > code_bit_count { return Err(Error::invalid("code")) }; // FIXME why does this happen??
            code_bit_count -= short_code.len(); // FIXME may throw "attempted to subtract with overflow"

            read_code_into_vec(
                short_code.value,
                run_length_code,
                &mut code_bits,
                &mut code_bit_count,
                &mut input,
                &mut output,
                expected_output_size,
            )?;
        }
        else {
            return Err(Error::invalid(INVALID_CODE));
        }
    }

    if output.len() != expected_output_size {
        return Err(Error::invalid(NOT_ENOUGH_DATA));
    }

    Ok(output)
}
|
||||
|
||||
/// Build a decoding hash table based on the encoding table code:
/// - short codes (<= HUF_DECBITS) are resolved with a single table access;
/// - long code entry allocations are not optimized, because long codes are
///   unfrequent;
/// - decoding tables are used by hufDecode();
///
/// Only entries in `encoding_table[min_code_index ..= max_code_index]`
/// are considered; each encoded entry packs (code << 6) | bit-length.
fn build_decoding_table(
    encoding_table: &[u64],
    min_code_index: usize,
    max_code_index: usize,
) -> Result<Vec<Code>>
{
    let mut decoding_table = vec![Code::Empty; DECODING_TABLE_SIZE]; // not an array because of code not being copy

    for (code_index, &encoded_code) in encoding_table[..= max_code_index].iter().enumerate().skip(min_code_index) {
        let code_index = u32::try_from(code_index).unwrap();

        let code = code(encoded_code);
        let length = length(encoded_code);

        // a valid code never has bits set above its own length
        if code >> length != 0 {
            return Err(Error::invalid(INVALID_TABLE_ENTRY));
        }

        if length > DECODE_BITS {
            // long code: store the symbol index in the candidate list of
            // the slot addressed by the code's top DECODE_BITS
            let long_code = &mut decoding_table[u64_to_usize(code >> (length - DECODE_BITS))];

            match long_code {
                Code::Empty => *long_code = Code::Long(smallvec![code_index]),
                Code::Long(lits) => lits.push(code_index),
                _ => { return Err(Error::invalid(INVALID_TABLE_ENTRY)); }
            }
        }
        else if length != 0 {
            // short code: fill every table slot whose top bits match the code
            let default_value = Code::Short(ShortCode {
                value: code_index,
                len: length as u8,
            });

            let start_index = u64_to_usize(code << (DECODE_BITS - length));
            let count = u64_to_usize(1 << (DECODE_BITS - length));

            for value in &mut decoding_table[start_index .. start_index + count] {
                *value = default_value.clone();
            }
        }
    }

    Ok(decoding_table)
}
|
||||
|
||||
/// Run-length-decompresses all zero runs from the packed table to the encoding table
///
/// Reads 6-bit code lengths from `packed`. The special values
/// `SHORT_ZEROCODE_RUN..` and `LONG_ZEROCODE_RUN` encode runs of zero
/// lengths (see `pack_encoding_table` for the inverse). The resulting
/// length table is converted into a canonical code table before returning.
fn read_encoding_table(
    packed: &mut impl Read,
    min_code_index: usize,
    max_code_index: usize,
) -> Result<Vec<u64>>
{
    let mut code_bits = 0_u64;
    let mut code_bit_count = 0_u64;

    // TODO push() into encoding table instead of index stuff?
    let mut encoding_table = vec![0_u64; ENCODING_TABLE_SIZE];
    let mut code_index = min_code_index;
    while code_index <= max_code_index {
        let code_len = read_bits(6, &mut code_bits, &mut code_bit_count, packed)?;
        encoding_table[code_index] = code_len;

        if code_len == LONG_ZEROCODE_RUN {
            // long zero run: the actual run length follows as 8 extra bits
            let zerun_bits = read_bits(8, &mut code_bits, &mut code_bit_count, packed)?;
            let zerun = usize::try_from(zerun_bits + SHORTEST_LONG_RUN).unwrap();

            if code_index + zerun > max_code_index + 1 {
                return Err(Error::invalid(TABLE_TOO_LONG));
            }

            // overwrite the run marker itself with zeroes too
            for value in &mut encoding_table[code_index..code_index + zerun] {
                *value = 0;
            }

            code_index += zerun;
        }
        else if code_len >= SHORT_ZEROCODE_RUN {
            // short zero run: run length is encoded in the code itself
            let duplication_count = usize::try_from(code_len - SHORT_ZEROCODE_RUN + 2).unwrap();
            if code_index + duplication_count > max_code_index + 1 {
                return Err(Error::invalid(TABLE_TOO_LONG));
            }

            for value in &mut encoding_table[code_index .. code_index + duplication_count] {
                *value = 0;
            }

            code_index += duplication_count;
        }
        else {
            // plain code length, already stored above
            code_index += 1;
        }
    }

    build_canonical_table(&mut encoding_table);
    Ok(encoding_table)
}
|
||||
|
||||
// TODO Use BitStreamReader for all the bit reads?!
/// Read exactly `count` bits from `input`, MSB-first, via the shared
/// bit accumulator `code_bits` / `code_bit_count`.
/// Returns the bits right-aligned in a `u64`.
#[inline]
fn read_bits(
    count: u64,
    code_bits: &mut u64,
    code_bit_count: &mut u64,
    input: &mut impl Read,
) -> Result<u64>
{
    // fill the accumulator until the request fits
    while *code_bit_count < count {
        read_byte(code_bits, code_bit_count, input)?;
    }

    *code_bit_count -= count;
    Ok((*code_bits >> *code_bit_count) & ((1 << count) - 1))
}
|
||||
|
||||
/// Shift one byte from `input` into the low end of the bit accumulator.
/// Note: bits older than 64 are shifted out silently; callers keep
/// `bit_count` small enough for this not to happen — TODO confirm.
#[inline]
fn read_byte(code_bits: &mut u64, bit_count: &mut u64, input: &mut impl Read) -> UnitResult {
    *code_bits = (*code_bits << 8) | u8::read(input)? as u64;
    *bit_count += 8;
    Ok(())
}
|
||||
|
||||
/// Append one decoded symbol to `out`, or — if `code` is the special
/// `run_length_code` — repeat the previously emitted symbol. The repetition
/// count is read as the next 8 bits from the stream. Errors if the output
/// would exceed `max_len`, or if a run is requested before any symbol exists.
#[inline]
fn read_code_into_vec(
    code: u32,
    run_length_code: u32,
    code_bits: &mut u64,
    code_bit_count: &mut u64,
    read: &mut impl Read,
    out: &mut Vec<u16>,
    max_len: usize,
) -> UnitResult
{
    if code == run_length_code { // code may be too large for u16
        // need 8 more bits for the repetition count
        if *code_bit_count < 8 {
            read_byte(code_bits, code_bit_count, read)?;
        }

        *code_bit_count -= 8;

        let code_repetitions = usize::from((*code_bits >> *code_bit_count) as u8);

        if out.len() + code_repetitions > max_len {
            return Err(Error::invalid(TOO_MUCH_DATA));
        }
        else if out.is_empty() {
            // a run has nothing to repeat without a preceding symbol
            return Err(Error::invalid(NOT_ENOUGH_DATA));
        }

        let repeated_code = *out.last().unwrap();
        out.extend(std::iter::repeat(repeated_code).take(code_repetitions));
    }
    else if out.len() < max_len { // implies that code is not larger than u16???
        out.push(u16::try_from(code)?);
    }
    else {
        return Err(Error::invalid(TOO_MUCH_DATA));
    }

    Ok(())
}
|
||||
|
||||
fn count_frequencies(data: &[u16]) -> Vec<u64> {
|
||||
let mut frequencies = vec![0_u64; ENCODING_TABLE_SIZE];
|
||||
|
||||
for value in data {
|
||||
frequencies[*value as usize] += 1;
|
||||
}
|
||||
|
||||
frequencies
|
||||
}
|
||||
|
||||
/// Append the low `count` bits of `bits` to the output stream, MSB-first,
/// buffering partial bytes in `code_bits` / `code_bit_count` and flushing
/// whole bytes to `out` as soon as they are complete.
fn write_bits(
    count: u64,
    bits: u64,
    code_bits: &mut u64,
    code_bit_count: &mut u64,
    mut out: impl Write,
) -> UnitResult
{
    *code_bits = (*code_bits << count) | bits;
    *code_bit_count += count;

    // emit all complete bytes, keeping the remainder buffered
    while *code_bit_count >= 8 {
        *code_bit_count -= 8;
        out.write(&[
            (*code_bits >> *code_bit_count) as u8 // TODO make sure never or always wraps?
        ])?;
    }

    Ok(())
}
|
||||
|
||||
/// Write one packed table entry `scode` ((code << 6) | length)
/// as `length` bits of `code` to the output stream.
fn write_code(scode: u64, code_bits: &mut u64, code_bit_count: &mut u64, mut out: impl Write) -> UnitResult {
    write_bits(length(scode), code(scode), code_bits, code_bit_count, &mut out)
}
|
||||
|
||||
/// Emit `run_count + 1` occurrences of the symbol encoded by `scode`,
/// choosing whichever form is shorter: an explicit repetition of the
/// symbol's code, or the symbol once followed by the run-length code
/// and an 8-bit repetition count.
#[inline(always)]
fn send_code(
    scode: u64,
    run_count: u64,
    run_code: u64,
    code_bits: &mut u64,
    code_bit_count: &mut u64,
    mut out: impl Write,
) -> UnitResult
{
    // Output a run of runCount instances of the symbol sCount.
    // Output the symbols explicitly, or if that is shorter, output
    // the sCode symbol once followed by a runCode symbol and runCount
    // expressed as an 8-bit number.
    if length(scode) + length(run_code) + 8 < length(scode) * run_count {
        write_code(scode, code_bits, code_bit_count, &mut out)?;
        write_code(run_code, code_bits, code_bit_count, &mut out)?;
        write_bits(8, run_count, code_bits, code_bit_count, &mut out)?;
    }
    else {
        // inclusive range: run_count counts repetitions AFTER the first symbol
        for _ in 0 ..= run_count {
            write_code(scode, code_bits, code_bit_count, &mut out)?;
        }
    }

    Ok(())
}
|
||||
|
||||
fn encode_with_frequencies(
|
||||
frequencies: &[u64],
|
||||
uncompressed: &[u16],
|
||||
run_length_code: usize,
|
||||
mut out: &mut Cursor<Vec<u8>>,
|
||||
) -> Result<u64>
|
||||
{
|
||||
let mut code_bits = 0;
|
||||
let mut code_bit_count = 0;
|
||||
|
||||
let mut run_start_value = uncompressed[0];
|
||||
let mut run_length = 0;
|
||||
|
||||
let start_position = out.position();
|
||||
|
||||
// Loop on input values
|
||||
for ¤t_value in &uncompressed[1..] {
|
||||
// Count same values or send code
|
||||
if run_start_value == current_value && run_length < 255 {
|
||||
run_length += 1;
|
||||
}
|
||||
else {
|
||||
send_code(
|
||||
frequencies[run_start_value as usize],
|
||||
run_length,
|
||||
frequencies[run_length_code],
|
||||
&mut code_bits,
|
||||
&mut code_bit_count,
|
||||
&mut out,
|
||||
)?;
|
||||
|
||||
run_length = 0;
|
||||
}
|
||||
|
||||
run_start_value = current_value;
|
||||
}
|
||||
|
||||
// Send remaining code
|
||||
send_code(
|
||||
frequencies[run_start_value as usize],
|
||||
run_length,
|
||||
frequencies[run_length_code],
|
||||
&mut code_bits,
|
||||
&mut code_bit_count,
|
||||
&mut out,
|
||||
)?;
|
||||
|
||||
let data_length = out.position() - start_position; // we shouldn't count the last byte write
|
||||
|
||||
if code_bit_count != 0 {
|
||||
out.write(&[
|
||||
(code_bits << (8 - code_bit_count) & 0xff) as u8
|
||||
])?;
|
||||
}
|
||||
|
||||
Ok(data_length * 8 + code_bit_count)
|
||||
}
|
||||
|
||||
///
/// Pack an encoding table:
/// - only code lengths, not actual codes, are stored
/// - runs of zeroes are compressed as follows:
///
///   unpacked              packed
/// --------------------------------
/// 1 zero                  0        (6 bits)
/// 2 zeroes                59
/// 3 zeroes                60
/// 4 zeroes                61
/// 5 zeroes                62
/// n zeroes (6 or more)    63 n-6   (6 + 8 bits)
///
fn pack_encoding_table(
    frequencies: &[u64],
    min_index: usize,
    max_index: usize,
    mut out: &mut Cursor<Vec<u8>>,
) -> UnitResult
{
    let mut code_bits = 0_u64;
    let mut code_bit_count = 0_u64;

    let mut frequency_index = min_index;
    while frequency_index <= max_index { // TODO slice iteration?
        let code_length = length(frequencies[frequency_index]);

        if code_length == 0 {
            let mut zero_run = 1;

            // measure the zero run, capped at the longest encodable run
            while frequency_index < max_index && zero_run < LONGEST_LONG_RUN {
                if length(frequencies[frequency_index + 1]) > 0 {
                    break;
                }

                frequency_index += 1;
                zero_run += 1;
            }

            if zero_run >= 2 {
                if zero_run >= SHORTEST_LONG_RUN {
                    // 63 followed by (run - 6) as 8 bits
                    write_bits(6, LONG_ZEROCODE_RUN, &mut code_bits, &mut code_bit_count, &mut out)?;
                    write_bits(8, zero_run - SHORTEST_LONG_RUN, &mut code_bits, &mut code_bit_count, &mut out)?;
                }
                else {
                    // 59..=62 encode runs of 2..=5 zeroes
                    write_bits(6, SHORT_ZEROCODE_RUN + zero_run - 2, &mut code_bits, &mut code_bit_count, &mut out)?;
                }

                frequency_index += 1; // we must increment or else this may go very wrong
                continue;
            }
            // single zero falls through and is written as a plain length below
        }

        write_bits(6, code_length, &mut code_bits, &mut code_bit_count, &mut out)?;
        frequency_index += 1;
    }

    // flush the final partial byte, left-aligned
    if code_bit_count > 0 {
        out.write(&[
            (code_bits << (8 - code_bit_count)) as u8
        ])?;
    }

    Ok(())
}
|
||||
|
||||
/// Build a "canonical" Huffman code table:
/// - for each (uncompressed) symbol, code contains the length
///   of the corresponding code (in the compressed data)
/// - canonical codes are computed and stored in code
/// - the rules for constructing canonical codes are as follows:
///   * shorter codes (if filled with zeroes to the right)
///     have a numerically higher value than longer codes
///   * for codes with the same length, numerical values
///     increase with numerical symbol values
/// - because the canonical code table can be constructed from
///   symbol lengths alone, the code table can be transmitted
///   without sending the actual code values
/// - see http://www.compressconsult.com/huffman/
///
/// Input: `code_table[i]` holds the bit length of symbol i's code (0..=58).
/// Output: `code_table[i]` holds (code << 6) | length.
fn build_canonical_table(code_table: &mut [u64]) {
    debug_assert_eq!(code_table.len(), ENCODING_TABLE_SIZE);

    // count_per_code[l] = number of symbols whose code has length l
    let mut count_per_code = [0_u64; 59];

    for &code in code_table.iter() {
        count_per_code[u64_to_usize(code)] += 1;
    }

    // For each i from 58 through 1, compute the
    // numerically lowest code with length i, and
    // store that code in n[i].
    {
        let mut code = 0_u64; // TODO use foldr?
        for count in &mut count_per_code.iter_mut().rev() {
            let next_code = (code + *count) >> 1;
            *count = code; // count_per_code[l] now holds the first code of length l
            code = next_code;
        }
    }

    // code[i] contains the length, l, of the
    // code for symbol i. Assign the next available
    // code of length l to the symbol and store both
    // l and the code in code[i]. // TODO iter + filter ?
    for symbol_length in code_table.iter_mut() {
        let current_length = *symbol_length;
        let code_index = u64_to_usize(current_length);
        if current_length > 0 {
            *symbol_length = current_length | (count_per_code[code_index] << 6);
            count_per_code[code_index] += 1;
        }
    }
}
|
||||
|
||||
|
||||
/// Compute Huffman codes (based on frq input) and store them in frq:
/// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
/// - max code length is 58 bits;
/// - codes outside the range [im-iM] have a null length (unused values);
/// - original frequencies are destroyed;
/// - encoding tables are used by hufEncode() and hufBuildDecTable();
///
/// NB: The following code "(*a == *b) && (a > b))" was added to ensure
/// elements in the heap with the same value are sorted by index.
/// This is to ensure, the STL make_heap()/pop_heap()/push_heap() methods
/// produced a resultant sorted heap that is identical across OSes.
fn build_encoding_table(
    frequencies: &mut [u64], // input frequencies, output encoding table
) -> (usize, usize) // return frequency max min range
{
    debug_assert_eq!(frequencies.len(), ENCODING_TABLE_SIZE);

    /// Frequency with position, used for MinHeap.
    /// `Ord` is reversed (other vs self) so `BinaryHeap`, a max-heap,
    /// behaves as a min-heap over (frequency, position).
    #[derive(Eq, PartialEq, Copy, Clone)]
    struct HeapFrequency {
        position: usize,
        frequency: u64,
    }

    impl Ord for HeapFrequency {
        fn cmp(&self, other: &Self) -> Ordering {
            other.frequency.cmp(&self.frequency)
                .then_with(|| other.position.cmp(&self.position))
        }
    }

    impl PartialOrd for HeapFrequency {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
    }

    // This function assumes that when it is called, array frq
    // indicates the frequency of all possible symbols in the data
    // that are to be Huffman-encoded.  (frq[i] contains the number
    // of occurrences of symbol i in the data.)
    //
    // The loop below does three things:
    //
    // 1) Finds the minimum and maximum indices that point
    //    to non-zero entries in frq:
    //
    //    frq[im] != 0, and frq[i] == 0 for all i < im
    //    frq[iM] != 0, and frq[i] == 0 for all i > iM
    //
    // 2) Fills array fHeap with pointers to all non-zero
    //    entries in frq.
    //
    // 3) Initializes array hlink such that hlink[i] == i
    //    for all array entries.

    // We need to use vec here or we overflow the stack.
    let mut links = vec![0_usize; ENCODING_TABLE_SIZE];
    let mut frequency_heap = vec![0_usize; ENCODING_TABLE_SIZE];

    // This is a good solution since we don't have usize::MAX items (no panics or UB),
    // and since this is short-circuit, it stops at the first in order non zero element.
    let min_frequency_index = frequencies.iter().position(|f| *f != 0).unwrap_or(0);

    let mut max_frequency_index = 0;
    let mut frequency_count = 0;

    // assert bounds check to optimize away bounds check in loops
    assert!(links.len() >= ENCODING_TABLE_SIZE);
    assert!(frequencies.len() >= ENCODING_TABLE_SIZE);

    for index in min_frequency_index..ENCODING_TABLE_SIZE {
        links[index] = index; // TODO for x in links.iter().enumerate()

        if frequencies[index] != 0 {
            frequency_heap[frequency_count] = index;
            max_frequency_index = index;
            frequency_count += 1;
        }
    }


    // Add a pseudo-symbol, with a frequency count of 1, to frq;
    // adjust the fHeap and hlink array accordingly. Function
    // hufEncode() uses the pseudo-symbol for run-length encoding.

    max_frequency_index += 1;
    frequencies[max_frequency_index] = 1;
    frequency_heap[frequency_count] = max_frequency_index;
    frequency_count += 1;

    // Build an array, scode, such that scode[i] contains the number
    // of bits assigned to symbol i. Conceptually this is done by
    // constructing a tree whose leaves are the symbols with non-zero
    // frequency:
    //
    //     Make a heap that contains all symbols with a non-zero frequency,
    //     with the least frequent symbol on top.
    //
    //     Repeat until only one symbol is left on the heap:
    //
    //         Take the two least frequent symbols off the top of the heap.
    //         Create a new node that has first two nodes as children, and
    //         whose frequency is the sum of the frequencies of the first
    //         two nodes. Put the new node back into the heap.
    //
    // The last node left on the heap is the root of the tree. For each
    // leaf node, the distance between the root and the leaf is the length
    // of the code for the corresponding symbol.
    //
    // The loop below doesn't actually build the tree; instead we compute
    // the distances of the leaves from the root on the fly. When a new
    // node is added to the heap, then that node's descendants are linked
    // into a single linear list that starts at the new node, and the code
    // lengths of the descendants (that is, their distance from the root
    // of the tree) are incremented by one.
    let mut heap = BinaryHeap::with_capacity(frequency_count);
    for index in frequency_heap.drain(..frequency_count) {
        heap.push(HeapFrequency { position: index, frequency: frequencies[index] });
    }

    let mut s_code = vec![0_u64; ENCODING_TABLE_SIZE];

    while frequency_count > 1 {
        // Find the indices, mm and m, of the two smallest non-zero frq
        // values in fHeap, add the smallest frq to the second-smallest
        // frq, and remove the smallest frq value from fHeap.
        let (high_position, low_position) = {
            let smallest_frequency = heap.pop().expect("heap empty bug");
            frequency_count -= 1;

            // merge in place through PeekMut — the heap re-sifts on drop
            let mut second_smallest_frequency = heap.peek_mut().expect("heap empty bug");
            second_smallest_frequency.frequency += smallest_frequency.frequency;

            (second_smallest_frequency.position, smallest_frequency.position)
        };

        // The entries in scode are linked into lists with the
        // entries in hlink serving as "next" pointers and with
        // the end of a list marked by hlink[j] == j.
        //
        // Traverse the lists that start at scode[m] and scode[mm].
        // For each element visited, increment the length of the
        // corresponding code by one bit. (If we visit scode[j]
        // during the traversal, then the code for symbol j becomes
        // one bit longer.)
        //
        // Merge the lists that start at scode[m] and scode[mm]
        // into a single list that starts at scode[m].

        // Add a bit to all codes in the first list.
        let mut index = high_position; // TODO fold()
        loop {
            s_code[index] += 1;
            debug_assert!(s_code[index] <= 58);

            // merge the two lists
            if links[index] == index {
                links[index] = low_position;
                break;
            }

            index = links[index];
        }

        // Add a bit to all codes in the second list
        let mut index = low_position; // TODO fold()
        loop {
            s_code[index] += 1;
            debug_assert!(s_code[index] <= 58);

            if links[index] == index {
                break;
            }

            index = links[index];
        }
    }

    // Build a canonical Huffman code table, replacing the code
    // lengths in scode with (code, code length) pairs. Copy the
    // code table from scode into frq.
    build_canonical_table(&mut s_code);
    frequencies.copy_from_slice(&s_code);

    (min_frequency_index, max_frequency_index)
}
|
||||
|
||||
|
||||
/// Bit length of a packed table entry: stored in the entry's low 6 bits.
#[inline]
fn length(entry: u64) -> u64 {
    entry & 0b11_1111
}
|
||||
/// Canonical code of a packed table entry: stored above the low 6 length bits.
#[inline]
fn code(entry: u64) -> u64 {
    entry >> 6
}
|
||||
|
||||
// Error messages used throughout the huffman coder.
// Fix: dropped the redundant explicit `'static` lifetimes — `&str` consts
// are implicitly `'static` (clippy::redundant_static_lifetimes).
const INVALID_BIT_COUNT: &str = "invalid number of bits";
const INVALID_TABLE_ENTRY: &str = "invalid code table entry";
const NOT_ENOUGH_DATA: &str = "decoded data are shorter than expected";
const INVALID_TABLE_SIZE: &str = "unexpected end of code table data";
const TABLE_TOO_LONG: &str = "code table is longer than expected";
const INVALID_CODE: &str = "invalid code";
const TOO_MUCH_DATA: &str = "decoded data are longer than expected";
|
||||
|
||||
|
||||
// Unit tests: round-trips of random data through compress/decompress, plus a
// byte-exact comparison against output produced by the ILM OpenEXR reference.
#[cfg(test)]
mod test {
    use super::*;
    use rand::{Rng, SeedableRng};

    // reference input produced with a custom ILM OpenEXR test
    const UNCOMPRESSED_ARRAY: [u16; 100] = [
        3852, 2432, 33635, 49381, 10100, 15095, 62693, 63738, 62359, 5013, 7715, 59875, 28182,
        34449, 19983, 20399, 63407, 29486, 4877, 26738, 44815, 14042, 46091, 48228, 25682, 35412,
        7582, 65069, 6632, 54124, 13798, 27503, 52154, 61961, 30474, 46880, 39097, 15754, 52897,
        42371, 54053, 14178, 48276, 34591, 42602, 32126, 42062, 31474, 16274, 55991, 2882, 17039,
        56389, 20835, 57057, 54081, 3414, 33957, 52584, 10222, 25139, 40002, 44980, 1602, 48021,
        19703, 6562, 61777, 41582, 201, 31253, 51790, 15888, 40921, 3627, 12184, 16036, 26349,
        3159, 29002, 14535, 50632, 18118, 33583, 18878, 59470, 32835, 9347, 16991, 21303, 26263,
        8312, 14017, 41777, 43240, 3500, 60250, 52437, 45715, 61520,
    ];

    // input containing zero runs and repeated extreme values,
    // exercising the run-length path of the coder
    const UNCOMPRESSED_ARRAY_SPECIAL: [u16; 100] = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28182,
        0, 65534, 0, 65534, 0, 65534, 0, 65534, 0, 0, 0, 0, 0,
        0, 0, 0, 54124, 13798, 27503, 52154, 61961, 30474, 46880, 39097, 15754, 52897,
        42371, 54053, 14178, 48276, 34591, 42602, 32126, 42062, 31474, 16274, 55991, 2882, 17039,
        56389, 20835, 57057, 54081, 3414, 33957, 52584, 10222, 25139, 40002, 44980, 1602, 48021,
        19703, 6562, 61777, 41582, 201, 31253, 51790, 15888, 40921, 3627, 12184, 16036, 26349,
        3159, 29002, 14535, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534,
    ];

    // expected compressed bytes for UNCOMPRESSED_ARRAY,
    // as produced by the ILM OpenEXR reference implementation
    const COMPRESSED_ARRAY: [u8; 703] = [
        0xc9, 0x0, 0x0, 0x0, 0x2e, 0xfe, 0x0, 0x0, 0x56, 0x2, 0x0, 0x0, 0xa2, 0x2, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0x47,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x28, 0x1f, 0xff, 0xff, 0xed, 0x87, 0xff, 0xff, 0xf0,
        0x91, 0xff, 0xf8, 0x1f, 0xf4, 0xf1, 0xff, 0x78, 0x1f, 0xfd, 0xa1, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xfa, 0xc7, 0xfe, 0x4, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xed, 0x1f, 0xf3, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x7, 0xfd, 0xf8,
        0x7f, 0xff, 0xff, 0xff, 0xfd, 0x10, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x51, 0xff,
        0xff, 0xff, 0xff, 0xfe, 0x1, 0xff, 0x73, 0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xfc, 0xa4, 0x7f, 0xf5, 0x7, 0xfc, 0x48, 0x7f, 0xe0, 0x47, 0xff, 0xff,
        0xf5, 0x91, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x21, 0xff,
        0x7f, 0x1f, 0xf8, 0xd1, 0xff, 0xe7, 0x1f, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x1f, 0xf2, 0x91,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1c, 0x1f, 0xff, 0xff, 0xff, 0xff, 0xe7,
        0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x8c, 0x7f, 0xff, 0xff, 0xc, 0x1f, 0xff, 0xff,
        0xe5, 0x7, 0xff, 0xff, 0xfa, 0x81, 0xff, 0xff, 0xff, 0x20, 0x7f, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xbc, 0x7f, 0xff, 0xff, 0xff, 0xfc, 0x38, 0x7f, 0xff,
        0xff, 0xff, 0xfc, 0xd0, 0x7f, 0xd3, 0xc7, 0xff, 0xff, 0xf7, 0x91, 0xff, 0xff, 0xff, 0xff,
        0xfe, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x61, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
        0x87, 0xff, 0xff, 0xfd, 0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x87, 0xff, 0xff,
        0xff, 0xff, 0xfe, 0x87, 0xff, 0x58, 0x7f, 0xff, 0xff, 0xff, 0xfd, 0xec, 0x7f, 0xff, 0xff,
        0xff, 0xfe, 0xd0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x6c, 0x7f, 0xcb, 0x47, 0xff, 0xff, 0xf3,
        0x61, 0xff, 0xff, 0xff, 0x80, 0x7f, 0xe1, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f,
        0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x18, 0x1f, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xfd, 0xcc, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x11, 0xff, 0xff,
        0xff, 0xff, 0xf8, 0x41, 0xff, 0xbc, 0x1f, 0xff, 0xff, 0xc4, 0x47, 0xff, 0xff, 0xf2, 0x91,
        0xff, 0xe0, 0x1f, 0xff, 0xff, 0xff, 0xff, 0x6d, 0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0x2, 0x1f, 0xf9, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe1,
        0xff, 0xff, 0xfd, 0xb0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe1, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0x5a, 0x1f, 0xfc, 0x81, 0xbf, 0x29, 0x1b, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xf3, 0x61, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x1b,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xb1, 0xbf, 0xff, 0xfd, 0x80, 0x6f, 0xff,
        0xff, 0xf, 0x1b, 0xf8, 0xc1, 0xbf, 0xff, 0xfc, 0xb4, 0x6f, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xda, 0x46, 0xfc, 0x54, 0x6f, 0xc9, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x21, 0x1b, 0xff, 0xff, 0xe0, 0x86, 0xff, 0xff,
        0xff, 0xff, 0xe2, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x91, 0xbf, 0xff, 0xfe, 0x24, 0x6f, 0xff, 0xff, 0x6b,
        0x1b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xb1, 0xbf, 0xfa, 0x1b, 0xfb, 0x11,
        0xbf, 0xff, 0xfe, 0x8, 0x6f, 0xff, 0xff, 0x42, 0x1b, 0xff, 0xff, 0xff, 0xff, 0xb9, 0x1b,
        0xff, 0xff, 0xcf, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x31,
        0x86, 0x10, 0x9, 0xb4, 0xe4, 0x4c, 0xf7, 0xef, 0x42, 0x87, 0x6a, 0xb5, 0xc2, 0x34, 0x9e,
        0x2f, 0x12, 0xae, 0x21, 0x68, 0xf2, 0xa8, 0x74, 0x37, 0xe1, 0x98, 0x14, 0x59, 0x57, 0x2c,
        0x24, 0x3b, 0x35, 0x6c, 0x1b, 0x8b, 0xcc, 0xe6, 0x13, 0x38, 0xc, 0x8e, 0xe2, 0xc, 0xfe,
        0x49, 0x73, 0xbc, 0x2b, 0x7b, 0x9, 0x27, 0x79, 0x14, 0xc, 0x94, 0x42, 0xf8, 0x7c, 0x1,
        0x8d, 0x26, 0xde, 0x87, 0x26, 0x71, 0x50, 0x45, 0xc6, 0x28, 0x40, 0xd5, 0xe, 0x8d, 0x8,
        0x1e, 0x4c, 0xa4, 0x79, 0x57, 0xf0, 0xc3, 0x6d, 0x5c, 0x6d, 0xc0,
    ];

    // Generate `size` random samples; with 20% probability the whole buffer
    // is a single repeated extreme value (all zeroes or all u16::MAX),
    // which stresses the run-length encoding path.
    fn fill(rng: &mut impl Rng, size: usize) -> Vec<u16> {
        if rng.gen_bool(0.2) {
            let value = if rng.gen_bool(0.5) { 0 } else { u16::MAX };
            return vec![ value; size ];
        }

        let mut data = vec![0_u16; size];

        data.iter_mut().for_each(|v| {
            *v = rng.gen_range(0_u16 .. u16::MAX);
        });

        data
    }

    /// Test using both input and output from a custom ILM OpenEXR test.
    #[test]
    fn compression_comparation() {
        let raw = compress(&UNCOMPRESSED_ARRAY).unwrap();
        assert_eq!(raw, COMPRESSED_ARRAY.to_vec());
    }

    // compress-then-decompress must reproduce the input exactly
    #[test]
    fn round_trip() {
        let mut random = rand::rngs::StdRng::from_seed(SEED);
        let raw = fill(&mut random, u16::MAX as usize);

        let compressed = compress(&raw).unwrap();
        let uncompressed = decompress(&compressed, raw.len()).unwrap();

        assert_eq!(uncompressed, raw);
    }

    // round-trip of the run-heavy special input
    #[test]
    fn repetitions_special() {
        let raw = UNCOMPRESSED_ARRAY_SPECIAL;

        let compressed = compress(&raw).unwrap();
        let uncompressed = decompress(&compressed, raw.len()).unwrap();

        assert_eq!(uncompressed, raw.to_vec());
    }

    // round-trips at several buffer sizes
    #[test]
    fn round_trip100() {
        let mut random = rand::rngs::StdRng::from_seed(SEED);

        for size_multiplier in 1..10 {
            let raw = fill(&mut random, size_multiplier * 50_000);

            let compressed = compress(&raw).unwrap();
            let uncompressed = decompress(&compressed, raw.len()).unwrap();

            assert_eq!(uncompressed, raw);
        }
    }

    // an all-zero buffer must survive the round trip
    #[test]
    fn test_zeroes(){
        let uncompressed: &[u16] = &[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ];

        let compressed = compress(uncompressed).unwrap();
        let decompressed = decompress(&compressed, uncompressed.len()).unwrap();

        assert_eq!(uncompressed, decompressed.as_slice());
    }

    // fixed seed so the randomized tests are reproducible
    const SEED: [u8; 32] = [
        12,155,32,34,112,109,98,54,
        12,255,32,34,112,109,98,55,
        12,155,32,34,12,109,98,54,
        12,35,32,34,112,109,48,54,
    ];
}
|
||||
437
vendor/exr/src/compression/piz/mod.rs
vendored
Normal file
437
vendor/exr/src/compression/piz/mod.rs
vendored
Normal file
@@ -0,0 +1,437 @@
|
||||
|
||||
|
||||
//! The PIZ compression method is a wavelet compression,
|
||||
//! based on the PIZ image format, customized for OpenEXR.
|
||||
// inspired by https://github.com/AcademySoftwareFoundation/openexr/blob/master/OpenEXR/IlmImf/ImfPizCompressor.cpp
|
||||
|
||||
mod huffman;
|
||||
mod wavelet;
|
||||
|
||||
use crate::prelude::*;
|
||||
use crate::io::Data;
|
||||
use crate::meta::attribute::*;
|
||||
use crate::compression::{ByteVec, Bytes, mod_p};
|
||||
use crate::error::{usize_to_i32, usize_to_u16};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
|
||||
// Number of distinct u16 sample values (65536); the lookup tables below have one entry per value.
const U16_RANGE: usize = (1_i32 << 16_i32) as usize;
// One presence bit per possible u16 value, packed into bytes (65536 / 8 = 8192 bytes).
const BITMAP_SIZE: usize = (U16_RANGE as i32 >> 3_i32) as usize;
|
||||
|
||||
/// Per-channel bookkeeping while (de)compressing a block:
/// tracks where this channel's samples live inside the shared `u16` scratch buffer.
#[derive(Debug)]
struct ChannelData {
    // start of this channel's region within the temporary u16 buffer
    tmp_start_index: usize,
    // cursor within the region, advanced one scan line at a time
    tmp_end_index: usize,

    // subsampled resolution of this channel inside the current block
    resolution: Vec2<usize>,
    // vertical sampling rate (1 = every row is present)
    y_sampling: usize,
    // u16 values per sample: derived from bytes_per_sample / f16-size,
    // so f16 channels store 1 value and larger sample types store more
    samples_per_pixel: usize,
}
|
||||
|
||||
|
||||
/// Decompress a PIZ-compressed block back into raw little-endian pixel bytes.
///
/// Stream layout (as read below): min/max non-zero bitmap byte index,
/// the non-zero bitmap slice, an i32 length for the huffman data, then the
/// huffman-compressed samples. After huffman decoding, each channel is
/// wavelet-decoded in place, the compact values are expanded through the
/// reverse lookup table, and samples are re-interleaved into scan lines.
///
/// Returns `Err` on malformed compression data; an empty input yields an empty output.
pub fn decompress(
    channels: &ChannelList,
    compressed: ByteVec,
    rectangle: IntegerBounds,
    expected_byte_size: usize, // TODO remove expected byte size as it can be computed with `rectangle.size.area() * channels.bytes_per_pixel`
    pedantic: bool
) -> Result<ByteVec>
{
    let expected_u16_count = expected_byte_size / 2;
    debug_assert_eq!(expected_byte_size, rectangle.size.area() * channels.bytes_per_pixel);
    debug_assert!(!channels.list.is_empty());

    if compressed.is_empty() {
        return Ok(Vec::new());
    }

    debug_assert_ne!(expected_u16_count, 0);

    let mut bitmap = vec![0_u8; BITMAP_SIZE]; // FIXME use bit_vec!

    let mut remaining_input = compressed.as_slice();
    let min_non_zero = u16::read(&mut remaining_input)? as usize;
    let max_non_zero = u16::read(&mut remaining_input)? as usize;

    // reject indices that would read past the bitmap (untrusted input)
    if max_non_zero >= BITMAP_SIZE || min_non_zero >= BITMAP_SIZE {
        return Err(Error::invalid("compression data"));
    }

    // only the non-zero region of the bitmap is stored in the stream
    if min_non_zero <= max_non_zero {
        u8::read_slice(&mut remaining_input, &mut bitmap[min_non_zero ..= max_non_zero])?;
    }

    let (lookup_table, max_value) = reverse_lookup_table_from_bitmap(&bitmap);

    {
        // declared byte length of the following huffman data
        let length = i32::read(&mut remaining_input)?;
        if pedantic && length as i64 != remaining_input.len() as i64 {
            // TODO length might be smaller than remaining??
            return Err(Error::invalid("compression data"));
        }
    }

    let mut tmp_u16_buffer = huffman::decompress(remaining_input, expected_u16_count)?;

    // lay out one contiguous region per channel inside the temporary buffer
    let mut channel_data: SmallVec<[ChannelData; 6]> = {
        let mut tmp_read_index = 0;

        let channel_data = channels.list.iter().map(|channel| {
            let channel_data = ChannelData {
                tmp_start_index: tmp_read_index,
                tmp_end_index: tmp_read_index,
                y_sampling: channel.sampling.y(),
                resolution: channel.subsampled_resolution(rectangle.size),
                samples_per_pixel: channel.sample_type.bytes_per_sample() / SampleType::F16.bytes_per_sample()
            };

            tmp_read_index += channel_data.resolution.area() * channel_data.samples_per_pixel;
            channel_data
        }).collect();

        debug_assert_eq!(tmp_read_index, expected_u16_count);
        channel_data
    };

    // undo the wavelet transform, channel by channel
    for channel in &channel_data {
        let u16_count = channel.resolution.area() * channel.samples_per_pixel;
        let u16s = &mut tmp_u16_buffer[channel.tmp_start_index .. channel.tmp_start_index + u16_count];

        for offset in 0..channel.samples_per_pixel { // if channel is 32 bit, compress interleaved as two 16 bit values
            wavelet::decode(
                &mut u16s[offset..],
                channel.resolution,
                Vec2(channel.samples_per_pixel, channel.resolution.x() * channel.samples_per_pixel),
                max_value
            )?;
        }
    }

    // Expand the pixel data to their original range
    apply_lookup_table(&mut tmp_u16_buffer, &lookup_table);

    // let out_buffer_size = (max_scan_line_size * scan_line_count) + 65536 + 8192; // TODO not use expected byte size?
    let mut out = Vec::with_capacity(expected_byte_size);

    // re-interleave per-channel sample runs into scan-line order
    for y in rectangle.position.y() .. rectangle.end().y() {
        for channel in &mut channel_data {
            // skip rows this channel does not sample
            if mod_p(y, usize_to_i32(channel.y_sampling)) != 0 {
                continue;
            }

            let u16s_per_line = channel.resolution.x() * channel.samples_per_pixel;
            let next_tmp_end_index = channel.tmp_end_index + u16s_per_line;
            let values = &tmp_u16_buffer[channel.tmp_end_index .. next_tmp_end_index];
            channel.tmp_end_index = next_tmp_end_index;

            // TODO do not convert endianness for f16-only images
            // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
            // We can support uncompressed data in the machine's native format
            // if all image channels are of type HALF, and if the Xdr and the
            // native representations of a half have the same size.
            u16::write_slice(&mut out, values).expect("write to in-memory failed");
        }
    }

    // sanity check: channel regions must be contiguous and fully consumed
    for (previous, current) in channel_data.iter().zip(channel_data.iter().skip(1)) {
        debug_assert_eq!(previous.tmp_end_index, current.tmp_start_index);
    }

    debug_assert_eq!(channel_data.last().unwrap().tmp_end_index, tmp_u16_buffer.len());
    debug_assert_eq!(out.len(), expected_byte_size);

    // TODO optimize for when all channels are f16!
    // we should be able to omit endianness conversions in that case
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    Ok(super::convert_little_endian_to_current(out, channels, rectangle))
}
|
||||
|
||||
|
||||
|
||||
/// Compress raw pixel bytes with the PIZ scheme.
///
/// Pipeline (inverse of `decompress`): de-interleave scan lines into one
/// contiguous `u16` region per channel, build the value-presence bitmap and
/// compact the value range through the forward lookup table, wavelet-encode
/// each channel in place, then huffman-compress the whole buffer.
/// An empty input yields an empty output.
pub fn compress(
    channels: &ChannelList,
    uncompressed: ByteVec,
    rectangle: IntegerBounds
) -> Result<ByteVec>
{
    if uncompressed.is_empty() {
        return Ok(Vec::new());
    }

    // TODO do not convert endianness for f16-only images
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    let uncompressed = super::convert_current_to_little_endian(uncompressed, channels, rectangle);
    let uncompressed = uncompressed.as_slice();// TODO no alloc

    let mut tmp = vec![0_u16; uncompressed.len() / 2 ];
    // lay out one contiguous region per channel inside the temporary buffer
    let mut channel_data: SmallVec<[ChannelData; 6]> = {
        let mut tmp_end_index = 0;

        let vec = channels.list.iter().map(|channel| {
            let number_samples = channel.subsampled_resolution(rectangle.size);
            // u16 values per sample (1 for f16, more for larger sample types)
            let byte_size = channel.sample_type.bytes_per_sample() / SampleType::F16.bytes_per_sample();
            let byte_count = byte_size * number_samples.area();

            let channel = ChannelData {
                tmp_end_index,
                tmp_start_index: tmp_end_index,
                y_sampling: channel.sampling.y(),
                resolution: number_samples,
                samples_per_pixel: byte_size,
            };

            tmp_end_index += byte_count;
            channel
        }).collect();

        debug_assert_eq!(tmp_end_index, tmp.len());
        vec
    };

    // de-interleave scan lines into the per-channel regions
    let mut remaining_uncompressed_bytes = uncompressed;
    for y in rectangle.position.y() .. rectangle.end().y() {
        for channel in &mut channel_data {
            // skip rows this channel does not sample
            if mod_p(y, usize_to_i32(channel.y_sampling)) != 0 { continue; }
            let u16s_per_line = channel.resolution.x() * channel.samples_per_pixel;
            let next_tmp_end_index = channel.tmp_end_index + u16s_per_line;
            let target = &mut tmp[channel.tmp_end_index .. next_tmp_end_index];
            channel.tmp_end_index = next_tmp_end_index;

            // TODO do not convert endianness for f16-only images
            // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
            // We can support uncompressed data in the machine's native format
            // if all image channels are of type HALF, and if the Xdr and the
            // native representations of a half have the same size.
            u16::read_slice(&mut remaining_uncompressed_bytes, target).expect("in-memory read failed");
        }
    }

    // compact the occurring values into a dense index range
    let (min_non_zero, max_non_zero, bitmap) = bitmap_from_data(&tmp);
    let (max_value, table) = forward_lookup_table_from_bitmap(&bitmap);
    apply_lookup_table(&mut tmp, &table);

    let mut piz_compressed = Vec::with_capacity(uncompressed.len() / 2);
    u16::try_from(min_non_zero)?.write(&mut piz_compressed)?;
    u16::try_from(max_non_zero)?.write(&mut piz_compressed)?;

    // only the non-zero region of the bitmap is written to the stream
    if min_non_zero <= max_non_zero {
        piz_compressed.extend_from_slice(&bitmap[min_non_zero ..= max_non_zero]);
    }

    // wavelet-encode each channel in place
    for channel in channel_data {
        for offset in 0 .. channel.samples_per_pixel {
            wavelet::encode(
                &mut tmp[channel.tmp_start_index + offset .. channel.tmp_end_index],
                channel.resolution,
                Vec2(channel.samples_per_pixel, channel.resolution.x() * channel.samples_per_pixel),
                max_value
            )?;
        }
    }

    let huffman_compressed: Vec<u8> = huffman::compress(&tmp)?;
    u8::write_i32_sized_slice(&mut piz_compressed, &huffman_compressed).expect("in-memory write failed");

    Ok(piz_compressed)
}
|
||||
|
||||
|
||||
pub fn bitmap_from_data(data: &[u16]) -> (usize, usize, Vec<u8>) {
|
||||
let mut bitmap = vec![0_u8; BITMAP_SIZE];
|
||||
|
||||
for value in data {
|
||||
bitmap[*value as usize >> 3] |= 1 << (*value as u8 & 7);
|
||||
}
|
||||
|
||||
bitmap[0] = bitmap[0] & !1; // zero is not explicitly stored in the bitmap; we assume that the data always contain zeroes
|
||||
|
||||
let min_index = bitmap.iter().position(|&value| value != 0);
|
||||
let max_index = min_index.map(|min| // only if min was found
|
||||
min + bitmap[min..].iter().rposition(|&value| value != 0).expect("[min] not found")
|
||||
);
|
||||
|
||||
(min_index.unwrap_or(0), max_index.unwrap_or(0), bitmap)
|
||||
}
|
||||
|
||||
pub fn forward_lookup_table_from_bitmap(bitmap: &[u8]) -> (u16, Vec<u16>) {
|
||||
debug_assert_eq!(bitmap.len(), BITMAP_SIZE);
|
||||
|
||||
let mut table = vec![0_u16; U16_RANGE];
|
||||
let mut count = 0_usize;
|
||||
|
||||
for (index, entry) in table.iter_mut().enumerate() {
|
||||
if index == 0 || bitmap[index >> 3] as usize & (1 << (index & 7)) != 0 {
|
||||
*entry = usize_to_u16(count).unwrap();
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
(usize_to_u16(count - 1).unwrap(), table)
|
||||
}
|
||||
|
||||
fn reverse_lookup_table_from_bitmap(bitmap: Bytes<'_>) -> (Vec<u16>, u16) {
|
||||
let mut table = Vec::with_capacity(U16_RANGE);
|
||||
|
||||
for index in 0 .. U16_RANGE { // cannot use iter because filter removes capacity sizehint
|
||||
if index == 0 || ((bitmap[index >> 3] as usize & (1 << (index & 7))) != 0) {
|
||||
table.push(usize_to_u16(index).unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
debug_assert!(!table.is_empty());
|
||||
let max_value = usize_to_u16(table.len() - 1).unwrap();
|
||||
|
||||
// fill remaining up to u16 range
|
||||
assert!(table.len() <= U16_RANGE);
|
||||
table.resize(U16_RANGE, 0);
|
||||
|
||||
(table, max_value)
|
||||
}
|
||||
|
||||
/// Replaces every sample in `data` with its entry in `table`, in place.
///
/// Panics if any sample value is out of bounds for `table`.
fn apply_lookup_table(data: &mut [u16], table: &[u16]) {
    for sample in data.iter_mut() {
        *sample = table[usize::from(*sample)];
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::prelude::*;
    use crate::compression::ByteVec;
    use crate::compression::piz;
    use crate::meta::attribute::*;

    /// Fills a block of the given channel layout with pseudo-random bytes
    /// (a repeating 37-byte random pattern) and asserts an exact PIZ round trip.
    fn test_roundtrip_noise_with(channels: ChannelList, rectangle: IntegerBounds){
        let pixel_bytes: ByteVec = (0 .. 37).map(|_| rand::random()).collect::<Vec<u8>>().into_iter()
            .cycle().take(channels.bytes_per_pixel * rectangle.size.area())
            .collect();

        let compressed = piz::compress(&channels, pixel_bytes.clone(), rectangle).unwrap();
        let decompressed = piz::decompress(&channels, compressed, rectangle, pixel_bytes.len(), true).unwrap();

        assert_eq!(pixel_bytes, decompressed);
    }

    /// Round-trips a two-channel image for each supported sample type.
    #[test]
    fn roundtrip_any_sample_type(){
        for &sample_type in &[SampleType::F16, SampleType::F32, SampleType::U32] {
            let channel = ChannelDescription {
                sample_type,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            };

            let channels = ChannelList::new(smallvec![ channel.clone(), channel ]);

            // negative position exercises the signed block coordinates
            let rectangle = IntegerBounds {
                position: Vec2(-30, 100),
                size: Vec2(1080, 720),
            };

            test_roundtrip_noise_with(channels, rectangle);
        }
    }

    /// Round-trips an image mixing an f16 and an f32 channel.
    #[test]
    fn roundtrip_two_channels(){
        let channel = ChannelDescription {
            sample_type: SampleType::F16,

            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1,1)
        };

        let channel2 = ChannelDescription {
            sample_type: SampleType::F32,

            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1,1)
        };

        let channels = ChannelList::new(smallvec![ channel, channel2 ]);

        let rectangle = IntegerBounds {
            position: Vec2(-3, 1),
            size: Vec2(223, 3132),
        };

        test_roundtrip_noise_with(channels, rectangle);
    }

    /// Round-trips seven channels, more than the SmallVec inline capacity of six,
    /// with mixed f32/f16/u32 sample types.
    #[test]
    fn roundtrip_seven_channels(){
        let channels = ChannelList::new(smallvec![
            ChannelDescription {
                sample_type: SampleType::F32,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },

            ChannelDescription {
                sample_type: SampleType::F32,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },

            ChannelDescription {
                sample_type: SampleType::F32,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },

            ChannelDescription {
                sample_type: SampleType::F16,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },

            ChannelDescription {
                sample_type: SampleType::F32,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },

            ChannelDescription {
                sample_type: SampleType::F32,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },

            ChannelDescription {
                sample_type: SampleType::U32,

                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1,1)
            },
        ]);

        let rectangle = IntegerBounds {
            position: Vec2(-3, 1),
            size: Vec2(1323, 132),
        };

        test_roundtrip_noise_with(channels, rectangle);
    }

}
|
||||
422
vendor/exr/src/compression/piz/wavelet.rs
vendored
Normal file
422
vendor/exr/src/compression/piz/wavelet.rs
vendored
Normal file
@@ -0,0 +1,422 @@
|
||||
|
||||
//! Wavelet encoding and decoding.
|
||||
// see https://github.com/AcademySoftwareFoundation/openexr/blob/8cd1b9210855fa4f6923c1b94df8a86166be19b1/OpenEXR/IlmImf/ImfWav.cpp
|
||||
|
||||
use crate::error::IoResult;
|
||||
use crate::math::Vec2;
|
||||
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub fn encode(buffer: &mut [u16], count: Vec2<usize>, size: Vec2<usize>, max_value: u16) -> IoResult<()> {
|
||||
if is_14_bit(max_value) { encode_14_or_16_bit(buffer, count, size, true) }
|
||||
else { encode_14_or_16_bit(buffer, count, size, false) }
|
||||
}
|
||||
|
||||
/// In-place 2D wavelet transform over a strided view of `buffer`.
///
/// `count` is the number of samples per axis, the second argument holds the
/// x/y strides in `u16` units. At each level `p2 = 2*p`, groups of 2x2
/// (or 2x1 / 1x2 at odd edges) samples are combined with the pairwise codec.
#[allow(unused)]
#[inline]
pub fn encode_14_or_16_bit(
    buffer: &mut [u16],
    Vec2(count_x, count_y): Vec2<usize>,
    Vec2(offset_x, offset_y): Vec2<usize>,
    is_14_bit: bool // true if maximum buffer[i] value < (1 << 14)
) -> IoResult<()>
{
    let count = count_x.min(count_y);
    let encode = if is_14_bit { encode_14bit } else { encode_16bit }; // assume inlining and constant propagation

    let mut p: usize = 1; // TODO i32?
    let mut p2: usize = 2; // TODO what is p??

    // one iteration per wavelet level, doubling the step each time
    while p2 <= count {

        let mut position_y = 0;
        let end_y = 0 + offset_y * (count_y - p2);
        let (offset1_x, offset1_y) = (offset_x * p, offset_y * p);
        let (offset2_x, offset2_y) = (offset_x * p2, offset_y * p2);

        // y-loop
        while position_y <= end_y { // TODO: for py in (index..ey).nth(offset_2.0)

            let mut position_x = position_y;
            let end_x = position_x + offset_x * (count_x - p2);

            // x-loop: transform one 2x2 group of samples per iteration
            while position_x <= end_x {
                let pos_right = position_x + offset1_x;
                let pos_top = position_x + offset1_y;
                let pos_top_right = pos_top + offset1_x;

                // hoisted bounds checks for the four indexed accesses below
                assert!(position_x < buffer.len());
                assert!(pos_right < buffer.len());
                assert!(pos_top < buffer.len());
                assert!(pos_top_right < buffer.len());

                if is_14_bit {
                    debug_assert!(self::is_14_bit(buffer[position_x]));
                    debug_assert!(self::is_14_bit(buffer[pos_right]));
                }

                // horizontal pass, then vertical pass over the pair results
                let (center, right) = encode(buffer[position_x], buffer[pos_right]);
                let (top, top_right) = encode(buffer[pos_top], buffer[pos_top_right]);

                let (center, top) = encode(center, top);
                let (right, top_right) = encode(right, top_right);

                buffer[position_x] = center; // TODO rustify
                buffer[pos_top] = top;
                buffer[pos_right] = right;
                buffer[pos_top_right] = top_right;

                position_x += offset2_x;
            }

            // encode remaining odd pixel column
            if count_x & p != 0 {
                let pos_top = position_x + offset1_y;
                let (center, top) = encode(buffer[position_x], buffer[pos_top]);

                buffer[position_x] = center;
                buffer[pos_top] = top;
            }

            position_y += offset2_y;
        }

        // encode possibly remaining odd row
        if count_y & p != 0 {
            let mut position_x = position_y;
            let end_x = position_y + offset_x * (count_x - p2);

            while position_x <= end_x {
                let pos_right = position_x + offset1_x;
                let (center, right) = encode(buffer[position_x], buffer[pos_right]);

                buffer[pos_right] = right;
                buffer[position_x] = center;

                position_x += offset2_x;
            }
        }

        // advance to the next coarser level
        p = p2;
        p2 <<= 1;
    }

    Ok(())
}
|
||||
|
||||
#[inline]
|
||||
pub fn decode(buffer: &mut [u16], count: Vec2<usize>, size: Vec2<usize>, max_value: u16) -> IoResult<()> {
|
||||
if is_14_bit(max_value) { decode_14_or_16_bit(buffer, count, size, true) }
|
||||
else { decode_14_or_16_bit(buffer, count, size, false) }
|
||||
}
|
||||
|
||||
/// In-place inverse of `encode_14_or_16_bit`: walks the wavelet levels from
/// coarsest to finest, undoing the pairwise transform at each level.
#[inline]
pub fn decode_14_or_16_bit(
    buffer: &mut [u16],
    Vec2(count_x, count_y): Vec2<usize>,
    Vec2(offset_x, offset_y): Vec2<usize>,
    is_14_bit: bool // true if maximum buffer[i] value < (1 << 14)
) -> IoResult<()>
{
    let count = count_x.min(count_y);
    let decode = if is_14_bit { decode_14bit } else { decode_16bit }; // assume inlining and constant propagation

    let mut p: usize = 1; // TODO i32?
    let mut p2: usize; // TODO i32?

    // search max level: find the largest power of two not exceeding `count`
    while p <= count {
        p <<= 1;
    }

    p >>= 1;
    p2 = p;
    p >>= 1;

    // walk the levels in reverse order compared to encoding
    while p >= 1 {

        let mut position_y = 0;
        let end_y = 0 + offset_y * (count_y - p2);

        let (offset1_x, offset1_y) = (offset_x * p, offset_y * p);
        let (offset2_x, offset2_y) = (offset_x * p2, offset_y * p2);

        debug_assert_ne!(offset_x, 0, "offset should not be zero");
        debug_assert_ne!(offset_y, 0, "offset should not be zero");

        while position_y <= end_y {
            let mut position_x = position_y;
            let end_x = position_x + offset_x * (count_x - p2);

            // undo one 2x2 group per iteration
            while position_x <= end_x {
                let pos_right = position_x + offset1_x;
                let pos_top = position_x + offset1_y;
                let pos_top_right = pos_top + offset1_x;

                // hoisted bounds checks for the four indexed accesses below
                assert!(position_x < buffer.len());
                assert!(pos_right < buffer.len());
                assert!(pos_top < buffer.len());
                assert!(pos_top_right < buffer.len());

                // vertical pass first, then horizontal — the reverse of encoding
                let (center, top) = decode(buffer[position_x], buffer[pos_top]);
                let (right, top_right) = decode(buffer[pos_right], buffer[pos_top_right]);

                let (center, right) = decode(center, right);
                let (top, top_right) = decode(top, top_right);

                buffer[position_x] = center; // TODO rustify
                buffer[pos_top] = top;
                buffer[pos_right] = right;
                buffer[pos_top_right] = top_right;

                position_x += offset2_x;
            }

            // decode last odd remaining x value
            if count_x & p != 0 {
                let pos_top = position_x + offset1_y;
                let (center, top) = decode(buffer[position_x], buffer[pos_top]);

                buffer[position_x] = center;
                buffer[pos_top] = top;
            }

            position_y += offset2_y;
        }

        // decode remaining odd row
        if count_y & p != 0 {
            let mut position_x = position_y;
            let end_x = position_x + offset_x * (count_x - p2);

            while position_x <= end_x {
                let pos_right = position_x + offset1_x;
                let (center, right) = decode(buffer[position_x], buffer[pos_right]);

                buffer[position_x] = center;
                buffer[pos_right] = right;

                position_x += offset2_x;
            }
        }

        // step down to the next finer level
        p2 = p;
        p >>= 1;
    }

    Ok(())
}
|
||||
|
||||
/// True if the value fits into 14 bits, i.e. is less than `1 << 14`.
#[inline]
fn is_14_bit(value: u16) -> bool {
    const FOURTEEN_BIT_LIMIT: u16 = 1 << 14;
    value < FOURTEEN_BIT_LIMIT
}
|
||||
|
||||
/// Untransformed data values should be less than (1 << 14).
/// Encodes the pair as (truncated average, difference); exact inverse of [`decode_14bit`]
/// for in-range inputs.
#[inline]
#[allow(unused)]
fn encode_14bit(a: u16, b: u16) -> (u16, u16) {
    let left = a as i16;
    let right = b as i16;

    let average = (left + right) >> 1;
    let difference = left - right;

    (average as u16, difference as u16) // TODO explicitly wrap?
}

/// Inverse of [`encode_14bit`]: reconstructs the original pair from the
/// (average, difference) representation.
#[inline]
#[allow(unused)]
fn decode_14bit(l: u16, h: u16) -> (u16, u16) {
    let average = l as i16;
    let difference = (h as i16) as i32;

    // rebuild `a`, compensating for the truncation of the encoder's `>> 1`
    // with the low bit of the difference
    let a_wide = average as i32 + (difference & 1) + (difference >> 1);

    let a = a_wide as i16; // TODO explicitly wrap?
    let b = (a_wide - difference) as i16; // TODO explicitly wrap?

    (a as u16, b as u16) // TODO explicitly wrap?
}
|
||||
|
||||
|
||||
// The 16-bit codec works modulo 2^16, biasing values by half the range
// so that pair differences remain representable.
const BIT_COUNT: i32 = 16;
const OFFSET: i32 = 1 << (BIT_COUNT - 1);
const MOD_MASK: i32 = (1 << BIT_COUNT) - 1;

/// Encodes the pair as (biased average, difference) modulo 2^16.
/// Exact inverse of [`decode_16bit`] for all `u16` inputs.
#[inline]
fn encode_16bit(a: u16, b: u16) -> (u16, u16) {
    let first = a as i32;
    let second = b as i32;

    let biased = (first + OFFSET) & MOD_MASK;
    let mut average = (biased + second) >> 1;
    let difference = biased - second;

    // fold the sign of the difference into the average so decoding can undo it
    if difference < 0 {
        average = (average + OFFSET) & MOD_MASK;
    }

    (average as u16, (difference & MOD_MASK) as u16) // TODO explicitly wrap?
}

/// Inverse of [`encode_16bit`].
#[inline]
fn decode_16bit(l: u16, h: u16) -> (u16, u16) {
    let average = l as i32;
    let difference = h as i32;

    let b = (average - (difference >> 1)) & MOD_MASK;
    let a = (difference + b - OFFSET) & MOD_MASK;

    (a as u16, b as u16) // TODO explicitly wrap?
}
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::math::Vec2;
    use crate::compression::piz::wavelet::is_14_bit;

    /// Round-trips sample pairs through the 14-bit pair codec.
    #[test]
    fn roundtrip_14_bit_values(){
        let data = [
            (13, 54), (3, 123), (423, 53), (1, 23), (23, 515), (513, 43),
            (16374, 16381), (16284, 3), (2, 1), (0, 0), (0, 4), (3, 0)
        ];

        for &values in &data {
            let (l, h) = super::encode_14bit(values.0, values.1);
            let result = super::decode_14bit(l, h);
            assert_eq!(values, result);
        }
    }

    /// Round-trips sample pairs through the 16-bit pair codec,
    /// including values above the 14-bit limit.
    #[test]
    fn roundtrip_16_bit_values(){
        let data = [
            (13, 54), (3, 123), (423, 53), (1, 23), (23, 515), (513, 43),
            (16385, 56384), (18384, 36384), (2, 1), (0, 0), (0, 4), (3, 0)
        ];

        for &values in &data {
            let (l, h) = super::encode_16bit(values.0, values.1);
            let result = super::decode_16bit(l, h);
            assert_eq!(values, result);
        }
    }

    /// Full 2D wavelet round trip over a small image whose values all fit in 14 bits.
    #[test]
    fn roundtrip_14bit_image(){
        let data: [u16; 6 * 4] = [
            13, 54, 3, 123, 423, 53,
            1, 23, 23, 515, 513, 43,
            16374, 16381, 16284, 3, 2, 1,
            0, 0, 0, 4, 3, 0,
        ];

        let max = *data.iter().max().unwrap();
        debug_assert!(is_14_bit(max));

        let mut transformed = data.clone();

        super::encode(&mut transformed, Vec2(6, 4), Vec2(1,6), max).unwrap();
        super::decode(&mut transformed, Vec2(6, 4), Vec2(1,6), max).unwrap();

        assert_eq!(data, transformed);
    }

    /// Full 2D wavelet round trip over a small image requiring the 16-bit codec.
    #[test]
    fn roundtrip_16bit_image(){
        let data: [u16; 6 * 4] = [
            13, 54, 3, 123, 423, 53,
            1, 23, 23, 515, 513, 43,
            16385, 56384, 18384, 36384, 2, 1,
            0, 0, 0, 4, 3, 0,
        ];

        let max = *data.iter().max().unwrap();
        debug_assert!(!is_14_bit(max));

        let mut transformed = data.clone();

        super::encode(&mut transformed, Vec2(6, 4), Vec2(1,6), max).unwrap();
        super::decode(&mut transformed, Vec2(6, 4), Vec2(1,6), max).unwrap();

        assert_eq!(data, transformed);
    }

    /// inspired by https://github.com/AcademySoftwareFoundation/openexr/blob/master/OpenEXR/IlmImfTest/testWav.cpp
    #[test]
    fn ground_truth(){
        // mix of tiny, square, non-square, power-of-two and prime sizes
        test_size(1, 1);
        test_size(2, 2);
        test_size(32, 32);
        test_size(1024, 16);
        test_size(16, 1024);
        test_size(997, 37);
        test_size(37, 997);
        test_size(1024, 1024);
        test_size(997, 997);

        // round-trips several characteristic patterns at the given size
        fn test_size(x: usize, y: usize) {
            let xy = Vec2(x, y);
            roundtrip(noise_14bit(xy), xy);
            roundtrip(noise_16bit(xy), xy);
            roundtrip(solid(xy, 0), xy);
            roundtrip(solid(xy, 1), xy);
            roundtrip(solid(xy, 0xffff), xy);
            roundtrip(solid(xy, 0x3fff), xy);
            roundtrip(solid(xy, 0x3ffe), xy);
            roundtrip(solid(xy, 0x3fff), xy);
            roundtrip(solid(xy, 0xfffe), xy);
            roundtrip(solid(xy, 0xffff), xy);
            roundtrip(verticals(xy, 0xffff), xy);
            roundtrip(verticals(xy, 0x3fff), xy);
            roundtrip(horizontals(xy, 0xffff), xy);
            roundtrip(horizontals(xy, 0x3fff), xy);
            roundtrip(diagonals(xy, 0xffff), xy);
            roundtrip(diagonals(xy, 0x3fff), xy);
        }

        // encode + decode must reproduce the data exactly
        fn roundtrip(data: Vec<u16>, size: Vec2<usize>){
            assert_eq!(data.len(), size.area());

            let max = *data.iter().max().unwrap();
            let offset = Vec2(1, size.0);

            let mut transformed = data.clone();
            super::encode(&mut transformed, size, offset, max).unwrap();
            super::decode(&mut transformed, size, offset, max).unwrap();

            assert_eq!(data, transformed);
        }

        // random values restricted to 14 bits
        fn noise_14bit(size: Vec2<usize>) -> Vec<u16> {
            (0..size.area()).map(|_| (rand::random::<i32>() & 0x3fff) as u16).collect()
        }

        // random values over the full 16-bit range
        fn noise_16bit(size: Vec2<usize>) -> Vec<u16> {
            (0..size.area()).map(|_| rand::random::<u16>()).collect()
        }

        fn solid(size: Vec2<usize>, value: u16) -> Vec<u16> {
            vec![value; size.area()]
        }

        // alternating columns of 0 and max_value
        fn verticals(size: Vec2<usize>, max_value: u16) -> Vec<u16> {
            std::iter::repeat_with(|| (0 .. size.0).map(|x| if x & 1 != 0 { 0 } else { max_value }))
                .take(size.1).flatten().collect()
        }

        // alternating rows of 0 and max_value
        fn horizontals(size: Vec2<usize>, max_value: u16) -> Vec<u16> {
            (0 .. size.1)
                .flat_map(|y| std::iter::repeat(if y & 1 != 0 { 0 } else { max_value }).take(size.0))
                .collect()
        }

        // checkerboard of 0 and max_value
        fn diagonals(size: Vec2<usize>, max_value: u16) -> Vec<u16> {
            (0 .. size.1).flat_map(|y| {
                (0 .. size.0).map(move |x| if (x + y) & 1 != 0 { 0 } else { max_value })
            }).collect()
        }

    }
}
|
||||
261
vendor/exr/src/compression/pxr24.rs
vendored
Normal file
261
vendor/exr/src/compression/pxr24.rs
vendored
Normal file
@@ -0,0 +1,261 @@
|
||||
|
||||
//! Lossy compression for F32 data, but lossless compression for U32 and F16 data.
|
||||
// see https://github.com/AcademySoftwareFoundation/openexr/blob/master/OpenEXR/IlmImf/ImfPxr24Compressor.cpp
|
||||
|
||||
// This compressor is based on source code that was contributed to
|
||||
// OpenEXR by Pixar Animation Studios. The compression method was
|
||||
// developed by Loren Carpenter.
|
||||
|
||||
|
||||
// The compressor preprocesses the pixel data to reduce entropy, and then calls zlib.
|
||||
// Compression of HALF and UINT channels is lossless, but compressing
|
||||
// FLOAT channels is lossy: 32-bit floating-point numbers are converted
|
||||
// to 24 bits by rounding the significand to 15 bits.
|
||||
//
|
||||
// When the compressor is invoked, the caller has already arranged
|
||||
// the pixel data so that the values for each channel appear in a
|
||||
// contiguous block of memory. The compressor converts the pixel
|
||||
// values to unsigned integers: For UINT, this is a no-op. HALF
|
||||
// values are simply re-interpreted as 16-bit integers. FLOAT
|
||||
// values are converted to 24 bits, and the resulting bit patterns
|
||||
// are interpreted as integers. The compressor then replaces each
|
||||
// value with the difference between the value and its left neighbor.
|
||||
// This turns flat fields in the image into zeroes, and ramps into
|
||||
// strings of similar values. Next, each difference is split into
|
||||
// 2, 3 or 4 bytes, and the bytes are transposed so that all the
|
||||
// most significant bytes end up in a contiguous block, followed
|
||||
// by the second most significant bytes, and so on. The resulting
|
||||
// string of bytes is compressed with zlib.
|
||||
|
||||
use super::*;
|
||||
|
||||
use crate::error::Result;
|
||||
use lebe::io::ReadPrimitive;
|
||||
|
||||
|
||||
// scanline decompression routine, see https://github.com/openexr/openexr/blob/master/OpenEXR/IlmImf/ImfScanLineInputFile.cpp
|
||||
// 1. Uncompress the data, if necessary (If the line is uncompressed, it's in XDR format, regardless of the compressor's output format.)
|
||||
// 3. Convert one scan line's worth of pixel data back from the machine-independent representation
|
||||
// 4. Fill the frame buffer with pixel data, respective to sampling and whatnot
|
||||
|
||||
|
||||
/// Compress one block of pixel data with the PXR24 scheme:
/// delta-encode each scan line per channel, transpose the bytes so that the
/// most significant bytes form contiguous runs, then zlib-compress the result.
/// `F32` samples are lossily reduced to 24 bits (see `f32_to_f24`) beforehand.
#[cfg_attr(target_endian = "big", allow(unused, unreachable_code))]
pub fn compress(channels: &ChannelList, remaining_bytes: ByteVec, area: IntegerBounds) -> Result<ByteVec> {
    #[cfg(target_endian = "big")] {
        return Err(Error::unsupported(
            "PXR24 compression method not supported yet on big endian processor architecture"
        ))
    }

    if remaining_bytes.is_empty() { return Ok(Vec::new()); }

    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    let remaining_bytes = super::convert_current_to_little_endian(remaining_bytes, channels, area);
    let mut remaining_bytes = remaining_bytes.as_slice(); // TODO less allocation

    // number of bytes one pixel occupies in the transposed intermediate buffer
    // (F32 is stored as 3 bytes here, which is the lossy part of PXR24)
    let bytes_per_pixel: usize = channels.list.iter()
        .map(|channel| match channel.sample_type {
            SampleType::F16 => 2, SampleType::F32 => 3, SampleType::U32 => 4,
        })
        .sum();

    let mut raw = vec![0_u8; bytes_per_pixel * area.size.area()];

    {
        let mut write = raw.as_mut_slice();

        // TODO this loop should be an iterator in the `IntegerBounds` class, as it is used in all compressio methods
        for y in area.position.1..area.end().1 {
            for channel in &channels.list {
                // skip scan lines that are absent due to vertical subsampling
                if mod_p(y, usize_to_i32(channel.sampling.1)) != 0 { continue; }

                // this apparently can't be a closure in Rust 1.43 due to borrowing ambiguity
                let sample_count_x = channel.subsampled_resolution(area.size).0;
                // splits one byte plane of `sample_count_x` bytes off the output buffer
                macro_rules! split_off_write_slice { () => {{
                    let (slice, rest) = write.split_at_mut(sample_count_x);
                    write = rest;
                    slice
                }}; }

                // each sample is stored as the difference to the previous sample of this line
                let mut previous_pixel: u32 = 0;

                match channel.sample_type {
                    SampleType::F16 => {
                        // two output byte planes per scan line
                        let out_byte_tuples = split_off_write_slice!().iter_mut()
                            .zip(split_off_write_slice!());

                        for (out_byte_0, out_byte_1) in out_byte_tuples {
                            let pixel = u16::read_from_native_endian(&mut remaining_bytes).unwrap() as u32;
                            let [byte_1, byte_0] = (pixel.wrapping_sub(previous_pixel) as u16).to_ne_bytes();

                            *out_byte_0 = byte_0;
                            *out_byte_1 = byte_1;
                            previous_pixel = pixel;
                        }
                    },

                    SampleType::U32 => {
                        // four output byte planes per scan line
                        let out_byte_quadruplets = split_off_write_slice!().iter_mut()
                            .zip(split_off_write_slice!())
                            .zip(split_off_write_slice!())
                            .zip(split_off_write_slice!());

                        for (((out_byte_0, out_byte_1), out_byte_2), out_byte_3) in out_byte_quadruplets {
                            let pixel = u32::read_from_native_endian(&mut remaining_bytes).unwrap();
                            let [byte_3, byte_2, byte_1, byte_0] = pixel.wrapping_sub(previous_pixel).to_ne_bytes();

                            *out_byte_0 = byte_0;
                            *out_byte_1 = byte_1;
                            *out_byte_2 = byte_2;
                            *out_byte_3 = byte_3;
                            previous_pixel = pixel;
                        }
                    },

                    SampleType::F32 => {
                        // three output byte planes: samples are first truncated
                        // to 24 bits, dropping the least significant byte (lossy)
                        let out_byte_triplets = split_off_write_slice!().iter_mut()
                            .zip(split_off_write_slice!())
                            .zip(split_off_write_slice!());

                        for ((out_byte_0, out_byte_1), out_byte_2) in out_byte_triplets {
                            let pixel = f32_to_f24(f32::read_from_native_endian(&mut remaining_bytes).unwrap());
                            let [byte_2, byte_1, byte_0, _] = pixel.wrapping_sub(previous_pixel).to_ne_bytes();
                            previous_pixel = pixel;

                            *out_byte_0 = byte_0;
                            *out_byte_1 = byte_1;
                            *out_byte_2 = byte_2;
                        }
                    },
                }
            }
        }

        debug_assert_eq!(write.len(), 0, "bytes left after compression");
    }

    Ok(miniz_oxide::deflate::compress_to_vec_zlib(raw.as_slice(), 4))
}
|
||||
|
||||
/// Decompress one block of PXR24 pixel data:
/// zlib-inflate, then undo the byte transposition and the
/// per-scan-line delta encoding for each channel.
#[cfg_attr(target_endian = "big", allow(unused, unreachable_code))]
pub fn decompress(channels: &ChannelList, bytes: ByteVec, area: IntegerBounds, expected_byte_size: usize, pedantic: bool) -> Result<ByteVec> {
    #[cfg(target_endian = "big")] {
        return Err(Error::unsupported(
            "PXR24 decompression method not supported yet on big endian processor architecture"
        ))
    }

    // limit the inflated size so malformed input cannot allocate unboundedly
    let options = zune_inflate::DeflateOptions::default().set_limit(expected_byte_size).set_size_hint(expected_byte_size);
    let mut decoder = zune_inflate::DeflateDecoder::new_with_options(&bytes, options);
    let raw = decoder.decode_zlib()
        .map_err(|_| Error::invalid("zlib-compressed data malformed"))?; // TODO share code with zip?

    let mut read = raw.as_slice();
    let mut out = Vec::with_capacity(expected_byte_size.min(2048*4));

    for y in area.position.1 .. area.end().1 {
        for channel in &channels.list {
            // skip scan lines that are absent due to vertical subsampling
            if mod_p(y, usize_to_i32(channel.sampling.1)) != 0 { continue; }

            let sample_count_x = channel.subsampled_resolution(area.size).0;
            // splits one byte plane of `sample_count_x` bytes off the input
            let mut read_sample_line = ||{
                if sample_count_x > read.len() { return Err(Error::invalid("not enough data")) }
                let (samples, rest) = read.split_at(sample_count_x);
                read = rest;
                Ok(samples)
            };

            // running sum that reverses the delta encoding of this scan line
            let mut pixel_accumulation: u32 = 0;

            match channel.sample_type {
                SampleType::F16 => {
                    let sample_byte_pairs = read_sample_line()?.iter()
                        .zip(read_sample_line()?);

                    for (&in_byte_0, &in_byte_1) in sample_byte_pairs {
                        let difference = u16::from_ne_bytes([in_byte_1, in_byte_0]) as u32;
                        pixel_accumulation = pixel_accumulation.overflowing_add(difference).0;
                        out.extend_from_slice(&(pixel_accumulation as u16).to_ne_bytes());
                    }
                },

                SampleType::U32 => {
                    let sample_byte_quads = read_sample_line()?.iter()
                        .zip(read_sample_line()?)
                        .zip(read_sample_line()?)
                        .zip(read_sample_line()?);

                    for (((&in_byte_0, &in_byte_1), &in_byte_2), &in_byte_3) in sample_byte_quads {
                        let difference = u32::from_ne_bytes([in_byte_3, in_byte_2, in_byte_1, in_byte_0]);
                        pixel_accumulation = pixel_accumulation.overflowing_add(difference).0;
                        out.extend_from_slice(&pixel_accumulation.to_ne_bytes());
                    }
                },

                SampleType::F32 => {
                    let sample_byte_triplets = read_sample_line()?.iter()
                        .zip(read_sample_line()?).zip(read_sample_line()?);

                    for ((&in_byte_0, &in_byte_1), &in_byte_2) in sample_byte_triplets {
                        // the lowest byte was discarded by the lossy f32-to-f24
                        // conversion during compression; it is restored as zero
                        let difference = u32::from_ne_bytes([0, in_byte_2, in_byte_1, in_byte_0]);
                        pixel_accumulation = pixel_accumulation.overflowing_add(difference).0;
                        out.extend_from_slice(&pixel_accumulation.to_ne_bytes());
                    }
                }
            }
        }
    }

    if pedantic && !read.is_empty() {
        return Err(Error::invalid("too much data"));
    }

    Ok(super::convert_little_endian_to_current(out, channels, area))
}
|
||||
|
||||
|
||||
|
||||
|
||||
/// Conversion from 32-bit to 24-bit floating-point numbers.
/// Reverse conversion is just a simple 8-bit left shift.
///
/// Handles three cases: NaN (sign and top 15 significand bits are kept,
/// forced non-zero so it cannot become an infinity), infinity (exponent
/// kept as-is), and finite values (significand rounded to 15 bits,
/// falling back to truncation if rounding would overflow the exponent).
pub fn f32_to_f24(float: f32) -> u32 {
    let bits = float.to_bits();

    let sign     = bits & 0x8000_0000;
    let exponent = bits & 0x7f80_0000;
    let mantissa = bits & 0x007f_ffff;

    let shifted = if exponent != 0x7f80_0000 {
        // finite value: round the significand to 15 bits
        let rounded = ((exponent | mantissa) + (mantissa & 0x0000_0080)) >> 8;

        if rounded >= 0x7f8000 {
            // the value was close to FLT_MAX and rounding overflowed the
            // exponent; truncate the significand instead of rounding it
            (exponent | mantissa) >> 8
        }
        else { rounded }
    }
    else if mantissa == 0 {
        // an infinity: the exponent pattern is preserved
        exponent >> 8
    }
    else {
        // a NaN: keep the 15 leftmost significand bits, but if they are
        // all zero, set the lowest bit so the result stays a NaN
        let small_mantissa = mantissa >> 8;
        (exponent >> 8) | small_mantissa | u32::from(small_mantissa == 0)
    };

    (sign >> 8) | shifted
}
|
||||
112
vendor/exr/src/compression/rle.rs
vendored
Normal file
112
vendor/exr/src/compression/rle.rs
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
use super::*;
|
||||
use super::optimize_bytes::*;
|
||||
use super::Error;
|
||||
use super::Result;
|
||||
|
||||
// inspired by https://github.com/openexr/openexr/blob/master/OpenEXR/IlmImf/ImfRle.cpp
|
||||
|
||||
/// Runs shorter than this are emitted as literal bytes instead of a run.
const MIN_RUN_LENGTH : usize = 3;

/// Longest run or literal chunk; the count must fit a signed 8-bit byte.
const MAX_RUN_LENGTH : usize = 127;
|
||||
|
||||
|
||||
/// Decompress run-length encoded pixel data:
/// expand the runs and literal chunks, then undo the delta encoding
/// and byte-plane separation that `compress_bytes` applied.
pub fn decompress_bytes(
    channels: &ChannelList,
    compressed: ByteVec,
    rectangle: IntegerBounds,
    expected_byte_size: usize,
    pedantic: bool,
) -> Result<ByteVec> {
    let mut remaining = compressed.as_slice();
    let mut decompressed = Vec::with_capacity(expected_byte_size.min(8*2048));

    while !remaining.is_empty() && decompressed.len() != expected_byte_size {
        // signed count byte: negative means literal bytes follow,
        // non-negative means one repeated byte follows
        let count = take_1(&mut remaining)? as i8 as i32;

        if count < 0 {
            // take the next '-count' bytes as-is
            let values = take_n(&mut remaining, (-count) as usize)?;
            decompressed.extend_from_slice(values);
        }
        else {
            // repeat the next value 'count + 1' times
            let value = take_1(&mut remaining)?;
            decompressed.resize(decompressed.len() + count as usize + 1, value);
        }
    }

    if pedantic && !remaining.is_empty() {
        return Err(Error::invalid("data amount"));
    }

    // reverse the transformations applied before run-length encoding
    differences_to_samples(&mut decompressed);
    interleave_byte_blocks(&mut decompressed);
    Ok(super::convert_little_endian_to_current(decompressed, channels, rectangle))// TODO no alloc
}
|
||||
|
||||
/// Compress pixel data with run-length encoding:
/// separate the samples into byte planes, delta-encode them, then emit
/// runs of repeated bytes and chunks of literal bytes with signed count bytes.
pub fn compress_bytes(channels: &ChannelList, uncompressed: ByteVec, rectangle: IntegerBounds) -> Result<ByteVec> {
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    let mut data = super::convert_current_to_little_endian(uncompressed, channels, rectangle);// TODO no alloc

    separate_bytes_fragments(&mut data);
    samples_to_differences(&mut data);

    let mut compressed = Vec::with_capacity(data.len());
    let mut run_start = 0;
    let mut run_end = 1;

    while run_start < data.len() {
        // extend the run while the byte repeats, up to MAX_RUN_LENGTH + 1 bytes
        while
            run_end < data.len()
                && data[run_start] == data[run_end]
                && (run_end - run_start) as i32 - 1 < MAX_RUN_LENGTH as i32
        {
            run_end += 1;
        }

        if run_end - run_start >= MIN_RUN_LENGTH {
            // encode a run: non-negative count byte, then the repeated value
            compressed.push(((run_end - run_start) as i32 - 1) as u8);
            compressed.push(data[run_start]);
            run_start = run_end;

        } else {
            // gather literal bytes until a run of at least three equal
            // bytes begins, or the chunk size limit is reached
            while
                run_end < data.len() && (
                    (run_end + 1 >= data.len() || data[run_end] != data[run_end + 1])
                        || (run_end + 2 >= data.len() || data[run_end + 1] != data[run_end + 2])
                ) && run_end - run_start < MAX_RUN_LENGTH
            {
                run_end += 1;
            }

            // encode literals: negative count byte, then the bytes verbatim
            compressed.push((run_start as i32 - run_end as i32) as u8);
            compressed.extend_from_slice(&data[run_start .. run_end]);

            run_start = run_end;
            run_end += 1;
        }
    }

    Ok(compressed)
}
|
||||
|
||||
fn take_1(slice: &mut &[u8]) -> Result<u8> {
|
||||
if !slice.is_empty() {
|
||||
let result = slice[0];
|
||||
*slice = &slice[1..];
|
||||
Ok(result)
|
||||
|
||||
} else {
|
||||
Err(Error::invalid("compressed data"))
|
||||
}
|
||||
}
|
||||
|
||||
fn take_n<'s>(slice: &mut &'s [u8], n: usize) -> Result<&'s [u8]> {
|
||||
if n <= slice.len() {
|
||||
let (front, back) = slice.split_at(n);
|
||||
*slice = back;
|
||||
Ok(front)
|
||||
|
||||
} else {
|
||||
Err(Error::invalid("compressed data"))
|
||||
}
|
||||
}
|
||||
41
vendor/exr/src/compression/zip.rs
vendored
Normal file
41
vendor/exr/src/compression/zip.rs
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
|
||||
// see https://github.com/openexr/openexr/blob/master/OpenEXR/IlmImf/ImfCompressor.cpp
|
||||
|
||||
|
||||
use super::*;
|
||||
use super::optimize_bytes::*;
|
||||
use crate::error::Result;
|
||||
|
||||
// scanline decompression routine, see https://github.com/openexr/openexr/blob/master/OpenEXR/IlmImf/ImfScanLineInputFile.cpp
|
||||
// 1. Uncompress the data, if necessary (If the line is uncompressed, it's in XDR format, regardless of the compressor's output format.)
|
||||
// 3. Convert one scan line's worth of pixel data back from the machine-independent representation
|
||||
// 4. Fill the frame buffer with pixel data, respective to sampling and whatnot
|
||||
|
||||
|
||||
/// Decompress zlib-compressed pixel data, then undo the delta encoding
/// and byte-plane separation that `compress_bytes` applied.
pub fn decompress_bytes(
    channels: &ChannelList,
    data: ByteVec,
    rectangle: IntegerBounds,
    expected_byte_size: usize,
    _pedantic: bool,
) -> Result<ByteVec> {
    // limit the inflated size so malformed input cannot allocate unboundedly
    let options = zune_inflate::DeflateOptions::default().set_limit(expected_byte_size).set_size_hint(expected_byte_size);
    let mut decoder = zune_inflate::DeflateDecoder::new_with_options(&data, options);
    let mut decompressed = decoder.decode_zlib()
        .map_err(|_| Error::invalid("zlib-compressed data malformed"))?;

    // reverse the transformations applied before deflation
    differences_to_samples(&mut decompressed);
    interleave_byte_blocks(&mut decompressed);

    Ok(super::convert_little_endian_to_current(decompressed, channels, rectangle))// TODO no alloc
}
|
||||
|
||||
/// Compress pixel data with zlib: separate the samples into byte planes
/// and delta-encode them (which makes the data compress better), then deflate.
pub fn compress_bytes(channels: &ChannelList, uncompressed: ByteVec, rectangle: IntegerBounds) -> Result<ByteVec> {
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    let mut packed = convert_current_to_little_endian(uncompressed, channels, rectangle);

    separate_bytes_fragments(&mut packed);
    samples_to_differences(&mut packed);

    Ok(miniz_oxide::deflate::compress_to_vec_zlib(packed.as_slice(), 4))
}
|
||||
136
vendor/exr/src/error.rs
vendored
Normal file
136
vendor/exr/src/error.rs
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
|
||||
//! Error type definitions.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::io::ErrorKind;
|
||||
pub use std::io::Error as IoError;
|
||||
pub use std::io::Result as IoResult;
|
||||
use std::convert::TryFrom;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::num::TryFromIntError;
|
||||
|
||||
|
||||
// Export types
|
||||
|
||||
/// A result that may contain an exr error.
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
/// A result that, if ok, contains nothing, and otherwise contains an exr error.
|
||||
pub type UnitResult = Result<()>;
|
||||
|
||||
|
||||
/// An error that may happen while reading or writing an exr file.
/// Distinguishes between three types of errors:
/// unsupported features, invalid data, and file system errors.
#[derive(Debug)]
pub enum Error {

    /// Reading or Writing the file has been aborted by the caller.
    /// This error will never be triggered by this crate itself,
    /// only by users of this library.
    /// It exists to be returned from a progress callback.
    Aborted, // FIXME remove?? is not used really?

    /// The contents of the file are not supported by
    /// this specific implementation of open exr,
    /// even though the data may be valid.
    NotSupported(Cow<'static, str>),

    /// The contents of the image are contradicting or insufficient.
    /// Also returned for `ErrorKind::UnexpectedEof` errors.
    Invalid(Cow<'static, str>),

    /// The underlying byte stream could not be read successfully,
    /// probably due to file system related errors.
    /// Note that unexpected-end-of-file io errors are converted to
    /// the `Invalid` variant instead (see the `From<IoError>` impl).
    Io(IoError),
}
|
||||
|
||||
|
||||
impl Error {

    /// Create an error of the variant `Invalid`.
    /// Accepts both `&'static str` and owned `String` messages via `Cow`.
    pub(crate) fn invalid(message: impl Into<Cow<'static, str>>) -> Self {
        Error::Invalid(message.into())
    }

    /// Create an error of the variant `NotSupported`.
    /// Accepts both `&'static str` and owned `String` messages via `Cow`.
    pub(crate) fn unsupported(message: impl Into<Cow<'static, str>>) -> Self {
        Error::NotSupported(message.into())
    }
}
|
||||
|
||||
/// Enable using the `?` operator on `std::io::Result`.
|
||||
impl From<IoError> for Error {
|
||||
fn from(error: IoError) -> Self {
|
||||
if error.kind() == ErrorKind::UnexpectedEof {
|
||||
Error::invalid("reference to missing bytes")
|
||||
}
|
||||
else {
|
||||
Error::Io(error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO use `usize::try_from(x)?` everywhere
/// Enable using the `?` operator on fallible integer conversions,
/// treating an out-of-range value as invalid file data.
impl From<TryFromIntError> for Error {
    fn from(_: TryFromIntError) -> Self {
        Error::invalid("invalid size")
    }
}
|
||||
|
||||
impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        // only the `Io` variant wraps an underlying error
        match *self {
            Error::Io(ref err) => Some(err),
            _ => None,
        }
    }
}
|
||||
|
||||
impl fmt::Display for Error {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        // io errors print their own message, the other variants
        // prefix their stored message with the kind of error
        match self {
            Error::Io(err) => err.fmt(formatter),
            Error::NotSupported(message) => write!(formatter, "not supported: {}", message),
            Error::Invalid(message) => write!(formatter, "invalid: {}", message),
            Error::Aborted => write!(formatter, "cancelled"),
        }
    }
}
|
||||
|
||||
/// Return error on invalid range.
/// Fails with `Error::Invalid` (carrying the given message)
/// when the value is negative.
#[inline]
pub(crate) fn i32_to_usize(value: i32, error_message: &'static str) -> Result<usize> {
    usize::try_from(value).map_err(|_| Error::invalid(error_message))
}
|
||||
|
||||
/// Return error on invalid range.
/// Fails with `Error::Invalid` when the value does not fit into a `u16`
/// (via the `From<TryFromIntError>` conversion).
#[inline]
pub(crate) fn usize_to_u16(value: usize) -> Result<u16> {
    Ok(u16::try_from(value)?)
}
|
||||
|
||||
/// Panic on overflow.
#[inline]
pub(crate) fn u64_to_usize(value: u64) -> usize {
    let converted = usize::try_from(value);
    converted.expect("(u64 as usize) overflowed")
}
|
||||
|
||||
/// Panic on overflow.
#[inline]
pub(crate) fn u32_to_usize(value: u32) -> usize {
    let converted = usize::try_from(value);
    converted.expect("(u32 as usize) overflowed")
}
|
||||
|
||||
/// Panic on overflow.
#[inline]
pub(crate) fn usize_to_i32(value: usize) -> i32 {
    let converted = i32::try_from(value);
    converted.expect("(usize as i32) overflowed")
}
|
||||
|
||||
/// Panic on overflow.
#[inline]
pub(crate) fn usize_to_u64(value: usize) -> u64 {
    let converted = u64::try_from(value);
    converted.expect("(usize as u64) overflowed")
}
|
||||
267
vendor/exr/src/image/channel_groups.rs
vendored
Normal file
267
vendor/exr/src/image/channel_groups.rs
vendored
Normal file
@@ -0,0 +1,267 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use crate::image::write::channels::{WritableChannels, ChannelsWriter};
|
||||
use crate::meta::attribute::{LevelMode, ChannelList, Text, TextSlice, ChannelInfo};
|
||||
use crate::meta::header::Header;
|
||||
use crate::image::read::layers::{ReadChannels, ChannelsReader};
|
||||
use crate::block::{BlockIndex, UncompressedBlock};
|
||||
use crate::block::lines::{collect_uncompressed_block_from_lines, LineIndex};
|
||||
use std::io::{Cursor, Read};
|
||||
use crate::error::{Result, UnitResult};
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
use crate::prelude::SmallVec;
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/// A tree of channel groups, keyed by the dot-separated segments of the
/// channel names. Each node may hold one group of channels plus any
/// number of named child subtrees.
/// NOTE(review): this module appears to be unfinished work in progress;
/// several methods below do not compile as written.
pub struct ChannelGroups<ChannelGroup> {
    // the channels stored directly at this level of the tree, if any
    channel_group: Option<ChannelGroup>,
    // child groups, keyed by the next name segment
    children: HashMap<Text, Self>
}
|
||||
|
||||
|
||||
impl<ChannelGroup> ChannelGroups<ChannelGroup> {


    // pub fn visit_groups_mut(&mut self, visitor: impl Fn(&mut Channels)) {
    // }


    /// Collect references to all channel groups in this tree.
    /// NOTE(review): `children.iter()` yields `(&Text, &Self)` tuples, so
    /// `group.groups()` will not compile — presumably `.values()` was
    /// intended; confirm before use.
    pub fn groups(&self) -> SmallVec<[&ChannelGroup; 12]> {
        let children = self.children.iter().flat_map(|group| group.groups());
        self.channel_group.iter().chain(children).collect()
    }

    /// Find the channel group addressed by a dot-separated name.
    /// NOTE(review): does not compile — `position('.')` needs a predicate
    /// closure, `child_name` is sliced from the already-truncated
    /// `group_name` instead of the full name, and the `lookup` methods
    /// called here do not exist.
    pub fn lookup_group(&self, group_name: &TextSlice) -> Option<&ChannelGroup> {
        let dot_index = group_name.iter().position('.');
        if let Some(dot_index) = dot_index {
            let group_name = &group_name[.. dot_index];
            let child_name = &group_name[dot_index + 1 ..];
            self.children.get(group_name)
                .and_then(|child| child.lookup(child_name))
        }
        else {
            self.channel_group.lookup(name)
        }
    }


    /*pub fn insert_group(&mut self, full_name: &TextSlice, value: ChannelGroup) {
        let dot_index = full_name.iter().position('.');
        if let Some(dot_index) = dot_index {
            let group_name = &group_name[.. dot_index];
            let name_rest = &group_name[dot_index + 1 ..];

            self.children.entry(Text::from_slice_unchecked(group_name))
                .or_insert(|| );

            // self.children.insert(Text::from_slice_unchecked(group_name), value)
            //     .and_then(|child| child.lookup(name_rest));
        }
        else {
            self.channel_group.lookup(name);
        }
    }*/

    /// Transform every channel group in the tree with the given mapper.
    /// NOTE(review): `children` is built by mapping `self.channel_group`
    /// instead of recursing into `self.children` — this looks wrong and
    /// will not type-check.
    pub fn map<T>(self, mapper: impl FnMut(ChannelGroup) -> T) -> ChannelGroups<T> {
        ChannelGroups {
            children: self.channel_group.iter().map(&mapper).collect(),
            channel_group: self.channel_group.map(mapper),
        }
    }
}
|
||||
|
||||
|
||||
/// Build a `ChannelGroups` tree from a flat list of named channels,
/// splitting each channel name at the first dot.
/// NOTE(review): unfinished — a nested `fn` cannot refer to the outer
/// generic parameter `T`, `position('.')` needs a predicate closure,
/// and `groups.channel_group.unwrap()` moves out of a mutable borrow
/// (likely `as_mut().unwrap()` was intended). Does not compile as written.
pub fn parse_channel_list_groups<T>(channels: impl Iterator<Item=(Text, T)>)
    -> ChannelGroups<SmallVec<(Text, T)>>
{
    fn insert_into_groups(groups: &mut ChannelGroups<SmallVec<(Text, T)>>, name: Text, value: T) {
        let dot_index = name.as_slice().iter().position('.');

        if let Some(dot_index) = dot_index {
            // insert into child group

            let group_name = Text::from_slice_unchecked(&name.as_slice()[.. dot_index]);
            let child_channel = Text::from_slice_unchecked(&name.as_slice()[dot_index + 1 ..]);

            let child_group = groups.children.entry(group_name)
                .or_insert(ChannelGroups { channel_group: None, children: Default::default() });

            insert_into_groups(child_group, child_channel, value);
        }

        else {
            // insert directly into group

            if groups.channel_group.is_none() {
                groups.channel_group = Some(SmallVec::new());
            }

            groups.channel_group.unwrap().push(value);
        }
    }

    let mut result = ChannelGroups { channel_group: None, children: HashMap::default() };
    for (name, value) in channels { insert_into_groups(&mut result, name, value); }
    result
}
|
||||
|
||||
|
||||
impl<'slf, ChannelGroup> WritableChannels<'slf> for ChannelGroups<ChannelGroup>
    where ChannelGroup: WritableChannels<'slf>
{
    /// Flatten the tree back into one channel list, prefixing each child
    /// channel name with its group name.
    /// NOTE(review): `self.channel_group.map(..)` moves out of `&self`
    /// (needs `.as_ref()`), so this does not compile yet.
    fn infer_channel_list(&self) -> ChannelList {
        // TODO what about empty groups with NO channels??

        let child_channels = self.children.iter().flat_map(|(group_name, child)| {
            let mut child_channels = child.infer_channel_list().list;
            for channel in &mut child_channels { channel.name.push_front(group_name) };
            child_channels
        });

        let mut own_channels = self.channel_group
            .map(|chans| chans.infer_channel_list().list)
            .unwrap_or_default();

        own_channels.extend(child_channels);
        own_channels.sort_unstable(); // TODO only once at end
        ChannelList::new(own_channels) // might be empty, but will be checked in MetaData::validate()
    }

    /// All groups must agree on one level mode; returns the first mode
    /// found in the tree and debug-asserts that the rest are equal.
    /// NOTE(review): a nested `fn` cannot use the outer `Self` type, and
    /// `children.iter()` yields tuples — does not compile as written.
    fn level_mode(&self) -> LevelMode {
        fn find_mode_or_none(channels: &Self) -> Option<LevelMode> {
            channels.channel_group.map(WritableChannels::level_mode).or_else(|| {
                channels.children.iter().map(find_mode_or_none).next()
            })
        }

        let mode = find_mode_or_none(self)
            .expect("empty channel groups (check failed)"); // TODO only happens for empty channels, right? panic maybe?

        if let Some(chans) = self.channel_group.as_ref() {
            debug_assert_eq!(chans.level_mode(), mode, "level mode must be equal for all legacy channel groups")
        }

        debug_assert!(
            self.children.values()
                .flat_map(find_mode_or_none)
                .all(|child_mode| child_mode == mode),

            "level mode must be equal for all legacy channel groups"
        );

        mode
    }

    type Writer = GroupChannelsWriter<'slf, ChannelGroup>;

    /// Create one writer per channel in the header, looked up by group name.
    /// NOTE(review): collects owned writers into a struct that stores
    /// references — the types do not match yet.
    fn create_writer(&'slf self, header: &Header) -> Self::Writer {
        let channels = header.channels.list.iter()
            .map(|channel_info|{
                // hashmap order is not guaranteed? so look up each channel group manually instead of generating new
                let channels = self.lookup_group(channel_info.name.as_slice())
                    .expect("channels not found bug");

                channels.create_writer(header) // channel_info.name.clone()
            })
            .collect();

        GroupChannelsWriter { channels_list: channels }
    }
}
|
||||
|
||||
/// Writes grouped channels by delegating to one sub-writer per group
/// and assembling their bytes into a single block.
struct GroupChannelsWriter<'c, ChannelGroupWriter> {
    // one writer per channel group, in header channel order
    channels_list: Vec<&'c ChannelGroupWriter>,
}
|
||||
|
||||
impl<'c, Channels> ChannelsWriter for GroupChannelsWriter<'c, Channels> where Channels: ChannelsWriter {
    /// Extract a block from every group writer, then interleave the
    /// per-group bytes line by line into one uncompressed block.
    fn extract_uncompressed_block(&self, header: &Header, block: BlockIndex) -> Vec<u8> {
        // one cursor over each group's extracted byte block
        let mut blocks_per_channel: Vec<Cursor<Vec<u8>>> = self
            .channels_list.iter()
            .map(|channels| Cursor::new(channels.extract_uncompressed_block(header, block)))
            .collect();

        UncompressedBlock::uncompressed_block_from_lines(header, block, |line|{
            let channel_reader = &mut blocks_per_channel[line.location.channel]; // TODO subsampling

            // read from specific channel into total byte block
            // this assumes that the lines in the callback are iterated in strictly increasing order
            // because each channel reader is consumed
            channel_reader.read_exact(line.value)
                .expect("collecting grouped channel byte block failed");
        })
    }
}
|
||||
|
||||
|
||||
/// Reads all channel groups through the same underlying channel reader.
struct ReadChannelGroups<ReadChannelGroup> {
    read_channels: ReadChannelGroup
}
|
||||
|
||||
/// Reader state for grouped channels: a tree of indices pointing into a
/// flat list of per-group readers.
struct ChannelGroupsReader<ChannelGroupReader> {
    // tree of indices into `indexed_channels`
    channels: ChannelGroups<usize>,
    indexed_channels: Vec<ChannelGroupReader>,
}
|
||||
|
||||
impl<'s, ReadChannelGroup> ReadChannels<'s> for ReadChannelGroups<ReadChannelGroup>
    where ReadChannelGroup: ReadChannels<'s>
{
    type Reader = ChannelGroupsReader<ReadChannelGroup::Reader>;

    /// Group the header's channels by name and create one sub-reader per
    /// group, each with a synthesized sub-header.
    /// NOTE(review): unfinished — `indexed_channels[]` is a syntax error,
    /// and the closure is meant to return the reader's index (see the
    /// FIXME below). Does not compile as written.
    fn create_channels_reader(&'s self, header: &Header) -> Result<Self::Reader> {
        let swap = |(a,b)| (b,a);
        let channel_groups = parse_channel_list_groups(
            header.channels.list.iter().enumerate().map(swap)
        );

        let mut indexed_channels = Vec::new();
        let channel_groups = channel_groups.map(|channels| {

            let mut channels_header = header.clone(); // TODO no clone?
            channels_header.channels = ChannelList::new(channels.iter().map(|(name, index)|{
                let mut channel_info = header.channels.list[index].clone();
                channel_info.name = name;
                channel_info
            }).collect()); // FIXME does not comply to `header.chunk_count` and that stuff?? change ReadChannels fn signature?

            indexed_channels.push(self.read_channels.create_channels_reader(&channels_header));

            // FIXME this is not the original order indexed_channels.len() - 1
            indexed_channels[]
        });

        Ok(ChannelGroupsReader {
            channels: channel_groups,
            indexed_channels,
        })

        /*Ok(ChannelGroupsReader {
            channels: header.channels.list.iter().map(|channel| {
                let mut channels_header = header.clone();

                let reader = self.read_channels.create_channels_reader(&channels_header);
                (channels_header, reader)
            }).collect(),
        })*/
    }
}
|
||||
|
||||
impl<ChannelGroupReader> ChannelsReader for ChannelGroupsReader<ChannelGroupReader> where ChannelGroupReader: ChannelsReader {
    type Channels = ChannelGroups<ChannelGroupReader::Channels>;

    // a block is wanted if any of the group readers wants it
    fn filter_block(&self, tile: (usize, &TileCoordinates)) -> bool {
        self.indexed_channels.iter().any(|channel| channel.filter_block(tile))
    }

    /// NOTE(review): unfinished stub — the closure body is empty and no
    /// `UnitResult` is produced; does not compile as written.
    fn read_block(&mut self, header: &Header, block: UncompressedBlock) -> UnitResult {
        block.for_lines(|line|{

        })
    }

    /// NOTE(review): unfinished stub with an empty body; does not compile.
    fn into_channels(self) -> Self::Channels {

    }
}
|
||||
801
vendor/exr/src/image/crop.rs
vendored
Normal file
801
vendor/exr/src/image/crop.rs
vendored
Normal file
@@ -0,0 +1,801 @@
|
||||
//! Crop away unwanted pixels. Includes automatic detection of bounding rectangle.
|
||||
//! Currently does not support deep data and resolution levels.
|
||||
|
||||
use crate::meta::attribute::{IntegerBounds, LevelMode, ChannelList};
|
||||
use crate::math::{Vec2, RoundingMode};
|
||||
use crate::image::{Layer, FlatSamples, SpecificChannels, AnyChannels, FlatSamplesPixel, AnyChannel};
|
||||
use crate::image::write::channels::{GetPixel, WritableChannels, ChannelsWriter};
|
||||
use crate::meta::header::{LayerAttributes, Header};
|
||||
use crate::block::BlockIndex;
|
||||
|
||||
/// Something that has a two-dimensional rectangular shape.
/// Used by the cropping routines below to query the current image extent.
pub trait GetBounds {

    /// The bounding rectangle of this pixel grid.
    fn bounds(&self) -> IntegerBounds;
}
|
||||
|
||||
/// Inspect the pixels in this image to determine where to crop some away.
/// Used by [`CropWhere`] to test which border samples can be discarded.
pub trait InspectSample: GetBounds {

    /// The type of pixel in this pixel grid.
    type Sample;

    /// Index is not in world coordinates, but within the data window.
    /// Position `(0,0)` always represents the top left pixel.
    fn inspect_sample(&self, local_index: Vec2<usize>) -> Self::Sample;
}
|
||||
|
||||
/// Crop some pixels ways when specifying a smaller rectangle
pub trait Crop: Sized {

    /// The type of this image after cropping (probably the same as before)
    type Cropped;

    /// Crop the image to exclude unwanted pixels.
    /// Panics for invalid (larger than previously) bounds.
    /// The bounds are specified in absolute coordinates.
    /// Does not reduce allocation size of the current image, but instead only adjust a few boundary numbers.
    /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
    fn crop(self, bounds: IntegerBounds) -> Self::Cropped;

    /// Reduce your image to a smaller part, usually to save memory.
    /// Crop if bounds are specified, return the original if no bounds are specified.
    /// Does not reduce allocation size of the current image, but instead only adjust a few boundary numbers.
    /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
    fn try_crop(self, bounds: Option<IntegerBounds>) -> CropResult<Self::Cropped, Self> {
        match bounds {
            Some(bounds) => CropResult::Cropped(self.crop(bounds)),

            // no bounds: hand back the unchanged image in the `Empty` variant
            None => CropResult::Empty { original: self },
        }
    }
}
|
||||
|
||||
/// Cropping an image fails if the image is fully transparent.
/// Use [`or_crop_to_1x1_if_empty`] or [`or_none_if_empty`] to obtain a normal image again.
// marked must_use so the caller handles the `Empty` case explicitly
#[must_use]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum CropResult<Cropped, Old> {

    /// The image contained some pixels and has been cropped or left untouched
    Cropped (Cropped),

    /// All pixels in the image would be discarded, removing the whole image
    Empty {

        /// The fully discarded image which caused the cropping to fail
        original: Old
    }
}
|
||||
|
||||
/// Crop away unwanted pixels from the border if they match the specified rule.
/// See [`Crop`] for cropping to explicitly given bounds instead.
pub trait CropWhere<Sample>: Sized {

    /// The type of the cropped image (probably the same as the original image).
    type Cropped;

    /// Crop away unwanted pixels from the border if they match the specified rule.
    /// Does not reduce allocation size of the current image, but instead only adjust a few boundary numbers.
    /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
    fn crop_where(self, discard_if: impl Fn(Sample) -> bool) -> CropResult<Self::Cropped, Self>;

    /// Crop away unwanted pixels from the border if they match the specified color.
    /// If you want discard based on a rule, use `crop_where` with a closure instead.
    /// Does not reduce allocation size of the current image, but instead only adjust a few boundary numbers.
    /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
    fn crop_where_eq(self, discard_color: impl Into<Sample>) -> CropResult<Self::Cropped, Self> where Sample: PartialEq;

    /// Convert this data to cropped data without discarding any pixels.
    fn crop_nowhere(self) -> Self::Cropped;
}
|
||||
|
||||
impl<Channels> Crop for Layer<Channels> {
    // cropping a layer wraps its channels in a `CroppedChannels` view
    type Cropped = Layer<CroppedChannels<Channels>>;

    /// Crop the layer to the specified bounds without copying any pixel data.
    fn crop(self, bounds: IntegerBounds) -> Self::Cropped {
        CroppedChannels::crop_layer(bounds, self)
    }
}
|
||||
|
||||
impl<T> CropWhere<T::Sample> for T where T: Crop + InspectSample {
|
||||
type Cropped = <Self as Crop>::Cropped;
|
||||
|
||||
fn crop_where(self, discard_if: impl Fn(T::Sample) -> bool) -> CropResult<Self::Cropped, Self> {
|
||||
let smaller_bounds = {
|
||||
let keep_if = |position| !discard_if(self.inspect_sample(position));
|
||||
try_find_smaller_bounds(self.bounds(), keep_if)
|
||||
};
|
||||
|
||||
self.try_crop(smaller_bounds)
|
||||
}
|
||||
|
||||
fn crop_where_eq(self, discard_color: impl Into<T::Sample>) -> CropResult<Self::Cropped, Self> where T::Sample: PartialEq {
|
||||
let discard_color: T::Sample = discard_color.into();
|
||||
self.crop_where(|sample| sample == discard_color)
|
||||
}
|
||||
|
||||
fn crop_nowhere(self) -> Self::Cropped {
|
||||
let current_bounds = self.bounds();
|
||||
self.crop(current_bounds)
|
||||
}
|
||||
}
|
||||
|
||||
/// A smaller window into an existing pixel storage
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CroppedChannels<Channels> {

    /// The uncropped pixel storage
    pub full_channels: Channels,

    /// The uncropped pixel storage bounds
    pub full_bounds: IntegerBounds,

    /// The cropped pixel storage bounds (a sub-window of `full_bounds`)
    pub cropped_bounds: IntegerBounds,
}
|
||||
|
||||
impl<Channels> CroppedChannels<Channels> {
|
||||
|
||||
/// Wrap a layer in a cropped view with adjusted bounds, but without reallocating your pixels
|
||||
pub fn crop_layer(new_bounds: IntegerBounds, layer: Layer<Channels>) -> Layer<CroppedChannels<Channels>> {
|
||||
Layer {
|
||||
channel_data: CroppedChannels {
|
||||
cropped_bounds: new_bounds,
|
||||
full_bounds: layer.absolute_bounds(),
|
||||
full_channels: layer.channel_data,
|
||||
},
|
||||
|
||||
size: new_bounds.size,
|
||||
|
||||
attributes: LayerAttributes {
|
||||
layer_position: new_bounds.position,
|
||||
.. layer.attributes
|
||||
},
|
||||
|
||||
encoding: layer.encoding
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO make cropped view readable if you only need a specific section of the image?
|
||||
|
||||
// make cropped view writable:
|
||||
|
||||
// make the cropped view writable by delegating to the full storage,
// offsetting each block into the original coordinate system
impl<'slf, Channels:'slf> WritableChannels<'slf> for CroppedChannels<Channels> where Channels: WritableChannels<'slf> {
    fn infer_channel_list(&self) -> ChannelList {
        self.full_channels.infer_channel_list() // no need for adjustments, as the layer content already reflects the changes
    }

    fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
        self.full_channels.infer_level_modes()
    }

    type Writer = CroppedWriter<Channels::Writer>;

    fn create_writer(&'slf self, header: &Header) -> Self::Writer {
        // offset from the full storage origin to the cropped window origin;
        // must be non-negative, otherwise the crop window lies outside the storage
        let offset = (self.cropped_bounds.position - self.full_bounds.position)
            .to_usize("invalid cropping bounds for cropped view").unwrap();

        CroppedWriter { channels: self.full_channels.create_writer(header), offset }
    }
}
|
||||
|
||||
/// A writer for the cropped view layer
#[derive(Debug, Clone, PartialEq)]
pub struct CroppedWriter<ChannelsWriter> {
    // the writer of the full, uncropped pixel storage
    channels: ChannelsWriter,
    // translation from cropped-view coordinates into full storage coordinates
    offset: Vec2<usize>
}
|
||||
|
||||
impl<'c, Channels> ChannelsWriter for CroppedWriter<Channels> where Channels: ChannelsWriter {
|
||||
fn extract_uncompressed_block(&self, header: &Header, block: BlockIndex) -> Vec<u8> {
|
||||
let block = BlockIndex {
|
||||
pixel_position: block.pixel_position + self.offset,
|
||||
.. block
|
||||
};
|
||||
|
||||
self.channels.extract_uncompressed_block(header, block)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Samples, Channels> InspectSample for Layer<SpecificChannels<Samples, Channels>> where Samples: GetPixel {
    type Sample = Samples::Pixel;

    /// Look up one pixel of the layer by its layer-local position.
    fn inspect_sample(&self, local_index: Vec2<usize>) -> Samples::Pixel {
        self.channel_data.pixels.get_pixel(local_index)
    }
}
|
||||
|
||||
impl InspectSample for Layer<AnyChannels<FlatSamples>> {
    type Sample = FlatSamplesPixel;

    /// Collect the samples of all channels at this layer-local position.
    fn inspect_sample(&self, local_index: Vec2<usize>) -> FlatSamplesPixel {
        self.sample_vec_at(local_index)
    }
}
|
||||
|
||||
// ALGORITHM IDEA: for arbitrary channels, find the most desired channel,
|
||||
// and process that first, keeping the processed bounds as starting point for the other layers
|
||||
|
||||
/// Realize a cropped view of the original data,
/// by actually removing the unwanted original pixels,
/// reducing the memory consumption.
/// Currently not supported for `SpecificChannels`.
pub trait ApplyCroppedView {

    /// The simpler type after cropping is realized
    type Reallocated;

    /// Make the cropping real by reallocating the underlying storage,
    /// with the goal of reducing total memory usage.
    /// Currently not supported for `SpecificChannels`.
    fn reallocate_cropped(self) -> Self::Reallocated;
}
|
||||
|
||||
impl ApplyCroppedView for Layer<CroppedChannels<AnyChannels<FlatSamples>>> {
|
||||
type Reallocated = Layer<AnyChannels<FlatSamples>>;
|
||||
|
||||
fn reallocate_cropped(self) -> Self::Reallocated {
|
||||
let cropped_absolute_bounds = self.channel_data.cropped_bounds;
|
||||
let cropped_relative_bounds = cropped_absolute_bounds.with_origin(-self.channel_data.full_bounds.position);
|
||||
|
||||
assert!(self.absolute_bounds().contains(cropped_absolute_bounds), "bounds not valid for layer dimensions");
|
||||
assert!(cropped_relative_bounds.size.area() > 0, "the cropped image would be empty");
|
||||
|
||||
Layer {
|
||||
channel_data: if cropped_relative_bounds.size == self.channel_data.full_bounds.size {
|
||||
assert_eq!(cropped_absolute_bounds.position, self.channel_data.full_bounds.position, "crop bounds size equals, but position does not");
|
||||
|
||||
// the cropping would not remove any pixels
|
||||
self.channel_data.full_channels
|
||||
}
|
||||
else {
|
||||
let start_x = cropped_relative_bounds.position.x() as usize; // safe, because just checked above
|
||||
let start_y = cropped_relative_bounds.position.y() as usize; // safe, because just checked above
|
||||
let x_range = start_x .. start_x + cropped_relative_bounds.size.width();
|
||||
let old_width = self.channel_data.full_bounds.size.width();
|
||||
let new_height = cropped_relative_bounds.size.height();
|
||||
|
||||
let channels = self.channel_data.full_channels.list.into_iter().map(|channel: AnyChannel<FlatSamples>| {
|
||||
fn crop_samples<T:Copy>(samples: Vec<T>, old_width: usize, new_height: usize, x_range: std::ops::Range<usize>, y_start: usize) -> Vec<T> {
|
||||
let filtered_lines = samples.chunks_exact(old_width).skip(y_start).take(new_height);
|
||||
let trimmed_lines = filtered_lines.map(|line| &line[x_range.clone()]);
|
||||
trimmed_lines.flatten().map(|x|*x).collect() // TODO does this use memcpy?
|
||||
}
|
||||
|
||||
let samples = match channel.sample_data {
|
||||
FlatSamples::F16(samples) => FlatSamples::F16(crop_samples(
|
||||
samples, old_width, new_height, x_range.clone(), start_y
|
||||
)),
|
||||
|
||||
FlatSamples::F32(samples) => FlatSamples::F32(crop_samples(
|
||||
samples, old_width, new_height, x_range.clone(), start_y
|
||||
)),
|
||||
|
||||
FlatSamples::U32(samples) => FlatSamples::U32(crop_samples(
|
||||
samples, old_width, new_height, x_range.clone(), start_y
|
||||
)),
|
||||
};
|
||||
|
||||
AnyChannel { sample_data: samples, ..channel }
|
||||
}).collect();
|
||||
|
||||
AnyChannels { list: channels }
|
||||
},
|
||||
|
||||
attributes: self.attributes,
|
||||
encoding: self.encoding,
|
||||
size: self.size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/// Return the smallest bounding rectangle including all pixels that satisfy the predicate.
/// Worst case: Fully transparent image, visits each pixel once.
/// Best case: Fully opaque image, visits two pixels.
/// Returns `None` if the image is fully transparent.
/// Returns `[(0,0), size]` if the image is fully opaque.
/// Designed to be cache-friendly linear search. Optimized for row-major image vectors.
pub fn try_find_smaller_bounds(current_bounds: IntegerBounds, pixel_at: impl Fn(Vec2<usize>) -> bool) -> Option<IntegerBounds> {
    assert_ne!(current_bounds.size.area(), 0, "cannot find smaller bounds of an image with zero width or height");
    let Vec2(width, height) = current_bounds.size;

    // scans top to bottom (left to right)
    let first_top_left_pixel = (0 .. height)
        .flat_map(|y| (0 .. width).map(move |x| Vec2(x,y)))
        .find(|&position| pixel_at(position))?; // return none if no pixel should be kept

    // scans bottom to top (right to left)
    let first_bottom_right_pixel = (first_top_left_pixel.y() + 1 .. height) // excluding the top line
        .flat_map(|y| (0 .. width).map(move |x| Vec2(x, y))) // x search cannot start at first_top.x, because this must catch all bottom pixels
        .rev().find(|&position| pixel_at(position))
        .unwrap_or(first_top_left_pixel); // did not find any at bottom, but we know top has some pixel

    // now we know exactly how much we can throw away top and bottom,
    // but we don't know exactly about left or right
    let top = first_top_left_pixel.y();
    let bottom = first_bottom_right_pixel.y();

    // we only know some arbitrary left and right bounds which we need to refine,
    // because the actual image contents might be wider than the corner points.
    // we know that we do not need to look in the center between min x and max x,
    // as these must be included in any case.
    let mut min_left_x = first_top_left_pixel.x().min(first_bottom_right_pixel.x());
    let mut max_right_x = first_bottom_right_pixel.x().max(first_top_left_pixel.x());

    // requires for loop, because bounds change while searching
    for y in top ..= bottom {

        // escape the loop if there is nothing left to crop
        if min_left_x == 0 && max_right_x == width - 1 { break; }

        // search from right image edge towards image center, until known max x, for existing pixels,
        // possibly including some pixels that would have been cropped otherwise
        if max_right_x != width - 1 {
            max_right_x = (max_right_x + 1 .. width).rev() // excluding current max
                .find(|&x| pixel_at(Vec2(x, y)))
                .unwrap_or(max_right_x);
        }

        // search from left image edge towards image center, until known min x, for existing pixels,
        // possibly including some pixels that would have been cropped otherwise
        if min_left_x != 0 {
            min_left_x = (0 .. min_left_x) // excluding current min
                .find(|&x| pixel_at(Vec2(x, y)))
                .unwrap_or(min_left_x);
        }
    }

    // TODO add 1px margin to avoid interpolation issues?
    let local_start = Vec2(min_left_x, top);
    let local_end = Vec2(max_right_x + 1, bottom + 1); // end is exclusive
    Some(IntegerBounds::new(
        current_bounds.position + local_start.to_i32(),
        local_end - local_start
    ))
}
|
||||
|
||||
impl<S> GetBounds for Layer<S> {
    /// The bounds of a layer are its absolute bounds within the image.
    fn bounds(&self) -> IntegerBounds {
        self.absolute_bounds()
    }
}
|
||||
|
||||
impl<Cropped, Original> CropResult<Cropped, Original> {
|
||||
|
||||
/// If the image was fully empty, return `None`, otherwise return `Some(cropped_image)`.
|
||||
pub fn or_none_if_empty(self) -> Option<Cropped> {
|
||||
match self {
|
||||
CropResult::Cropped (cropped) => Some(cropped),
|
||||
CropResult::Empty { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// If the image was fully empty, crop to one single pixel of all the transparent pixels instead,
|
||||
/// leaving the layer intact while reducing memory usage.
|
||||
pub fn or_crop_to_1x1_if_empty(self) -> Cropped where Original: Crop<Cropped=Cropped> + GetBounds {
|
||||
match self {
|
||||
CropResult::Cropped (cropped) => cropped,
|
||||
CropResult::Empty { original } => {
|
||||
let bounds = original.bounds();
|
||||
if bounds.size == Vec2(0,0) { panic!("layer has width and height of zero") }
|
||||
original.crop(IntegerBounds::new(bounds.position, Vec2(1,1)))
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn find_bounds() {
        // computes the bounds that a test pixel grid occupies, given its offset
        fn find_bounds(offset: Vec2<i32>, lines: &Vec<Vec<i32>>) -> IntegerBounds {
            if let Some(first_line) = lines.first() {
                assert!(lines.iter().all(|line| line.len() == first_line.len()), "invalid test input");
                IntegerBounds::new(offset, (first_line.len(), lines.len()))
            }
            else {
                IntegerBounds::new(offset, (0,0))
            }
        }

        // asserts that cropping away the zero-valued border of `uncropped_lines`
        // yields exactly `expected_cropped_lines`
        fn assert_found_smaller_bounds(offset: Vec2<i32>, uncropped_lines: Vec<Vec<i32>>, expected_cropped_lines: Vec<Vec<i32>>) {
            let old_bounds = find_bounds(offset, &uncropped_lines);

            let found_bounds = try_find_smaller_bounds(
                old_bounds,
                |position| uncropped_lines[position.y()][position.x()] != 0
            ).unwrap();

            let found_bounds = found_bounds.with_origin(-offset); // make indices local

            let cropped_lines: Vec<Vec<i32>> =
                uncropped_lines[found_bounds.position.y() as usize .. found_bounds.end().y() as usize]
                .iter().map(|uncropped_line|{
                    uncropped_line[found_bounds.position.x() as usize .. found_bounds.end().x() as usize].to_vec()
                }).collect();

            assert_eq!(cropped_lines, expected_cropped_lines);
        }

        // fully opaque image: nothing is cropped
        assert_found_smaller_bounds(
            Vec2(-3,-3),

            vec![
                vec![ 2, 3, 4 ],
                vec![ 2, 3, 4 ],
            ],

            vec![
                vec![ 2, 3, 4 ],
                vec![ 2, 3, 4 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-3,-3),

            vec![
                vec![ 2 ],
            ],

            vec![
                vec![ 2 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-3,-3),

            vec![
                vec![ 0 ],
                vec![ 2 ],
                vec![ 0 ],
                vec![ 0 ],
            ],

            vec![
                vec![ 2 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-3,-3),

            vec![
                vec![ 0, 0, 0, 3, 0 ],
            ],

            vec![
                vec![ 3 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(3,3),

            vec![
                vec![ 0, 1, 1, 2, 1, 0 ],
                vec![ 0, 1, 3, 1, 1, 0 ],
                vec![ 0, 1, 1, 1, 1, 0 ],
            ],

            vec![
                vec![ 1, 1, 2, 1 ],
                vec![ 1, 3, 1, 1 ],
                vec![ 1, 1, 1, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(3,3),

            vec![
                vec![ 0, 0, 0, 0 ],
                vec![ 1, 1, 2, 1 ],
                vec![ 1, 3, 1, 1 ],
                vec![ 1, 1, 1, 1 ],
                vec![ 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 1, 1, 2, 1 ],
                vec![ 1, 3, 1, 1 ],
                vec![ 1, 1, 1, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(3,3),

            vec![
                vec![ 0, 1, 1, 2, 1, 0 ],
                vec![ 0, 0, 3, 1, 0, 0 ],
                vec![ 0, 1, 1, 1, 1, 0 ],
            ],

            vec![
                vec![ 1, 1, 2, 1 ],
                vec![ 0, 3, 1, 0 ],
                vec![ 1, 1, 1, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(3,3),

            vec![
                vec![ 0, 0, 1, 2, 0, 0 ],
                vec![ 0, 1, 3, 1, 1, 0 ],
                vec![ 0, 0, 1, 1, 0, 0 ],
            ],

            vec![
                vec![ 0, 1, 2, 0 ],
                vec![ 1, 3, 1, 1 ],
                vec![ 0, 1, 1, 0 ],
            ]
        );

        // single opaque pixel in each corner or edge case
        assert_found_smaller_bounds(
            Vec2(1,3),

            vec![
                vec![ 1, 0, 0, 0, ],
                vec![ 0, 0, 0, 0, ],
                vec![ 0, 0, 0, 0, ],
            ],

            vec![
                vec![ 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(1,3),

            vec![
                vec![ 0, 0, 0, 0, ],
                vec![ 0, 1, 0, 0, ],
                vec![ 0, 0, 0, 0, ],
            ],

            vec![
                vec![ 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-1,-3),

            vec![
                vec![ 0, 0, 0, 0, ],
                vec![ 0, 0, 0, 1, ],
                vec![ 0, 0, 0, 0, ],
            ],

            vec![
                vec![ 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-1,-3),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 1, 1, 0, 0 ],
                vec![ 0, 0, 1, 1, 1, 0, 0 ],
                vec![ 0, 0, 1, 1, 1, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 1, 1, 1 ],
                vec![ 1, 1, 1 ],
                vec![ 1, 1, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(1000,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 1, 1, 0, 0 ],
                vec![ 0, 1, 1, 1, 1, 1, 0 ],
                vec![ 0, 0, 1, 1, 1, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 0, 1, 1, 1, 0 ],
                vec![ 1, 1, 1, 1, 1 ],
                vec![ 0, 1, 1, 1, 0 ],
            ]
        );

        // zeros enclosed by opaque pixels must not be cropped away
        assert_found_smaller_bounds(
            Vec2(-10,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 1, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 1, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 1, 0, 1 ],
                vec![ 0, 0, 0 ],
                vec![ 1, 0, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-10,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 1, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 1, 0, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-10,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 1, 0, 0, 0 ],
                vec![ 0, 0, 0, 2, 0, 0, 0 ],
                vec![ 0, 0, 3, 3, 3, 0, 0 ],
                vec![ 0, 0, 0, 4, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 0, 1, 0 ],
                vec![ 0, 2, 0 ],
                vec![ 3, 3, 3 ],
                vec![ 0, 4, 0 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-10,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 1, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 0, 0, 1 ],
                vec![ 0, 0, 0 ],
                vec![ 0, 0, 0 ],
                vec![ 1, 0, 0 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-10,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 1, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 1, 0, 0, 0 ],
                vec![ 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 1 ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-10,-300),

            vec![
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
                vec![ 0, 0, 1, 0, 0, 0, 0 ],
                vec![ 0, 0, 0, 0, 0, 0, 0 ],
            ],

            vec![
                vec![ 1 ],
                vec![ 0 ],
                vec![ 0 ],
                vec![ 1 ],
            ]
        );


        assert_found_smaller_bounds(
            Vec2(-1,-3),

            vec![
                vec![ 0, 0, 1, 0, ],
                vec![ 0, 0, 0, 1, ],
                vec![ 0, 0, 0, 0, ],
            ],

            vec![
                vec![ 1, 0, ],
                vec![ 0, 1, ],
            ]
        );

        assert_found_smaller_bounds(
            Vec2(-1,-3),

            vec![
                vec![ 1, 0, 0, 0, ],
                vec![ 0, 1, 0, 0, ],
                vec![ 0, 0, 0, 0, ],
                vec![ 0, 0, 0, 0, ],
            ],

            vec![
                vec![ 1, 0, ],
                vec![ 0, 1, ],
            ]
        );
    }


    #[test]
    fn find_no_bounds() {
        // a fully transparent image cannot be cropped and must yield `None`
        let pixels = vec![
            vec![ 0, 0, 0, 0 ],
            vec![ 0, 0, 0, 0 ],
            vec![ 0, 0, 0, 0 ],
        ];

        let bounds = try_find_smaller_bounds(
            IntegerBounds::new((0,0), (4,3)),
            |position| pixels[position.y()][position.x()] != 0
        );

        assert_eq!(bounds, None)
    }

}
|
||||
|
||||
|
||||
|
||||
|
||||
1326
vendor/exr/src/image/mod.rs
vendored
Normal file
1326
vendor/exr/src/image/mod.rs
vendored
Normal file
File diff suppressed because it is too large
Load Diff
97
vendor/exr/src/image/pixel_vec.rs
vendored
Normal file
97
vendor/exr/src/image/pixel_vec.rs
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
|
||||
//! Provides a predefined pixel storage.
|
||||
//! Currently only contains a simple flattened vector storage.
|
||||
//! Use the functions `create_pixel_vec::<YourPixelTuple>` and
|
||||
//! `set_pixel_in_vec::<YourPixelTuple>` for reading a predefined pixel vector.
|
||||
//! Use the function `PixelVec::new` to create a pixel vector which can be written to a file.
|
||||
|
||||
use super::*;
|
||||
|
||||
/// Store all samples in a single array.
/// All samples will be converted to the type `T`.
/// This supports all the sample types, `f16`, `f32`, and `u32`.
///
/// The flattened vector contains all rows one after another.
/// In each row, for each pixel, its red, green, blue, and then alpha
/// samples are stored one after another.
///
/// Use `PixelVec.compute_pixel_index(position)`
/// to compute the flat index of a specific pixel.
#[derive(Eq, PartialEq, Clone)]
pub struct PixelVec<T> {

    /// The resolution of this layer.
    pub resolution: Vec2<usize>,

    /// The flattened vector contains all rows one after another.
    /// In each row, for each pixel, its red, green, blue, and then alpha
    /// samples are stored one after another.
    ///
    /// Use `Flattened::compute_pixel_index(image, position)`
    /// to compute the flat index of a specific pixel.
    pub pixels: Vec<T>,
}
|
||||
|
||||
impl<Pixel> PixelVec<Pixel> {

    /// Create a new flattened pixel storage, filled with default pixels.
    /// Accepts a `Channels` parameter, which is not used, so that it can be passed as a function pointer instead of calling it.
    pub fn constructor<Channels>(resolution: Vec2<usize>, _: &Channels) -> Self where Pixel: Default + Clone {
        PixelVec { resolution, pixels: vec![Pixel::default(); resolution.area()] }
    }

    /// Examine a pixel of a `PixelVec<T>` image.
    /// Can usually be used as a function reference instead of calling it directly.
    /// Panics if the position is outside the resolution.
    #[inline]
    pub fn get_pixel(&self, position: Vec2<usize>) -> &Pixel where Pixel: Sync {
        &self.pixels[self.compute_pixel_index(position)]
    }

    /// Update a pixel of a `PixelVec<T>` image.
    /// Can usually be used as a function reference instead of calling it directly.
    /// Panics if the position is outside the resolution.
    #[inline]
    pub fn set_pixel(&mut self, position: Vec2<usize>, pixel: Pixel) {
        let index = self.compute_pixel_index(position);
        self.pixels[index] = pixel;
    }

    /// Create a new flattened pixel storage, checking the length of the provided pixels vector.
    pub fn new(resolution: impl Into<Vec2<usize>>, pixels: Vec<Pixel>) -> Self {
        let size = resolution.into();
        assert_eq!(size.area(), pixels.len(), "expected {} samples, but vector length is {}", size.area(), pixels.len());
        Self { resolution: size, pixels }
    }

    /// Compute the flat index of a specific pixel in the row-major `pixels` vector.
    /// The computed index can be used with `PixelVec.pixels[index]`.
    /// Panics for invalid sample coordinates.
    #[inline]
    pub fn compute_pixel_index(&self, position: Vec2<usize>) -> usize {
        position.flat_index_for_size(self.resolution)
    }
}
|
||||
|
||||
use crate::image::validate_results::{ValidateResult, ValidationResult};
|
||||
|
||||
impl<Px> ValidateResult for PixelVec<Px> where Px: ValidateResult {
    /// Two pixel vectors are equal when their resolutions match and all pixels validate pairwise.
    fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn() -> String) -> ValidationResult {
        if self.resolution != other.resolution { Err(location() + " > resolution") }
        else { self.pixels.as_slice().validate_result(&other.pixels.as_slice(), options, || location() + " > pixels") }
    }
}
|
||||
|
||||
impl<Px> GetPixel for PixelVec<Px> where Px: Clone + Sync {
    type Pixel = Px;

    /// Clone the pixel at the given position out of the storage.
    fn get_pixel(&self, position: Vec2<usize>) -> Self::Pixel {
        self.get_pixel(position).clone()
    }
}
|
||||
|
||||
use std::fmt::*;
|
||||
|
||||
impl<T> Debug for PixelVec<T> {
    // prints only the element type and pixel count, not the (potentially huge) contents
    #[inline] fn fmt(&self, formatter: &mut Formatter<'_>) -> std::fmt::Result {
        write!(formatter, "[{}; {}]", std::any::type_name::<T>(), self.pixels.len())
    }
}
|
||||
|
||||
128
vendor/exr/src/image/read/any_channels.rs
vendored
Normal file
128
vendor/exr/src/image/read/any_channels.rs
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
//! How to read arbitrary channels.
|
||||
|
||||
use crate::image::*;
|
||||
use crate::meta::header::{Header};
|
||||
use crate::error::{Result, UnitResult};
|
||||
use crate::block::UncompressedBlock;
|
||||
use crate::block::lines::{LineRef};
|
||||
use crate::math::Vec2;
|
||||
use crate::meta::attribute::{Text, ChannelDescription};
|
||||
use crate::image::read::layers::{ReadChannels, ChannelsReader};
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
|
||||
/// A template that creates an [AnyChannelsReader] for each layer in the image.
/// This loads all channels for each layer.
/// The `ReadSamples` can, for example, be [ReadFlatSamples] or [ReadAllLevels<ReadFlatSamples>].
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ReadAnyChannels<ReadSamples> {

    /// The sample reading specification
    pub read_samples: ReadSamples
}
|
||||
|
||||
/// A template that creates a new [`SampleReader`] for each channel in each layer.
pub trait ReadSamples {

    /// The type of the temporary samples reader
    type Reader: SamplesReader;

    /// Create a single reader for a single channel of a layer
    fn create_sample_reader(&self, header: &Header, channel: &ChannelDescription) -> Result<Self::Reader>;
}
|
||||
|
||||
/// Processes pixel blocks from a file and accumulates them into a collection of arbitrary channels.
/// Loads all channels for each layer.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AnyChannelsReader<SamplesReader> {

    /// Stores a separate sample reader per channel in the layer
    /// (inline storage for up to 4 channels, e.g. RGBA)
    sample_channels_reader: SmallVec<[AnyChannelReader<SamplesReader>; 4]>,
}
|
||||
|
||||
/// Processes pixel blocks from a file and accumulates them into a single arbitrary channel.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AnyChannelReader<SamplesReader> {

    /// The custom reader that accumulates the pixel data for a single channel
    samples: SamplesReader,

    /// Temporarily accumulated meta data: the name of this channel.
    name: Text,

    /// Temporarily accumulated meta data: the sub-sampling rate of this channel.
    sampling_rate: Vec2<usize>,

    /// Temporarily accumulated meta data: whether samples are quantized linearly.
    quantize_linearly: bool,
}
|
||||
|
||||
/// Processes pixel blocks from a file and accumulates them into a single pixel channel.
/// For example, stores thousands of "Red" pixel values for a single layer.
pub trait SamplesReader {

    /// The type of resulting sample storage
    type Samples;

    /// Specify whether a single block of pixels should be loaded from the file
    fn filter_block(&self, tile: TileCoordinates) -> bool;

    /// Load a single pixel line, which has not been filtered, into the reader, accumulating the sample data
    fn read_line(&mut self, line: LineRef<'_>) -> UnitResult;

    /// Deliver the final accumulated sample storage for the image
    fn into_samples(self) -> Self::Samples;
}
|
||||
|
||||
|
||||
impl<'s, S: 's + ReadSamples> ReadChannels<'s> for ReadAnyChannels<S> {
|
||||
type Reader = AnyChannelsReader<S::Reader>;
|
||||
|
||||
fn create_channels_reader(&self, header: &Header) -> Result<Self::Reader> {
|
||||
let samples: Result<_> = header.channels.list.iter()
|
||||
.map(|channel: &ChannelDescription| Ok(AnyChannelReader {
|
||||
samples: self.read_samples.create_sample_reader(header, channel)?,
|
||||
name: channel.name.clone(),
|
||||
sampling_rate: channel.sampling,
|
||||
quantize_linearly: channel.quantize_linearly
|
||||
}))
|
||||
.collect();
|
||||
|
||||
Ok(AnyChannelsReader { sample_channels_reader: samples? })
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: SamplesReader> ChannelsReader for AnyChannelsReader<S> {
|
||||
type Channels = AnyChannels<S::Samples>;
|
||||
|
||||
fn filter_block(&self, tile: TileCoordinates) -> bool {
|
||||
self.sample_channels_reader.iter().any(|channel| channel.samples.filter_block(tile))
|
||||
}
|
||||
|
||||
fn read_block(&mut self, header: &Header, decompressed: UncompressedBlock) -> UnitResult {
|
||||
/*for (bytes, line) in LineIndex::lines_in_block(decompressed.index, header) {
|
||||
let channel = self.sample_channels_reader.get_mut(line.channel).unwrap();
|
||||
channel.samples.read_line(LineSlice { location: line, value: &decompressed.data[bytes] })?;
|
||||
}
|
||||
|
||||
Ok(())*/
|
||||
for line in decompressed.lines(&header.channels) {
|
||||
self.sample_channels_reader[line.location.channel].samples.read_line(line)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn into_channels(self) -> Self::Channels {
|
||||
AnyChannels { // not using `new()` as the channels are already sorted
|
||||
list: self.sample_channels_reader.into_iter()
|
||||
.map(|channel| AnyChannel {
|
||||
sample_data: channel.samples.into_samples(),
|
||||
|
||||
name: channel.name,
|
||||
quantize_linearly: channel.quantize_linearly,
|
||||
sampling: channel.sampling_rate
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
}
|
||||
209
vendor/exr/src/image/read/image.rs
vendored
Normal file
209
vendor/exr/src/image/read/image.rs
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
//! The last wrapper of image readers, finally containing the [`from_file(path)`] method.
|
||||
//! This completes the builder and reads a complete image.
|
||||
|
||||
use crate::image::*;
|
||||
use crate::meta::header::{Header, ImageAttributes};
|
||||
use crate::error::{Result, UnitResult};
|
||||
use crate::block::{UncompressedBlock, BlockIndex};
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
use std::path::Path;
|
||||
use std::io::{Read, BufReader};
|
||||
use std::io::Seek;
|
||||
use crate::meta::MetaData;
|
||||
use crate::block::reader::ChunksReader;
|
||||
|
||||
/// Specify whether to read the image in parallel,
/// whether to use pedantic error handling,
/// and a callback for the reading progress.
#[derive(Debug, Clone)]
pub struct ReadImage<OnProgress, ReadLayers> {

    // Called regularly throughout the loading process with the current progress.
    on_progress: OnProgress,

    // Specifies which layers and channels should be read from the file.
    read_layers: ReadLayers,

    // If true, abort on any missing or contradictory information in the file.
    pedantic: bool,

    // If true, decompress multiple pixel blocks on multiple threads at once.
    parallel: bool,
}
|
||||
|
||||
impl<F, L> ReadImage<F, L> where F: FnMut(f64)
{
    /// Uses relaxed error handling and parallel decompression.
    pub fn new(read_layers: L, on_progress: F) -> Self {
        Self {
            on_progress, read_layers,
            pedantic: false, parallel: true,
        }
    }

    /// Specify that any missing or unusual information should result in an error.
    /// Otherwise, `exrs` will try to compute or ignore missing information.
    ///
    /// If pedantic is true, then an error will be returned as soon as anything is missing in the file,
    /// or two values in the image contradict each other. If pedantic is false,
    /// then only fatal errors will be thrown. By default, reading an image is not pedantic,
    /// which means that slightly invalid files might still be readable.
    /// For example, if some attribute is missing but can be recomputed, this flag decides whether an error is thrown.
    /// Or if the pedantic flag is true and there are still bytes left after the decompression algorithm finished,
    /// an error is thrown, because this should not happen and something might be wrong with the file.
    /// Or if your application is a target of attacks, or if you want to emulate the original C++ library,
    /// you might want to switch to pedantic reading.
    pub fn pedantic(self) -> Self { Self { pedantic: true, ..self } }

    /// Specify that multiple pixel blocks should never be decompressed using multiple threads at once.
    /// This might be slower but uses less memory and less synchronization.
    pub fn non_parallel(self) -> Self { Self { parallel: false, ..self } }

    /// Specify a function to be called regularly throughout the loading process.
    /// Replaces all previously specified progress functions in this reader.
    pub fn on_progress<OnProgress>(self, on_progress: OnProgress) -> ReadImage<OnProgress, L>
        where OnProgress: FnMut(f64)
    {
        ReadImage {
            on_progress,
            read_layers: self.read_layers,
            pedantic: self.pedantic,
            parallel: self.parallel
        }
    }


    /// Read the exr image from a file.
    /// Use [`ReadImage::from_unbuffered`] instead, if you do not have a file.
    #[inline]
    #[must_use]
    pub fn from_file<Layers>(self, path: impl AsRef<Path>) -> Result<Image<Layers>>
        where for<'s> L: ReadLayers<'s, Layers = Layers>
    {
        self.from_unbuffered(std::fs::File::open(path)?)
    }

    /// Buffer the reader and then read the exr image from it.
    /// Use [`ReadImage::from_buffered`] instead, if your reader is an in-memory reader.
    /// Use [`ReadImage::from_file`] instead, if you have a file path.
    #[inline]
    #[must_use]
    pub fn from_unbuffered<Layers>(self, unbuffered: impl Read + Seek) -> Result<Image<Layers>>
        where for<'s> L: ReadLayers<'s, Layers = Layers>
    {
        self.from_buffered(BufReader::new(unbuffered))
    }

    /// Read the exr image from a buffered reader.
    /// Use [`ReadImage::from_file`] instead, if you have a file path.
    /// Use [`ReadImage::from_unbuffered`] instead, if this is not an in-memory reader.
    // TODO Use Parallel<> Wrapper to only require sendable byte source where parallel decompression is required
    #[must_use]
    pub fn from_buffered<Layers>(self, buffered: impl Read + Seek) -> Result<Image<Layers>>
        where for<'s> L: ReadLayers<'s, Layers = Layers>
    {
        // parse the meta data and prepare a chunk reader before decoding any pixels
        let chunks = crate::block::read(buffered, self.pedantic)?;
        self.from_chunks(chunks)
    }

    /// Read the exr image from an initialized chunks reader
    /// that has already extracted the meta data from the file.
    /// Use [`ReadImage::from_file`] instead, if you have a file path.
    /// Use [`ReadImage::from_buffered`] instead, if this is an in-memory reader.
    // TODO Use Parallel<> Wrapper to only require sendable byte source where parallel decompression is required
    #[must_use]
    pub fn from_chunks<Layers>(mut self, chunks_reader: crate::block::reader::Reader<impl Read + Seek>) -> Result<Image<Layers>>
        where for<'s> L: ReadLayers<'s, Layers = Layers>
    {
        let Self { pedantic, parallel, ref mut on_progress, ref mut read_layers } = self;

        // build one reader per layer from the file's headers, then wrap them
        // in a collector that also captures the shared image attributes
        let layers_reader = read_layers.create_layers_reader(chunks_reader.headers())?;
        let mut image_collector = ImageWithAttributesReader::new(chunks_reader.headers(), layers_reader)?;

        // skip all chunks that no layer reader is interested in
        let block_reader = chunks_reader
            .filter_chunks(pedantic, |meta, tile, block| {
                image_collector.filter_block(meta, tile, block)
            })?
            .on_progress(on_progress);

        // TODO propagate send requirement further upwards
        if parallel {
            block_reader.decompress_parallel(pedantic, |meta_data, block|{
                image_collector.read_block(&meta_data.headers, block)
            })?;
        }
        else {
            block_reader.decompress_sequential(pedantic, |meta_data, block|{
                image_collector.read_block(&meta_data.headers, block)
            })?;
        }

        Ok(image_collector.into_image())
    }
}
|
||||
|
||||
/// Processes blocks from a file and collects them into a complete `Image`.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct ImageWithAttributesReader<L> {
|
||||
image_attributes: ImageAttributes,
|
||||
layers_reader: L,
|
||||
}
|
||||
|
||||
impl<L> ImageWithAttributesReader<L> where L: LayersReader {
|
||||
|
||||
/// A new image reader with image attributes.
|
||||
pub fn new(headers: &[Header], layers_reader: L) -> Result<Self>
|
||||
{
|
||||
Ok(ImageWithAttributesReader {
|
||||
image_attributes: headers.first().as_ref().expect("invalid headers").shared_attributes.clone(),
|
||||
layers_reader,
|
||||
})
|
||||
}
|
||||
|
||||
/// Specify whether a single block of pixels should be loaded from the file
|
||||
fn filter_block(&self, meta: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool {
|
||||
self.layers_reader.filter_block(meta, tile, block)
|
||||
}
|
||||
|
||||
/// Load a single pixel block, which has not been filtered, into the reader, accumulating the image
|
||||
fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult {
|
||||
self.layers_reader.read_block(headers, block)
|
||||
}
|
||||
|
||||
/// Deliver the complete accumulated image
|
||||
fn into_image(self) -> Image<L::Layers> {
|
||||
Image {
|
||||
attributes: self.image_attributes,
|
||||
layer_data: self.layers_reader.into_layers()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// A template that creates a [`LayersReader`] for each layer in the file.
pub trait ReadLayers<'s> {

    /// The type of the resulting Layers
    type Layers;

    /// The type of the temporary layer reader
    type Reader: LayersReader<Layers = Self::Layers>;

    /// Create a single reader for a single layer
    fn create_layers_reader(&'s self, headers: &[Header]) -> Result<Self::Reader>;

    /// Specify that all attributes should be read from an image.
    /// Use `from_file(path)` on the return value of this method to actually decode an image.
    fn all_attributes(self) -> ReadImage<fn(f64), Self> where Self: Sized {
        ReadImage::new(self, ignore_progress)
    }
}

/// Processes pixel blocks from a file and accumulates them into a single image layer.
pub trait LayersReader {

    /// The type of resulting layers
    type Layers;

    /// Specify whether a single block of pixels should be loaded from the file
    fn filter_block(&self, meta: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool;

    /// Load a single pixel block, which has not been filtered, into the reader, accumulating the layer
    fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult;

    /// Deliver the final accumulated layers for the image
    fn into_layers(self) -> Self::Layers;
}
|
||||
|
||||
204
vendor/exr/src/image/read/layers.rs
vendored
Normal file
204
vendor/exr/src/image/read/layers.rs
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
//! How to read either a single or a list of layers.
|
||||
|
||||
use crate::image::*;
|
||||
use crate::meta::header::{Header, LayerAttributes};
|
||||
use crate::error::{Result, UnitResult, Error};
|
||||
use crate::block::{UncompressedBlock, BlockIndex};
|
||||
use crate::math::Vec2;
|
||||
use crate::image::read::image::{ReadLayers, LayersReader};
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
use crate::meta::MetaData;
|
||||
|
||||
/// Specify to read all channels, aborting if any one is invalid.
/// The `ReadChannels` parameter can be, for example,
/// [`ReadRgbaChannels`] or [`ReadAnyChannels<ReadFlatSamples>`].
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ReadAllLayers<ReadChannels> {

    /// The channel reading specification
    pub read_channels: ReadChannels,
}

/// Specify to read only the first layer which meets the previously specified requirements
// FIXME do not throw error on deep data but just skip it!
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ReadFirstValidLayer<ReadChannels> {

    /// The channel reading specification
    pub read_channels: ReadChannels,
}
|
||||
|
||||
/// A template that creates a [`ChannelsReader`] once for all channels per layer.
pub trait ReadChannels<'s> {

    /// The type of the temporary channels reader
    type Reader: ChannelsReader;

    /// Create a single reader for all channels of a specific layer
    fn create_channels_reader(&'s self, header: &Header) -> Result<Self::Reader>;


    /// Read only the first layer which meets the previously specified requirements.
    /// For example, skips layers with deep data, if specified earlier.
    /// Aborts if the image contains no layers.
    // TODO test if this filters non-deep layers while ignoring deep data layers!
    fn first_valid_layer(self) -> ReadFirstValidLayer<Self> where Self:Sized { ReadFirstValidLayer { read_channels: self } }

    // FIXME do not throw error on deep data but just skip it!


    /// Reads all layers, including an empty list. Aborts if any of the layers are invalid,
    /// even if only one of the layers contains unexpected data.
    fn all_layers(self) -> ReadAllLayers<Self> where Self:Sized { ReadAllLayers { read_channels: self } }

    // TODO pub fn all_valid_layers(self) -> ReadAllValidLayers<Self> { ReadAllValidLayers { read_channels: self } }
}
|
||||
|
||||
|
||||
/// Processes pixel blocks from a file and accumulates them into a list of layers.
/// For example, `ChannelsReader` can be
/// [`SpecificChannelsReader`] or [`AnyChannelsReader<FlatSamplesReader>`].
#[derive(Debug, Clone, PartialEq)]
pub struct AllLayersReader<ChannelsReader> {
    // One reader per layer, in the same order as the file's headers.
    layer_readers: SmallVec<[LayerReader<ChannelsReader>; 2]>, // TODO unpack struct?
}

/// Processes pixel blocks from a file and accumulates them into a single layer, using only the first.
/// For example, `ChannelsReader` can be
/// `SpecificChannelsReader` or `AnyChannelsReader<FlatSamplesReader>`.
#[derive(Debug, Clone, PartialEq)]
pub struct FirstValidLayerReader<ChannelsReader> {
    // The reader of the single chosen layer.
    layer_reader: LayerReader<ChannelsReader>,

    // The index of the chosen layer within the file's header list,
    // used to discard blocks belonging to other layers.
    layer_index: usize,
}

/// Processes pixel blocks from a file and accumulates them into a single layer.
/// For example, `ChannelsReader` can be
/// `SpecificChannelsReader` or `AnyChannelsReader<FlatSamplesReader>`.
#[derive(Debug, Clone, PartialEq)]
pub struct LayerReader<ChannelsReader> {
    // Accumulates the pixel data of all channels in this layer.
    channels_reader: ChannelsReader,

    // Attributes specific to this layer, cloned from its header.
    attributes: LayerAttributes,

    // The dimensions of this layer in pixels.
    size: Vec2<usize>,

    // How the pixel data of this layer was stored in the file.
    encoding: Encoding
}

/// Processes pixel blocks from a file and accumulates them into multiple channels per layer.
pub trait ChannelsReader {

    /// The type of the resulting channel collection
    type Channels;

    /// Specify whether a single block of pixels should be loaded from the file
    fn filter_block(&self, tile: TileCoordinates) -> bool;

    /// Load a single pixel block, which has not been filtered, into the reader, accumulating the channel data
    fn read_block(&mut self, header: &Header, block: UncompressedBlock) -> UnitResult;

    /// Deliver the final accumulated channel collection for the image
    fn into_channels(self) -> Self::Channels;
}
|
||||
|
||||
|
||||
impl<C> LayerReader<C> {
|
||||
fn new(header: &Header, channels_reader: C) -> Result<Self> {
|
||||
Ok(LayerReader {
|
||||
channels_reader,
|
||||
attributes: header.own_attributes.clone(),
|
||||
size: header.layer_size,
|
||||
encoding: Encoding {
|
||||
compression: header.compression,
|
||||
line_order: header.line_order,
|
||||
blocks: match header.blocks {
|
||||
crate::meta::BlockDescription::ScanLines => Blocks::ScanLines,
|
||||
crate::meta::BlockDescription::Tiles(TileDescription { tile_size, .. }) => Blocks::Tiles(tile_size)
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'s, C> ReadLayers<'s> for ReadAllLayers<C> where C: ReadChannels<'s> {
|
||||
type Layers = Layers<<C::Reader as ChannelsReader>::Channels>;
|
||||
type Reader = AllLayersReader<C::Reader>;
|
||||
|
||||
fn create_layers_reader(&'s self, headers: &[Header]) -> Result<Self::Reader> {
|
||||
let readers: Result<_> = headers.iter()
|
||||
.map(|header| LayerReader::new(header, self.read_channels.create_channels_reader(header)?))
|
||||
.collect();
|
||||
|
||||
Ok(AllLayersReader {
|
||||
layer_readers: readers?
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> LayersReader for AllLayersReader<C> where C: ChannelsReader {
|
||||
type Layers = Layers<C::Channels>;
|
||||
|
||||
fn filter_block(&self, _: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool {
|
||||
let layer = self.layer_readers.get(block.layer).expect("invalid layer index argument");
|
||||
layer.channels_reader.filter_block(tile)
|
||||
}
|
||||
|
||||
fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult {
|
||||
self.layer_readers
|
||||
.get_mut(block.index.layer).expect("invalid layer index argument")
|
||||
.channels_reader.read_block(headers.get(block.index.layer).expect("invalid header index in block"), block)
|
||||
}
|
||||
|
||||
fn into_layers(self) -> Self::Layers {
|
||||
self.layer_readers
|
||||
.into_iter()
|
||||
.map(|layer| Layer {
|
||||
channel_data: layer.channels_reader.into_channels(),
|
||||
attributes: layer.attributes,
|
||||
size: layer.size,
|
||||
encoding: layer.encoding
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<'s, C> ReadLayers<'s> for ReadFirstValidLayer<C> where C: ReadChannels<'s> {
|
||||
type Layers = Layer<<C::Reader as ChannelsReader>::Channels>;
|
||||
type Reader = FirstValidLayerReader<C::Reader>;
|
||||
|
||||
fn create_layers_reader(&'s self, headers: &[Header]) -> Result<Self::Reader> {
|
||||
headers.iter().enumerate()
|
||||
.flat_map(|(index, header)|
|
||||
self.read_channels.create_channels_reader(header)
|
||||
.and_then(|reader| Ok(FirstValidLayerReader {
|
||||
layer_reader: LayerReader::new(header, reader)?,
|
||||
layer_index: index
|
||||
}))
|
||||
.ok()
|
||||
)
|
||||
.next()
|
||||
.ok_or(Error::invalid("no layer in the image matched your specified requirements"))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<C> LayersReader for FirstValidLayerReader<C> where C: ChannelsReader {
|
||||
type Layers = Layer<C::Channels>;
|
||||
|
||||
fn filter_block(&self, _: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool {
|
||||
block.layer == self.layer_index && self.layer_reader.channels_reader.filter_block(tile)
|
||||
}
|
||||
|
||||
fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult {
|
||||
debug_assert_eq!(block.index.layer, self.layer_index, "block should have been filtered out");
|
||||
self.layer_reader.channels_reader.read_block(&headers[self.layer_index], block)
|
||||
}
|
||||
|
||||
fn into_layers(self) -> Self::Layers {
|
||||
Layer {
|
||||
channel_data: self.layer_reader.channels_reader.into_channels(),
|
||||
attributes: self.layer_reader.attributes,
|
||||
size: self.layer_reader.size,
|
||||
encoding: self.layer_reader.encoding
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
219
vendor/exr/src/image/read/levels.rs
vendored
Normal file
219
vendor/exr/src/image/read/levels.rs
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
//! How to read a set of resolution levels.
|
||||
|
||||
use crate::meta::*;
|
||||
use crate::image::*;
|
||||
use crate::error::*;
|
||||
use crate::meta::attribute::*;
|
||||
use crate::image::read::any_channels::*;
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
use crate::image::read::specific_channels::*;
|
||||
use crate::image::recursive::*;
|
||||
use crate::math::Vec2;
|
||||
use crate::block::lines::LineRef;
|
||||
use crate::block::samples::*;
|
||||
use crate::meta::header::{Header};
|
||||
|
||||
|
||||
// Note: In the resulting image, the `FlatSamples` are placed
// directly inside the channels, without `LargestLevel<>` indirection
/// Specify to read only the highest resolution level, skipping all smaller variations.
/// The sample storage can be [`ReadFlatSamples`].
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ReadLargestLevel<DeepOrFlatSamples> {

    /// The sample reading specification
    pub read_samples: DeepOrFlatSamples
}


// FIXME rgba levels???

// Read the largest level, directly, without intermediate structs
impl<DeepOrFlatSamples> ReadLargestLevel<DeepOrFlatSamples> {

    /// Read all arbitrary channels in each layer.
    pub fn all_channels(self) -> ReadAnyChannels<DeepOrFlatSamples> { ReadAnyChannels { read_samples: self.read_samples } } // Instead of Self, the `FlatSamples` are used directly

    /// Read only layers that contain rgba channels. Skips any other channels in the layer.
    /// The alpha channel will contain the value `1.0` if no alpha channel can be found in the image.
    ///
    /// Using two closures, define how to store the pixels.
    /// The first closure creates an image, and the second closure inserts a single pixel.
    /// The type of the pixel can be defined by the second closure;
    /// it must be a tuple containing four values, each being either `f16`, `f32`, `u32` or `Sample`.
    ///
    /// Throws an error for images with deep data or subsampling.
    /// Use `specific_channels` or `all_channels` if you want to read something other than rgba.
    pub fn rgba_channels<R,G,B,A, Create, Set, Pixels>(
        self, create_pixels: Create, set_pixel: Set
    ) -> CollectPixels<
        ReadOptionalChannel<ReadRequiredChannel<ReadRequiredChannel<ReadRequiredChannel<NoneMore, R>, G>, B>, A>,
        (R, G, B, A), Pixels, Create, Set
    >
        where
            R: FromNativeSample, G: FromNativeSample, B: FromNativeSample, A: FromNativeSample,
            Create: Fn(Vec2<usize>, &RgbaChannels) -> Pixels,
            Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B,A)),
    {
        // build the channel specification incrementally:
        // three required channels plus an optional alpha with a default of 1.0
        self.specific_channels()
            .required("R").required("G").required("B")
            .optional("A", A::from_f32(1.0))
            .collect_pixels(create_pixels, set_pixel)
    }

    /// Read only layers that contain rgb channels. Skips any other channels in the layer.
    ///
    /// Using two closures, define how to store the pixels.
    /// The first closure creates an image, and the second closure inserts a single pixel.
    /// The type of the pixel can be defined by the second closure;
    /// it must be a tuple containing three values, each being either `f16`, `f32`, `u32` or `Sample`.
    ///
    /// Throws an error for images with deep data or subsampling.
    /// Use `specific_channels` or `all_channels` if you want to read something other than rgb.
    pub fn rgb_channels<R,G,B, Create, Set, Pixels>(
        self, create_pixels: Create, set_pixel: Set
    ) -> CollectPixels<
        ReadRequiredChannel<ReadRequiredChannel<ReadRequiredChannel<NoneMore, R>, G>, B>,
        (R, G, B), Pixels, Create, Set
    >
        where
            R: FromNativeSample, G: FromNativeSample, B: FromNativeSample,
            Create: Fn(Vec2<usize>, &RgbChannels) -> Pixels,
            Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B)),
    {
        self.specific_channels()
            .required("R").required("G").required("B")
            .collect_pixels(create_pixels, set_pixel)
    }

    /// Read only layers that contain the specified channels, skipping any other channels in the layer.
    /// Further specify which channels should be included by calling `.required("ChannelName")`
    /// or `.optional("ChannelName", default_value)` on the result of this function.
    /// Call `collect_pixels` afterwards to define the pixel container for your set of channels.
    ///
    /// Throws an error for images with deep data or subsampling.
    pub fn specific_channels(self) -> ReadZeroChannels {
        ReadZeroChannels { }
    }
}
|
||||
|
||||
/// Specify to read all contained resolution levels from the image, if any.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ReadAllLevels<DeepOrFlatSamples> {

    /// The sample reading specification
    pub read_samples: DeepOrFlatSamples
}

impl<ReadDeepOrFlatSamples> ReadAllLevels<ReadDeepOrFlatSamples> {

    /// Read all arbitrary channels in each layer.
    pub fn all_channels(self) -> ReadAnyChannels<Self> { ReadAnyChannels { read_samples: self } }

    // TODO specific channels for multiple resolution levels

}

/*pub struct ReadLevels<S> {
    read_samples: S,
}*/

/// Processes pixel blocks from a file and accumulates them into multiple levels per channel.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AllLevelsReader<SamplesReader> {
    // One samples reader per resolution level (singular, mip, or rip layout).
    levels: Levels<SamplesReader>,
}

/// A template that creates a [`SamplesReader`] once for each resolution level.
pub trait ReadSamplesLevel {

    /// The type of the temporary level reader
    type Reader: SamplesReader;

    /// Create a single reader for a single resolution level
    fn create_samples_level_reader(&self, header: &Header, channel: &ChannelDescription, level: Vec2<usize>, resolution: Vec2<usize>) -> Result<Self::Reader>;
}
|
||||
|
||||
|
||||
impl<S: ReadSamplesLevel> ReadSamples for ReadAllLevels<S> {
    type Reader = AllLevelsReader<S::Reader>;

    /// Build one samples reader for every resolution level described by the header.
    fn create_sample_reader(&self, header: &Header, channel: &ChannelDescription) -> Result<Self::Reader> {
        // per-channel resolution, accounting for channel subsampling
        let data_size = header.layer_size / channel.sampling;

        let levels = {
            if let crate::meta::BlockDescription::Tiles(tiles) = &header.blocks {
                match tiles.level_mode {
                    // NOTE(review): this branch passes `header.layer_size`, while the
                    // scan-line branch below passes the subsampled `data_size` —
                    // verify this is intended for subsampled channels.
                    LevelMode::Singular => Levels::Singular(self.read_samples.create_samples_level_reader(header, channel, Vec2(0,0), header.layer_size)?),

                    LevelMode::MipMap => Levels::Mip {
                        rounding_mode: tiles.rounding_mode,
                        level_data: {
                            let round = tiles.rounding_mode;
                            // mip maps shrink both axes together, so both level indices are equal
                            let maps: Result<LevelMaps<S::Reader>> = mip_map_levels(round, data_size)
                                .map(|(index, level_size)| self.read_samples.create_samples_level_reader(header, channel, Vec2(index, index), level_size))
                                .collect();

                            maps?
                        },
                    },

                    // TODO put this into Levels::new(..) ?
                    LevelMode::RipMap => Levels::Rip {
                        rounding_mode: tiles.rounding_mode,
                        level_data: {
                            let round = tiles.rounding_mode;
                            let level_count_x = compute_level_count(round, data_size.width());
                            let level_count_y = compute_level_count(round, data_size.height());
                            // rip maps shrink each axis independently; `index` is already a Vec2
                            let maps: Result<LevelMaps<S::Reader>> = rip_map_levels(round, data_size)
                                .map(|(index, level_size)| self.read_samples.create_samples_level_reader(header, channel, index, level_size))
                                .collect();

                            RipMaps {
                                map_data: maps?,
                                level_count: Vec2(level_count_x, level_count_y)
                            }
                        },
                    },
                }
            }

            // scan line blocks never have mip maps
            else {
                Levels::Singular(self.read_samples.create_samples_level_reader(header, channel, Vec2(0, 0), data_size)?)
            }
        };

        Ok(AllLevelsReader { levels })
    }
}
|
||||
|
||||
|
||||
impl<S: SamplesReader> SamplesReader for AllLevelsReader<S> {
|
||||
type Samples = Levels<S::Samples>;
|
||||
|
||||
fn filter_block(&self, _: TileCoordinates) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn read_line(&mut self, line: LineRef<'_>) -> UnitResult {
|
||||
self.levels.get_level_mut(line.location.level)?.read_line(line)
|
||||
}
|
||||
|
||||
fn into_samples(self) -> Self::Samples {
|
||||
match self.levels {
|
||||
Levels::Singular(level) => Levels::Singular(level.into_samples()),
|
||||
Levels::Mip { rounding_mode, level_data } => Levels::Mip {
|
||||
rounding_mode, level_data: level_data.into_iter().map(|s| s.into_samples()).collect(),
|
||||
},
|
||||
|
||||
Levels::Rip { rounding_mode, level_data } => Levels::Rip {
|
||||
rounding_mode,
|
||||
level_data: RipMaps {
|
||||
level_count: level_data.level_count,
|
||||
map_data: level_data.map_data.into_iter().map(|s| s.into_samples()).collect(),
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
207
vendor/exr/src/image/read/mod.rs
vendored
Normal file
207
vendor/exr/src/image/read/mod.rs
vendored
Normal file
@@ -0,0 +1,207 @@
|
||||
|
||||
//! Read an exr image.
|
||||
//!
|
||||
//! For great flexibility and customization, use the `read()` function.
|
||||
//! The return value of the `read()` function must be further customized before reading a file.
|
||||
|
||||
//!
|
||||
//! For very simple applications, you can alternatively use one of these functions:
|
||||
//!
|
||||
//! 1. `read_first_rgba_layer_from_file(path, your_constructor, your_pixel_setter)`:
|
||||
//! You specify how to store the pixels.
|
||||
//! The first layer containing rgba channels is then loaded from the file.
|
||||
//! Fails if no rgba layer can be found.
|
||||
//!
|
||||
//! 1. `read_all_rgba_layers_from_file(path, your_constructor, your_pixel_setter)`:
|
||||
//! You specify how to store the pixels.
|
||||
//! All layers containing rgba channels are then loaded from the file.
|
||||
//! Fails if any layer in the image does not contain rgba channels.
|
||||
//!
|
||||
//! 1. `read_first_flat_layer_from_file(path)`:
|
||||
//! The first layer containing non-deep data with arbitrary channels is loaded from the file.
|
||||
//! Fails if no non-deep layer can be found.
|
||||
//!
|
||||
//! 1. `read_all_flat_layers_from_file(path)`:
|
||||
//! All layers containing non-deep data with arbitrary channels are loaded from the file.
|
||||
//! Fails if any layer in the image contains deep data.
|
||||
//!
|
||||
//! 1. `read_all_data_from_file(path)`:
|
||||
//! All layers with arbitrary channels and all resolution levels are extracted from the file.
|
||||
//!
|
||||
//! Note: Currently does not support deep data, and currently fails
|
||||
//! if any layer in the image contains deep data.
|
||||
//!
|
||||
|
||||
// The following three stages are internally used to read an image.
|
||||
// 1. `ReadImage` - The specification. Contains everything the user wants to tell us about loading an image.
|
||||
// The data in this structure will be instantiated and might be borrowed.
|
||||
// 2. `ImageReader` - The temporary reader. Based on the specification of the blueprint,
|
||||
// a reader is instantiated, once for each layer.
|
||||
// This data structure accumulates the image data from the file.
|
||||
// It also owns temporary data and references the blueprint.
|
||||
// 3. `Image` - The clean image. The accumulated data from the Reader
|
||||
// is converted to the clean image structure, without temporary data.
|
||||
|
||||
pub mod image;
|
||||
pub mod layers;
|
||||
pub mod any_channels;
|
||||
pub mod levels;
|
||||
pub mod samples;
|
||||
pub mod specific_channels;
|
||||
|
||||
use crate::error::{Result};
|
||||
use crate::image::read::samples::{ReadFlatSamples};
|
||||
use std::path::Path;
|
||||
use crate::image::{AnyImage, AnyChannels, FlatSamples, Image, Layer, FlatImage, PixelLayersImage, RgbaChannels};
|
||||
use crate::image::read::image::ReadLayers;
|
||||
use crate::image::read::layers::ReadChannels;
|
||||
use crate::math::Vec2;
|
||||
use crate::prelude::{PixelImage};
|
||||
use crate::block::samples::FromNativeSample;
|
||||
|
||||
|
||||
/// All resolution levels, all channels, all layers.
/// Does not support deep data yet. Uses parallel decompression and relaxed error handling.
/// Inspect the source code of this function if you need customization.
pub fn read_all_data_from_file(path: impl AsRef<Path>) -> Result<AnyImage> {
    read()
        .no_deep_data() // TODO deep data
        .all_resolution_levels()
        .all_channels()
        .all_layers()
        .all_attributes()
        .from_file(path)
}

// FIXME do not throw error on deep data but just skip it!
/// No deep data, no resolution levels, all channels, all layers.
/// Uses parallel decompression and relaxed error handling.
/// Inspect the source code of this function if you need customization.
pub fn read_all_flat_layers_from_file(path: impl AsRef<Path>) -> Result<FlatImage> {
    read()
        .no_deep_data()
        .largest_resolution_level()
        .all_channels()
        .all_layers()
        .all_attributes()
        .from_file(path)
}

/// No deep data, no resolution levels, all channels, first layer.
/// Uses parallel decompression and relaxed error handling.
/// Inspect the source code of this function if you need customization.
pub fn read_first_flat_layer_from_file(path: impl AsRef<Path>) -> Result<Image<Layer<AnyChannels<FlatSamples>>>> {
    read()
        .no_deep_data()
        .largest_resolution_level()
        .all_channels()
        .first_valid_layer()
        .all_attributes()
        .from_file(path)
}

/// No deep data, no resolution levels, rgba channels, all layers.
/// If a single layer does not contain rgba data, this method returns an error.
/// Uses parallel decompression and relaxed error handling.
/// `Create` and `Set` can be closures, see the examples for more information.
/// Inspect the source code of this function if you need customization.
/// The alpha channel will contain the value `1.0` if no alpha channel can be found in the image.
///
/// Using two closures, define how to store the pixels.
/// The first closure creates an image, and the second closure inserts a single pixel.
/// The type of the pixel can be defined by the second closure;
/// it must be a tuple containing four values, each being either `f16`, `f32`, `u32` or `Sample`.
// FIXME Set and Create should not need to be static
pub fn read_all_rgba_layers_from_file<R,G,B,A, Set:'static, Create:'static, Pixels: 'static>(
    path: impl AsRef<Path>, create: Create, set_pixel: Set
)
    -> Result<PixelLayersImage<Pixels, RgbaChannels>>
    where
        R: FromNativeSample, G: FromNativeSample, B: FromNativeSample, A: FromNativeSample,
        Create: Fn(Vec2<usize>, &RgbaChannels) -> Pixels, // TODO type alias? CreateRgbaPixels<Pixels=Pixels>,
        Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B,A)),
{
    read()
        .no_deep_data()
        .largest_resolution_level()
        .rgba_channels(create, set_pixel)
        .all_layers()
        .all_attributes()
        .from_file(path)
}
|
||||
|
||||
/// No deep data, no resolution levels, rgba channels, choosing the first layer with rgba channels.
|
||||
/// Uses parallel decompression and relaxed error handling.
|
||||
/// `Create` and `Set` can be closures, see the examples for more information.
|
||||
/// Inspect the source code of this function if you need customization.
|
||||
/// The alpha channel will contain the value `1.0` if no alpha channel can be found in the image.
|
||||
///
|
||||
/// Using two closures, define how to store the pixels.
|
||||
/// The first closure creates an image, and the second closure inserts a single pixel.
|
||||
/// The type of the pixel can be defined by the second closure;
|
||||
/// it must be a tuple containing four values, each being either `f16`, `f32`, `u32` or `Sample`.
|
||||
// FIXME Set and Create should not need to be static
|
||||
pub fn read_first_rgba_layer_from_file<R,G,B,A, Set:'static, Create:'static, Pixels: 'static>(
|
||||
path: impl AsRef<Path>, create: Create, set_pixel: Set
|
||||
)
|
||||
-> Result<PixelImage<Pixels, RgbaChannels>>
|
||||
where
|
||||
R: FromNativeSample, G: FromNativeSample, B: FromNativeSample, A: FromNativeSample,
|
||||
Create: Fn(Vec2<usize>, &RgbaChannels) -> Pixels, // TODO type alias? CreateRgbaPixels<Pixels=Pixels>,
|
||||
Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B,A)),
|
||||
{
|
||||
read()
|
||||
.no_deep_data()
|
||||
.largest_resolution_level()
|
||||
.rgba_channels(create, set_pixel)
|
||||
.first_valid_layer()
|
||||
.all_attributes()
|
||||
.from_file(path)
|
||||
}
|
||||
|
||||
|
||||
/// Utilizes the builder pattern to configure an image reader. This is the initial struct.
/// Obtain an instance through the [`read`] function, then chain configuration calls on it.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct ReadBuilder;
|
||||
|
||||
/// Create a reader which can be used to load an exr image.
/// Allows you to exactly specify how to load the image, for example:
///
/// ```no_run
/// use exr::prelude::*;
///
/// // the type of this image depends on the chosen options
/// let image = read()
///     .no_deep_data() // (currently required)
///     .largest_resolution_level() // or `all_resolution_levels()`
///     .all_channels() // or `rgba_channels(constructor, setter)`
///     .all_layers() // or `first_valid_layer()`
///     .all_attributes() // (currently required)
///     .on_progress(|progress| println!("progress: {:.1}", progress*100.0)) // optional
///     .from_file("image.exr").unwrap(); // or `from_buffered(my_byte_slice)`
/// ```
///
/// You can alternatively use one of the following simpler functions:
/// 1. `read_first_flat_layer_from_file`
/// 1. `read_all_rgba_layers_from_file`
/// 1. `read_all_flat_layers_from_file`
/// 1. `read_all_data_from_file`
///
// TODO not panic but skip deep layers!
pub fn read() -> ReadBuilder { ReadBuilder }
|
||||
|
||||
impl ReadBuilder {

    /// Specify to handle only one sample per channel, disabling "deep data".
    /// This is currently the only supported mode; deep data support is not implemented yet.
    // TODO not panic but skip deep layers!
    pub fn no_deep_data(self) -> ReadFlatSamples { ReadFlatSamples }

    // pub fn any_resolution_levels() -> ReadBuilder<> {}

    // TODO
    // e. g. `let sum = reader.any_channels_with(|sample, sum| sum += sample)`
    // e. g. `let floats = reader.any_channels_with(|sample, f32_samples| f32_samples[index] = sample as f32)`
    // pub fn no_deep_data_with <S> (self, storage: S) -> FlatSamplesWith<S> { }

    // pub fn flat_and_deep_data(self) -> ReadAnySamples { ReadAnySamples }
}
|
||||
122
vendor/exr/src/image/read/samples.rs
vendored
Normal file
122
vendor/exr/src/image/read/samples.rs
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
//! How to read samples (a grid of `f32`, `f16` or `u32` values).
|
||||
|
||||
use crate::image::*;
|
||||
use crate::meta::header::{Header};
|
||||
use crate::error::{Result, UnitResult};
|
||||
use crate::block::lines::LineRef;
|
||||
use crate::math::Vec2;
|
||||
use crate::meta::attribute::{ChannelDescription, SampleType};
|
||||
use crate::image::read::any_channels::{SamplesReader, ReadSamples};
|
||||
use crate::image::read::levels::{ReadSamplesLevel, ReadAllLevels, ReadLargestLevel};
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
// use crate::image::read::layers::ReadChannels;
|
||||
|
||||
/// Specify to read only flat samples and no "deep data".
/// Marker type used in the read-builder chain; created by `ReadBuilder::no_deep_data`.
// FIXME do not throw error on deep data but just skip it!
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct ReadFlatSamples;
// pub struct ReadAnySamples;
|
||||
|
||||
impl ReadFlatSamples {

    // TODO
    // e. g. `let sum = reader.any_channels_with(|sample, sum| sum += sample)`
    // pub fn any_channels_with <S> (self, storage: S) -> { }

    /// Specify to read only the highest resolution level, skipping all smaller variations.
    pub fn largest_resolution_level(self) -> ReadLargestLevel<Self> { ReadLargestLevel { read_samples: self } }

    /// Specify to read all contained resolution levels from the image, if any.
    pub fn all_resolution_levels(self) -> ReadAllLevels<Self> { ReadAllLevels { read_samples: self } }

    // TODO pub fn specific_resolution_level<F: Fn(&[Vec2<usize>])->usize >(self, select_level: F) -> ReadLevelBy<Self> { ReadAllLevels { read_samples: self } }
}
|
||||
|
||||
|
||||
/*pub struct AnySamplesReader { TODO
|
||||
resolution: Vec2<usize>,
|
||||
samples: DeepAndFlatSamples
|
||||
}*/
|
||||
|
||||
/// Processes pixel blocks from a file and accumulates them into a grid of samples, for example "Red" or "Alpha".
#[derive(Debug, Clone, PartialEq)]
pub struct FlatSamplesReader {
    // the resolution level this reader collects (used to filter incoming blocks)
    level: Vec2<usize>,

    // width and height of the sample grid at this level
    resolution: Vec2<usize>,

    // the accumulated sample storage, pre-allocated to `resolution.area()` entries
    samples: FlatSamples
}
|
||||
|
||||
|
||||
// only used when samples is directly inside a channel, without levels
impl ReadSamples for ReadFlatSamples {
    type Reader = FlatSamplesReader;

    /// Delegates to the level-aware constructor, using level zero and the full layer size.
    fn create_sample_reader(&self, header: &Header, channel: &ChannelDescription) -> Result<Self::Reader> {
        self.create_samples_level_reader(header, channel, Vec2(0, 0), header.layer_size)
    }
}
|
||||
|
||||
impl ReadSamplesLevel for ReadFlatSamples {
|
||||
type Reader = FlatSamplesReader;
|
||||
|
||||
fn create_samples_level_reader(&self, _header: &Header, channel: &ChannelDescription, level: Vec2<usize>, resolution: Vec2<usize>) -> Result<Self::Reader> {
|
||||
Ok(FlatSamplesReader {
|
||||
level, resolution, // TODO sampling
|
||||
samples: match channel.sample_type {
|
||||
SampleType::F16 => FlatSamples::F16(vec![f16::ZERO; resolution.area()]),
|
||||
SampleType::F32 => FlatSamples::F32(vec![0.0; resolution.area()]),
|
||||
SampleType::U32 => FlatSamples::U32(vec![0; resolution.area()]),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl SamplesReader for FlatSamplesReader {
    type Samples = FlatSamples;

    /// Only accept blocks that belong to this reader's resolution level.
    fn filter_block(&self, tile: TileCoordinates) -> bool {
        tile.level_index == self.level
    }

    /// Copy one decompressed line of samples into the correct row of the sample grid.
    fn read_line(&mut self, line: LineRef<'_>) -> UnitResult {
        let index = line.location;
        let resolution = self.resolution;

        // the index is generated by ourselves and must always be correct
        debug_assert_eq!(index.level, self.level, "line should have been filtered");
        debug_assert!(index.position.x() + index.sample_count <= resolution.width(), "line index calculation bug");
        debug_assert!(index.position.y() < resolution.height(), "line index calculation bug");
        debug_assert_ne!(resolution.0, 0, "sample size bug");

        // row-major linear index of the line's first sample within the grid
        let start_index = index.position.y() * resolution.width() + index.position.x();
        let end_index = start_index + index.sample_count;

        debug_assert!(
            start_index < end_index && end_index <= self.samples.len(),
            "for resolution {:?}, this is an invalid line: {:?}",
            self.resolution, line.location
        );

        // dispatch on the storage variant once per line, not per sample
        match &mut self.samples {
            FlatSamples::F16(samples) =>
                line.read_samples_into_slice(&mut samples[start_index .. end_index])
                    .expect("writing line bytes failed"),

            FlatSamples::F32(samples) =>
                line.read_samples_into_slice(&mut samples[start_index .. end_index])
                    .expect("writing line bytes failed"),

            FlatSamples::U32(samples) =>
                line.read_samples_into_slice(&mut samples[start_index .. end_index])
                    .expect("writing line bytes failed"),
        }

        Ok(())
    }

    /// Finish reading and hand out the accumulated sample grid.
    fn into_samples(self) -> FlatSamples {
        self.samples
    }
}
|
||||
|
||||
463
vendor/exr/src/image/read/specific_channels.rs
vendored
Normal file
463
vendor/exr/src/image/read/specific_channels.rs
vendored
Normal file
@@ -0,0 +1,463 @@
|
||||
//! How to read arbitrary but specific selection of arbitrary channels.
|
||||
//! This is not a zero-cost abstraction.
|
||||
|
||||
use crate::image::recursive::*;
|
||||
use crate::block::samples::*;
|
||||
use crate::image::*;
|
||||
use crate::math::*;
|
||||
use crate::meta::header::*;
|
||||
use crate::error::*;
|
||||
use crate::block::UncompressedBlock;
|
||||
use crate::image::read::layers::{ChannelsReader, ReadChannels};
|
||||
use crate::block::chunk::TileCoordinates;
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use crate::io::Read;
|
||||
|
||||
|
||||
/// Can have one more channel reader attached.
/// Call `required` or `optional` on this object to declare another channel to be read from the file.
/// Call `collect_pixels` at last to define how the previously declared pixels should be stored.
pub trait ReadSpecificChannel: Sized + CheckDuplicates {

    /// A separate internal reader for the pixels. Will be of type `Recursive<_, SampleReader<_>>`,
    /// depending on the pixels of the specific channel combination.
    type RecursivePixelReader: RecursivePixelReader;

    /// Create a separate internal reader for the pixels of the specific channel combination.
    fn create_recursive_reader(&self, channels: &ChannelList) -> Result<Self::RecursivePixelReader>;

    /// Plan to read an additional channel from the image, with the specified name.
    /// If the channel cannot be found in the image when the image is read, the image will not be loaded.
    /// The generic parameter can usually be inferred from the closure in `collect_pixels`.
    ///
    /// # Panics
    /// Panics if a channel with the same name was already declared on this reader.
    fn required<Sample>(self, channel_name: impl Into<Text>) -> ReadRequiredChannel<Self, Sample> {
        let channel_name = channel_name.into();
        assert!(self.already_contains(&channel_name).not(), "a channel with the name `{}` is already defined", channel_name);
        ReadRequiredChannel { channel_name, previous_channels: self, px: Default::default() }
    }

    /// Plan to read an additional channel from the image, with the specified name.
    /// If the file does not contain this channel, the specified default sample will be returned instead.
    /// You can check whether the channel has been loaded by
    /// checking the presence of the optional channel description before instantiating your own image.
    /// The generic parameter can usually be inferred from the closure in `collect_pixels`.
    ///
    /// # Panics
    /// Panics if a channel with the same name was already declared on this reader.
    fn optional<Sample>(self, channel_name: impl Into<Text>, default_sample: Sample)
        -> ReadOptionalChannel<Self, Sample>
    {
        let channel_name = channel_name.into();
        assert!(self.already_contains(&channel_name).not(), "a channel with the name `{}` is already defined", channel_name);
        ReadOptionalChannel { channel_name, previous_channels: self, default_sample }
    }

    /// Using two closures, define how to store the pixels.
    /// The first closure creates an image, and the second closure inserts a single pixel.
    /// The type of the pixel can be defined by the second closure;
    /// it must be a tuple containing `f16`, `f32`, `u32` or `Sample` values.
    /// See the examples for more information.
    fn collect_pixels<Pixel, PixelStorage, CreatePixels, SetPixel>(
        self, create_pixels: CreatePixels, set_pixel: SetPixel
    ) -> CollectPixels<Self, Pixel, PixelStorage, CreatePixels, SetPixel>
        where
            <Self::RecursivePixelReader as RecursivePixelReader>::RecursivePixel: IntoTuple<Pixel>,
            <Self::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions: IntoNonRecursive,
            CreatePixels: Fn(
                Vec2<usize>,
                &<<Self::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions as IntoNonRecursive>::NonRecursive
            ) -> PixelStorage,
            SetPixel: Fn(&mut PixelStorage, Vec2<usize>, Pixel),
    {
        CollectPixels { read_channels: self, set_pixel, create_pixels, px: Default::default() }
    }
}
|
||||
|
||||
/// A reader containing sub-readers for reading the pixel content of an image.
pub trait RecursivePixelReader {

    /// The channel descriptions from the image.
    /// Will be converted to a tuple before being stored in `SpecificChannels<_, ChannelDescriptions>`.
    type RecursiveChannelDescriptions;

    /// Returns the channel descriptions based on the channels in the file.
    fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions;

    /// The pixel type. Will be converted to a tuple at the end of the process.
    type RecursivePixel: Copy + Default + 'static;

    /// Read one line of pixels from the raw block bytes into the provided pixel slice.
    /// `get_pixel` projects from the full pixel to the part this reader is responsible for.
    fn read_pixels<'s, FullPixel>(
        &self, bytes: &'s[u8], pixels: &mut [FullPixel],
        get_pixel: impl Fn(&mut FullPixel) -> &mut Self::RecursivePixel
    );
}
|
||||
|
||||
// does not use the generic `Recursive` struct to reduce the number of angle brackets in the public api
/// Used to read another specific channel from an image.
/// Contains the previous `ReadChannels` objects.
/// If the named channel is missing from the file, `default_sample` is used instead.
#[derive(Clone, Debug)]
pub struct ReadOptionalChannel<ReadChannels, Sample> {
    // the readers declared before this channel (recursive linked list)
    previous_channels: ReadChannels,

    // name of the channel to look up in the file
    channel_name: Text,

    // fallback value used when the channel is absent
    default_sample: Sample,
}
|
||||
|
||||
// does not use the generic `Recursive` struct to reduce the number of angle brackets in the public api
/// Used to read another specific channel from an image.
/// Contains the previous `ReadChannels` objects.
/// Loading fails if the named channel is missing from the file.
#[derive(Clone, Debug)]
pub struct ReadRequiredChannel<ReadChannels, Sample> {
    // the readers declared before this channel (recursive linked list)
    previous_channels: ReadChannels,

    // name of the channel that must exist in the file
    channel_name: Text,

    // zero-sized marker carrying the requested sample type
    px: PhantomData<Sample>,
}
|
||||
|
||||
/// Specifies how to collect all the specified channels into a number of individual pixels.
#[derive(Copy, Clone, Debug)]
pub struct CollectPixels<ReadChannels, Pixel, PixelStorage, CreatePixels, SetPixel> {
    // the chain of channel declarations built via `required`/`optional`
    read_channels: ReadChannels,

    // closure that allocates the user's pixel storage from the image size and channel descriptions
    create_pixels: CreatePixels,

    // closure that writes one pixel tuple into the user's storage
    set_pixel: SetPixel,

    // zero-sized marker carrying the pixel and storage types
    px: PhantomData<(Pixel, PixelStorage)>,
}
|
||||
|
||||
impl<Inner: CheckDuplicates, Sample> CheckDuplicates for ReadRequiredChannel<Inner, Sample> {
|
||||
fn already_contains(&self, name: &Text) -> bool {
|
||||
&self.channel_name == name || self.previous_channels.already_contains(name)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Inner: CheckDuplicates, Sample> CheckDuplicates for ReadOptionalChannel<Inner, Sample> {
|
||||
fn already_contains(&self, name: &Text) -> bool {
|
||||
&self.channel_name == name || self.previous_channels.already_contains(name)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'s, InnerChannels, Pixel, PixelStorage, CreatePixels, SetPixel: 's>
    ReadChannels<'s> for CollectPixels<InnerChannels, Pixel, PixelStorage, CreatePixels, SetPixel>
    where
        InnerChannels: ReadSpecificChannel,
        <InnerChannels::RecursivePixelReader as RecursivePixelReader>::RecursivePixel: IntoTuple<Pixel>,
        <InnerChannels::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions: IntoNonRecursive,
        CreatePixels: Fn(Vec2<usize>, &<<InnerChannels::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions as IntoNonRecursive>::NonRecursive) -> PixelStorage,
        SetPixel: Fn(&mut PixelStorage, Vec2<usize>, Pixel),
{
    type Reader = SpecificChannelsReader<
        PixelStorage, &'s SetPixel,
        InnerChannels::RecursivePixelReader,
        Pixel,
    >;

    /// Build the channel reader for one layer: resolve all declared channels against
    /// the header, then allocate the user's pixel storage via the `create_pixels` closure.
    /// Returns an error for deep data, which is not supported here.
    fn create_channels_reader(&'s self, header: &Header) -> Result<Self::Reader> {
        if header.deep { return Err(Error::invalid("`SpecificChannels` does not support deep data yet")) }

        let pixel_reader = self.read_channels.create_recursive_reader(&header.channels)?;
        let channel_descriptions = pixel_reader.get_descriptions().into_non_recursive();// TODO not call this twice

        let create = &self.create_pixels;
        let pixel_storage = create(header.layer_size, &channel_descriptions);

        Ok(SpecificChannelsReader {
            set_pixel: &self.set_pixel,
            pixel_storage,
            pixel_reader,
            px: Default::default()
        })
    }
}
|
||||
|
||||
/// The reader that holds the temporary data that is required to read some specified channels.
#[derive(Copy, Clone, Debug)]
pub struct SpecificChannelsReader<PixelStorage, SetPixel, PixelReader, Pixel> {
    // closure (or reference to one) that writes a single pixel into the storage
    set_pixel: SetPixel,

    // the user-provided storage being filled while blocks arrive
    pixel_storage: PixelStorage,

    // recursive per-channel reader that decodes raw block bytes
    pixel_reader: PixelReader,

    // zero-sized marker carrying the pixel tuple type
    px: PhantomData<Pixel>
}
|
||||
|
||||
impl<PixelStorage, SetPixel, PxReader, Pixel>
    ChannelsReader for SpecificChannelsReader<PixelStorage, SetPixel, PxReader, Pixel>
    where PxReader: RecursivePixelReader,
          PxReader::RecursivePixel: IntoTuple<Pixel>,
          PxReader::RecursiveChannelDescriptions: IntoNonRecursive,
          SetPixel: Fn(&mut PixelStorage, Vec2<usize>, Pixel),
{
    type Channels = SpecificChannels<PixelStorage, <PxReader::RecursiveChannelDescriptions as IntoNonRecursive>::NonRecursive>;

    /// Only accept blocks from the largest resolution level.
    fn filter_block(&self, tile: TileCoordinates) -> bool { tile.is_largest_resolution_level() } // TODO all levels

    /// Decode one uncompressed block line by line and forward each pixel to the user's setter.
    fn read_block(&mut self, header: &Header, block: UncompressedBlock) -> UnitResult {
        // one scratch row of recursive pixels, reused for every line of the block
        let mut pixels = vec![PxReader::RecursivePixel::default(); block.index.pixel_size.width()]; // TODO allocate once in self

        // split the block's bytes into rows; each row holds all channels for one scan line
        let byte_lines = block.data.chunks_exact(header.channels.bytes_per_pixel * block.index.pixel_size.width());
        debug_assert_eq!(byte_lines.len(), block.index.pixel_size.height(), "invalid block lines split");

        for (y_offset, line_bytes) in byte_lines.enumerate() { // TODO sampling
            // this two-step copy method should be very cache friendly in theory, and also reduce sample_type lookup count
            self.pixel_reader.read_pixels(line_bytes, &mut pixels, |px| px);

            for (x_offset, pixel) in pixels.iter().enumerate() {
                let set_pixel = &self.set_pixel;
                // block.index.pixel_position locates this block within the whole layer
                set_pixel(&mut self.pixel_storage, block.index.pixel_position + Vec2(x_offset, y_offset), pixel.into_tuple());
            }
        }

        Ok(())
    }

    /// Finish reading and pack the storage together with the resolved channel descriptions.
    fn into_channels(self) -> Self::Channels {
        SpecificChannels { channels: self.pixel_reader.get_descriptions().into_non_recursive(), pixels: self.pixel_storage }
    }
}
|
||||
|
||||
|
||||
/// Read zero channels from an image. Call `with_named_channel` on this object
/// to read as many channels as desired.
pub type ReadZeroChannels = NoneMore;

// base case of the recursion: an empty channel list yields an empty reader
impl ReadSpecificChannel for NoneMore {
    type RecursivePixelReader = NoneMore;
    fn create_recursive_reader(&self, _: &ChannelList) -> Result<Self::RecursivePixelReader> { Ok(NoneMore) }
}
|
||||
|
||||
impl<DefaultSample, ReadChannels> ReadSpecificChannel for ReadOptionalChannel<ReadChannels, DefaultSample>
    where ReadChannels: ReadSpecificChannel, DefaultSample: FromNativeSample + 'static,
{
    type RecursivePixelReader = Recursive<ReadChannels::RecursivePixelReader, OptionalSampleReader<DefaultSample>>;

    /// Look up this channel by name; if absent, remember only the default sample
    /// instead of failing, then recurse into the previously declared channels.
    fn create_recursive_reader(&self, channels: &ChannelList) -> Result<Self::RecursivePixelReader> {
        debug_assert!(self.previous_channels.already_contains(&self.channel_name).not(), "duplicate channel name: {}", self.channel_name);

        let inner_samples_reader = self.previous_channels.create_recursive_reader(channels)?;
        // `None` here means the channel is missing and the default sample will be used
        let reader = channels.channels_with_byte_offset()
            .find(|(_, channel)| channel.name == self.channel_name)
            .map(|(channel_byte_offset, channel)| SampleReader {
                channel_byte_offset, channel: channel.clone(),
                px: Default::default()
            });

        Ok(Recursive::new(inner_samples_reader, OptionalSampleReader {
            reader, default_sample: self.default_sample,
        }))
    }
}
|
||||
|
||||
impl<Sample, ReadChannels> ReadSpecificChannel for ReadRequiredChannel<ReadChannels, Sample>
    where ReadChannels: ReadSpecificChannel, Sample: FromNativeSample + 'static
{
    type RecursivePixelReader = Recursive<ReadChannels::RecursivePixelReader, SampleReader<Sample>>;

    /// Look up this channel by name, failing with an error if the layer does not contain it,
    /// then recurse into the previously declared channels.
    fn create_recursive_reader(&self, channels: &ChannelList) -> Result<Self::RecursivePixelReader> {
        let previous_samples_reader = self.previous_channels.create_recursive_reader(channels)?;
        let (channel_byte_offset, channel) = channels.channels_with_byte_offset()
                .find(|(_, channel)| channel.name == self.channel_name)
                .ok_or_else(|| Error::invalid(format!(
                    "layer does not contain all of your specified channels (`{}` is missing)",
                    self.channel_name
                )))?;

        Ok(Recursive::new(previous_samples_reader, SampleReader { channel_byte_offset, channel: channel.clone(), px: Default::default() }))
    }
}
|
||||
|
||||
/// Reader for a single channel. Generic over the concrete sample type (f16, f32, u32).
#[derive(Clone, Debug)]
pub struct SampleReader<Sample> {

    /// to be multiplied with line width!
    channel_byte_offset: usize,

    // description of the channel as declared in the file (name, sample type, sampling)
    channel: ChannelDescription,

    // zero-sized marker carrying the target sample type
    px: PhantomData<Sample>
}
|
||||
|
||||
/// Reader for a single channel. Generic over the concrete sample type (f16, f32, u32).
/// Can also skip reading a channel if it could not be found in the image.
#[derive(Clone, Debug)]
pub struct OptionalSampleReader<DefaultSample> {
    // `None` when the channel was absent from the file
    reader: Option<SampleReader<DefaultSample>>,

    // value filled in for every pixel when the channel is absent
    default_sample: DefaultSample,
}
|
||||
|
||||
impl<Sample: FromNativeSample> SampleReader<Sample> {
    /// Decode this channel's samples for one line from the raw line bytes,
    /// converting them to the target sample type and writing each into the
    /// sub-pixel selected by `get_sample`.
    fn read_own_samples<'s, FullPixel>(
        &self, bytes: &'s[u8], pixels: &mut [FullPixel],
        get_sample: impl Fn(&mut FullPixel) -> &mut Sample
    ){
        // channels are stored line-interleaved: this channel's bytes start at
        // byte-offset-per-pixel times the line width (pixels.len())
        let start_index = pixels.len() * self.channel_byte_offset;
        let byte_count = pixels.len() * self.channel.sample_type.bytes_per_sample();
        let mut own_bytes_reader = &mut &bytes[start_index .. start_index + byte_count]; // TODO check block size somewhere
        let mut samples_out = pixels.iter_mut().map(|pixel| get_sample(pixel));

        // match the type once for the whole line, not on every single sample
        match self.channel.sample_type {
            SampleType::F16 => read_and_convert_all_samples_batched(
                &mut own_bytes_reader, &mut samples_out,
                Sample::from_f16s
            ),

            SampleType::F32 => read_and_convert_all_samples_batched(
                &mut own_bytes_reader, &mut samples_out,
                Sample::from_f32s
            ),

            SampleType::U32 => read_and_convert_all_samples_batched(
                &mut own_bytes_reader, &mut samples_out,
                Sample::from_u32s
            ),
        }

        debug_assert!(samples_out.next().is_none(), "not all samples have been converted");
        debug_assert!(own_bytes_reader.is_empty(), "bytes left after reading all samples");
    }
}
|
||||
|
||||
|
||||
/// Does the same as `convert_batch(in_bytes.chunks().map(From::from_bytes))`, but vectorized.
|
||||
/// Reads the samples for one line, using the sample type specified in the file,
|
||||
/// and then converts those to the desired sample types.
|
||||
/// Uses batches to allow vectorization, converting multiple values with one instruction.
|
||||
fn read_and_convert_all_samples_batched<'t, From, To>(
|
||||
mut in_bytes: impl Read,
|
||||
out_samples: &mut impl ExactSizeIterator<Item=&'t mut To>,
|
||||
convert_batch: fn(&[From], &mut [To])
|
||||
) where From: Data + Default + Copy, To: 't + Default + Copy
|
||||
{
|
||||
// this is not a global! why is this warning triggered?
|
||||
#[allow(non_upper_case_globals)]
|
||||
const batch_size: usize = 16;
|
||||
|
||||
let total_sample_count = out_samples.len();
|
||||
let batch_count = total_sample_count / batch_size;
|
||||
let remaining_samples_count = total_sample_count % batch_size;
|
||||
|
||||
let len_error_msg = "sample count was miscalculated";
|
||||
let byte_error_msg = "error when reading from in-memory slice";
|
||||
|
||||
// write samples from a given slice to the output iterator. should be inlined.
|
||||
let output_n_samples = &mut move |samples: &[To]| {
|
||||
for converted_sample in samples {
|
||||
*out_samples.next().expect(len_error_msg) = *converted_sample;
|
||||
}
|
||||
};
|
||||
|
||||
// read samples from the byte source into a given slice. should be inlined.
|
||||
// todo: use #[inline] when available
|
||||
// error[E0658]: attributes on expressions are experimental,
|
||||
// see issue #15701 <https://github.com/rust-lang/rust/issues/15701> for more information
|
||||
let read_n_samples = &mut move |samples: &mut [From]| {
|
||||
Data::read_slice(&mut in_bytes, samples).expect(byte_error_msg);
|
||||
};
|
||||
|
||||
// temporary arrays with fixed size, operations should be vectorized within these arrays
|
||||
let mut source_samples_batch: [From; batch_size] = Default::default();
|
||||
let mut desired_samples_batch: [To; batch_size] = Default::default();
|
||||
|
||||
// first convert all whole batches, size statically known to be 16 element arrays
|
||||
for _ in 0 .. batch_count {
|
||||
read_n_samples(&mut source_samples_batch);
|
||||
convert_batch(source_samples_batch.as_slice(), desired_samples_batch.as_mut_slice());
|
||||
output_n_samples(&desired_samples_batch);
|
||||
}
|
||||
|
||||
// then convert a partial remaining batch, size known only at runtime
|
||||
if remaining_samples_count != 0 {
|
||||
let source_samples_batch = &mut source_samples_batch[..remaining_samples_count];
|
||||
let desired_samples_batch = &mut desired_samples_batch[..remaining_samples_count];
|
||||
|
||||
read_n_samples(source_samples_batch);
|
||||
convert_batch(source_samples_batch, desired_samples_batch);
|
||||
output_n_samples(desired_samples_batch);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    /// The batched conversion must produce exactly the same samples as a
    /// plain per-sample conversion, for sizes above, below, and not divisible
    /// by the batch size.
    #[test]
    fn equals_naive_f32(){
        for total_array_size in [3, 7, 30, 41, 120, 10_423] {
            let input_f32s = (0..total_array_size).map(|_| rand::random::<f32>()).collect::<Vec<f32>>();
            let in_f32s_bytes = input_f32s.iter().cloned().flat_map(f32::to_le_bytes).collect::<Vec<u8>>();

            // pre-fill the output with random garbage so leftover values would be detected
            let mut out_f16_samples_batched = vec![
                f16::from_f32(rand::random::<f32>());
                total_array_size
            ];

            read_and_convert_all_samples_batched(
                &mut in_f32s_bytes.as_slice(),
                &mut out_f16_samples_batched.iter_mut(),
                f16::from_f32s
            );

            // reference conversion: one sample at a time
            let out_f16_samples_naive = input_f32s.iter()
                .cloned().map(f16::from_f32);

            assert!(out_f16_samples_naive.eq(out_f16_samples_batched));
        }
    }
}
|
||||
|
||||
|
||||
// base case of the recursion: zero channels, nothing to describe or read
impl RecursivePixelReader for NoneMore {
    type RecursiveChannelDescriptions = NoneMore;
    fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions { NoneMore }

    type RecursivePixel = NoneMore;

    fn read_pixels<'s, FullPixel>(
        &self, _: &'s[u8], _: &mut [FullPixel],
        _: impl Fn(&mut FullPixel) -> &mut NoneMore
    ){}
}
|
||||
|
||||
// recursive case for a required channel: read this channel's samples,
// then delegate the remaining channels to the inner reader
impl<Sample, InnerReader: RecursivePixelReader>
    RecursivePixelReader
    for Recursive<InnerReader, SampleReader<Sample>>
    where Sample: FromNativeSample + 'static
{
    type RecursiveChannelDescriptions = Recursive<InnerReader::RecursiveChannelDescriptions, ChannelDescription>;
    fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions { Recursive::new(self.inner.get_descriptions(), self.value.channel.clone()) }

    type RecursivePixel = Recursive<InnerReader::RecursivePixel, Sample>;

    fn read_pixels<'s, FullPixel>(
        &self, bytes: &'s[u8], pixels: &mut [FullPixel],
        get_pixel: impl Fn(&mut FullPixel) -> &mut Self::RecursivePixel
    ) {
        self.value.read_own_samples(bytes, pixels, |px| &mut get_pixel(px).value);
        self.inner.read_pixels(bytes, pixels, |px| &mut get_pixel(px).inner);
    }
}
|
||||
|
||||
// recursive case for an optional channel: read this channel's samples if the
// channel exists, otherwise fill in the default, then delegate to the inner reader
impl<Sample, InnerReader: RecursivePixelReader>
    RecursivePixelReader
    for Recursive<InnerReader, OptionalSampleReader<Sample>>
    where Sample: FromNativeSample + 'static
{
    type RecursiveChannelDescriptions = Recursive<InnerReader::RecursiveChannelDescriptions, Option<ChannelDescription>>;
    fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions { Recursive::new(
        self.inner.get_descriptions(), self.value.reader.as_ref().map(|reader| reader.channel.clone())
    ) }

    type RecursivePixel = Recursive<InnerReader::RecursivePixel, Sample>;

    fn read_pixels<'s, FullPixel>(
        &self, bytes: &'s[u8], pixels: &mut [FullPixel],
        get_pixel: impl Fn(&mut FullPixel) -> &mut Self::RecursivePixel
    ) {
        if let Some(reader) = &self.value.reader {
            reader.read_own_samples(bytes, pixels, |px| &mut get_pixel(px).value);
        }
        else {
            // if this channel is optional and was not found in the file, fill the default sample
            for pixel in pixels.iter_mut() {
                get_pixel(pixel).value = self.value.default_sample;
            }
        }

        self.inner.read_pixels(bytes, pixels, |px| &mut get_pixel(px).inner);
    }
}
|
||||
|
||||
|
||||
178
vendor/exr/src/image/recursive.rs
vendored
Normal file
178
vendor/exr/src/image/recursive.rs
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
//! A generic wrapper which can be used to represent recursive types.
|
||||
//! Supports conversion from and to tuples of the same size.
|
||||
|
||||
/// Terminator of the recursive list: signals that no further entries follow.
/// Appears as the innermost element of any `Recursive<NoneMore, YourValue>` chain.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct NoneMore;
|
||||
|
||||
/// A type-level linked list holding one `Value` per node.
/// Mainly models an arbitrary number of channels without needing
/// trait implementations for every tuple arity.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Recursive<Inner, Value> {
    /// The rest of the list: either `NoneMore` or a shorter `Recursive<…>` chain.
    pub inner: Inner,

    /// The entry stored at this node of the list.
    pub value: Value,
}

impl<Inner, Value> Recursive<Inner, Value> {
    /// Shorthand constructor; identical to writing the struct literal by hand.
    pub fn new(inner: Inner, value: Value) -> Self { Self { inner, value } }
}
|
||||
|
||||
/// Convert this recursive type into a tuple.
/// This is nice as it will require less typing for the same type.
/// A type might or might not be convertible to the specified `Tuple` type.
// Note: implemented via the blanket impl over `IntoNonRecursive` further below.
pub trait IntoTuple<Tuple> {
    /// Convert this recursive type to a nice tuple.
    fn into_tuple(self) -> Tuple;
}
|
||||
|
||||
/// Convert this recursive type into a tuple.
/// This is nice as it will require less typing for the same type.
/// A type will be converted to the specified `Self::NonRecursive` type.
pub trait IntoNonRecursive {
    /// The resulting tuple type.
    type NonRecursive;

    /// Convert this recursive type to a nice tuple.
    fn into_non_recursive(self) -> Self::NonRecursive;
}
|
||||
|
||||
/// Create a recursive type from this tuple.
// Inverse direction of `IntoNonRecursive`; implemented for tuples by the macros below.
pub trait IntoRecursive {
    /// The recursive type resulting from this tuple.
    type Recursive;

    /// Create a recursive type from this tuple.
    fn into_recursive(self) -> Self::Recursive;
}
|
||||
|
||||
// The list terminator is already in recursive form; converting is the identity.
impl IntoRecursive for NoneMore {
    type Recursive = Self;
    fn into_recursive(self) -> Self::Recursive { self }
}
|
||||
|
||||
// Recursively convert the inner list while keeping this node's value as-is.
impl<Inner: IntoRecursive, Value> IntoRecursive for Recursive<Inner, Value> {
    type Recursive = Recursive<Inner::Recursive, Value>;
    fn into_recursive(self) -> Self::Recursive { Recursive::new(self.inner.into_recursive(), self.value) }
}
|
||||
|
||||
// Automatically implement IntoTuple so we have to generate less code in the macros:
// every type that knows its non-recursive form is convertible to exactly that tuple.
impl<I: IntoNonRecursive> IntoTuple<I::NonRecursive> for I {
    fn into_tuple(self) -> <I as IntoNonRecursive>::NonRecursive {
        self.into_non_recursive()
    }
}
|
||||
|
||||
// Implement traits for the empty tuple, the macro doesn't handle that.
// The empty tuple corresponds to the empty list, i.e. `NoneMore`.
impl IntoRecursive for () {
    type Recursive = NoneMore;
    fn into_recursive(self) -> Self::Recursive { NoneMore }
}
|
||||
|
||||
// The empty list converts back into the empty tuple.
impl IntoNonRecursive for NoneMore {
    type NonRecursive = ();

    fn into_non_recursive(self) -> Self::NonRecursive {
        ()
    }
}
|
||||
|
||||
/// Generates the recursive type corresponding to this tuple:
/// ```nocheck
/// gen_recursive_type!(A, B, C)
/// => Recursive<Recursive<Recursive<NoneMore, A>, B>, C>
/// ```
// Expects the type names in reverse order (the first argument becomes the outermost node).
macro_rules! gen_recursive_type {
    // base case: no names left, terminate the list
    () => { NoneMore };
    // peel off one name and nest the rest inside it
    ($last:ident $(,$not_last:ident)*) => {
        Recursive<gen_recursive_type!($($not_last),*), $last>
    };
}
|
||||
|
||||
/// Generates the recursive value corresponding to the given indices:
/// ```nocheck
/// gen_recursive_value(self; 1, 0)
/// => Recursive { inner: Recursive { inner: NoneMore, value: self.0 }, value: self.1 }
/// ```
// Expects the tuple indices in reverse order, matching `gen_recursive_type!`.
macro_rules! gen_recursive_value {
    // base case: no indices left, terminate the list
    ($self:ident;) => { NoneMore };
    // peel off one index and nest the remaining fields inside
    ($self:ident; $last:tt $(,$not_last:tt)*) => {
        Recursive { inner: gen_recursive_value!($self; $($not_last),*), value: $self.$last }
    };
}
|
||||
|
||||
/// Generates the into_tuple value corresponding to the given type names:
/// ```nocheck
/// gen_tuple_value(self; A, B, C)
/// => (self.inner.inner.value, self.inner.value, self.value)
/// ```
// Works by accumulating one `.inner` access path per remaining name (`@` marks the
// internal accumulator rules), then appending `.value` to every path at the end.
macro_rules! gen_tuple_value {
    // entry point: start with an empty accumulator
    ($self:ident; $($all:ident),* ) => {
        gen_tuple_value!(@ $self; (); $($all),* )
    };

    // all names consumed: emit the tuple of `.value` accesses
    (@ $self:ident; ($($state:expr),*);) => { ($($state .value,)*) };
    // consume one name: deepen every existing path by `.inner` and add a fresh `$self` path
    (@ $self:ident; ($($state:expr),*); $last:ident $(,$not_last:ident)* ) => {
        gen_tuple_value!(@ $self; ($($state .inner,)* $self); $($not_last),* )
    };
}
|
||||
|
||||
/// Generate the trait implementations given a sequence of type names in both directions and the indices backwards:
/// ```nocheck
/// generate_single(A, B, C; C, B, A; 2, 1, 0)
/// ```
// Emits, for one tuple arity, both conversions:
// recursive -> tuple (`IntoNonRecursive`) and tuple -> recursive (`IntoRecursive`).
macro_rules! generate_single {
    ( $($name_fwd:ident),* ; $($name_back:ident),* ; $($index_back:tt),*) => {
        impl<$($name_fwd),*> IntoNonRecursive for gen_recursive_type!($($name_back),*) {
            type NonRecursive = ($($name_fwd,)*);
            fn into_non_recursive(self) -> Self::NonRecursive {
                gen_tuple_value!(self; $($name_fwd),*)
            }
        }

        impl<$($name_fwd),*> IntoRecursive for ($($name_fwd,)*) {
            type Recursive = gen_recursive_type!($($name_back),*);
            fn into_recursive(self) -> Self::Recursive {
                gen_recursive_value!(self; $($index_back),*)
            }
        }
    };
}
|
||||
|
||||
// Instantiate the tuple <-> recursive conversions for all arities from 1 to 32.
// Names past `Z` continue as `A1`, `B1`, … .
generate_single!(A; A; 0);
generate_single!(A,B; B,A; 1,0);
generate_single!(A,B,C; C,B,A; 2,1,0);
generate_single!(A,B,C,D; D,C,B,A; 3,2,1,0);
generate_single!(A,B,C,D,E; E,D,C,B,A; 4,3,2,1,0);
generate_single!(A,B,C,D,E,F; F,E,D,C,B,A; 5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G; G,F,E,D,C,B,A; 6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H; H,G,F,E,D,C,B,A; 7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I; I,H,G,F,E,D,C,B,A; 8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J; J,I,H,G,F,E,D,C,B,A; 9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K; K,J,I,H,G,F,E,D,C,B,A; 10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L; L,K,J,I,H,G,F,E,D,C,B,A; 11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M; M,L,K,J,I,H,G,F,E,D,C,B,A; 12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N; N,M,L,K,J,I,H,G,F,E,D,C,B,A; 13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O; O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P; P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q; Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R; R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S; S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T; T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U; U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V; V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W; W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X; X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y; Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z; Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1; A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1; B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1; C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1,D1; D1,C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1,D1,E1; E1,D1,C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1,D1,E1,F1; F1,E1,D1,C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
|
||||
407
vendor/exr/src/image/write/channels.rs
vendored
Normal file
407
vendor/exr/src/image/write/channels.rs
vendored
Normal file
@@ -0,0 +1,407 @@
|
||||
//! How to read arbitrary channels and rgb channels.
|
||||
|
||||
use crate::prelude::*;
|
||||
use crate::io::*;
|
||||
use crate::math::*;
|
||||
use crate::meta::{header::*, attribute::*};
|
||||
use crate::block::*;
|
||||
use crate::image::recursive::*;
|
||||
use crate::block::samples::*;
|
||||
use crate::image::write::samples::*;
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
|
||||
/// Enables an image containing this list of channels to be written to a file.
pub trait WritableChannels<'slf> {

    /// Generate the file meta data for this list of channels.
    fn infer_channel_list(&self) -> ChannelList;

    /// Generate the file meta data of whether and how resolution levels should be stored in the file.
    fn infer_level_modes(&self) -> (LevelMode, RoundingMode);

    /// The type of temporary writer.
    type Writer: ChannelsWriter;

    /// Create a temporary writer for this list of channels.
    fn create_writer(&'slf self, header: &Header) -> Self::Writer;
}
|
||||
|
||||
/// A temporary writer for a list of channels.
// `Sync` because blocks may be extracted from multiple threads in parallel.
pub trait ChannelsWriter: Sync {

    /// Deliver a block of pixels, containing all channel data, to be stored in the file
    fn extract_uncompressed_block(&self, header: &Header, block: BlockIndex) -> Vec<u8>; // TODO return uncompressed block?
}
|
||||
|
||||
|
||||
/// Define how to get a pixel from your custom pixel storage.
/// Can be a closure of type [`Sync + Fn(Vec2<usize>) -> YourPixel`].
pub trait GetPixel: Sync {

    /// The pixel tuple containing `f32`, `f16`, `u32` and `Sample` values.
    /// The length of the tuple must match the number of channels in the image.
    type Pixel;

    /// Inspect a single pixel at the requested position.
    /// Will be called exactly once for each pixel in the image.
    /// The position will not exceed the image dimensions.
    /// Might be called from multiple threads at the same time.
    fn get_pixel(&self, position: Vec2<usize>) -> Self::Pixel;
}
|
||||
|
||||
// Allow plain closures to act as pixel storage: the closure is simply invoked per position.
impl<F, P> GetPixel for F where F: Sync + Fn(Vec2<usize>) -> P {
    type Pixel = P;
    fn get_pixel(&self, position: Vec2<usize>) -> P { self(position) }
}
|
||||
|
||||
impl<'samples, Samples> WritableChannels<'samples> for AnyChannels<Samples>
|
||||
where Samples: 'samples + WritableSamples<'samples>
|
||||
{
|
||||
fn infer_channel_list(&self) -> ChannelList {
|
||||
ChannelList::new(self.list.iter().map(|channel| ChannelDescription {
|
||||
name: channel.name.clone(),
|
||||
sample_type: channel.sample_data.sample_type(),
|
||||
quantize_linearly: channel.quantize_linearly,
|
||||
sampling: channel.sampling
|
||||
}).collect())
|
||||
}
|
||||
|
||||
fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
|
||||
let mode = self.list.iter().next().expect("zero channels in list").sample_data.infer_level_modes();
|
||||
|
||||
debug_assert!(
|
||||
std::iter::repeat(mode).zip(self.list.iter().skip(1))
|
||||
.all(|(first, other)| other.sample_data.infer_level_modes() == first),
|
||||
|
||||
"level mode must be the same across all levels (do not nest resolution levels!)"
|
||||
);
|
||||
|
||||
mode
|
||||
}
|
||||
|
||||
type Writer = AnyChannelsWriter<Samples::Writer>;
|
||||
fn create_writer(&'samples self, header: &Header) -> Self::Writer {
|
||||
let channels = self.list.iter()
|
||||
.map(|chan| chan.sample_data.create_samples_writer(header))
|
||||
.collect();
|
||||
|
||||
AnyChannelsWriter { channels }
|
||||
}
|
||||
}
|
||||
|
||||
/// A temporary writer for an arbitrary list of channels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AnyChannelsWriter<SamplesWriter> {
    // one samples writer per channel, in the same order as the channel list
    channels: SmallVec<[SamplesWriter; 4]>
}
|
||||
|
||||
impl<Samples> ChannelsWriter for AnyChannelsWriter<Samples> where Samples: SamplesWriter {
    // Assemble the block line by line, delegating each line to the writer
    // of the channel that the line belongs to.
    fn extract_uncompressed_block(&self, header: &Header, block_index: BlockIndex) -> Vec<u8> {
        UncompressedBlock::collect_block_data_from_lines(&header.channels, block_index, |line_ref| {
            self.channels[line_ref.location.channel].extract_line(line_ref)
        })
    }
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
impl<'c, Channels, Storage>
|
||||
WritableChannels<'c> for SpecificChannels<Storage, Channels>
|
||||
where
|
||||
Storage: 'c + GetPixel,
|
||||
Storage::Pixel: IntoRecursive,
|
||||
Channels: 'c + Sync + Clone + IntoRecursive,
|
||||
<Channels as IntoRecursive>::Recursive: WritableChannelsDescription<<Storage::Pixel as IntoRecursive>::Recursive>,
|
||||
{
|
||||
fn infer_channel_list(&self) -> ChannelList {
|
||||
let mut vec = self.channels.clone().into_recursive().channel_descriptions_list();
|
||||
vec.sort_unstable_by_key(|channel:&ChannelDescription| channel.name.clone()); // TODO no clone?
|
||||
|
||||
debug_assert!(
|
||||
// check for equal neighbors in sorted vec
|
||||
vec.iter().zip(vec.iter().skip(1)).all(|(prev, next)| prev.name != next.name),
|
||||
"specific channels contain duplicate channel names"
|
||||
);
|
||||
|
||||
ChannelList::new(vec)
|
||||
}
|
||||
|
||||
fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
|
||||
(LevelMode::Singular, RoundingMode::Down) // TODO
|
||||
}
|
||||
|
||||
type Writer = SpecificChannelsWriter<
|
||||
'c,
|
||||
<<Channels as IntoRecursive>::Recursive as WritableChannelsDescription<<Storage::Pixel as IntoRecursive>::Recursive>>::RecursiveWriter,
|
||||
Storage,
|
||||
Channels
|
||||
>;
|
||||
|
||||
fn create_writer(&'c self, header: &Header) -> Self::Writer {
|
||||
SpecificChannelsWriter {
|
||||
channels: self,
|
||||
recursive_channel_writer: self.channels.clone().into_recursive().create_recursive_writer(&header.channels),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/// A temporary writer for a layer of channels, alpha being optional
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SpecificChannelsWriter<'channels, PixelWriter, Storage, Channels> {
    // borrowed storage and channel definitions of the image being written
    channels: &'channels SpecificChannels<Storage, Channels>, // TODO this need not be a reference?? impl writer for specific_channels directly?
    // one `SampleWriter` per channel, chained recursively
    recursive_channel_writer: PixelWriter,
}
|
||||
|
||||
|
||||
impl<'channels, PxWriter, Storage, Channels> ChannelsWriter
    for SpecificChannelsWriter<'channels, PxWriter, Storage, Channels>
    where
        Channels: Sync,
        Storage: GetPixel,
        Storage::Pixel: IntoRecursive,
        PxWriter: Sync + RecursivePixelWriter<<Storage::Pixel as IntoRecursive>::Recursive>,
{
    /// Serialize one block: fetch each pixel from the user storage,
    /// then let the recursive writer scatter its samples into the line bytes.
    fn extract_uncompressed_block(&self, header: &Header, block_index: BlockIndex) -> Vec<u8> {
        // allocate the exact byte size of the finished block up front
        let block_bytes = block_index.pixel_size.area() * header.channels.bytes_per_pixel;
        let mut block_bytes = vec![0_u8; block_bytes];

        let width = block_index.pixel_size.0;
        let line_bytes = width * header.channels.bytes_per_pixel;
        // each chunk is one scan line of the block
        let byte_lines = block_bytes.chunks_exact_mut(line_bytes);
        assert_eq!(byte_lines.len(), block_index.pixel_size.height(), "invalid block line splits");

        // buffer for one line of pixels, reused across lines to avoid reallocation
        let mut pixel_line = Vec::with_capacity(width);

        for (y, line_bytes) in byte_lines.enumerate() {
            pixel_line.clear();
            // positions are relative to the whole image, so offset by the block position
            pixel_line.extend((0 .. width).map(|x|
                self.channels.pixels.get_pixel(block_index.pixel_position + Vec2(x, y)).into_recursive()
            ));

            self.recursive_channel_writer.write_pixels(line_bytes, pixel_line.as_slice(), |px| px);
        }

        block_bytes
    }
}
|
||||
|
||||
/// A tuple containing either `ChannelsDescription` or `Option<ChannelsDescription>` entries.
/// Use an `Option` if you want to dynamically omit a single channel (probably only for roundtrip tests).
/// The number of entries must match the number of channels.
pub trait WritableChannelsDescription<Pixel>: Sync {

    /// A type that has a recursive entry for each channel in the image,
    /// which must accept the desired pixel type.
    type RecursiveWriter: RecursivePixelWriter<Pixel>;

    /// Create the temporary writer, accepting the sorted list of channels from `channel_descriptions_list`.
    fn create_recursive_writer(&self, channels: &ChannelList) -> Self::RecursiveWriter;

    /// Return all the channels that should actually end up in the image, in any order.
    fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]>;
}
|
||||
|
||||
// End of the recursion: no channel to describe, nothing to write.
impl WritableChannelsDescription<NoneMore> for NoneMore {
    type RecursiveWriter = NoneMore;
    fn create_recursive_writer(&self, _: &ChannelList) -> Self::RecursiveWriter { NoneMore }
    fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]> { SmallVec::new() }
}
|
||||
|
||||
impl<InnerDescriptions, InnerPixel, Sample: IntoNativeSample>
|
||||
WritableChannelsDescription<Recursive<InnerPixel, Sample>>
|
||||
for Recursive<InnerDescriptions, ChannelDescription>
|
||||
where InnerDescriptions: WritableChannelsDescription<InnerPixel>
|
||||
{
|
||||
type RecursiveWriter = RecursiveWriter<InnerDescriptions::RecursiveWriter, Sample>;
|
||||
|
||||
fn create_recursive_writer(&self, channels: &ChannelList) -> Self::RecursiveWriter {
|
||||
// this linear lookup is required because the order of the channels changed, due to alphabetical sorting
|
||||
let (start_byte_offset, target_sample_type) = channels.channels_with_byte_offset()
|
||||
.find(|(_offset, channel)| channel.name == self.value.name)
|
||||
.map(|(offset, channel)| (offset, channel.sample_type))
|
||||
.expect("a channel has not been put into channel list");
|
||||
|
||||
Recursive::new(self.inner.create_recursive_writer(channels), SampleWriter {
|
||||
start_byte_offset, target_sample_type,
|
||||
px: PhantomData::default()
|
||||
})
|
||||
}
|
||||
|
||||
fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]> {
|
||||
let mut inner_list = self.inner.channel_descriptions_list();
|
||||
inner_list.push(self.value.clone());
|
||||
inner_list
|
||||
}
|
||||
}
|
||||
|
||||
// An optional channel entry: when the description is `None`, the channel is
// entirely omitted from the file and no sample writer is created for it.
impl<InnerDescriptions, InnerPixel, Sample: IntoNativeSample>
    WritableChannelsDescription<Recursive<InnerPixel, Sample>>
    for Recursive<InnerDescriptions, Option<ChannelDescription>>
    where InnerDescriptions: WritableChannelsDescription<InnerPixel>
{
    type RecursiveWriter = OptionalRecursiveWriter<InnerDescriptions::RecursiveWriter, Sample>;

    fn create_recursive_writer(&self, channels: &ChannelList) -> Self::RecursiveWriter {
        // this linear lookup is required because the order of the channels changed, due to alphabetical sorting

        let channel = self.value.as_ref().map(|required_channel|
            channels.channels_with_byte_offset()
                .find(|(_offset, channel)| channel == &required_channel)
                .map(|(offset, channel)| (offset, channel.sample_type))
                .expect("a channel has not been put into channel list")
        );

        Recursive::new(
            self.inner.create_recursive_writer(channels),
            channel.map(|(start_byte_offset, target_sample_type)| SampleWriter {
                start_byte_offset, target_sample_type,
                px: PhantomData::default(),
            })
        )
    }

    fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]> {
        let mut inner_list = self.inner.channel_descriptions_list();
        // only include this channel in the file when the description is present
        if let Some(value) = &self.value { inner_list.push(value.clone()); }
        inner_list
    }
}
|
||||
|
||||
/// Write pixels to a slice of bytes. The top level writer contains all the other channels,
/// the most inner channel is `NoneMore`.
pub trait RecursivePixelWriter<Pixel>: Sync {

    /// Write pixels to a slice of bytes. Recursively do this for all channels.
    fn write_pixels<FullPixel>(&self, bytes: &mut [u8], pixels: &[FullPixel], get_pixel: impl Fn(&FullPixel) -> &Pixel);
}
|
||||
|
||||
// Writer chain for a mandatory channel, and for a channel that may be omitted.
type RecursiveWriter<Inner, Sample> = Recursive<Inner, SampleWriter<Sample>>;
type OptionalRecursiveWriter<Inner, Sample> = Recursive<Inner, Option<SampleWriter<Sample>>>;
|
||||
|
||||
/// Write the pixels of a single channel, unconditionally. Generic over the concrete sample type (f16, f32, u32).
#[derive(Debug, Clone)]
pub struct SampleWriter<Sample> {
    // sample type of this channel as stored in the file (may differ from `Sample`)
    target_sample_type: SampleType,
    // position of this channel within the sorted channel list, in samples
    start_byte_offset: usize,
    // zero-sized marker tying the writer to the in-memory sample type
    px: PhantomData<Sample>,
}
|
||||
|
||||
impl<Sample> SampleWriter<Sample> where Sample: IntoNativeSample {
    /// Serialize all samples of this one channel into its region of the line bytes,
    /// converting each sample to the channel's target type (f16/f32/u32).
    fn write_own_samples(&self, bytes: &mut [u8], samples: impl ExactSizeIterator<Item=Sample>) {
        // channels are stored per line, so this channel's region starts at
        // (samples per line) * (byte offset of this channel within one pixel's channels)
        let byte_start_index = samples.len() * self.start_byte_offset;
        let byte_count = samples.len() * self.target_sample_type.bytes_per_sample();
        // a `&mut &mut [u8]` cursor: each `write` call advances the slice
        let ref mut byte_writer = &mut bytes[byte_start_index..byte_start_index + byte_count];

        let write_error_msg = "invalid memory buffer length when writing";

        // match outside the loop to avoid matching on every single sample
        match self.target_sample_type {
            // TODO does this boil down to a `memcpy` where the sample type equals the type parameter?
            SampleType::F16 => for sample in samples { sample.to_f16().write(byte_writer).expect(write_error_msg); },
            SampleType::F32 => for sample in samples { sample.to_f32().write(byte_writer).expect(write_error_msg); },
            SampleType::U32 => for sample in samples { sample.to_u32().write(byte_writer).expect(write_error_msg); },
        };

        debug_assert!(byte_writer.is_empty(), "all samples are written, but more were expected");
    }
}
|
||||
|
||||
// End of the recursion: no channel left, nothing to write.
impl RecursivePixelWriter<NoneMore> for NoneMore {
    fn write_pixels<FullPixel>(&self, _: &mut [u8], _: &[FullPixel], _: impl Fn(&FullPixel) -> &NoneMore) {}
}
|
||||
|
||||
// A mandatory channel: write this channel's samples, then recurse into the inner channels.
impl<Inner, InnerPixel, Sample: IntoNativeSample>
    RecursivePixelWriter<Recursive<InnerPixel, Sample>>
    for RecursiveWriter<Inner, Sample>
    where Inner: RecursivePixelWriter<InnerPixel>
{
    // TODO impl exact size iterator <item = Self::Pixel>
    fn write_pixels<FullPixel>(&self, bytes: &mut [u8], pixels: &[FullPixel], get_pixel: impl Fn(&FullPixel) -> &Recursive<InnerPixel, Sample>){
        self.value.write_own_samples(bytes, pixels.iter().map(|px| get_pixel(px).value));
        self.inner.write_pixels(bytes, pixels, |px| &get_pixel(px).inner);
    }
}
|
||||
|
||||
// An optional channel: only write samples if the channel was actually put into the file.
impl<Inner, InnerPixel, Sample> RecursivePixelWriter<Recursive<InnerPixel, Sample>>
    for OptionalRecursiveWriter<Inner, Sample>
    where Inner: RecursivePixelWriter<InnerPixel>,
          Sample: IntoNativeSample
{
    fn write_pixels<FullPixel>(&self, bytes: &mut [u8], pixels: &[FullPixel], get_pixel: impl Fn(&FullPixel) -> &Recursive<InnerPixel, Sample>) {
        // a missing writer means this channel was omitted; its samples are simply skipped
        if let Some(writer) = &self.value {
            writer.write_own_samples(bytes, pixels.iter().map(|px| get_pixel(px).value));
        }

        self.inner.write_pixels(bytes, pixels, |px| &get_pixel(px).inner);
    }
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
pub mod test {
    use crate::image::write::channels::WritableChannels;
    use crate::image::SpecificChannels;
    use crate::prelude::{f16};
    use crate::meta::attribute::{ChannelDescription, SampleType};
    use crate::image::pixel_vec::PixelVec;

    // Purely a compile-time test: checks that the various ways of constructing
    // `SpecificChannels` all satisfy the `WritableChannels` bound.
    #[test]
    fn compiles(){
        let x = 3_f32;
        let y = f16::from_f32(4.0);
        let z = 2_u32;
        let s = 1.3_f32;
        let px = (x,y,z,s);

        // rgba from a closure
        assert_is_writable_channels(
            SpecificChannels::rgba(|_pos| px)
        );

        // rgba from a flat pixel vector
        assert_is_writable_channels(SpecificChannels::rgba(
            PixelVec::new((3, 2), vec![px, px, px, px, px, px])
        ));

        // arbitrary named channels via the builder
        let px = (2333_u32, 4_f32);
        assert_is_writable_channels(
            SpecificChannels::build()
                .with_channel("A")
                .with_channel("C")
                .with_pixels(PixelVec::new((3, 2), vec![px, px, px, px, px, px]))
        );

        // explicit channel descriptions, including optional channels
        let px = (3_f32, f16::ONE, 2333_u32, 4_f32);
        assert_is_writable_channels(SpecificChannels::new(
            (
                ChannelDescription::named("x", SampleType::F32),
                ChannelDescription::named("y", SampleType::F16),
                Some(ChannelDescription::named("z", SampleType::U32)),
                Some(ChannelDescription::named("p", SampleType::F32)),
            ),

            PixelVec::new((3, 2), vec![px, px, px, px, px, px])
        ));

        fn assert_is_writable_channels<'s>(_channels: impl WritableChannels<'s>){}
    }
}
|
||||
|
||||
|
||||
|
||||
|
||||
188
vendor/exr/src/image/write/layers.rs
vendored
Normal file
188
vendor/exr/src/image/write/layers.rs
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
//! How to write either a single or a list of layers.
|
||||
|
||||
use crate::meta::header::{ImageAttributes, Header};
|
||||
use crate::meta::{Headers, compute_chunk_count};
|
||||
use crate::block::BlockIndex;
|
||||
use crate::image::{Layers, Layer};
|
||||
use crate::meta::attribute::{TileDescription};
|
||||
use crate::prelude::{SmallVec};
|
||||
use crate::image::write::channels::{WritableChannels, ChannelsWriter};
|
||||
use crate::image::recursive::{Recursive, NoneMore};
|
||||
|
||||
/// Enables an image containing this list of layers to be written to a file.
pub trait WritableLayers<'slf> {

    /// Generate the file meta data for this list of layers, one header per layer.
    fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers;

    /// The type of temporary writer.
    type Writer: LayersWriter;

    /// Create a temporary writer for this list of layers.
    fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer;
}
|
||||
|
||||
/// A temporary writer for a list of channels
// `Sync` because blocks may be extracted from multiple threads in parallel.
pub trait LayersWriter: Sync {

    /// Deliver a block of pixels from a single layer to be stored in the file
    fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8>;
}
|
||||
|
||||
/// A temporary writer for an arbitrary list of layers
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AllLayersWriter<ChannelsWriter> {
    // one writer per layer, indexed by the layer index of each block
    layers: SmallVec<[LayerWriter<ChannelsWriter>; 2]>
}
|
||||
|
||||
/// A temporary writer for a single layer
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LayerWriter<ChannelsWriter> {
    channels: ChannelsWriter, // impl ChannelsWriter
}
|
||||
|
||||
// impl for smallvec: delegates to the slice helpers below.
impl<'slf, Channels: 'slf> WritableLayers<'slf> for Layers<Channels> where Channels: WritableChannels<'slf> {
    fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers {
        slice_infer_headers(self.as_slice(), image_attributes)
    }

    type Writer = AllLayersWriter<Channels::Writer>;
    fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer {
        slice_create_writer(self.as_slice(), headers)
    }
}
|
||||
|
||||
// Infer one header per layer in the slice. Each single layer infers exactly one
// header; `remove(0)` unwraps it from its one-element collection.
fn slice_infer_headers<'slf, Channels:'slf + WritableChannels<'slf>>(
    slice: &[Layer<Channels>], image_attributes: &ImageAttributes
) -> Headers
{
    slice.iter().map(|layer| layer.infer_headers(image_attributes).remove(0)).collect() // TODO no array-vs-first
}
|
||||
|
||||
// Pair each layer with its previously inferred header and create one writer per layer.
// `chunks_exact(1)` hands each layer its own one-element header slice.
fn slice_create_writer<'slf, Channels:'slf + WritableChannels<'slf>>(
    slice: &'slf [Layer<Channels>], headers: &[Header]
) -> AllLayersWriter<Channels::Writer>
{
    AllLayersWriter {
        layers: slice.iter().zip(headers.chunks_exact(1)) // TODO no array-vs-first
            .map(|(layer, header)| layer.create_writer(header))
            .collect()
    }
}
|
||||
|
||||
|
||||
impl<'slf, Channels: WritableChannels<'slf>> WritableLayers<'slf> for Layer<Channels> {
    /// Build the single file header describing this layer (returned as a one-element list).
    fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers {
        // translate the user-facing block choice into the file-level block description
        let blocks = match self.encoding.blocks {
            crate::image::Blocks::ScanLines => crate::meta::BlockDescription::ScanLines,
            crate::image::Blocks::Tiles(tile_size) => {
                let (level_mode, rounding_mode) = self.channel_data.infer_level_modes();
                crate::meta::BlockDescription::Tiles(TileDescription { level_mode, rounding_mode, tile_size, })
            },
        };

        let chunk_count = compute_chunk_count(
            self.encoding.compression, self.size, blocks
        );

        let header = Header {
            channels: self.channel_data.infer_channel_list(),
            compression: self.encoding.compression,

            blocks,
            chunk_count,

            line_order: self.encoding.line_order,
            layer_size: self.size,
            shared_attributes: image_attributes.clone(),
            own_attributes: self.attributes.clone(),

            deep: false, // TODO deep data
            deep_data_version: None,
            max_samples_per_pixel: None,
        };

        smallvec![ header ]// TODO no array-vs-first
    }

    type Writer = LayerWriter</*'l,*/ Channels::Writer>;
    /// Create the writer from the first (and only) header inferred for this layer.
    fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer {
        let channels = self.channel_data
            .create_writer(headers.first().expect("inferred header error")); // TODO no array-vs-first

        LayerWriter { channels }
    }
}
|
||||
|
||||
impl<C> LayersWriter for AllLayersWriter<C> where C: ChannelsWriter {
    // Route the block to the writer of the layer it belongs to,
    // handing it only that layer's header.
    fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8> {
        self.layers[block.layer].extract_uncompressed_block(std::slice::from_ref(&headers[block.layer]), block) // TODO no array-vs-first
    }
}
|
||||
|
||||
impl<C> LayersWriter for LayerWriter<C> where C: ChannelsWriter {
|
||||
fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8> {
|
||||
self.channels.extract_uncompressed_block(headers.first().expect("invalid inferred header"), block) // TODO no array-vs-first
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// the empty recursion terminator: contributes no headers and no writer
impl<'slf> WritableLayers<'slf> for NoneMore {
    /// An empty list of layers produces no headers.
    fn infer_headers(&self, _: &ImageAttributes) -> Headers { SmallVec::new() }

    type Writer = NoneMore;

    /// Nothing to write for the recursion terminator.
    fn create_writer(&'slf self, _: &[Header]) -> Self::Writer { NoneMore }
}
|
||||
|
||||
impl<'slf, InnerLayers, Channels> WritableLayers<'slf> for Recursive<InnerLayers, Layer<Channels>>
|
||||
where InnerLayers: WritableLayers<'slf>, Channels: WritableChannels<'slf>
|
||||
{
|
||||
fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers {
|
||||
let mut headers = self.inner.infer_headers(image_attributes);
|
||||
headers.push(self.value.infer_headers(image_attributes).remove(0)); // TODO no unwrap
|
||||
headers
|
||||
}
|
||||
|
||||
type Writer = RecursiveLayersWriter<InnerLayers::Writer, Channels::Writer>;
|
||||
|
||||
fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer {
|
||||
let (own_header, inner_headers) = headers.split_last()
|
||||
.expect("header has not been inferred correctly");
|
||||
|
||||
let layer_index = inner_headers.len();
|
||||
RecursiveLayersWriter {
|
||||
inner: self.inner.create_writer(inner_headers),
|
||||
value: (layer_index, self.value.create_writer(std::slice::from_ref(own_header))) // TODO no slice
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Recursive writer: the inner layers' writer, plus this layer's index and writer.
type RecursiveLayersWriter<InnerLayersWriter, ChannelsWriter> = Recursive<InnerLayersWriter, (usize, LayerWriter<ChannelsWriter>)>;
|
||||
|
||||
impl LayersWriter for NoneMore {
    /// Reaching the recursion terminator means the block index
    /// matched no layer, which is a bug inside this crate.
    fn extract_uncompressed_block(&self, _: &[Header], _: BlockIndex) -> Vec<u8> {
        panic!("recursive length mismatch bug");
    }
}
|
||||
|
||||
impl<InnerLayersWriter, Channels> LayersWriter for RecursiveLayersWriter<InnerLayersWriter, Channels>
|
||||
where InnerLayersWriter: LayersWriter, Channels: ChannelsWriter
|
||||
{
|
||||
fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8> {
|
||||
let (layer_index, layer) = &self.value;
|
||||
if *layer_index == block.layer {
|
||||
let header = headers.get(*layer_index).expect("layer index bug");
|
||||
layer.extract_uncompressed_block(std::slice::from_ref(header), block) // TODO no slice?
|
||||
}
|
||||
else {
|
||||
self.inner.extract_uncompressed_block(headers, block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
184
vendor/exr/src/image/write/mod.rs
vendored
Normal file
184
vendor/exr/src/image/write/mod.rs
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
|
||||
//! Write an exr image to a file.
|
||||
//!
|
||||
//! First, call `my_image.write()`. The resulting value can be customized, like this:
|
||||
//! ```no_run
|
||||
//! use exr::prelude::*;
|
||||
//! # let my_image: FlatImage = unimplemented!();
|
||||
//!
|
||||
//! my_image.write()
|
||||
//! .on_progress(|progress| println!("progress: {:.1}", progress*100.0))
|
||||
//! .to_file("image.exr").unwrap();
|
||||
//! ```
|
||||
//!
|
||||
|
||||
pub mod layers;
|
||||
pub mod samples;
|
||||
pub mod channels;
|
||||
|
||||
|
||||
|
||||
use crate::meta::Headers;
|
||||
use crate::error::UnitResult;
|
||||
use std::io::{Seek, BufWriter};
|
||||
use crate::io::Write;
|
||||
use crate::image::{Image, ignore_progress, SpecificChannels, IntoSample};
|
||||
use crate::image::write::layers::{WritableLayers, LayersWriter};
|
||||
use crate::math::Vec2;
|
||||
use crate::block::writer::ChunksWriter;
|
||||
|
||||
/// An oversimplified function for "just write the damn file already" use cases.
|
||||
/// Have a look at the examples to see how you can write an image with more flexibility (it's not that hard).
|
||||
/// Use `write_rgb_file` if you do not need an alpha channel.
|
||||
///
|
||||
/// Each of `R`, `G`, `B` and `A` can be either `f16`, `f32`, `u32`, or `Sample`.
|
||||
// TODO explain pixel tuple f32,f16,u32
|
||||
pub fn write_rgba_file<R,G,B,A>(
|
||||
path: impl AsRef<std::path::Path>, width: usize, height: usize,
|
||||
colors: impl Sync + Fn(usize, usize) -> (R, G, B, A)
|
||||
) -> UnitResult
|
||||
where R: IntoSample, G: IntoSample, B: IntoSample, A: IntoSample,
|
||||
{
|
||||
let channels = SpecificChannels::rgba(|Vec2(x,y)| colors(x,y));
|
||||
Image::from_channels((width, height), channels).write().to_file(path)
|
||||
}
|
||||
|
||||
/// An oversimplified function for "just write the damn file already" use cases.
|
||||
/// Have a look at the examples to see how you can write an image with more flexibility (it's not that hard).
|
||||
/// Use `write_rgb_file` if you do not need an alpha channel.
|
||||
///
|
||||
/// Each of `R`, `G`, and `B` can be either `f16`, `f32`, `u32`, or `Sample`.
|
||||
// TODO explain pixel tuple f32,f16,u32
|
||||
pub fn write_rgb_file<R,G,B>(
|
||||
path: impl AsRef<std::path::Path>, width: usize, height: usize,
|
||||
colors: impl Sync + Fn(usize, usize) -> (R, G, B)
|
||||
) -> UnitResult
|
||||
where R: IntoSample, G: IntoSample, B: IntoSample
|
||||
{
|
||||
let channels = SpecificChannels::rgb(|Vec2(x,y)| colors(x,y));
|
||||
Image::from_channels((width, height), channels).write().to_file(path)
|
||||
}
|
||||
|
||||
|
||||
|
||||
/// Enables an image to be written to a file. Call `image.write()` where this trait is implemented.
pub trait WritableImage<'img, WritableLayers>: Sized {

    /// Create a temporary writer which can be configured and used to write the image to a file.
    /// The default progress callback (`fn(f64)`) ignores all progress updates.
    fn write(self) -> WriteImageWithOptions<'img, WritableLayers, fn(f64)>;
}
|
||||
|
||||
impl<'img, WritableLayers> WritableImage<'img, WritableLayers> for &'img Image<WritableLayers> {
|
||||
fn write(self) -> WriteImageWithOptions<'img, WritableLayers, fn(f64)> {
|
||||
WriteImageWithOptions {
|
||||
image: self,
|
||||
check_compatibility: true,
|
||||
parallel: true,
|
||||
on_progress: ignore_progress
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A temporary writer which can be configured and used to write an image to a file.
// temporary writer with options
#[derive(Debug, Clone, PartialEq)]
pub struct WriteImageWithOptions<'img, Layers, OnProgress> {
    image: &'img Image<Layers>,     // the image to write (borrowed, not consumed)
    on_progress: OnProgress,        // callback receiving the writing progress
    check_compatibility: bool,      // whether to validate the meta data before writing
    parallel: bool,                 // whether to compress pixel blocks on multiple threads
}
|
||||
|
||||
|
||||
impl<'img, L, F> WriteImageWithOptions<'img, L, F>
    where L: WritableLayers<'img>, F: FnMut(f64)
{
    /// Generate file meta data for this image. The meta data structure is close to the data in the file.
    pub fn infer_meta_data(&self) -> Headers { // TODO this should perform all validity checks? and none after that?
        self.image.layer_data.infer_headers(&self.image.attributes)
    }

    /// Do not compress multiple pixel blocks on multiple threads at once.
    /// Might use less memory and synchronization, but will be slower in most situations.
    pub fn non_parallel(self) -> Self { Self { parallel: false, ..self } }

    /// Skip some checks that ensure a file can be opened by other exr software.
    /// For example, it is no longer checked that no two headers or two attributes have the same name,
    /// which might be an expensive check for images with an exorbitant number of headers.
    ///
    /// If you write an uncompressed file and need maximum speed, it might save a millisecond to disable the checks,
    /// if you know that your file is not invalid any ways. I do not recommend this though,
    /// as the file might not be readably by any other exr library after that.
    /// __You must care for not producing an invalid file yourself.__
    pub fn skip_compatibility_checks(self) -> Self { Self { check_compatibility: false, ..self } }

    /// Specify a function to be called regularly throughout the writing process.
    /// Replaces all previously specified progress functions in this reader.
    /// Note: this changes the writer's progress-callback type parameter.
    pub fn on_progress<OnProgress>(self, on_progress: OnProgress) -> WriteImageWithOptions<'img, L, OnProgress>
        where OnProgress: FnMut(f64)
    {
        WriteImageWithOptions {
            on_progress,
            image: self.image,
            check_compatibility: self.check_compatibility,
            parallel: self.parallel
        }
    }

    /// Write the exr image to a file.
    /// Use `to_unbuffered` instead, if you do not have a file.
    /// If an error occurs, attempts to delete the partially written file.
    #[inline]
    #[must_use]
    pub fn to_file(self, path: impl AsRef<std::path::Path>) -> UnitResult {
        // the file is created lazily, and deleted again if writing fails
        crate::io::attempt_delete_file_on_write_error(path.as_ref(), move |write|
            self.to_unbuffered(write)
        )
    }

    /// Buffer the writer and then write the exr image to it.
    /// Use `to_buffered` instead, if your writer is an in-memory buffer.
    /// Use `to_file` instead, if you have a file path.
    /// If your writer cannot seek, you can write to an in-memory vector of bytes first, using `to_buffered`.
    #[inline]
    #[must_use]
    pub fn to_unbuffered(self, unbuffered: impl Write + Seek) -> UnitResult {
        self.to_buffered(BufWriter::new(unbuffered))
    }

    /// Write the exr image to a writer.
    /// Use `to_file` instead, if you have a file path.
    /// Use `to_unbuffered` instead, if this is not an in-memory writer.
    /// If your writer cannot seek, you can write to an in-memory vector of bytes first.
    #[must_use]
    pub fn to_buffered(self, write: impl Write + Seek) -> UnitResult {
        // infer the headers first, then derive the per-layer writers from them
        let headers = self.infer_meta_data();
        let layers = self.image.layer_data.create_writer(&headers);

        crate::block::write(
            write, headers, self.check_compatibility,
            move |meta, chunk_writer|{

                // extract every uncompressed block in file order
                let blocks = meta.collect_ordered_block_data(|block_index|
                    layers.extract_uncompressed_block(&meta.headers, block_index)
                );

                // compression is the expensive part, so it may run on multiple threads
                let chunk_writer = chunk_writer.on_progress(self.on_progress);
                if self.parallel { chunk_writer.compress_all_blocks_parallel(&meta, blocks)?; }
                else { chunk_writer.compress_all_blocks_sequential(&meta, blocks)?; }
                /*let blocks_writer = chunk_writer.as_blocks_writer(&meta);

                // TODO propagate send requirement further upwards
                if self.parallel {
                    blocks_writer.compress_all_blocks_parallel(blocks)?;
                }
                else {
                    blocks_writer.compress_all_blocks_sequential(blocks)?;
                }*/

                Ok(())
            }
        )
    }
}
|
||||
|
||||
205
vendor/exr/src/image/write/samples.rs
vendored
Normal file
205
vendor/exr/src/image/write/samples.rs
vendored
Normal file
@@ -0,0 +1,205 @@
|
||||
//! How to write samples (a grid of `f32`, `f16` or `u32` values).
|
||||
|
||||
use crate::meta::attribute::{LevelMode, SampleType, TileDescription};
|
||||
use crate::meta::header::Header;
|
||||
use crate::block::lines::LineRefMut;
|
||||
use crate::image::{FlatSamples, Levels, RipMaps};
|
||||
use crate::math::{Vec2, RoundingMode};
|
||||
use crate::meta::{rip_map_levels, mip_map_levels, rip_map_indices, mip_map_indices, BlockDescription};
|
||||
|
||||
/// Enable an image with this sample grid to be written to a file.
/// Also can contain multiple resolution levels.
/// Usually contained within `Channels`.
pub trait WritableSamples<'slf> {
    // fn is_deep(&self) -> bool;

    /// Generate the file meta data regarding the number type of this storage
    fn sample_type(&self) -> SampleType;

    /// Generate the file meta data regarding resolution levels
    fn infer_level_modes(&self) -> (LevelMode, RoundingMode);

    /// The type of the temporary writer for this sample storage
    type Writer: SamplesWriter;

    /// Create a temporary writer for this sample storage
    /// (the header carries layer size and block description)
    fn create_samples_writer(&'slf self, header: &Header) -> Self::Writer;
}
|
||||
|
||||
/// Enable an image with this single level sample grid to be written to a file.
/// Only contained within `Levels`.
pub trait WritableLevel<'slf> {

    /// Generate the file meta data regarding the number type of these samples
    fn sample_type(&self) -> SampleType;

    /// The type of the temporary writer for this single level of samples
    type Writer: SamplesWriter;

    /// Create a temporary writer for this single level of samples,
    /// sized for that specific resolution level (not the full layer)
    fn create_level_writer(&'slf self, size: Vec2<usize>) -> Self::Writer;
}
|
||||
|
||||
/// A temporary writer for one or more resolution levels containing samples
/// (`Sync` because blocks may be compressed on multiple threads)
pub trait SamplesWriter: Sync {

    /// Deliver a single short horizontal list of samples for a specific channel.
    fn extract_line(&self, line: LineRefMut<'_>);
}
|
||||
|
||||
/// A temporary writer for a predefined non-deep sample storage
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct FlatSamplesWriter<'samples> {
    resolution: Vec2<usize>, // respects resolution level
    samples: &'samples FlatSamples // the sample storage to copy lines from
}
|
||||
|
||||
|
||||
|
||||
// used if no layers are used and the flat samples are directly inside the channels
|
||||
impl<'samples> WritableSamples<'samples> for FlatSamples {
|
||||
fn sample_type(&self) -> SampleType {
|
||||
match self {
|
||||
FlatSamples::F16(_) => SampleType::F16,
|
||||
FlatSamples::F32(_) => SampleType::F32,
|
||||
FlatSamples::U32(_) => SampleType::U32,
|
||||
}
|
||||
}
|
||||
|
||||
fn infer_level_modes(&self) -> (LevelMode, RoundingMode) { (LevelMode::Singular, RoundingMode::Down) }
|
||||
|
||||
type Writer = FlatSamplesWriter<'samples>; //&'s FlatSamples;
|
||||
fn create_samples_writer(&'samples self, header: &Header) -> Self::Writer {
|
||||
FlatSamplesWriter {
|
||||
resolution: header.layer_size,
|
||||
samples: self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// used if layers are used and the flat samples are inside the levels
|
||||
impl<'samples> WritableLevel<'samples> for FlatSamples {
|
||||
fn sample_type(&self) -> SampleType {
|
||||
match self {
|
||||
FlatSamples::F16(_) => SampleType::F16,
|
||||
FlatSamples::F32(_) => SampleType::F32,
|
||||
FlatSamples::U32(_) => SampleType::U32,
|
||||
}
|
||||
}
|
||||
|
||||
type Writer = FlatSamplesWriter<'samples>;
|
||||
fn create_level_writer(&'samples self, size: Vec2<usize>) -> Self::Writer {
|
||||
FlatSamplesWriter {
|
||||
resolution: size,
|
||||
samples: self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'samples> SamplesWriter for FlatSamplesWriter<'samples> {
|
||||
fn extract_line(&self, line: LineRefMut<'_>) {
|
||||
let image_width = self.resolution.width(); // header.layer_size.width();
|
||||
debug_assert_ne!(image_width, 0, "image width calculation bug");
|
||||
|
||||
let start_index = line.location.position.y() * image_width + line.location.position.x();
|
||||
let end_index = start_index + line.location.sample_count;
|
||||
|
||||
debug_assert!(
|
||||
start_index < end_index && end_index <= self.samples.len(),
|
||||
"for resolution {:?}, this is an invalid line: {:?}",
|
||||
self.resolution, line.location
|
||||
);
|
||||
|
||||
match self.samples {
|
||||
FlatSamples::F16(samples) => line.write_samples_from_slice(&samples[start_index .. end_index]),
|
||||
FlatSamples::F32(samples) => line.write_samples_from_slice(&samples[start_index .. end_index]),
|
||||
FlatSamples::U32(samples) => line.write_samples_from_slice(&samples[start_index .. end_index]),
|
||||
}.expect("writing line bytes failed");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<'samples, LevelSamples> WritableSamples<'samples> for Levels<LevelSamples>
    where LevelSamples: WritableLevel<'samples>
{
    /// The sample type of the first level; all levels must agree.
    fn sample_type(&self) -> SampleType {
        let sample_type = self.levels_as_slice().first().expect("no levels found").sample_type();

        debug_assert!(
            self.levels_as_slice().iter().skip(1).all(|ty| ty.sample_type() == sample_type),
            "sample types must be the same across all levels"
        );

        sample_type
    }

    /// Map the level storage variant to the corresponding file meta data.
    fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
        match self {
            Levels::Singular(_) => (LevelMode::Singular, RoundingMode::Down),
            Levels::Mip { rounding_mode, .. } => (LevelMode::MipMap, *rounding_mode),
            Levels::Rip { rounding_mode, .. } => (LevelMode::RipMap, *rounding_mode),
        }
    }

    type Writer = LevelsWriter<LevelSamples::Writer>;

    /// Create one level writer per resolution level.
    /// Each level's size is derived from the header; this requires tiled blocks,
    /// as scan line images cannot have multiple resolution levels.
    fn create_samples_writer(&'samples self, header: &Header) -> Self::Writer {
        // the rounding mode for computing level sizes only exists for tiled blocks
        let rounding = match header.blocks {
            BlockDescription::Tiles(TileDescription { rounding_mode, .. }) => Some(rounding_mode),
            BlockDescription::ScanLines => None,
        };

        LevelsWriter {
            levels: match self {
                Levels::Singular(level) => Levels::Singular(level.create_level_writer(header.layer_size)),
                Levels::Mip { level_data, rounding_mode } => {
                    // the stored level count must match what the meta data predicts
                    debug_assert_eq!(
                        level_data.len(),
                        mip_map_indices(rounding.expect("mip maps only with tiles"), header.layer_size).count(),
                        "invalid mip map count"
                    );

                    Levels::Mip { // TODO store level size in image??
                        rounding_mode: *rounding_mode,
                        // pair each stored level with its computed size
                        level_data: level_data.iter()
                            .zip(mip_map_levels(rounding.expect("mip maps only with tiles"), header.layer_size))
                            // .map(|level| level.create_samples_writer(header))
                            .map(|(level, (_level_index, level_size))| level.create_level_writer(level_size))
                            .collect()
                    }
                },
                Levels::Rip { level_data, rounding_mode } => {
                    debug_assert_eq!(level_data.map_data.len(), level_data.level_count.area(), "invalid rip level count");
                    debug_assert_eq!(
                        level_data.map_data.len(),
                        rip_map_indices(rounding.expect("rip maps only with tiles"), header.layer_size).count(),
                        "invalid rip map count"
                    );

                    Levels::Rip {
                        rounding_mode: *rounding_mode,
                        level_data: RipMaps {
                            level_count: level_data.level_count,
                            // pair each stored rip map with its computed size
                            map_data: level_data.map_data.iter()
                                .zip(rip_map_levels(rounding.expect("rip maps only with tiles"), header.layer_size))
                                .map(|(level, (_level_index, level_size))| level.create_level_writer(level_size))
                                .collect(),
                        }
                    }
                }
            }
        }
    }
}
|
||||
|
||||
/// A temporary writer for multiple resolution levels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LevelsWriter<SamplesWriter> {
    /// One writer per resolution level, in the same shape as the source levels.
    levels: Levels<SamplesWriter>,
}
|
||||
|
||||
impl<Samples> SamplesWriter for LevelsWriter<Samples> where Samples: SamplesWriter {
|
||||
fn extract_line(&self, line: LineRefMut<'_>) {
|
||||
self.levels.get_level(line.location.level).expect("invalid level index") // TODO compute level size from line index??
|
||||
.extract_line(line)
|
||||
}
|
||||
}
|
||||
447
vendor/exr/src/io.rs
vendored
Normal file
447
vendor/exr/src/io.rs
vendored
Normal file
@@ -0,0 +1,447 @@
|
||||
|
||||
//! Specialized binary input and output.
|
||||
//! Uses the error handling for this crate.
|
||||
|
||||
#![doc(hidden)]
|
||||
pub use ::std::io::{Read, Write};
|
||||
|
||||
use half::slice::{HalfFloatSliceExt};
|
||||
use lebe::prelude::*;
|
||||
use ::half::f16;
|
||||
use crate::error::{Error, Result, UnitResult, IoResult};
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::path::Path;
|
||||
use std::fs::File;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
|
||||
/// Skip reading uninteresting bytes without allocating.
|
||||
#[inline]
|
||||
pub fn skip_bytes(read: &mut impl Read, count: usize) -> IoResult<()> {
|
||||
let count = u64::try_from(count).unwrap();
|
||||
|
||||
let skipped = std::io::copy(
|
||||
&mut read.by_ref().take(count),
|
||||
&mut std::io::sink()
|
||||
)?;
|
||||
|
||||
// the reader may have ended before we skipped the desired number of bytes
|
||||
if skipped < count {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"cannot skip more bytes than exist"
|
||||
));
|
||||
}
|
||||
|
||||
debug_assert_eq!(skipped, count, "skip bytes bug");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// If an error occurs while writing, attempts to delete the partially written file.
|
||||
/// Creates a file just before the first write operation, not when this function is called.
|
||||
#[inline]
|
||||
pub fn attempt_delete_file_on_write_error<'p>(path: &'p Path, write: impl FnOnce(LateFile<'p>) -> UnitResult) -> UnitResult {
|
||||
match write(LateFile::from(path)) {
|
||||
Err(error) => { // FIXME deletes existing file if creation of new file fails?
|
||||
let _deleted = std::fs::remove_file(path); // ignore deletion errors
|
||||
Err(error)
|
||||
},
|
||||
|
||||
ok => ok,
|
||||
}
|
||||
}
|
||||
|
||||
/// A file that is only created on the first write or seek,
/// so that nothing is created if writing never happens.
#[derive(Debug)]
pub struct LateFile<'p> {
    path: &'p Path,     // where the file will be created
    file: Option<File>  // `None` until the first write or seek
}
|
||||
|
||||
impl<'p> From<&'p Path> for LateFile<'p> {
    // only remember the path; the file itself is created lazily
    fn from(path: &'p Path) -> Self { Self { path, file: None } }
}
|
||||
|
||||
impl<'p> LateFile<'p> {
|
||||
fn file(&mut self) -> std::io::Result<&mut File> {
|
||||
if self.file.is_none() { self.file = Some(File::create(self.path)?); }
|
||||
Ok(self.file.as_mut().unwrap()) // will not be reached if creation fails
|
||||
}
|
||||
}
|
||||
|
||||
impl<'p> std::io::Write for LateFile<'p> {
|
||||
fn write(&mut self, buffer: &[u8]) -> std::io::Result<usize> {
|
||||
self.file()?.write(buffer)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
if let Some(file) = &mut self.file { file.flush() }
|
||||
else { Ok(()) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'p> Seek for LateFile<'p> {
    // seeking also creates the file, if it does not exist yet
    fn seek(&mut self, position: SeekFrom) -> std::io::Result<u64> {
        self.file()?.seek(position)
    }
}
|
||||
|
||||
|
||||
/// Peek a single byte without consuming it.
#[derive(Debug)]
pub struct PeekRead<T> {

    /// Cannot be exposed as it will not contain peeked values anymore.
    inner: T,

    /// The byte (or read error) that was peeked but not yet consumed.
    peeked: Option<IoResult<u8>>,
}
|
||||
|
||||
impl<T: Read> PeekRead<T> {
|
||||
|
||||
/// Wrap a reader to make it peekable.
|
||||
#[inline]
|
||||
pub fn new(inner: T) -> Self {
|
||||
Self { inner, peeked: None }
|
||||
}
|
||||
|
||||
/// Read a single byte and return that without consuming it.
|
||||
/// The next `read` call will include that byte.
|
||||
#[inline]
|
||||
pub fn peek_u8(&mut self) -> &IoResult<u8> {
|
||||
self.peeked = self.peeked.take().or_else(|| Some(u8::read_from_little_endian(&mut self.inner)));
|
||||
self.peeked.as_ref().unwrap() // unwrap cannot fail because we just set it
|
||||
}
|
||||
|
||||
/// Skip a single byte if it equals the specified value.
|
||||
/// Returns whether the value was found.
|
||||
/// Consumes the peeked result if an error occurred.
|
||||
#[inline]
|
||||
pub fn skip_if_eq(&mut self, value: u8) -> IoResult<bool> {
|
||||
match self.peek_u8() {
|
||||
Ok(peeked) if *peeked == value => {
|
||||
self.peeked = None; // consume the byte
|
||||
Ok(true)
|
||||
},
|
||||
|
||||
Ok(_) => Ok(false),
|
||||
|
||||
// return the error otherwise.
|
||||
// unwrap is safe because this branch cannot be reached otherwise.
|
||||
// we need to take() from self because io errors cannot be cloned.
|
||||
Err(_) => Err(self.peeked.take().unwrap().err().unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<T: Read> Read for PeekRead<T> {
|
||||
fn read(&mut self, target_buffer: &mut [u8]) -> IoResult<usize> {
|
||||
if target_buffer.is_empty() {
|
||||
return Ok(0)
|
||||
}
|
||||
|
||||
match self.peeked.take() {
|
||||
None => self.inner.read(target_buffer),
|
||||
Some(peeked) => {
|
||||
target_buffer[0] = peeked?;
|
||||
|
||||
// indexing [1..] is safe because an empty buffer already returned ok
|
||||
Ok(1 + self.inner.read(&mut target_buffer[1..])?)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Read + Seek> PeekRead<Tracking<T>> {

    /// Seek this read to the specified byte position.
    /// Discards any previously peeked value.
    pub fn skip_to(&mut self, position: usize) -> std::io::Result<()> {
        self.inner.seek_read_to(position)?;
        // any peeked byte belongs to the old position and is now stale
        self.peeked = None;
        Ok(())
    }
}
|
||||
|
||||
impl<T: Read> PeekRead<Tracking<T>> {

    /// Current number of bytes read.
    // note: does not account for a pending peeked byte
    pub fn byte_position(&self) -> usize {
        self.inner.byte_position()
    }
}
|
||||
|
||||
/// Keep track of what byte we are at.
/// Used to skip back to a previous place after writing some information.
#[derive(Debug)]
pub struct Tracking<T> {

    /// Do not expose to prevent seeking without updating position
    inner: T,

    /// Number of bytes read or written so far.
    position: usize,
}
|
||||
|
||||
impl<T: Read> Read for Tracking<T> {
|
||||
fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
|
||||
let count = self.inner.read(buffer)?;
|
||||
self.position += count;
|
||||
Ok(count)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Write> Write for Tracking<T> {
|
||||
fn write(&mut self, buffer: &[u8]) -> std::io::Result<usize> {
|
||||
let count = self.inner.write(buffer)?;
|
||||
self.position += count;
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
self.inner.flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Tracking<T> {

    /// If `inner` is a reference, it must never be seeked directly,
    /// but only through this `Tracking` instance.
    pub fn new(inner: T) -> Self {
        Tracking { inner, position: 0 }
    }

    /// Current number of bytes written or read.
    pub fn byte_position(&self) -> usize {
        self.position
    }
}
|
||||
|
||||
impl<T: Read + Seek> Tracking<T> {
|
||||
|
||||
/// Set the reader to the specified byte position.
|
||||
/// If it is only a couple of bytes, no seek system call is performed.
|
||||
pub fn seek_read_to(&mut self, target_position: usize) -> std::io::Result<()> {
|
||||
let delta = target_position as i128 - self.position as i128; // FIXME panicked at 'attempt to subtract with overflow'
|
||||
debug_assert!(delta.abs() < usize::MAX as i128);
|
||||
|
||||
if delta > 0 && delta < 16 { // TODO profile that this is indeed faster than a syscall! (should be because of bufread buffer discard)
|
||||
skip_bytes(self, delta as usize)?;
|
||||
self.position += delta as usize;
|
||||
}
|
||||
else if delta != 0 {
|
||||
self.inner.seek(SeekFrom::Start(u64::try_from(target_position).unwrap()))?;
|
||||
self.position = target_position;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Write + Seek> Tracking<T> {

    /// Move the writing cursor to the specified target byte index.
    /// If seeking forward, this will write zeroes.
    pub fn seek_write_to(&mut self, target_position: usize) -> std::io::Result<()> {
        if target_position < self.position {
            // seeking backwards requires a real seek system call
            self.inner.seek(SeekFrom::Start(u64::try_from(target_position).unwrap()))?;
        }
        else if target_position > self.position {
            // pad the gap with zero bytes instead of seeking.
            // writing through `self` advances `position`, which is fine
            // because `position` is overwritten unconditionally below.
            std::io::copy(
                &mut std::io::repeat(0).take(u64::try_from(target_position - self.position).unwrap()),
                self
            )?;
        }

        self.position = target_position;
        Ok(())
    }
}
|
||||
|
||||
|
||||
/// Generic trait that defines common binary operations such as reading and writing for this type.
|
||||
pub trait Data: Sized + Default + Clone {
|
||||
|
||||
/// Number of bytes this would consume in an exr file.
|
||||
const BYTE_SIZE: usize = ::std::mem::size_of::<Self>();
|
||||
|
||||
/// Read a value of type `Self`.
|
||||
fn read(read: &mut impl Read) -> Result<Self>;
|
||||
|
||||
/// Read as many values of type `Self` as fit into the specified slice.
|
||||
/// If the slice cannot be filled completely, returns `Error::Invalid`.
|
||||
fn read_slice(read: &mut impl Read, slice: &mut[Self]) -> UnitResult;
|
||||
|
||||
/// Read as many values of type `Self` as specified with `data_size`.
|
||||
///
|
||||
/// This method will not allocate more memory than `soft_max` at once.
|
||||
/// If `hard_max` is specified, it will never read any more than that.
|
||||
/// Returns `Error::Invalid` if reader does not contain the desired number of elements.
|
||||
#[inline]
|
||||
fn read_vec(read: &mut impl Read, data_size: usize, soft_max: usize, hard_max: Option<usize>, purpose: &'static str) -> Result<Vec<Self>> {
|
||||
let mut vec = Vec::with_capacity(data_size.min(soft_max));
|
||||
Self::read_into_vec(read, &mut vec, data_size, soft_max, hard_max, purpose)?;
|
||||
Ok(vec)
|
||||
}
|
||||
|
||||
/// Write this value to the writer.
|
||||
fn write(self, write: &mut impl Write) -> UnitResult;
|
||||
|
||||
/// Write all values of that slice to the writer.
|
||||
fn write_slice(write: &mut impl Write, slice: &[Self]) -> UnitResult;
|
||||
|
||||
|
||||
/// Read as many values of type `Self` as specified with `data_size` into the provided vector.
|
||||
///
|
||||
/// This method will not allocate more memory than `soft_max` at once.
|
||||
/// If `hard_max` is specified, it will never read any more than that.
|
||||
/// Returns `Error::Invalid` if reader does not contain the desired number of elements.
|
||||
#[inline]
|
||||
fn read_into_vec(read: &mut impl Read, data: &mut Vec<Self>, data_size: usize, soft_max: usize, hard_max: Option<usize>, purpose: &'static str) -> UnitResult {
|
||||
if let Some(max) = hard_max {
|
||||
if data_size > max {
|
||||
return Err(Error::invalid(purpose))
|
||||
}
|
||||
}
|
||||
|
||||
let soft_max = hard_max.unwrap_or(soft_max).min(soft_max);
|
||||
let end = data.len() + data_size;
|
||||
|
||||
// do not allocate more than $chunks memory at once
|
||||
// (most of the time, this loop will run only once)
|
||||
while data.len() < end {
|
||||
let chunk_start = data.len();
|
||||
let chunk_end = (chunk_start + soft_max).min(data_size);
|
||||
|
||||
data.resize(chunk_end, Self::default());
|
||||
Self::read_slice(read, &mut data[chunk_start .. chunk_end])?; // safe because of `min(data_size)``
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write the length of the slice and then its contents.
|
||||
#[inline]
|
||||
fn write_i32_sized_slice<W: Write>(write: &mut W, slice: &[Self]) -> UnitResult {
|
||||
i32::try_from(slice.len())?.write(write)?;
|
||||
Self::write_slice(write, slice)
|
||||
}
|
||||
|
||||
/// Read the desired element count and then read that many items into a vector.
|
||||
///
|
||||
/// This method will not allocate more memory than `soft_max` at once.
|
||||
/// If `hard_max` is specified, it will never read any more than that.
|
||||
/// Returns `Error::Invalid` if reader does not contain the desired number of elements.
|
||||
#[inline]
|
||||
fn read_i32_sized_vec(read: &mut impl Read, soft_max: usize, hard_max: Option<usize>, purpose: &'static str) -> Result<Vec<Self>> {
|
||||
let size = usize::try_from(i32::read(read)?)?;
|
||||
Self::read_vec(read, size, soft_max, hard_max, purpose)
|
||||
}
|
||||
|
||||
/// Fill the slice with this value.
|
||||
#[inline]
|
||||
fn fill_slice(self, slice: &mut [Self]) where Self: Copy {
|
||||
// hopefully compiles down to a single memset call
|
||||
for value in slice {
|
||||
*value = self;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// generates a `Data` impl that reads and writes the primitive
// through its little-endian byte representation (the exr file byte order)
macro_rules! implement_data_for_primitive {
    ($kind: ident) => {
        impl Data for $kind {
            #[inline]
            fn read(read: &mut impl Read) -> Result<Self> {
                Ok(read.read_from_little_endian()?)
            }

            #[inline]
            fn write(self, write: &mut impl Write) -> Result<()> {
                write.write_as_little_endian(&self)?;
                Ok(())
            }

            #[inline]
            fn read_slice(read: &mut impl Read, slice: &mut [Self]) -> Result<()> {
                read.read_from_little_endian_into(slice)?;
                Ok(())
            }

            #[inline]
            fn write_slice(write: &mut impl Write, slice: &[Self]) -> Result<()> {
                write.write_as_little_endian(slice)?;
                Ok(())
            }
        }
    };
}
|
||||
|
||||
// implement `Data` for every fixed-size primitive that can appear in an exr
// file (attribute values, offset table entries, and pixel samples)
implement_data_for_primitive!(u8);
implement_data_for_primitive!(i8);
implement_data_for_primitive!(i16);
implement_data_for_primitive!(u16);
implement_data_for_primitive!(u32);
implement_data_for_primitive!(i32);
implement_data_for_primitive!(i64);
implement_data_for_primitive!(u64);
implement_data_for_primitive!(f32);
implement_data_for_primitive!(f64);
|
||||
|
||||
|
||||
/// `f16` is (de)serialized through its raw `u16` bit pattern.
/// The `reinterpret_cast`/`reinterpret_cast_mut` calls view a `[f16]` slice
/// as a `[u16]` slice so whole slices can be read/written in one call
/// (presumably a zero-copy view — see the trait they come from in this crate's imports).
impl Data for f16 {

    #[inline]
    fn read(read: &mut impl Read) -> Result<Self> {
        // read the raw bits, then reinterpret them as a half float
        u16::read(read).map(f16::from_bits)
    }

    #[inline]
    fn read_slice(read: &mut impl Read, slice: &mut [Self]) -> Result<()> {
        // view the f16 slice as its underlying u16 bits and fill it in one call
        let bits = slice.reinterpret_cast_mut();
        u16::read_slice(read, bits)
    }

    #[inline]
    fn write(self, write: &mut impl Write) -> Result<()> {
        // write the raw bit pattern
        self.to_bits().write(write)
    }

    #[inline]
    fn write_slice(write: &mut impl Write, slice: &[Self]) -> Result<()> {
        // view the f16 slice as its underlying u16 bits and write it in one call
        let bits = slice.reinterpret_cast();
        u16::write_slice(write, bits)
    }
}
|
||||
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::io::PeekRead;
    use std::io::Read;

    // `peek_u8` must return the upcoming byte without consuming it;
    // reading afterwards must still yield that same byte
    #[test]
    fn peek(){
        use lebe::prelude::*;
        let buffer: &[u8] = &[0,1,2,3];
        let mut peek = PeekRead::new(buffer);

        // peeking repeatedly must not advance the reader
        assert_eq!(peek.peek_u8().as_ref().unwrap(), &0);
        assert_eq!(peek.peek_u8().as_ref().unwrap(), &0);
        assert_eq!(peek.peek_u8().as_ref().unwrap(), &0);
        assert_eq!(u8::read_from_little_endian(&mut peek).unwrap(), 0_u8);

        // consume bytes 1 and 2 via a plain read
        assert_eq!(peek.read(&mut [0,0]).unwrap(), 2);

        assert_eq!(peek.peek_u8().as_ref().unwrap(), &3);
        assert_eq!(u8::read_from_little_endian(&mut peek).unwrap(), 3_u8);

        // peeking past the end must fail repeatedly, without panicking
        assert!(peek.peek_u8().is_err());
        assert!(peek.peek_u8().is_err());
        assert!(peek.peek_u8().is_err());
        assert!(peek.peek_u8().is_err());

        assert!(u8::read_from_little_endian(&mut peek).is_err());
    }
}
|
||||
|
||||
|
||||
100
vendor/exr/src/lib.rs
vendored
Normal file
100
vendor/exr/src/lib.rs
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
|
||||
|
||||
//! Read and write OpenEXR images.
|
||||
//! This library uses no foreign code or unsafe Rust.
|
||||
//!
|
||||
//! See the [README.md](https://github.com/johannesvollmer/exrs/blob/master/README.md) for crate information.
|
||||
//! Read __the [GUIDE.md](https://github.com/johannesvollmer/exrs/blob/master/GUIDE.md) for a API introduction__.
|
||||
//! Check out the [examples](https://github.com/johannesvollmer/exrs/tree/master/examples) for a first impression.
|
||||
|
||||
#![warn(
|
||||
rust_2018_idioms,
|
||||
future_incompatible,
|
||||
unused_extern_crates,
|
||||
unused,
|
||||
|
||||
missing_copy_implementations,
|
||||
missing_debug_implementations,
|
||||
|
||||
clippy::all,
|
||||
clippy::restriction,
|
||||
clippy::pedantic,
|
||||
clippy::nursery,
|
||||
clippy::cargo,
|
||||
)]
|
||||
|
||||
#![deny(
|
||||
unused_variables,
|
||||
unused_assignments,
|
||||
dead_code,
|
||||
unused_must_use,
|
||||
missing_copy_implementations,
|
||||
trivial_numeric_casts,
|
||||
redundant_semicolons
|
||||
)]
|
||||
|
||||
#![forbid(unsafe_code)]
|
||||
#![warn(missing_docs)]
|
||||
|
||||
pub mod io; // public to allow for custom attribute byte parsing
|
||||
|
||||
pub mod math;
|
||||
pub mod compression;
|
||||
pub mod meta;
|
||||
pub mod image;
|
||||
|
||||
pub mod error;
|
||||
pub mod block;
|
||||
|
||||
#[macro_use]
|
||||
extern crate smallvec;
|
||||
|
||||
/// Export the most important items from `exrs`.
|
||||
/// _Note: This includes a type called `Result`, possibly overwriting the default `std::Result` type usage._
|
||||
pub mod prelude {
|
||||
|
||||
/// Import this specifically if you want to be explicit but still use the extension traits.
|
||||
pub mod traits {
|
||||
pub use crate::image::write::{WritableImage, channels::GetPixel};
|
||||
pub use crate::image::read::{
|
||||
read, any_channels::ReadSamples, image::ReadLayers,
|
||||
image::ReadImage, layers::ReadChannels,
|
||||
specific_channels::{ReadSpecificChannel}
|
||||
};
|
||||
|
||||
pub use crate::image::crop::{Crop, CropWhere, CropResult, InspectSample, CroppedChannels, ApplyCroppedView};
|
||||
}
|
||||
|
||||
pub use traits::*;
|
||||
|
||||
pub use crate::image::write::{write_rgb_file, write_rgba_file};
|
||||
pub use crate::image::read::{
|
||||
read_first_rgba_layer_from_file,
|
||||
read_all_rgba_layers_from_file,
|
||||
read_all_data_from_file,
|
||||
read_all_flat_layers_from_file,
|
||||
read_first_flat_layer_from_file
|
||||
};
|
||||
|
||||
// image data structures
|
||||
pub use crate::image::*;
|
||||
pub use crate::meta::{ attribute, MetaData, header::{ LayerAttributes, ImageAttributes } };
|
||||
pub use crate::block::samples::Sample;
|
||||
pub use crate::meta::attribute::{
|
||||
AttributeValue, Compression, Text, IntegerBounds,
|
||||
LineOrder, SampleType, TileDescription, ChannelDescription
|
||||
};
|
||||
|
||||
// common math
|
||||
pub use crate::math::Vec2;
|
||||
|
||||
// error handling
|
||||
pub use crate::error::{ Result, Error };
|
||||
|
||||
// re-export external stuff
|
||||
pub use half::f16;
|
||||
pub use smallvec::SmallVec;
|
||||
}
|
||||
|
||||
|
||||
|
||||
213
vendor/exr/src/math.rs
vendored
Normal file
213
vendor/exr/src/math.rs
vendored
Normal file
@@ -0,0 +1,213 @@
|
||||
|
||||
// calculations inspired by
|
||||
// https://github.com/AcademySoftwareFoundation/openexr/blob/master/OpenEXR/IlmImf/ImfTiledMisc.cpp
|
||||
|
||||
//! Simple math utilities.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use crate::error::{i32_to_usize};
|
||||
use crate::error::Result;
|
||||
use std::ops::{Add, Sub, Div, Mul};
|
||||
use std::fmt::Debug;
|
||||
|
||||
/// Simple two-dimensional vector of any numerical type.
/// Supports only few mathematical operations
/// as this is used mainly as data struct.
///
/// The first field is the `x` component (or width),
/// the second is the `y` component (or height) — see the accessor methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Default)]
pub struct Vec2<T> (pub T, pub T);
|
||||
|
||||
impl<T> Vec2<T> {
|
||||
|
||||
/// Returns the vector with the maximum of either coordinates.
|
||||
pub fn max(self, other: Self) -> Self where T: Ord {
|
||||
Vec2(self.0.max(other.0), self.1.max(other.1))
|
||||
}
|
||||
|
||||
/// Returns the vector with the minimum of either coordinates.
|
||||
pub fn min(self, other: Self) -> Self where T: Ord {
|
||||
Vec2(self.0.min(other.0), self.1.min(other.1))
|
||||
}
|
||||
|
||||
/// Try to convert all components of this vector to a new type,
|
||||
/// yielding either a vector of that new type, or an error.
|
||||
pub fn try_from<S>(value: Vec2<S>) -> std::result::Result<Self, T::Error> where T: TryFrom<S> {
|
||||
let x = T::try_from(value.0)?;
|
||||
let y = T::try_from(value.1)?;
|
||||
Ok(Vec2(x, y))
|
||||
}
|
||||
|
||||
|
||||
|
||||
/// Seeing this vector as a dimension or size (width and height),
|
||||
/// this returns the area that this dimensions contains (`width * height`).
|
||||
#[inline] pub fn area(self) -> T where T: std::ops::Mul<T, Output = T> {
|
||||
self.0 * self.1
|
||||
}
|
||||
|
||||
/// The first component of this 2D vector.
|
||||
#[inline] pub fn x(self) -> T { self.0 }
|
||||
|
||||
/// The second component of this 2D vector.
|
||||
#[inline] pub fn y(self) -> T { self.1 }
|
||||
|
||||
/// The first component of this 2D vector.
|
||||
#[inline] pub fn width(self) -> T { self.0 }
|
||||
|
||||
/// The second component of this 2D vector.
|
||||
#[inline] pub fn height(self) -> T { self.1 }
|
||||
|
||||
// TODO use this!
|
||||
/// Convert this two-dimensional coordinate to an index suited for one-dimensional flattened image arrays.
|
||||
/// Works for images that store the pixels row by row, one after another, in a single array.
|
||||
/// In debug mode, panics for an index out of bounds.
|
||||
#[inline] pub fn flat_index_for_size(self, resolution: Vec2<T>) -> T
|
||||
where T: Copy + Debug + Ord + Mul<Output=T> + Add<Output=T>
|
||||
{
|
||||
debug_assert!(
|
||||
self.x() < resolution.width() && self.y() < resolution.height(),
|
||||
"Vec2 index {:?} is invalid for resolution {:?}", self, resolution
|
||||
);
|
||||
|
||||
let Vec2(x, y) = self;
|
||||
y * resolution.width() + x
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
impl Vec2<i32> {
|
||||
|
||||
/// Try to convert to [`Vec2<usize>`], returning an error on negative numbers.
|
||||
pub fn to_usize(self, error_message: &'static str) -> Result<Vec2<usize>> {
|
||||
let x = i32_to_usize(self.0, error_message)?;
|
||||
let y = i32_to_usize(self.1, error_message)?;
|
||||
Ok(Vec2(x, y))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
impl Vec2<usize> {
|
||||
|
||||
/// Panics for too large values
|
||||
pub fn to_i32(self) -> Vec2<i32> {
|
||||
let x = i32::try_from(self.0).expect("vector x coordinate too large");
|
||||
let y = i32::try_from(self.1).expect("vector y coordinate too large");
|
||||
Vec2(x, y)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
impl<T: std::ops::Add<T>> std::ops::Add<Vec2<T>> for Vec2<T> {
|
||||
type Output = Vec2<T::Output>;
|
||||
fn add(self, other: Vec2<T>) -> Self::Output {
|
||||
Vec2(self.0 + other.0, self.1 + other.1)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: std::ops::Sub<T>> std::ops::Sub<Vec2<T>> for Vec2<T> {
|
||||
type Output = Vec2<T::Output>;
|
||||
fn sub(self, other: Vec2<T>) -> Self::Output {
|
||||
Vec2(self.0 - other.0, self.1 - other.1)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: std::ops::Div<T>> std::ops::Div<Vec2<T>> for Vec2<T> {
|
||||
type Output = Vec2<T::Output>;
|
||||
fn div(self, other: Vec2<T>) -> Self::Output {
|
||||
Vec2(self.0 / other.0, self.1 / other.1)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: std::ops::Mul<T>> std::ops::Mul<Vec2<T>> for Vec2<T> {
|
||||
type Output = Vec2<T::Output>;
|
||||
fn mul(self, other: Vec2<T>) -> Self::Output {
|
||||
Vec2(self.0 * other.0, self.1 * other.1)
|
||||
}
|
||||
}
|
||||
|
||||
/// Component-wise negation.
impl<T> std::ops::Neg for Vec2<T> where T: std::ops::Neg<Output=T> {
    type Output = Vec2<T>;
    fn neg(self) -> Self::Output { Vec2(-self.0, -self.1) }
}

/// Conversion from an `(x, y)` tuple.
impl<T> From<(T, T)> for Vec2<T> {
    fn from((x, y): (T, T)) -> Self { Vec2(x, y) }
}

/// Conversion into an `(x, y)` tuple.
impl<T> From<Vec2<T>> for (T, T) {
    fn from(vec2: Vec2<T>) -> Self { (vec2.0, vec2.1) }
}
|
||||
|
||||
/// Computes `floor(log(x)/log(2))`. Returns 0 where argument is 0.
// (`u32::ilog2` exists since Rust 1.67, but it panics for 0 and would raise the MSRV)
pub(crate) fn floor_log_2(number: u32) -> u32 {
    if number == 0 { 0 }

    // `31 - leading_zeros(x)` is exactly `floor(log2(x))` for x > 0,
    // and `leading_zeros` compiles down to a single instruction on most targets
    // (replaces the previous shift loop, which ran once per bit)
    else { 31 - number.leading_zeros() }
}
|
||||
|
||||
|
||||
/// Computes `ceil(log(x)/log(2))`. Returns 0 where argument is 0.
// inspired by https://github.com/openexr/openexr/blob/master/OpenEXR/IlmImf/ImfTiledMisc.cpp
pub(crate) fn ceil_log_2(number: u32) -> u32 {
    if number <= 1 { 0 }

    // for x > 1, `ceil(log2(x)) == floor(log2(x - 1)) + 1`,
    // and `floor(log2(y)) == 31 - leading_zeros(y)` for y > 0
    // (replaces the previous shift loop, which ran once per bit)
    else { 32 - (number - 1).leading_zeros() }
}
|
||||
|
||||
|
||||
/// Round up or down in specific calculations.
/// Used in this crate for level-size and block-count computations
/// (see `compute_level_size` and `RoundingMode::divide`).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum RoundingMode {

    /// Round down.
    Down,

    /// Round up.
    Up,
}
|
||||
|
||||
impl RoundingMode {

    /// Compute `log2(number)`, rounded according to this mode.
    /// Returns 0 for an argument of 0 (as both underlying functions do).
    pub(crate) fn log2(self, number: u32) -> u32 {
        match self {
            RoundingMode::Down => self::floor_log_2(number),
            RoundingMode::Up => self::ceil_log_2(number),
        }
    }

    /// Integer division, rounded according to this mode.
    /// Only works for positive numbers.
    ///
    /// Panics when `dividend < 0` or `divisor < 1`.
    // NOTE(review): `dividend + divisor` in the rounding-up branch may
    // overflow for values near the maximum of `T` — confirm callers stay small
    pub(crate) fn divide<T>(self, dividend: T, divisor: T) -> T
        where T: Copy + Add<Output = T> + Sub<Output = T> + Div<Output = T> + From<u8> + std::cmp::PartialOrd
    {
        assert!(
            dividend >= T::from(0) && divisor >= T::from(1),
            "division with rounding up only works for positive numbers"
        );

        match self {
            // adding `divisor - 1` before the truncating division rounds the result up
            RoundingMode::Up => (dividend + divisor - T::from(1_u8)) / divisor, // only works for positive numbers
            RoundingMode::Down => dividend / divisor,
        }
    }
}
|
||||
|
||||
// TODO log2 tests
|
||||
2226
vendor/exr/src/meta/attribute.rs
vendored
Normal file
2226
vendor/exr/src/meta/attribute.rs
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1197
vendor/exr/src/meta/header.rs
vendored
Normal file
1197
vendor/exr/src/meta/header.rs
vendored
Normal file
File diff suppressed because it is too large
Load Diff
821
vendor/exr/src/meta/mod.rs
vendored
Normal file
821
vendor/exr/src/meta/mod.rs
vendored
Normal file
@@ -0,0 +1,821 @@
|
||||
|
||||
//! Describes all meta data possible in an exr file.
|
||||
//! Contains functionality to read and write meta data from bytes.
|
||||
//! Browse the `exr::image` module to get started with the high-level interface.
|
||||
|
||||
pub mod attribute;
|
||||
pub mod header;
|
||||
|
||||
|
||||
use crate::io::*;
|
||||
use ::smallvec::SmallVec;
|
||||
use self::attribute::*;
|
||||
use crate::block::chunk::{TileCoordinates, CompressedBlock};
|
||||
use crate::error::*;
|
||||
use std::fs::File;
|
||||
use std::io::{BufReader};
|
||||
use crate::math::*;
|
||||
use std::collections::{HashSet};
|
||||
use std::convert::TryFrom;
|
||||
use crate::meta::header::{Header};
|
||||
use crate::block::{BlockIndex, UncompressedBlock};
|
||||
|
||||
|
||||
// TODO rename MetaData to ImageInfo?

/// Contains the complete meta data of an exr image.
/// Defines how the image is split up in the file,
/// the number and type of images and channels,
/// and various other attributes.
/// The usage of custom attributes is encouraged.
#[derive(Debug, Clone, PartialEq)]
pub struct MetaData {

    /// Some flags summarizing the features that must be supported to decode the file.
    pub requirements: Requirements,

    /// One header to describe each layer in this file.
    // TODO rename to layer descriptions?
    pub headers: Headers,
}


/// List of `Header`s — one per layer (see `MetaData::headers`).
pub type Headers = SmallVec<[Header; 3]>;

/// List of `OffsetTable`s — one per header.
pub type OffsetTables = SmallVec<[OffsetTable; 3]>;


/// The offset table is an ordered list of indices referencing pixel data in the exr file.
/// For each pixel tile in the image, an index exists, which points to the byte-location
/// of the corresponding pixel data in the file. That index can be used to load specific
/// portions of an image without processing all bytes in a file. For each header,
/// an offset table exists with its indices ordered by `LineOrder::Increasing`.
// If the multipart bit is unset and the chunkCount attribute is not present,
// the number of entries in the chunk table is computed using the
// dataWindow, tileDesc, and compression attribute.
//
// If the multipart bit is set, the header must contain a
// chunkCount attribute, that contains the length of the offset table.
pub type OffsetTable = Vec<u64>;
|
||||
|
||||
|
||||
/// A summary of requirements that must be met to read this exr file.
/// Used to determine whether this file can be read by a given reader.
/// It includes the OpenEXR version number. This library aims to support version `2.0`.
/// Stored in `MetaData::requirements`.
#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)]
pub struct Requirements {

    /// This library supports reading version 1 and 2, and writing version 2.
    // TODO write version 1 for simple images
    pub file_format_version: u8,

    /// If true, this image has tiled blocks and contains only a single layer.
    /// If false and not deep and not multilayer, this image is a single layer image with scan line blocks.
    pub is_single_layer_and_tiled: bool,

    // in c or bad c++ this might have been relevant (omg is he allowed to say that)
    /// Whether this file has strings with a length greater than 31.
    /// Strings can never be longer than 255.
    pub has_long_names: bool,

    /// This image contains at least one layer with deep data.
    pub has_deep_data: bool,

    /// Whether this file contains multiple layers.
    pub has_multiple_layers: bool,
}


/// Locates a rectangular section of pixels in an image.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct TileIndices {

    /// Index of the tile.
    pub location: TileCoordinates,

    /// Pixel size of the tile.
    pub size: Vec2<usize>,
}

/// How the image pixels are split up into separate blocks.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum BlockDescription {

    /// The image is divided into scan line blocks.
    /// The number of scan lines in a block depends on the compression method.
    ScanLines,

    /// The image is divided into tile blocks.
    /// Also specifies the size of each tile in the image
    /// and whether this image contains multiple resolution levels.
    Tiles(TileDescription)
}
|
||||
|
||||
|
||||
/*impl TileIndices {
|
||||
pub fn cmp(&self, other: &Self) -> Ordering {
|
||||
match self.location.level_index.1.cmp(&other.location.level_index.1) {
|
||||
Ordering::Equal => {
|
||||
match self.location.level_index.0.cmp(&other.location.level_index.0) {
|
||||
Ordering::Equal => {
|
||||
match self.location.tile_index.1.cmp(&other.location.tile_index.1) {
|
||||
Ordering::Equal => {
|
||||
self.location.tile_index.0.cmp(&other.location.tile_index.0)
|
||||
},
|
||||
|
||||
other => other,
|
||||
}
|
||||
},
|
||||
|
||||
other => other
|
||||
}
|
||||
},
|
||||
|
||||
other => other
|
||||
}
|
||||
}
|
||||
}*/
|
||||
|
||||
impl BlockDescription {
|
||||
|
||||
/// Whether this image is tiled. If false, this image is divided into scan line blocks.
|
||||
pub fn has_tiles(&self) -> bool {
|
||||
match self {
|
||||
BlockDescription::Tiles { .. } => true,
|
||||
_ => false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/// The first four bytes of each exr file.
/// Used to abort reading non-exr files.
pub mod magic_number {
    use super::*;

    /// The first four bytes of each exr file.
    pub const BYTES: [u8; 4] = [0x76, 0x2f, 0x31, 0x01];

    /// Without validation, write this instance to the byte stream.
    pub fn write(write: &mut impl Write) -> Result<()> {
        u8::write_slice(write, &self::BYTES)
    }

    /// Consumes four bytes from the reader and returns whether the file may be an exr file.
    // TODO check if exr before allocating BufRead
    pub fn is_exr(read: &mut impl Read) -> Result<bool> {
        // always reads exactly four bytes, regardless of their value
        let mut magic_num = [0; 4];
        u8::read_slice(read, &mut magic_num)?;
        Ok(magic_num == self::BYTES)
    }

    /// Validate this image. If it is an exr file, return `Ok(())`.
    /// Otherwise returns `Error::Invalid`. Consumes four bytes either way.
    pub fn validate_exr(read: &mut impl Read) -> UnitResult {
        if self::is_exr(read)? {
            Ok(())

        } else {
            Err(Error::invalid("file identifier missing"))
        }
    }
}
|
||||
|
||||
/// A `0_u8` at the end of a sequence.
pub mod sequence_end {
    use super::*;

    /// Number of bytes this would consume in an exr file.
    pub fn byte_size() -> usize {
        1
    }

    /// Without validation, write this instance to the byte stream.
    pub fn write<W: Write>(write: &mut W) -> UnitResult {
        0_u8.write(write)
    }

    /// Peeks the next byte. If it is zero, consumes the byte and returns true.
    // delegates to `PeekRead::skip_if_eq` from `crate::io`
    pub fn has_come(read: &mut PeekRead<impl Read>) -> Result<bool> {
        Ok(read.skip_if_eq(0)?)
    }
}
|
||||
|
||||
/// Create the error that is returned when a required attribute
/// is missing or invalid in the file being read.
fn missing_attribute(name: &str) -> Error {
    Error::invalid(format!("missing or invalid {} attribute", name))
}
|
||||
|
||||
|
||||
/// Compute the number of tiles required to contain all values.
pub fn compute_block_count(full_res: usize, tile_size: usize) -> usize {
    // round up, because if the image is not evenly divisible by the tiles,
    // we add another tile at the end (which is only partially used)
    RoundingMode::Up.divide(full_res, tile_size)
}

/// Compute the start position and size of a block inside a dimension.
///
/// Returns `(position, size)`; the size of the last block is clipped
/// (see `calculate_block_size`). Errors when the index is out of bounds.
#[inline]
pub fn calculate_block_position_and_size(total_size: usize, block_size: usize, block_index: usize) -> Result<(usize, usize)> {
    let block_position = block_size * block_index;

    Ok((
        block_position,
        calculate_block_size(total_size, block_size, block_position)?
    ))
}
|
||||
|
||||
/// Calculate the size of a single block. If this is the last block,
|
||||
/// this only returns the required size, which is always smaller than the default block size.
|
||||
// TODO use this method everywhere instead of convoluted formulas
|
||||
#[inline]
|
||||
pub fn calculate_block_size(total_size: usize, block_size: usize, block_position: usize) -> Result<usize> {
|
||||
if block_position >= total_size {
|
||||
return Err(Error::invalid("block index"))
|
||||
}
|
||||
|
||||
if block_position + block_size <= total_size {
|
||||
Ok(block_size)
|
||||
}
|
||||
else {
|
||||
Ok(total_size - block_position)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Calculate number of mip levels in a given resolution.
// TODO this should be cached? log2 may be very expensive
pub fn compute_level_count(round: RoundingMode, full_res: usize) -> usize {
    // level 0 is the full resolution; each further level halves it,
    // hence log2(full_res) + 1 levels in total
    usize::try_from(round.log2(u32::try_from(full_res).unwrap())).unwrap() + 1
}

/// Calculate the size of a single mip level by index.
// TODO this should be cached? log2 may be very expensive
pub fn compute_level_size(round: RoundingMode, full_res: usize, level_index: usize) -> usize {
    // shifting by the full bit width of usize (or more) is invalid in rust
    assert!(level_index < std::mem::size_of::<usize>() * 8, "largest level size exceeds maximum integer value");

    // each level halves the resolution, but never shrinks below one pixel
    round.divide(full_res,  1 << level_index).max(1)
}
|
||||
|
||||
/// Iterates over all rip map level resolutions of a given size, including the indices of each level.
|
||||
/// The order of iteration conforms to `LineOrder::Increasing`.
|
||||
// TODO cache these?
|
||||
// TODO compute these directly instead of summing up an iterator?
|
||||
pub fn rip_map_levels(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=(Vec2<usize>, Vec2<usize>)> {
|
||||
rip_map_indices(round, max_resolution).map(move |level_indices|{
|
||||
// TODO progressively divide instead??
|
||||
let width = compute_level_size(round, max_resolution.width(), level_indices.x());
|
||||
let height = compute_level_size(round, max_resolution.height(), level_indices.y());
|
||||
(level_indices, Vec2(width, height))
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterates over all mip map level resolutions of a given size, including the indices of each level.
|
||||
/// The order of iteration conforms to `LineOrder::Increasing`.
|
||||
// TODO cache all these level values when computing table offset size??
|
||||
// TODO compute these directly instead of summing up an iterator?
|
||||
pub fn mip_map_levels(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=(usize, Vec2<usize>)> {
|
||||
mip_map_indices(round, max_resolution)
|
||||
.map(move |level_index|{
|
||||
// TODO progressively divide instead??
|
||||
let width = compute_level_size(round, max_resolution.width(), level_index);
|
||||
let height = compute_level_size(round, max_resolution.height(), level_index);
|
||||
(level_index, Vec2(width, height))
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterates over all rip map level indices of a given size.
|
||||
/// The order of iteration conforms to `LineOrder::Increasing`.
|
||||
pub fn rip_map_indices(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=Vec2<usize>> {
|
||||
let (width, height) = (
|
||||
compute_level_count(round, max_resolution.width()),
|
||||
compute_level_count(round, max_resolution.height())
|
||||
);
|
||||
|
||||
(0..height).flat_map(move |y_level|{
|
||||
(0..width).map(move |x_level|{
|
||||
Vec2(x_level, y_level)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterates over all mip map level indices of a given size.
/// The order of iteration conforms to `LineOrder::Increasing`.
pub fn mip_map_indices(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=usize> {
    // the level count is determined by the larger of the two dimensions
    0..compute_level_count(round, max_resolution.width().max(max_resolution.height()))
}
|
||||
|
||||
/// Compute the number of chunks that an image is divided into. May be an expensive operation.
// If not multilayer and chunkCount not present,
// the number of entries in the chunk table is computed
// using the dataWindow and tileDesc attributes and the compression format
pub fn compute_chunk_count(compression: Compression, data_size: Vec2<usize>, blocks: BlockDescription) -> usize {

    if let BlockDescription::Tiles(tiles) = blocks {
        let round = tiles.rounding_mode;
        let Vec2(tile_width, tile_height) = tiles.tile_size;

        // TODO cache all these level values??
        use crate::meta::attribute::LevelMode::*;
        match tiles.level_mode {

            // a single level: just the tiles that cover the full resolution
            Singular => {
                let tiles_x = compute_block_count(data_size.width(), tile_width);
                let tiles_y = compute_block_count(data_size.height(), tile_height);
                tiles_x * tiles_y
            }

            // sum the tile count of every mip map level
            MipMap => {
                mip_map_levels(round, data_size).map(|(_, Vec2(level_width, level_height))| {
                    compute_block_count(level_width, tile_width) * compute_block_count(level_height, tile_height)
                }).sum()
            },

            // sum the tile count of every rip map level
            RipMap => {
                rip_map_levels(round, data_size).map(|(_, Vec2(level_width, level_height))| {
                    compute_block_count(level_width, tile_width) * compute_block_count(level_height, tile_height)
                }).sum()
            }
        }
    }

    // scan line blocks never have mip maps
    else {
        compute_block_count(data_size.height(), compression.scan_lines_per_block())
    }
}
|
||||
|
||||
|
||||
|
||||
impl MetaData {
|
||||
|
||||
/// Read the exr meta data from a file.
|
||||
/// Use `read_from_unbuffered` instead if you do not have a file.
|
||||
/// Does not validate the meta data.
|
||||
#[must_use]
|
||||
pub fn read_from_file(path: impl AsRef<::std::path::Path>, pedantic: bool) -> Result<Self> {
|
||||
Self::read_from_unbuffered(File::open(path)?, pedantic)
|
||||
}
|
||||
|
||||
/// Buffer the reader and then read the exr meta data from it.
|
||||
/// Use `read_from_buffered` if your reader is an in-memory reader.
|
||||
/// Use `read_from_file` if you have a file path.
|
||||
/// Does not validate the meta data.
|
||||
#[must_use]
|
||||
pub fn read_from_unbuffered(unbuffered: impl Read, pedantic: bool) -> Result<Self> {
|
||||
Self::read_from_buffered(BufReader::new(unbuffered), pedantic)
|
||||
}
|
||||
|
||||
/// Read the exr meta data from a reader.
|
||||
/// Use `read_from_file` if you have a file path.
|
||||
/// Use `read_from_unbuffered` if this is not an in-memory reader.
|
||||
/// Does not validate the meta data.
|
||||
#[must_use]
|
||||
pub fn read_from_buffered(buffered: impl Read, pedantic: bool) -> Result<Self> {
|
||||
let mut read = PeekRead::new(buffered);
|
||||
MetaData::read_unvalidated_from_buffered_peekable(&mut read, pedantic)
|
||||
}
|
||||
|
||||
/// Does __not validate__ the meta data completely.
|
||||
#[must_use]
|
||||
pub(crate) fn read_unvalidated_from_buffered_peekable(read: &mut PeekRead<impl Read>, pedantic: bool) -> Result<Self> {
|
||||
magic_number::validate_exr(read)?;
|
||||
|
||||
let requirements = Requirements::read(read)?;
|
||||
|
||||
// do this check now in order to fast-fail for newer versions and features than version 2
|
||||
requirements.validate()?;
|
||||
|
||||
let headers = Header::read_all(read, &requirements, pedantic)?;
|
||||
|
||||
// TODO check if supporting requirements 2 always implies supporting requirements 1
|
||||
Ok(MetaData { requirements, headers })
|
||||
}
|
||||
|
||||
/// Validates the meta data.
|
||||
#[must_use]
|
||||
pub(crate) fn read_validated_from_buffered_peekable(
|
||||
read: &mut PeekRead<impl Read>, pedantic: bool
|
||||
) -> Result<Self> {
|
||||
let meta_data = Self::read_unvalidated_from_buffered_peekable(read, !pedantic)?;
|
||||
MetaData::validate(meta_data.headers.as_slice(), pedantic)?;
|
||||
Ok(meta_data)
|
||||
}
|
||||
|
||||
/// Validates the meta data and writes it to the stream.
|
||||
/// If pedantic, throws errors for files that may produce errors in other exr readers.
|
||||
/// Returns the automatically detected minimum requirement flags.
|
||||
pub(crate) fn write_validating_to_buffered(write: &mut impl Write, headers: &[Header], pedantic: bool) -> Result<Requirements> {
|
||||
// pedantic validation to not allow slightly invalid files
|
||||
// that still could be read correctly in theory
|
||||
let minimal_requirements = Self::validate(headers, pedantic)?;
|
||||
|
||||
magic_number::write(write)?;
|
||||
minimal_requirements.write(write)?;
|
||||
Header::write_all(headers, write, minimal_requirements.has_multiple_layers)?;
|
||||
Ok(minimal_requirements)
|
||||
}
|
||||
|
||||
/// Read one offset table from the reader for each header.
|
||||
pub fn read_offset_tables(read: &mut PeekRead<impl Read>, headers: &Headers) -> Result<OffsetTables> {
|
||||
headers.iter()
|
||||
.map(|header| u64::read_vec(read, header.chunk_count, u16::MAX as usize, None, "offset table size"))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Skip the offset tables by advancing the reader by the required byte count.
|
||||
// TODO use seek for large (probably all) tables!
|
||||
pub fn skip_offset_tables(read: &mut PeekRead<impl Read>, headers: &Headers) -> Result<usize> {
|
||||
let chunk_count: usize = headers.iter().map(|header| header.chunk_count).sum();
|
||||
crate::io::skip_bytes(read, chunk_count * u64::BYTE_SIZE)?; // TODO this should seek for large tables
|
||||
Ok(chunk_count)
|
||||
}
|
||||
|
||||
/// This iterator tells you the block indices of all blocks that must be in the image.
/// The order of the blocks depends on the `LineOrder` attribute
/// (unspecified line order is treated the same as increasing line order).
/// The blocks written to the file must be exactly in this order,
/// except for when the `LineOrder` is unspecified.
/// The index represents the block index, in increasing line order, within the header.
pub fn enumerate_ordered_header_block_indices(&self) -> impl '_ + Iterator<Item=(usize, BlockIndex)> {
    // thin delegation: the free function in `crate::block` inspects each
    // header's line order and yields `(index_within_header, block_index)` pairs
    crate::block::enumerate_ordered_header_block_indices(&self.headers)
}
|
||||
|
||||
/// Go through all the block indices in the correct order and call the specified closure for each of these blocks.
|
||||
/// That way, the blocks indices are filled with real block data and returned as an iterator.
|
||||
/// The closure returns the an `UncompressedBlock` for each block index.
|
||||
pub fn collect_ordered_blocks<'s>(&'s self, mut get_block: impl 's + FnMut(BlockIndex) -> UncompressedBlock)
|
||||
-> impl 's + Iterator<Item=(usize, UncompressedBlock)>
|
||||
{
|
||||
self.enumerate_ordered_header_block_indices().map(move |(index_in_header, block_index)|{
|
||||
(index_in_header, get_block(block_index))
|
||||
})
|
||||
}
|
||||
|
||||
/// Go through all the block indices in the correct order and call the specified closure for each of these blocks.
|
||||
/// That way, the blocks indices are filled with real block data and returned as an iterator.
|
||||
/// The closure returns the byte data for each block index.
|
||||
pub fn collect_ordered_block_data<'s>(&'s self, mut get_block_data: impl 's + FnMut(BlockIndex) -> Vec<u8>)
|
||||
-> impl 's + Iterator<Item=(usize, UncompressedBlock)>
|
||||
{
|
||||
self.collect_ordered_blocks(move |block_index|
|
||||
UncompressedBlock { index: block_index, data: get_block_data(block_index) }
|
||||
)
|
||||
}
|
||||
|
||||
/// Validates this meta data. Returns the minimal possible requirements.
///
/// Infers the lowest requirement flags that still describe the given headers,
/// then (optionally pedantically) checks cross-header invariants.
pub fn validate(headers: &[Header], pedantic: bool) -> Result<Requirements> {
    if headers.len() == 0 {
        return Err(Error::invalid("at least one layer is required"));
    }

    let deep = false; // TODO deep data
    let is_multilayer = headers.len() > 1;
    // only the first header decides the `single layer and tiled` flag;
    // the flag is meaningless (and must be false) for multilayer files
    let first_header_has_tiles = headers.iter().next()
        .map_or(false, |header| header.blocks.has_tiles());

    let mut minimal_requirements = Requirements {
        // according to the spec, version 2 should only be necessary if `is_multilayer || deep`.
        // but the current open exr library does not support images with version 1, so always use version 2.
        file_format_version: 2,

        // start as low as possible, later increasing if required
        has_long_names: false,

        is_single_layer_and_tiled: !is_multilayer && first_header_has_tiles,
        has_multiple_layers: is_multilayer,
        has_deep_data: deep,
    };

    for header in headers {
        if header.deep { // TODO deep data (and then remove this check)
            return Err(Error::unsupported("deep data not supported yet"));
        }

        // `header.validate` may flip `has_long_names` to true as a side effect
        header.validate(is_multilayer, &mut minimal_requirements.has_long_names, pedantic)?;
    }

    // TODO validation fn!
    /*if let Some(max) = max_pixel_bytes {
        let byte_size: usize = headers.iter()
            .map(|header| header.total_pixel_bytes())
            .sum();

        if byte_size > max {
            return Err(Error::invalid("image larger than specified maximum"));
        }
    }*/

    if pedantic { // check for duplicate header names
        let mut header_names = HashSet::with_capacity(headers.len());
        for header in headers {
            // NOTE(review): if two headers both have `layer_name == None`, the second
            // insert fails and the `expect` below panics. presumably `header.validate`
            // above already requires names in multilayer files — confirm.
            if !header_names.insert(&header.own_attributes.layer_name) {
                return Err(Error::invalid(format!(
                    "duplicate layer name: `{}`",
                    header.own_attributes.layer_name.as_ref().expect("header validation bug")
                )));
            }
        }
    }

    if pedantic {
        // chromaticities and time codes must live in the shared attributes,
        // never in a layer's own attribute map
        let must_share = headers.iter().flat_map(|header| header.own_attributes.other.iter())
            .any(|(_, value)| value.to_chromaticities().is_ok() || value.to_time_code().is_ok());

        if must_share {
            return Err(Error::invalid("chromaticities and time code attributes must must not exist in own attributes but shared instead"));
        }
    }

    if pedantic && headers.len() > 1 { // check for attributes that should not differ in between headers
        let first_header = headers.first().expect("header count validation bug");
        let first_header_attributes = &first_header.shared_attributes;

        for header in &headers[1..] {
            if &header.shared_attributes != first_header_attributes {
                return Err(Error::invalid("display window, pixel aspect, chromaticities, and time code attributes must be equal for all headers"))
            }
        }
    }

    // the flags we just inferred must themselves form a legal combination
    debug_assert!(minimal_requirements.validate().is_ok(), "inferred requirements are invalid");
    Ok(minimal_requirements)
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
impl Requirements {
|
||||
|
||||
// this is actually used for control flow, as the number of headers may be 1 in a multilayer file
/// Is this file declared to contain multiple layers?
/// Reads the flag as stored in the file, not the actual header count.
pub fn is_multilayer(&self) -> bool {
    self.has_multiple_layers
}
|
||||
|
||||
/// Read the value without validating.
|
||||
pub fn read<R: Read>(read: &mut R) -> Result<Self> {
|
||||
use ::bit_field::BitField;
|
||||
|
||||
let version_and_flags = u32::read(read)?;
|
||||
|
||||
// take the 8 least significant bits, they contain the file format version number
|
||||
let version = (version_and_flags & 0x000F) as u8;
|
||||
|
||||
// the 24 most significant bits are treated as a set of boolean flags
|
||||
let is_single_tile = version_and_flags.get_bit(9);
|
||||
let has_long_names = version_and_flags.get_bit(10);
|
||||
let has_deep_data = version_and_flags.get_bit(11);
|
||||
let has_multiple_layers = version_and_flags.get_bit(12);
|
||||
|
||||
// all remaining bits except 9, 10, 11 and 12 are reserved and should be 0
|
||||
// if a file has any of these bits set to 1, it means this file contains
|
||||
// a feature that we don't support
|
||||
let unknown_flags = version_and_flags >> 13; // all flags excluding the 12 bits we already parsed
|
||||
|
||||
if unknown_flags != 0 { // TODO test if this correctly detects unsupported files
|
||||
return Err(Error::unsupported("too new file feature flags"));
|
||||
}
|
||||
|
||||
let version = Requirements {
|
||||
file_format_version: version,
|
||||
is_single_layer_and_tiled: is_single_tile, has_long_names,
|
||||
has_deep_data, has_multiple_layers,
|
||||
};
|
||||
|
||||
Ok(version)
|
||||
}
|
||||
|
||||
/// Without validation, write this instance to the byte stream.
|
||||
pub fn write<W: Write>(self, write: &mut W) -> UnitResult {
|
||||
use ::bit_field::BitField;
|
||||
|
||||
// the 8 least significant bits contain the file format version number
|
||||
// and the flags are set to 0
|
||||
let mut version_and_flags = self.file_format_version as u32;
|
||||
|
||||
// the 24 most significant bits are treated as a set of boolean flags
|
||||
version_and_flags.set_bit(9, self.is_single_layer_and_tiled);
|
||||
version_and_flags.set_bit(10, self.has_long_names);
|
||||
version_and_flags.set_bit(11, self.has_deep_data);
|
||||
version_and_flags.set_bit(12, self.has_multiple_layers);
|
||||
// all remaining bits except 9, 10, 11 and 12 are reserved and should be 0
|
||||
|
||||
version_and_flags.write(write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate this instance.
|
||||
pub fn validate(&self) -> UnitResult {
|
||||
if self.file_format_version == 2 {
|
||||
|
||||
match (
|
||||
self.is_single_layer_and_tiled, self.has_deep_data, self.has_multiple_layers,
|
||||
self.file_format_version
|
||||
) {
|
||||
// Single-part scan line. One normal scan line image.
|
||||
(false, false, false, 1..=2) => Ok(()),
|
||||
|
||||
// Single-part tile. One normal tiled image.
|
||||
(true, false, false, 1..=2) => Ok(()),
|
||||
|
||||
// Multi-part (new in 2.0).
|
||||
// Multiple normal images (scan line and/or tiled).
|
||||
(false, false, true, 2) => Ok(()),
|
||||
|
||||
// Single-part deep data (new in 2.0).
|
||||
// One deep tile or deep scan line part
|
||||
(false, true, false, 2) => Ok(()),
|
||||
|
||||
// Multi-part deep data (new in 2.0).
|
||||
// Multiple parts (any combination of:
|
||||
// tiles, scan lines, deep tiles and/or deep scan lines).
|
||||
(false, true, true, 2) => Ok(()),
|
||||
|
||||
_ => Err(Error::invalid("file feature flags"))
|
||||
}
|
||||
}
|
||||
else {
|
||||
Err(Error::unsupported("file versions other than 2.0 are not supported"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::meta::header::{ImageAttributes, LayerAttributes};

    /// The requirements flags must survive a write-then-read cycle unchanged.
    #[test]
    fn round_trip_requirements() {
        let requirements = Requirements {
            file_format_version: 2,
            is_single_layer_and_tiled: true,
            has_long_names: false,
            has_deep_data: true,
            has_multiple_layers: false
        };

        let mut data: Vec<u8> = Vec::new();
        requirements.write(&mut data).unwrap();
        let read = Requirements::read(&mut data.as_slice()).unwrap();
        assert_eq!(requirements, read);
    }

    /// A complete single-layer meta data block must survive a pedantic
    /// write-then-read cycle unchanged.
    #[test]
    fn round_trip(){
        let header = Header {
            channels: ChannelList::new(smallvec![
                    ChannelDescription {
                        name: Text::from("main"),
                        sample_type: SampleType::U32,
                        quantize_linearly: false,
                        sampling: Vec2(1, 1)
                    }
                ],
            ),
            compression: Compression::Uncompressed,
            line_order: LineOrder::Increasing,
            deep_data_version: Some(1),
            chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
            max_samples_per_pixel: Some(4),
            shared_attributes: ImageAttributes {
                pixel_aspect: 3.0,
                .. ImageAttributes::new(IntegerBounds {
                    position: Vec2(2,1),
                    size: Vec2(11, 9)
                })
            },

            blocks: BlockDescription::ScanLines,
            deep: false,
            layer_size: Vec2(2000, 333),
            own_attributes: LayerAttributes {
                layer_name: Some(Text::from("test name lol")),
                layer_position: Vec2(3, -5),
                screen_window_center: Vec2(0.3, 99.0),
                screen_window_width: 0.19,
                .. Default::default()
            }
        };

        let meta = MetaData {
            requirements: Requirements {
                file_format_version: 2,
                is_single_layer_and_tiled: false,
                has_long_names: false,
                has_deep_data: false,
                has_multiple_layers: false
            },
            headers: smallvec![ header ],
        };

        // write pedantically, read leniently, then re-validate what was read
        let mut data: Vec<u8> = Vec::new();
        MetaData::write_validating_to_buffered(&mut data, meta.headers.as_slice(), true).unwrap();
        let meta2 = MetaData::read_from_buffered(data.as_slice(), false).unwrap();
        MetaData::validate(meta2.headers.as_slice(), true).unwrap();
        assert_eq!(meta, meta2);
    }

    /// A single layer with only short attribute names must infer the
    /// lowest possible requirement flags.
    #[test]
    fn infer_low_requirements() {
        let header_version_1_short_names = Header {
            channels: ChannelList::new(smallvec![
                    ChannelDescription {
                        name: Text::from("main"),
                        sample_type: SampleType::U32,
                        quantize_linearly: false,
                        sampling: Vec2(1, 1)
                    }
                ],
            ),
            compression: Compression::Uncompressed,
            line_order: LineOrder::Increasing,
            deep_data_version: Some(1),
            chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
            max_samples_per_pixel: Some(4),
            shared_attributes: ImageAttributes {
                pixel_aspect: 3.0,
                .. ImageAttributes::new(IntegerBounds {
                    position: Vec2(2,1),
                    size: Vec2(11, 9)
                })
            },
            blocks: BlockDescription::ScanLines,
            deep: false,
            layer_size: Vec2(2000, 333),
            own_attributes: LayerAttributes {
                // all attribute names are at most 31 bytes, so no long-name flag required
                other: vec![
                    (Text::try_from("x").unwrap(), AttributeValue::F32(3.0)),
                    (Text::try_from("y").unwrap(), AttributeValue::F32(-1.0)),
                ].into_iter().collect(),
                .. Default::default()
            }
        };

        let low_requirements = MetaData::validate(
            &[header_version_1_short_names], true
        ).unwrap();

        assert_eq!(low_requirements.has_long_names, false);
        assert_eq!(low_requirements.file_format_version, 2); // always have version 2
        assert_eq!(low_requirements.has_deep_data, false);
        assert_eq!(low_requirements.has_multiple_layers, false);
    }

    /// Two layers, one of which carries an attribute name longer than 31 bytes,
    /// must infer both the long-name and the multilayer flag.
    #[test]
    fn infer_high_requirements() {
        let header_version_2_long_names = Header {
            channels: ChannelList::new(
                smallvec![
                    ChannelDescription {
                        name: Text::new_or_panic("main"),
                        sample_type: SampleType::U32,
                        quantize_linearly: false,
                        sampling: Vec2(1, 1)
                    }
                ],
            ),
            compression: Compression::Uncompressed,
            line_order: LineOrder::Increasing,
            deep_data_version: Some(1),
            chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
            max_samples_per_pixel: Some(4),
            shared_attributes: ImageAttributes {
                pixel_aspect: 3.0,
                .. ImageAttributes::new(IntegerBounds {
                    position: Vec2(2,1),
                    size: Vec2(11, 9)
                })
            },
            blocks: BlockDescription::ScanLines,
            deep: false,
            layer_size: Vec2(2000, 333),
            own_attributes: LayerAttributes {
                layer_name: Some(Text::new_or_panic("oasdasoidfj")),
                other: vec![
                    // this name exceeds 31 bytes, forcing `has_long_names`
                    (Text::new_or_panic("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"), AttributeValue::F32(3.0)),
                    (Text::new_or_panic("y"), AttributeValue::F32(-1.0)),
                ].into_iter().collect(),
                .. Default::default()
            }
        };

        // second layer needs a distinct name to pass the duplicate-name check
        let mut layer_2 = header_version_2_long_names.clone();
        layer_2.own_attributes.layer_name = Some(Text::new_or_panic("anythingelse"));

        let low_requirements = MetaData::validate(
            &[header_version_2_long_names, layer_2], true
        ).unwrap();

        assert_eq!(low_requirements.has_long_names, true);
        assert_eq!(low_requirements.file_format_version, 2);
        assert_eq!(low_requirements.has_deep_data, false);
        assert_eq!(low_requirements.has_multiple_layers, true);
    }
}
|
||||
|
||||
Reference in New Issue
Block a user