Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

vendor/tiff/src/bytecast.rs vendored Normal file

@@ -0,0 +1,34 @@
//! Trivial, internal byte transmutation.
//!
//! A dependency like bytemuck would give us extra assurance of the safety but overall would not
//! reduce the amount of total unsafety. We don't use it in the interface where the traits would
//! really become useful.
//!
//! SAFETY: These are benign casts as we apply them to fixed size integer types only. All of them
//! are naturally aligned, valid for all bit patterns and their alignment is surely at most their
//! size (we assert the latter fact since it is 'implementation defined' if following the letter of
//! the unsafe code guidelines).
//!
//! TODO: Would like to use std-lib here.
use std::{mem, slice};
macro_rules! integral_slice_as_bytes{($int:ty, $const:ident $(,$mut:ident)*) => {
pub(crate) fn $const(slice: &[$int]) -> &[u8] {
assert!(mem::align_of::<$int>() <= mem::size_of::<$int>());
unsafe { slice::from_raw_parts(slice.as_ptr() as *const u8, mem::size_of_val(slice)) }
}
$(pub(crate) fn $mut(slice: &mut [$int]) -> &mut [u8] {
assert!(mem::align_of::<$int>() <= mem::size_of::<$int>());
unsafe { slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u8, mem::size_of_val(slice)) }
})*
}}
integral_slice_as_bytes!(i8, i8_as_ne_bytes, i8_as_ne_mut_bytes);
integral_slice_as_bytes!(u16, u16_as_ne_bytes, u16_as_ne_mut_bytes);
integral_slice_as_bytes!(i16, i16_as_ne_bytes, i16_as_ne_mut_bytes);
integral_slice_as_bytes!(u32, u32_as_ne_bytes, u32_as_ne_mut_bytes);
integral_slice_as_bytes!(i32, i32_as_ne_bytes, i32_as_ne_mut_bytes);
integral_slice_as_bytes!(u64, u64_as_ne_bytes, u64_as_ne_mut_bytes);
integral_slice_as_bytes!(i64, i64_as_ne_bytes, i64_as_ne_mut_bytes);
integral_slice_as_bytes!(f32, f32_as_ne_bytes, f32_as_ne_mut_bytes);
integral_slice_as_bytes!(f64, f64_as_ne_bytes, f64_as_ne_mut_bytes);
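// Illustrative sketch, not part of the vendored file: how one of the generated
// functions behaves. `u16_slice_as_ne_bytes` below is a local stand-in for the
// crate-private `u16_as_ne_bytes` produced by the macro above.
fn u16_slice_as_ne_bytes(slice: &[u16]) -> &[u8] {
    // Alignment of u16 is at most its size, so viewing the buffer as bytes is benign.
    assert!(std::mem::align_of::<u16>() <= std::mem::size_of::<u16>());
    unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, std::mem::size_of_val(slice)) }
}

fn main() {
    let values = [0x1234u16, 0x5678];
    let bytes = u16_slice_as_ne_bytes(&values);
    // The byte view is simply the native-endian encoding of each element in order.
    assert_eq!(&bytes[..2], &values[0].to_ne_bytes());
    assert_eq!(&bytes[2..], &values[1].to_ne_bytes());
    println!("{:02x?}", bytes);
}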

vendor/tiff/src/decoder/ifd.rs vendored Normal file

@@ -0,0 +1,670 @@
//! Functions for reading TIFF tags
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::io::{self, Read, Seek};
use std::mem;
use std::str;
use super::stream::{ByteOrder, EndianReader, SmartReader};
use crate::tags::{Tag, Type};
use crate::{TiffError, TiffFormatError, TiffResult};
use self::Value::{
Ascii, Byte, Double, Float, Ifd, IfdBig, List, Rational, RationalBig, SRational, SRationalBig,
Short, Signed, SignedBig, Unsigned, UnsignedBig,
};
#[allow(unused_qualifications)]
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum Value {
Byte(u8),
Short(u16),
Signed(i32),
SignedBig(i64),
Unsigned(u32),
UnsignedBig(u64),
Float(f32),
Double(f64),
List(Vec<Value>),
Rational(u32, u32),
RationalBig(u64, u64),
SRational(i32, i32),
SRationalBig(i64, i64),
Ascii(String),
Ifd(u32),
IfdBig(u64),
}
impl Value {
pub fn into_u8(self) -> TiffResult<u8> {
match self {
Byte(val) => Ok(val),
val => Err(TiffError::FormatError(TiffFormatError::ByteExpected(val))),
}
}
pub fn into_u16(self) -> TiffResult<u16> {
match self {
Short(val) => Ok(val),
Unsigned(val) => Ok(u16::try_from(val)?),
UnsignedBig(val) => Ok(u16::try_from(val)?),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u32(self) -> TiffResult<u32> {
match self {
Short(val) => Ok(val.into()),
Unsigned(val) => Ok(val),
UnsignedBig(val) => Ok(u32::try_from(val)?),
Ifd(val) => Ok(val),
IfdBig(val) => Ok(u32::try_from(val)?),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i32(self) -> TiffResult<i32> {
match self {
Signed(val) => Ok(val),
SignedBig(val) => Ok(i32::try_from(val)?),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_u64(self) -> TiffResult<u64> {
match self {
Short(val) => Ok(val.into()),
Unsigned(val) => Ok(val.into()),
UnsignedBig(val) => Ok(val),
Ifd(val) => Ok(val.into()),
IfdBig(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i64(self) -> TiffResult<i64> {
match self {
Signed(val) => Ok(val.into()),
SignedBig(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_f32(self) -> TiffResult<f32> {
match self {
Float(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_f64(self) -> TiffResult<f64> {
match self {
Double(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_string(self) -> TiffResult<String> {
match self {
Ascii(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_u32_vec(self) -> TiffResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u32()?)
}
Ok(new_vec)
}
Unsigned(val) => Ok(vec![val]),
UnsignedBig(val) => Ok(vec![u32::try_from(val)?]),
Rational(numerator, denominator) => Ok(vec![numerator, denominator]),
RationalBig(numerator, denominator) => {
Ok(vec![u32::try_from(numerator)?, u32::try_from(denominator)?])
}
Ifd(val) => Ok(vec![val]),
IfdBig(val) => Ok(vec![u32::try_from(val)?]),
Ascii(val) => Ok(val.chars().map(u32::from).collect()),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u8_vec(self) -> TiffResult<Vec<u8>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u8()?)
}
Ok(new_vec)
}
Byte(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u16_vec(self) -> TiffResult<Vec<u16>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u16()?)
}
Ok(new_vec)
}
Short(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i32_vec(self) -> TiffResult<Vec<i32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
match v {
SRational(numerator, denominator) => {
new_vec.push(numerator);
new_vec.push(denominator);
}
SRationalBig(numerator, denominator) => {
new_vec.push(i32::try_from(numerator)?);
new_vec.push(i32::try_from(denominator)?);
}
_ => new_vec.push(v.into_i32()?),
}
}
Ok(new_vec)
}
Signed(val) => Ok(vec![val]),
SignedBig(val) => Ok(vec![i32::try_from(val)?]),
SRational(numerator, denominator) => Ok(vec![numerator, denominator]),
SRationalBig(numerator, denominator) => {
Ok(vec![i32::try_from(numerator)?, i32::try_from(denominator)?])
}
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_f32_vec(self) -> TiffResult<Vec<f32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_f32()?)
}
Ok(new_vec)
}
Float(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_f64_vec(self) -> TiffResult<Vec<f64>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_f64()?)
}
Ok(new_vec)
}
Double(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u64_vec(self) -> TiffResult<Vec<u64>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u64()?)
}
Ok(new_vec)
}
Unsigned(val) => Ok(vec![val.into()]),
UnsignedBig(val) => Ok(vec![val]),
Rational(numerator, denominator) => Ok(vec![numerator.into(), denominator.into()]),
RationalBig(numerator, denominator) => Ok(vec![numerator, denominator]),
Ifd(val) => Ok(vec![val.into()]),
IfdBig(val) => Ok(vec![val]),
Ascii(val) => Ok(val.chars().map(u32::from).map(u64::from).collect()),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i64_vec(self) -> TiffResult<Vec<i64>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
match v {
SRational(numerator, denominator) => {
new_vec.push(numerator.into());
new_vec.push(denominator.into());
}
SRationalBig(numerator, denominator) => {
new_vec.push(numerator);
new_vec.push(denominator);
}
_ => new_vec.push(v.into_i64()?),
}
}
Ok(new_vec)
}
Signed(val) => Ok(vec![val.into()]),
SignedBig(val) => Ok(vec![val]),
SRational(numerator, denominator) => Ok(vec![numerator.into(), denominator.into()]),
SRationalBig(numerator, denominator) => Ok(vec![numerator, denominator]),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
}
#[derive(Clone)]
pub struct Entry {
type_: Type,
count: u64,
offset: [u8; 8],
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!(
"Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_, self.count, &self.offset
))
}
}
impl Entry {
pub fn new(type_: Type, count: u32, offset: [u8; 4]) -> Entry {
let mut offset = offset.to_vec();
offset.append(&mut vec![0; 4]);
Entry::new_u64(type_, count.into(), offset[..].try_into().unwrap())
}
pub fn new_u64(type_: Type, count: u64, offset: [u8; 8]) -> Entry {
Entry {
type_,
count,
offset,
}
}
/// Returns a mem_reader for the offset/value field
fn r(&self, byte_order: ByteOrder) -> SmartReader<io::Cursor<Vec<u8>>> {
SmartReader::wrap(io::Cursor::new(self.offset.to_vec()), byte_order)
}
pub fn val<R: Read + Seek>(
&self,
limits: &super::Limits,
bigtiff: bool,
reader: &mut SmartReader<R>,
) -> TiffResult<Value> {
// Case 1: there are no values so we can return immediately.
if self.count == 0 {
return Ok(List(Vec::new()));
}
let bo = reader.byte_order();
let tag_size = match self.type_ {
Type::BYTE | Type::SBYTE | Type::ASCII | Type::UNDEFINED => 1,
Type::SHORT | Type::SSHORT => 2,
Type::LONG | Type::SLONG | Type::FLOAT | Type::IFD => 4,
Type::LONG8
| Type::SLONG8
| Type::DOUBLE
| Type::RATIONAL
| Type::SRATIONAL
| Type::IFD8 => 8,
};
let value_bytes = match self.count.checked_mul(tag_size) {
Some(n) => n,
None => {
return Err(TiffError::LimitsExceeded);
}
};
// Case 2: there is one value.
if self.count == 1 {
// 2a: the value is 5-8 bytes and we're in BigTiff mode.
if bigtiff && value_bytes > 4 && value_bytes <= 8 {
return Ok(match self.type_ {
Type::LONG8 => UnsignedBig(self.r(bo).read_u64()?),
Type::SLONG8 => SignedBig(self.r(bo).read_i64()?),
Type::DOUBLE => Double(self.r(bo).read_f64()?),
Type::RATIONAL => {
let mut r = self.r(bo);
Rational(r.read_u32()?, r.read_u32()?)
}
Type::SRATIONAL => {
let mut r = self.r(bo);
SRational(r.read_i32()?, r.read_i32()?)
}
Type::IFD8 => IfdBig(self.r(bo).read_u64()?),
Type::BYTE
| Type::SBYTE
| Type::ASCII
| Type::UNDEFINED
| Type::SHORT
| Type::SSHORT
| Type::LONG
| Type::SLONG
| Type::FLOAT
| Type::IFD => unreachable!(),
});
}
// 2b: the value is at most 4 bytes or doesn't fit in the offset field.
return Ok(match self.type_ {
Type::BYTE => Unsigned(u32::from(self.offset[0])),
Type::SBYTE => Signed(i32::from(self.offset[0] as i8)),
Type::UNDEFINED => Byte(self.offset[0]),
Type::SHORT => Unsigned(u32::from(self.r(bo).read_u16()?)),
Type::SSHORT => Signed(i32::from(self.r(bo).read_i16()?)),
Type::LONG => Unsigned(self.r(bo).read_u32()?),
Type::SLONG => Signed(self.r(bo).read_i32()?),
Type::FLOAT => Float(self.r(bo).read_f32()?),
Type::ASCII => {
if self.offset[0] == 0 {
Ascii("".to_string())
} else {
return Err(TiffError::FormatError(TiffFormatError::InvalidTag));
}
}
Type::LONG8 => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
UnsignedBig(reader.read_u64()?)
}
Type::SLONG8 => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
SignedBig(reader.read_i64()?)
}
Type::DOUBLE => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
Double(reader.read_f64()?)
}
Type::RATIONAL => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
Rational(reader.read_u32()?, reader.read_u32()?)
}
Type::SRATIONAL => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
SRational(reader.read_i32()?, reader.read_i32()?)
}
Type::IFD => Ifd(self.r(bo).read_u32()?),
Type::IFD8 => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
IfdBig(reader.read_u64()?)
}
});
}
// Case 3: There is more than one value, but it fits in the offset field.
if value_bytes <= 4 || bigtiff && value_bytes <= 8 {
match self.type_ {
Type::BYTE => return offset_to_bytes(self.count as usize, self),
Type::SBYTE => return offset_to_sbytes(self.count as usize, self),
Type::ASCII => {
let mut buf = vec![0; self.count as usize];
self.r(bo).read_exact(&mut buf)?;
if buf.is_ascii() && buf.ends_with(&[0]) {
let v = str::from_utf8(&buf)?;
let v = v.trim_matches(char::from(0));
return Ok(Ascii(v.into()));
} else {
return Err(TiffError::FormatError(TiffFormatError::InvalidTag));
}
}
Type::UNDEFINED => {
return Ok(List(
self.offset[0..self.count as usize]
.iter()
.map(|&b| Byte(b))
.collect(),
));
}
Type::SHORT => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Short(r.read_u16()?));
}
return Ok(List(v));
}
Type::SSHORT => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Signed(i32::from(r.read_i16()?)));
}
return Ok(List(v));
}
Type::LONG => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Unsigned(r.read_u32()?));
}
return Ok(List(v));
}
Type::SLONG => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Signed(r.read_i32()?));
}
return Ok(List(v));
}
Type::FLOAT => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Float(r.read_f32()?));
}
return Ok(List(v));
}
Type::IFD => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Ifd(r.read_u32()?));
}
return Ok(List(v));
}
Type::LONG8
| Type::SLONG8
| Type::RATIONAL
| Type::SRATIONAL
| Type::DOUBLE
| Type::IFD8 => {
unreachable!()
}
}
}
// Case 4: there is more than one value, and it doesn't fit in the offset field.
match self.type_ {
// TODO check if this could give wrong results
// at a different endianness of file/computer.
Type::BYTE => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
let mut buf = [0; 1];
reader.read_exact(&mut buf)?;
Ok(UnsignedBig(u64::from(buf[0])))
}),
Type::SBYTE => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SignedBig(i64::from(reader.read_i8()? as i8)))
}),
Type::SHORT => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(UnsignedBig(u64::from(reader.read_u16()?)))
}),
Type::SSHORT => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SignedBig(i64::from(reader.read_i16()?)))
}),
Type::LONG => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Unsigned(reader.read_u32()?))
}),
Type::SLONG => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Signed(reader.read_i32()?))
}),
Type::FLOAT => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Float(reader.read_f32()?))
}),
Type::DOUBLE => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Double(reader.read_f64()?))
}),
Type::RATIONAL => {
self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Rational(reader.read_u32()?, reader.read_u32()?))
})
}
Type::SRATIONAL => {
self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SRational(reader.read_i32()?, reader.read_i32()?))
})
}
Type::LONG8 => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(UnsignedBig(reader.read_u64()?))
}),
Type::SLONG8 => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SignedBig(reader.read_i64()?))
}),
Type::IFD => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Ifd(reader.read_u32()?))
}),
Type::IFD8 => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(IfdBig(reader.read_u64()?))
}),
Type::UNDEFINED => {
self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
let mut buf = [0; 1];
reader.read_exact(&mut buf)?;
Ok(Byte(buf[0]))
})
}
Type::ASCII => {
let n = usize::try_from(self.count)?;
if n > limits.decoding_buffer_size {
return Err(TiffError::LimitsExceeded);
}
if bigtiff {
reader.goto_offset(self.r(bo).read_u64()?)?
} else {
reader.goto_offset(self.r(bo).read_u32()?.into())?
}
let mut out = vec![0; n];
reader.read_exact(&mut out)?;
// Strings may be null-terminated, so we trim anything downstream of the null byte
if let Some(first) = out.iter().position(|&b| b == 0) {
out.truncate(first);
}
Ok(Ascii(String::from_utf8(out)?))
}
}
}
#[inline]
fn decode_offset<R, F>(
&self,
value_count: u64,
bo: ByteOrder,
bigtiff: bool,
limits: &super::Limits,
reader: &mut SmartReader<R>,
decode_fn: F,
) -> TiffResult<Value>
where
R: Read + Seek,
F: Fn(&mut SmartReader<R>) -> TiffResult<Value>,
{
let value_count = usize::try_from(value_count)?;
if value_count > limits.decoding_buffer_size / mem::size_of::<Value>() {
return Err(TiffError::LimitsExceeded);
}
let mut v = Vec::with_capacity(value_count);
let offset = if bigtiff {
self.r(bo).read_u64()?
} else {
self.r(bo).read_u32()?.into()
};
reader.goto_offset(offset)?;
for _ in 0..value_count {
v.push(decode_fn(reader)?)
}
Ok(List(v))
}
}
/// Extracts a list of BYTE tags stored in an offset
#[inline]
fn offset_to_bytes(n: usize, entry: &Entry) -> TiffResult<Value> {
Ok(List(
entry.offset[0..n]
.iter()
.map(|&e| Unsigned(u32::from(e)))
.collect(),
))
}
/// Extracts a list of SBYTE tags stored in an offset
#[inline]
fn offset_to_sbytes(n: usize, entry: &Entry) -> TiffResult<Value> {
Ok(List(
entry.offset[0..n]
.iter()
.map(|&e| Signed(i32::from(e as i8)))
.collect(),
))
}
/// Type representing an Image File Directory
pub type Directory = HashMap<Tag, Entry>;

vendor/tiff/src/decoder/image.rs vendored Normal file

@@ -0,0 +1,601 @@
use super::ifd::{Directory, Value};
use super::stream::{ByteOrder, DeflateReader, JpegReader, LZWReader, PackBitsReader};
use super::tag_reader::TagReader;
use super::{fp_predict_f32, fp_predict_f64, DecodingBuffer, Limits};
use super::{stream::SmartReader, ChunkType};
use crate::tags::{CompressionMethod, PhotometricInterpretation, Predictor, SampleFormat, Tag};
use crate::{ColorType, TiffError, TiffFormatError, TiffResult, TiffUnsupportedError, UsageError};
use std::convert::{TryFrom, TryInto};
use std::io::{self, Cursor, Read, Seek};
use std::sync::Arc;
#[derive(Debug)]
pub(crate) struct StripDecodeState {
pub rows_per_strip: u32,
}
#[derive(Debug)]
/// Computed values useful for tile decoding
pub(crate) struct TileAttributes {
pub image_width: usize,
pub image_height: usize,
pub tile_width: usize,
pub tile_length: usize,
}
impl TileAttributes {
pub fn tiles_across(&self) -> usize {
(self.image_width + self.tile_width - 1) / self.tile_width
}
pub fn tiles_down(&self) -> usize {
(self.image_height + self.tile_length - 1) / self.tile_length
}
fn padding_right(&self) -> usize {
(self.tile_width - self.image_width % self.tile_width) % self.tile_width
}
fn padding_down(&self) -> usize {
(self.tile_length - self.image_height % self.tile_length) % self.tile_length
}
pub fn get_padding(&self, tile: usize) -> (usize, usize) {
let row = tile / self.tiles_across();
let column = tile % self.tiles_across();
let padding_right = if column == self.tiles_across() - 1 {
self.padding_right()
} else {
0
};
let padding_down = if row == self.tiles_down() - 1 {
self.padding_down()
} else {
0
};
(padding_right, padding_down)
}
}
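// Illustrative sketch, not part of the vendored file: the padding arithmetic above,
// worked through for a hypothetical 100x50 image cut into 64x64 tiles.
fn main() {
    let (image_width, image_height) = (100usize, 50usize);
    let (tile_width, tile_length) = (64usize, 64usize);
    let tiles_across = (image_width + tile_width - 1) / tile_width; // 2
    let tiles_down = (image_height + tile_length - 1) / tile_length; // 1
    let padding_right = (tile_width - image_width % tile_width) % tile_width; // 28
    let padding_down = (tile_length - image_height % tile_length) % tile_length; // 14
    // Only tiles in the rightmost column / bottommost row carry padding.
    for tile in 0..tiles_across * tiles_down {
        let row = tile / tiles_across;
        let column = tile % tiles_across;
        let right = if column == tiles_across - 1 { padding_right } else { 0 };
        let down = if row == tiles_down - 1 { padding_down } else { 0 };
        println!("tile {tile}: padding ({right}, {down})");
    }
}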
#[derive(Debug)]
pub(crate) struct Image {
pub ifd: Option<Directory>,
pub width: u32,
pub height: u32,
pub bits_per_sample: Vec<u8>,
#[allow(unused)]
pub samples: u8,
pub sample_format: Vec<SampleFormat>,
pub photometric_interpretation: PhotometricInterpretation,
pub compression_method: CompressionMethod,
pub predictor: Predictor,
pub jpeg_tables: Option<Arc<Vec<u8>>>,
pub chunk_type: ChunkType,
pub strip_decoder: Option<StripDecodeState>,
pub tile_attributes: Option<TileAttributes>,
pub chunk_offsets: Vec<u64>,
pub chunk_bytes: Vec<u64>,
}
impl Image {
pub fn from_reader<R: Read + Seek>(
reader: &mut SmartReader<R>,
ifd: Directory,
limits: &Limits,
bigtiff: bool,
) -> TiffResult<Image> {
let mut tag_reader = TagReader {
reader,
limits,
ifd: &ifd,
bigtiff,
};
let width = tag_reader.require_tag(Tag::ImageWidth)?.into_u32()?;
let height = tag_reader.require_tag(Tag::ImageLength)?.into_u32()?;
if width == 0 || height == 0 {
return Err(TiffError::FormatError(TiffFormatError::InvalidDimensions(
width, height,
)));
}
let photometric_interpretation = tag_reader
.find_tag(Tag::PhotometricInterpretation)?
.map(Value::into_u16)
.transpose()?
.and_then(PhotometricInterpretation::from_u16)
.ok_or(TiffUnsupportedError::UnknownInterpretation)?;
// Try to parse both the compression method and the number, format, and bits of the included samples.
// If they are not explicitly specified, those tags are reset to their default values and not carried from previous images.
let compression_method = match tag_reader.find_tag(Tag::Compression)? {
Some(val) => CompressionMethod::from_u16(val.into_u16()?)
.ok_or(TiffUnsupportedError::UnknownCompressionMethod)?,
None => CompressionMethod::None,
};
let jpeg_tables = if compression_method == CompressionMethod::ModernJPEG
&& ifd.contains_key(&Tag::JPEGTables)
{
let vec = tag_reader
.find_tag(Tag::JPEGTables)?
.unwrap()
.into_u8_vec()?;
if vec.len() < 2 {
return Err(TiffError::FormatError(
TiffFormatError::InvalidTagValueType(Tag::JPEGTables),
));
}
Some(Arc::new(vec))
} else {
None
};
let samples = tag_reader
.find_tag(Tag::SamplesPerPixel)?
.map(Value::into_u16)
.transpose()?
.unwrap_or(1)
.try_into()?;
let sample_format = match tag_reader.find_tag_uint_vec(Tag::SampleFormat)? {
Some(vals) => {
let sample_format: Vec<_> = vals
.into_iter()
.map(SampleFormat::from_u16_exhaustive)
.collect();
// TODO: for now, only homogeneous formats across samples are supported.
if !sample_format.windows(2).all(|s| s[0] == s[1]) {
return Err(TiffUnsupportedError::UnsupportedSampleFormat(sample_format).into());
}
sample_format
}
None => vec![SampleFormat::Uint],
};
let bits_per_sample = match samples {
1 | 3 | 4 => tag_reader
.find_tag_uint_vec(Tag::BitsPerSample)?
.unwrap_or_else(|| vec![1]),
_ => return Err(TiffUnsupportedError::UnsupportedSampleDepth(samples).into()),
};
let predictor = tag_reader
.find_tag(Tag::Predictor)?
.map(Value::into_u16)
.transpose()?
.map(|p| {
Predictor::from_u16(p)
.ok_or(TiffError::FormatError(TiffFormatError::UnknownPredictor(p)))
})
.transpose()?
.unwrap_or(Predictor::None);
let chunk_type;
let chunk_offsets;
let chunk_bytes;
let strip_decoder;
let tile_attributes;
match (
ifd.contains_key(&Tag::StripByteCounts),
ifd.contains_key(&Tag::StripOffsets),
ifd.contains_key(&Tag::TileByteCounts),
ifd.contains_key(&Tag::TileOffsets),
) {
(true, true, false, false) => {
chunk_type = ChunkType::Strip;
chunk_offsets = tag_reader
.find_tag(Tag::StripOffsets)?
.unwrap()
.into_u64_vec()?;
chunk_bytes = tag_reader
.find_tag(Tag::StripByteCounts)?
.unwrap()
.into_u64_vec()?;
let rows_per_strip = tag_reader
.find_tag(Tag::RowsPerStrip)?
.map(Value::into_u32)
.transpose()?
.unwrap_or(height);
strip_decoder = Some(StripDecodeState { rows_per_strip });
tile_attributes = None;
if chunk_offsets.len() != chunk_bytes.len()
|| rows_per_strip == 0
|| u32::try_from(chunk_offsets.len())?
!= height.saturating_sub(1) / rows_per_strip + 1
{
return Err(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
));
}
}
(false, false, true, true) => {
chunk_type = ChunkType::Tile;
let tile_width =
usize::try_from(tag_reader.require_tag(Tag::TileWidth)?.into_u32()?)?;
let tile_length =
usize::try_from(tag_reader.require_tag(Tag::TileLength)?.into_u32()?)?;
if tile_width == 0 {
return Err(TiffFormatError::InvalidTagValueType(Tag::TileWidth).into());
} else if tile_length == 0 {
return Err(TiffFormatError::InvalidTagValueType(Tag::TileLength).into());
}
strip_decoder = None;
tile_attributes = Some(TileAttributes {
image_width: usize::try_from(width)?,
image_height: usize::try_from(height)?,
tile_width,
tile_length,
});
chunk_offsets = tag_reader
.find_tag(Tag::TileOffsets)?
.unwrap()
.into_u64_vec()?;
chunk_bytes = tag_reader
.find_tag(Tag::TileByteCounts)?
.unwrap()
.into_u64_vec()?;
let tile = tile_attributes.as_ref().unwrap();
if chunk_offsets.len() != chunk_bytes.len()
|| chunk_offsets.len() != tile.tiles_down() * tile.tiles_across()
{
return Err(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
));
}
}
(_, _, _, _) => {
return Err(TiffError::FormatError(
TiffFormatError::StripTileTagConflict,
))
}
};
Ok(Image {
ifd: Some(ifd),
width,
height,
bits_per_sample,
samples,
sample_format,
photometric_interpretation,
compression_method,
jpeg_tables,
predictor,
chunk_type,
strip_decoder,
tile_attributes,
chunk_offsets,
chunk_bytes,
})
}
pub(crate) fn colortype(&self) -> TiffResult<ColorType> {
match self.photometric_interpretation {
PhotometricInterpretation::RGB => match self.bits_per_sample[..] {
[r, g, b] if [r, r] == [g, b] => Ok(ColorType::RGB(r)),
[r, g, b, a] if [r, r, r] == [g, b, a] => Ok(ColorType::RGBA(r)),
// FIXME: We should _ignore_ other components. In particular:
// > Beware of extra components. Some TIFF files may have more components per pixel
// than you think. A Baseline TIFF reader must skip over them gracefully, using the
// values of the SamplesPerPixel and BitsPerSample fields.
// > -- TIFF 6.0 Specification, Section 7, Additional Baseline requirements.
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
},
PhotometricInterpretation::CMYK => match self.bits_per_sample[..] {
[c, m, y, k] if [c, c, c] == [m, y, k] => Ok(ColorType::CMYK(c)),
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
},
PhotometricInterpretation::YCbCr => match self.bits_per_sample[..] {
[y, cb, cr] if [y, y] == [cb, cr] => Ok(ColorType::YCbCr(y)),
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
},
PhotometricInterpretation::BlackIsZero | PhotometricInterpretation::WhiteIsZero
if self.bits_per_sample.len() == 1 =>
{
Ok(ColorType::Gray(self.bits_per_sample[0]))
}
// TODO: this is bad we should not fail at this point
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
}
}
fn create_reader<'r, R: 'r + Read>(
reader: R,
photometric_interpretation: PhotometricInterpretation,
compression_method: CompressionMethod,
compressed_length: u64,
jpeg_tables: Option<Arc<Vec<u8>>>,
) -> TiffResult<Box<dyn Read + 'r>> {
Ok(match compression_method {
CompressionMethod::None => Box::new(reader),
CompressionMethod::LZW => {
Box::new(LZWReader::new(reader, usize::try_from(compressed_length)?))
}
CompressionMethod::PackBits => Box::new(PackBitsReader::new(reader, compressed_length)),
CompressionMethod::Deflate | CompressionMethod::OldDeflate => {
Box::new(DeflateReader::new(reader))
}
CompressionMethod::ModernJPEG => {
if jpeg_tables.is_some() && compressed_length < 2 {
return Err(TiffError::FormatError(
TiffFormatError::InvalidTagValueType(Tag::JPEGTables),
));
}
let jpeg_reader = JpegReader::new(reader, compressed_length, jpeg_tables)?;
let mut decoder = jpeg::Decoder::new(jpeg_reader);
match photometric_interpretation {
PhotometricInterpretation::RGB => {
decoder.set_color_transform(jpeg::ColorTransform::RGB)
}
PhotometricInterpretation::WhiteIsZero => {
decoder.set_color_transform(jpeg::ColorTransform::None)
}
PhotometricInterpretation::BlackIsZero => {
decoder.set_color_transform(jpeg::ColorTransform::None)
}
PhotometricInterpretation::TransparencyMask => {
decoder.set_color_transform(jpeg::ColorTransform::None)
}
PhotometricInterpretation::CMYK => {
decoder.set_color_transform(jpeg::ColorTransform::CMYK)
}
PhotometricInterpretation::YCbCr => {
decoder.set_color_transform(jpeg::ColorTransform::YCbCr)
}
photometric_interpretation => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::UnsupportedInterpretation(
photometric_interpretation,
),
));
}
}
let data = decoder.decode()?;
Box::new(Cursor::new(data))
}
method => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::UnsupportedCompressionMethod(method),
))
}
})
}
pub(crate) fn chunk_file_range(&self, chunk: u32) -> TiffResult<(u64, u64)> {
let file_offset = self
.chunk_offsets
.get(chunk as usize)
.ok_or(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
))?;
let compressed_bytes =
self.chunk_bytes
.get(chunk as usize)
.ok_or(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
))?;
Ok((*file_offset, *compressed_bytes))
}
pub(crate) fn chunk_dimensions(&self) -> TiffResult<(u32, u32)> {
match self.chunk_type {
ChunkType::Strip => {
let strip_attrs = self.strip_decoder.as_ref().unwrap();
Ok((self.width, strip_attrs.rows_per_strip))
}
ChunkType::Tile => {
let tile_attrs = self.tile_attributes.as_ref().unwrap();
Ok((
u32::try_from(tile_attrs.tile_width)?,
u32::try_from(tile_attrs.tile_length)?,
))
}
}
}
pub(crate) fn chunk_data_dimensions(&self, chunk_index: u32) -> TiffResult<(u32, u32)> {
let dims = self.chunk_dimensions()?;
match self.chunk_type {
ChunkType::Strip => {
let strip_height_without_padding = chunk_index
.checked_mul(dims.1)
.and_then(|x| self.height.checked_sub(x))
.ok_or(TiffError::UsageError(UsageError::InvalidChunkIndex(
chunk_index,
)))?;
// Ignore potential vertical padding on the bottommost strip
let strip_height = dims.1.min(strip_height_without_padding);
Ok((dims.0, strip_height))
}
ChunkType::Tile => {
let tile_attrs = self.tile_attributes.as_ref().unwrap();
let (padding_right, padding_down) = tile_attrs.get_padding(chunk_index as usize);
let tile_width = tile_attrs.tile_width - padding_right;
let tile_length = tile_attrs.tile_length - padding_down;
Ok((u32::try_from(tile_width)?, u32::try_from(tile_length)?))
}
}
}
pub(crate) fn expand_chunk(
&self,
reader: impl Read,
mut buffer: DecodingBuffer,
output_width: usize,
byte_order: ByteOrder,
chunk_index: u32,
) -> TiffResult<()> {
// Validate that the provided buffer is of the expected type.
let color_type = self.colortype()?;
match (color_type, &buffer) {
(ColorType::RGB(n), _)
| (ColorType::RGBA(n), _)
| (ColorType::CMYK(n), _)
| (ColorType::YCbCr(n), _)
| (ColorType::Gray(n), _)
if usize::from(n) == buffer.byte_len() * 8 => {}
(ColorType::Gray(n), DecodingBuffer::U8(_)) if n < 8 => match self.predictor {
Predictor::None => {}
Predictor::Horizontal => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::HorizontalPredictor(color_type),
))
}
Predictor::FloatingPoint => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::FloatingPointPredictor(color_type),
));
}
},
(type_, _) => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::UnsupportedColorType(type_),
))
}
}
// Validate that the predictor is supported for the sample type.
match (self.predictor, &buffer) {
(Predictor::Horizontal, DecodingBuffer::F32(_))
| (Predictor::Horizontal, DecodingBuffer::F64(_)) => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::HorizontalPredictor(color_type),
));
}
(Predictor::FloatingPoint, DecodingBuffer::F32(_))
| (Predictor::FloatingPoint, DecodingBuffer::F64(_)) => {}
(Predictor::FloatingPoint, _) => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::FloatingPointPredictor(color_type),
));
}
_ => {}
}
let compressed_bytes =
self.chunk_bytes
.get(chunk_index as usize)
.ok_or(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
))?;
let byte_len = buffer.byte_len();
let compression_method = self.compression_method;
let photometric_interpretation = self.photometric_interpretation;
let predictor = self.predictor;
let samples = self.bits_per_sample.len();
let chunk_dims = self.chunk_dimensions()?;
let data_dims = self.chunk_data_dimensions(chunk_index)?;
let padding_right = chunk_dims.0 - data_dims.0;
let jpeg_tables = self.jpeg_tables.clone();
let mut reader = Self::create_reader(
reader,
photometric_interpretation,
compression_method,
*compressed_bytes,
jpeg_tables,
)?;
if output_width == data_dims.0 as usize && padding_right == 0 {
let total_samples = data_dims.0 as usize * data_dims.1 as usize * samples;
let tile = &mut buffer.as_bytes_mut()[..total_samples * byte_len];
reader.read_exact(tile)?;
for row in 0..data_dims.1 as usize {
let row_start = row as usize * output_width as usize * samples;
let row_end = (row + 1) * output_width as usize * samples;
let row = buffer.subrange(row_start..row_end);
super::fix_endianness_and_predict(row, samples, byte_order, predictor);
}
if photometric_interpretation == PhotometricInterpretation::WhiteIsZero {
super::invert_colors(&mut buffer.subrange(0..total_samples), color_type);
}
} else if padding_right > 0 && self.predictor == Predictor::FloatingPoint {
// The floating point predictor shuffles the padding bytes into the encoded output, so
// this case is handled specially when needed.
let mut encoded = vec![0u8; chunk_dims.0 as usize * samples * byte_len];
for row in 0..data_dims.1 as usize {
let row_start = row * output_width as usize * samples;
let row_end = row_start + data_dims.0 as usize * samples;
reader.read_exact(&mut encoded)?;
match buffer.subrange(row_start..row_end) {
DecodingBuffer::F32(buf) => fp_predict_f32(&mut encoded, buf, samples),
DecodingBuffer::F64(buf) => fp_predict_f64(&mut encoded, buf, samples),
_ => unreachable!(),
}
if photometric_interpretation == PhotometricInterpretation::WhiteIsZero {
super::invert_colors(&mut buffer.subrange(row_start..row_end), color_type);
}
}
} else {
for row in 0..data_dims.1 as usize {
let row_start = row * output_width as usize * samples;
let row_end = row_start + data_dims.0 as usize * samples;
let row = &mut buffer.as_bytes_mut()[(row_start * byte_len)..(row_end * byte_len)];
reader.read_exact(row)?;
// Skip horizontal padding
if padding_right > 0 {
let len = u64::try_from(padding_right as usize * samples * byte_len)?;
io::copy(&mut reader.by_ref().take(len), &mut io::sink())?;
}
let mut row = buffer.subrange(row_start..row_end);
super::fix_endianness_and_predict(row.copy(), samples, byte_order, predictor);
if photometric_interpretation == PhotometricInterpretation::WhiteIsZero {
super::invert_colors(&mut row, color_type);
}
}
}
Ok(())
}
}

vendor/tiff/src/decoder/mod.rs vendored Normal file

File diff suppressed because it is too large.

vendor/tiff/src/decoder/stream.rs vendored Normal file

@@ -0,0 +1,435 @@
//! All IO functionality needed for TIFF decoding
use std::convert::TryFrom;
use std::io::{self, BufRead, BufReader, Read, Seek, SeekFrom, Take};
use std::sync::Arc;
/// Byte order of the TIFF file.
#[derive(Clone, Copy, Debug)]
pub enum ByteOrder {
/// little endian byte order
LittleEndian,
/// big endian byte order
BigEndian,
}
/// Reader that is aware of the byte order.
pub trait EndianReader: Read {
/// Byte order that should be adhered to
fn byte_order(&self) -> ByteOrder;
/// Reads a u16
#[inline(always)]
fn read_u16(&mut self) -> Result<u16, io::Error> {
let mut n = [0u8; 2];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => u16::from_le_bytes(n),
ByteOrder::BigEndian => u16::from_be_bytes(n),
})
}
/// Reads an i8
#[inline(always)]
fn read_i8(&mut self) -> Result<i8, io::Error> {
let mut n = [0u8; 1];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i8::from_le_bytes(n),
ByteOrder::BigEndian => i8::from_be_bytes(n),
})
}
/// Reads an i16
#[inline(always)]
fn read_i16(&mut self) -> Result<i16, io::Error> {
let mut n = [0u8; 2];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i16::from_le_bytes(n),
ByteOrder::BigEndian => i16::from_be_bytes(n),
})
}
/// Reads a u32
#[inline(always)]
fn read_u32(&mut self) -> Result<u32, io::Error> {
let mut n = [0u8; 4];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => u32::from_le_bytes(n),
ByteOrder::BigEndian => u32::from_be_bytes(n),
})
}
/// Reads an i32
#[inline(always)]
fn read_i32(&mut self) -> Result<i32, io::Error> {
let mut n = [0u8; 4];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i32::from_le_bytes(n),
ByteOrder::BigEndian => i32::from_be_bytes(n),
})
}
/// Reads a u64
#[inline(always)]
fn read_u64(&mut self) -> Result<u64, io::Error> {
let mut n = [0u8; 8];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => u64::from_le_bytes(n),
ByteOrder::BigEndian => u64::from_be_bytes(n),
})
}
/// Reads an i64
#[inline(always)]
fn read_i64(&mut self) -> Result<i64, io::Error> {
let mut n = [0u8; 8];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i64::from_le_bytes(n),
ByteOrder::BigEndian => i64::from_be_bytes(n),
})
}
/// Reads an f32
#[inline(always)]
fn read_f32(&mut self) -> Result<f32, io::Error> {
let mut n = [0u8; 4];
self.read_exact(&mut n)?;
Ok(f32::from_bits(match self.byte_order() {
ByteOrder::LittleEndian => u32::from_le_bytes(n),
ByteOrder::BigEndian => u32::from_be_bytes(n),
}))
}
/// Reads an f64
#[inline(always)]
fn read_f64(&mut self) -> Result<f64, io::Error> {
let mut n = [0u8; 8];
self.read_exact(&mut n)?;
Ok(f64::from_bits(match self.byte_order() {
ByteOrder::LittleEndian => u64::from_le_bytes(n),
ByteOrder::BigEndian => u64::from_be_bytes(n),
}))
}
}
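// Illustrative sketch, not part of the vendored file: the byte-order dispatch the
// trait methods perform. The same two bytes decode to different u16 values
// depending on the ByteOrder in effect.
fn main() {
    let n = [0x12u8, 0x34];
    assert_eq!(u16::from_le_bytes(n), 0x3412); // ByteOrder::LittleEndian
    assert_eq!(u16::from_be_bytes(n), 0x1234); // ByteOrder::BigEndian
    println!("le = {:#06x}, be = {:#06x}", u16::from_le_bytes(n), u16::from_be_bytes(n));
}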
///
/// # READERS
///
///
/// ## Deflate Reader
///
pub type DeflateReader<R> = flate2::read::ZlibDecoder<R>;
///
/// ## LZW Reader
///
/// Reader that decompresses LZW streams
pub struct LZWReader<R: Read> {
reader: BufReader<Take<R>>,
decoder: weezl::decode::Decoder,
}
impl<R: Read> LZWReader<R> {
/// Wraps a reader
pub fn new(reader: R, compressed_length: usize) -> LZWReader<R> {
Self {
reader: BufReader::with_capacity(
(32 * 1024).min(compressed_length),
reader.take(u64::try_from(compressed_length).unwrap()),
),
decoder: weezl::decode::Decoder::with_tiff_size_switch(weezl::BitOrder::Msb, 8),
}
}
}
impl<R: Read> Read for LZWReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
loop {
let result = self.decoder.decode_bytes(self.reader.fill_buf()?, buf);
self.reader.consume(result.consumed_in);
match result.status {
Ok(weezl::LzwStatus::Ok) => {
if result.consumed_out == 0 {
continue;
} else {
return Ok(result.consumed_out);
}
}
Ok(weezl::LzwStatus::NoProgress) => {
assert_eq!(result.consumed_in, 0);
assert_eq!(result.consumed_out, 0);
assert!(self.reader.buffer().is_empty());
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"no lzw end code found",
));
}
Ok(weezl::LzwStatus::Done) => {
return Ok(result.consumed_out);
}
Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidData, err)),
}
}
}
}
///
/// ## JPEG Reader (for "new-style" JPEG format (TIFF compression tag 7))
///
pub(crate) struct JpegReader {
jpeg_tables: Option<Arc<Vec<u8>>>,
buffer: io::Cursor<Vec<u8>>,
offset: usize,
}
impl JpegReader {
/// Constructs a new JpegReader wrapping a reader.
/// Because JPEG compression in TIFF allows quantization and/or Huffman tables to be stored
/// in one central location, the constructor accepts this data as `jpeg_tables`, containing
/// either or both.
/// These `jpeg_tables` are simply prepended to the remaining JPEG image data.
/// Because `jpeg_tables` starts with an `SOI` (hex `0xFFD8`), or __start of image__, marker
/// which also appears at the beginning of the remaining JPEG image data and would
/// confuse the JPEG decoder, one of the two has to be dropped. Here the first two
/// bytes of the remaining JPEG data are removed, because that data follows `jpeg_tables`.
/// Similarly, `jpeg_tables` ends with an `EOI` (hex `0xFFD9`), or __end of image__, marker,
/// which has to be removed as well (the last two bytes of `jpeg_tables`).
pub fn new<R: Read>(
mut reader: R,
length: u64,
jpeg_tables: Option<Arc<Vec<u8>>>,
) -> io::Result<JpegReader> {
// Read jpeg image data
let mut segment = vec![0; length as usize];
reader.read_exact(&mut segment[..])?;
match jpeg_tables {
Some(jpeg_tables) => {
assert!(
jpeg_tables.len() >= 2,
"jpeg_tables, if given, must be at least 2 bytes long. Got {:?}",
jpeg_tables
);
assert!(
length >= 2,
"if jpeg_tables is given, length must be at least 2 bytes long, got {}",
length
);
let mut buffer = io::Cursor::new(segment);
// Skip the first two bytes (marker bytes)
buffer.seek(SeekFrom::Start(2))?;
Ok(JpegReader {
buffer,
jpeg_tables: Some(jpeg_tables),
offset: 0,
})
}
None => Ok(JpegReader {
buffer: io::Cursor::new(segment),
jpeg_tables: None,
offset: 0,
}),
}
}
}
impl Read for JpegReader {
// #[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut start = 0;
if let Some(jpeg_tables) = &self.jpeg_tables {
if jpeg_tables.len() - 2 > self.offset {
// Read (rest of) jpeg_tables to buf (without the last two bytes)
let size_remaining = jpeg_tables.len() - self.offset - 2;
let to_copy = size_remaining.min(buf.len());
buf[start..start + to_copy]
.copy_from_slice(&jpeg_tables[self.offset..self.offset + to_copy]);
self.offset += to_copy;
if to_copy == buf.len() {
return Ok(to_copy);
}
start += to_copy;
}
}
let read = self.buffer.read(&mut buf[start..])?;
self.offset += read;
Ok(read + start)
}
}
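// Illustrative sketch, not part of the vendored file: the splice that JpegReader
// performs when `jpeg_tables` is present. `splice_jpeg` is a hypothetical helper;
// the real reader streams the same bytes instead of building one concatenated buffer.
fn splice_jpeg(jpeg_tables: &[u8], segment: &[u8]) -> Vec<u8> {
    assert!(jpeg_tables.len() >= 2 && segment.len() >= 2);
    let mut out = Vec::with_capacity(jpeg_tables.len() + segment.len() - 4);
    out.extend_from_slice(&jpeg_tables[..jpeg_tables.len() - 2]); // drop trailing EOI (FF D9)
    out.extend_from_slice(&segment[2..]); // drop leading SOI (FF D8)
    out
}

fn main() {
    let tables = [0xFFu8, 0xD8, 0xFF, 0xDB, 0x00, 0x02, 0xFF, 0xD9]; // SOI, table stub, EOI
    let chunk = [0xFFu8, 0xD8, 0xFF, 0xDA, 0x00, 0x02, 0xFF, 0xD9]; // SOI, scan stub, EOI
    let spliced = splice_jpeg(&tables, &chunk);
    // The result starts with a single SOI and ends with a single EOI.
    assert_eq!(&spliced[..2], &[0xFF, 0xD8]);
    assert_eq!(&spliced[spliced.len() - 2..], &[0xFF, 0xD9]);
    println!("{:02x?}", spliced);
}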
///
/// ## PackBits Reader
///
enum PackBitsReaderState {
Header,
Literal,
Repeat { value: u8 },
}
/// Reader that unpacks Apple's `PackBits` format
pub struct PackBitsReader<R: Read> {
reader: Take<R>,
state: PackBitsReaderState,
count: usize,
}
impl<R: Read> PackBitsReader<R> {
/// Wraps a reader
pub fn new(reader: R, length: u64) -> Self {
Self {
reader: reader.take(length),
state: PackBitsReaderState::Header,
count: 0,
}
}
}
impl<R: Read> Read for PackBitsReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
while let PackBitsReaderState::Header = self.state {
if self.reader.limit() == 0 {
return Ok(0);
}
let mut header: [u8; 1] = [0];
self.reader.read_exact(&mut header)?;
let h = header[0] as i8;
if h >= -127 && h <= -1 {
let mut data: [u8; 1] = [0];
self.reader.read_exact(&mut data)?;
self.state = PackBitsReaderState::Repeat { value: data[0] };
self.count = (1 - h as isize) as usize;
} else if h >= 0 {
self.state = PackBitsReaderState::Literal;
self.count = h as usize + 1;
} else {
// h = -128 is a no-op.
}
}
let length = buf.len().min(self.count);
let actual = match self.state {
PackBitsReaderState::Literal => self.reader.read(&mut buf[..length])?,
PackBitsReaderState::Repeat { value } => {
for b in &mut buf[..length] {
*b = value;
}
length
}
PackBitsReaderState::Header => unreachable!(),
};
self.count -= actual;
if self.count == 0 {
self.state = PackBitsReaderState::Header;
}
return Ok(actual);
}
}
///
/// ## SmartReader Reader
///
/// Reader that is aware of the byte order.
#[derive(Debug)]
pub struct SmartReader<R>
where
R: Read,
{
reader: R,
pub byte_order: ByteOrder,
}
impl<R> SmartReader<R>
where
R: Read,
{
/// Wraps a reader
pub fn wrap(reader: R, byte_order: ByteOrder) -> SmartReader<R> {
SmartReader { reader, byte_order }
}
pub fn into_inner(self) -> R {
self.reader
}
}
impl<R: Read + Seek> SmartReader<R> {
pub fn goto_offset(&mut self, offset: u64) -> io::Result<()> {
self.seek(io::SeekFrom::Start(offset)).map(|_| ())
}
}
impl<R> EndianReader for SmartReader<R>
where
R: Read,
{
#[inline(always)]
fn byte_order(&self) -> ByteOrder {
self.byte_order
}
}
impl<R: Read> Read for SmartReader<R> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
impl<R: Read + Seek> Seek for SmartReader<R> {
#[inline]
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.reader.seek(pos)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_packbits() {
let encoded = vec![
0xFE, 0xAA, 0x02, 0x80, 0x00, 0x2A, 0xFD, 0xAA, 0x03, 0x80, 0x00, 0x2A, 0x22, 0xF7,
0xAA,
];
let encoded_len = encoded.len();
let buff = io::Cursor::new(encoded);
let mut decoder = PackBitsReader::new(buff, encoded_len as u64);
let mut decoded = Vec::new();
decoder.read_to_end(&mut decoded).unwrap();
let expected = vec![
0xAA, 0xAA, 0xAA, 0x80, 0x00, 0x2A, 0xAA, 0xAA, 0xAA, 0xAA, 0x80, 0x00, 0x2A, 0x22,
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
];
assert_eq!(decoded, expected);
}
}

vendor/tiff/src/decoder/tag_reader.rs vendored Normal file

@@ -0,0 +1,45 @@
use std::convert::TryFrom;
use std::io::{Read, Seek};
use crate::tags::Tag;
use crate::{TiffError, TiffFormatError, TiffResult};
use super::ifd::{Directory, Value};
use super::stream::SmartReader;
use super::Limits;
pub(crate) struct TagReader<'a, R: Read + Seek> {
pub reader: &'a mut SmartReader<R>,
pub ifd: &'a Directory,
pub limits: &'a Limits,
pub bigtiff: bool,
}
impl<'a, R: Read + Seek> TagReader<'a, R> {
pub(crate) fn find_tag(&mut self, tag: Tag) -> TiffResult<Option<Value>> {
Ok(match self.ifd.get(&tag) {
Some(entry) => Some(entry.clone().val(self.limits, self.bigtiff, self.reader)?),
None => None,
})
}
pub(crate) fn require_tag(&mut self, tag: Tag) -> TiffResult<Value> {
match self.find_tag(tag)? {
Some(val) => Ok(val),
None => Err(TiffError::FormatError(
TiffFormatError::RequiredTagNotFound(tag),
)),
}
}
pub fn find_tag_uint_vec<T: TryFrom<u64>>(&mut self, tag: Tag) -> TiffResult<Option<Vec<T>>> {
self.find_tag(tag)?
.map(|v| v.into_u64_vec())
.transpose()?
.map(|v| {
v.into_iter()
.map(|u| {
T::try_from(u).map_err(|_| TiffFormatError::InvalidTagValueType(tag).into())
})
.collect()
})
.transpose()
}
}

vendor/tiff/src/encoder/colortype.rs vendored Normal file

@@ -0,0 +1,245 @@
use crate::tags::{PhotometricInterpretation, SampleFormat};
/// Trait for different colortypes that can be encoded.
pub trait ColorType {
/// The type of each sample of this colortype
type Inner: super::TiffValue;
/// The value of the tiff tag `PhotometricInterpretation`
const TIFF_VALUE: PhotometricInterpretation;
/// The value of the tiff tag `BitsPerSample`
const BITS_PER_SAMPLE: &'static [u16];
/// The value of the tiff tag `SampleFormat`
const SAMPLE_FORMAT: &'static [SampleFormat];
}
pub struct Gray8;
impl ColorType for Gray8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI8;
impl ColorType for GrayI8 {
type Inner = i8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray16;
impl ColorType for Gray16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI16;
impl ColorType for GrayI16 {
type Inner = i16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray32;
impl ColorType for Gray32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI32;
impl ColorType for GrayI32 {
type Inner = i32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray32Float;
impl ColorType for Gray32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP];
}
pub struct Gray64;
impl ColorType for Gray64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI64;
impl ColorType for GrayI64 {
type Inner = i64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray64Float;
impl ColorType for Gray64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP];
}
pub struct RGB8;
impl ColorType for RGB8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB16;
impl ColorType for RGB16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[16, 16, 16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB32;
impl ColorType for RGB32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB32Float;
impl ColorType for RGB32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 3];
}
pub struct RGB64;
impl ColorType for RGB64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB64Float;
impl ColorType for RGB64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 3];
}
pub struct RGBA8;
impl ColorType for RGBA8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA16;
impl ColorType for RGBA16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[16, 16, 16, 16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA32;
impl ColorType for RGBA32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA32Float;
impl ColorType for RGBA32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct RGBA64;
impl ColorType for RGBA64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA64Float;
impl ColorType for RGBA64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct CMYK8;
impl ColorType for CMYK8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK16;
impl ColorType for CMYK16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[16, 16, 16, 16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK32;
impl ColorType for CMYK32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK32Float;
impl ColorType for CMYK32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct CMYK64;
impl ColorType for CMYK64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK64Float;
impl ColorType for CMYK64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct YCbCr8;
impl ColorType for YCbCr8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::YCbCr;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}


@@ -0,0 +1,83 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use flate2::{write::ZlibEncoder, Compression as FlateCompression};
use std::io::Write;
/// The Deflate algorithm used to compress image data in TIFF files.
#[derive(Debug, Clone, Copy)]
pub struct Deflate {
level: FlateCompression,
}
/// The level of compression used by the Deflate algorithm.
/// It allows trading compression ratio for compression speed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum DeflateLevel {
/// The fastest possible compression mode.
Fast = 1,
/// The conservative choice between speed and ratio.
Balanced = 6,
/// The best compression available with Deflate.
Best = 9,
}
impl Default for DeflateLevel {
fn default() -> Self {
DeflateLevel::Balanced
}
}
impl Deflate {
/// Create a new deflate compressor with a specific level of compression.
pub fn with_level(level: DeflateLevel) -> Self {
Self {
level: FlateCompression::new(level as u32),
}
}
}
impl Default for Deflate {
fn default() -> Self {
Self::with_level(DeflateLevel::default())
}
}
impl Compression for Deflate {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::Deflate;
fn get_algorithm(&self) -> Compressor {
Compressor::Deflate(self.clone())
}
}
impl CompressionAlgorithm for Deflate {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
let mut encoder = ZlibEncoder::new(writer, self.level);
encoder.write_all(bytes)?;
encoder.try_finish()?;
Ok(encoder.total_out())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_deflate() {
const EXPECTED_COMPRESSED_DATA: [u8; 64] = [
0x78, 0x9C, 0x15, 0xC7, 0xD1, 0x0D, 0x80, 0x20, 0x0C, 0x04, 0xD0, 0x55, 0x6E, 0x02,
0xA7, 0x71, 0x81, 0xA6, 0x41, 0xDA, 0x28, 0xD4, 0xF4, 0xD0, 0xF9, 0x81, 0xE4, 0xFD,
0xBC, 0xD3, 0x9C, 0x58, 0x04, 0x1C, 0xE9, 0xBD, 0xE2, 0x8A, 0x84, 0x5A, 0xD1, 0x7B,
0xE7, 0x97, 0xF4, 0xF8, 0x08, 0x8D, 0xF6, 0x66, 0x21, 0x3D, 0x3A, 0xE4, 0xA9, 0x91,
0x3E, 0xAC, 0xF1, 0x98, 0xB9, 0x70, 0x17, 0x13,
];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Deflate::default().write_to(&mut writer, TEST_DATA).unwrap();
assert_eq!(EXPECTED_COMPRESSED_DATA, compressed_data.as_slice());
}
}

47
vendor/tiff/src/encoder/compression/lzw.rs vendored Normal file
View File

@ -0,0 +1,47 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use std::io::Write;
use weezl::encode::Encoder as LZWEncoder;
/// The LZW algorithm used to compress image data in TIFF files.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Lzw;
impl Compression for Lzw {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::LZW;
fn get_algorithm(&self) -> Compressor {
Compressor::Lzw(*self)
}
}
impl CompressionAlgorithm for Lzw {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
let mut encoder = LZWEncoder::with_tiff_size_switch(weezl::BitOrder::Msb, 8);
let result = encoder.into_stream(writer).encode_all(bytes);
let byte_count = result.bytes_written as u64;
result.status.map(|_| byte_count)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_lzw() {
const EXPECTED_COMPRESSED_DATA: [u8; 63] = [
0x80, 0x15, 0x0D, 0x06, 0x93, 0x98, 0x82, 0x08, 0x20, 0x30, 0x88, 0x0E, 0x67, 0x43,
0x91, 0xA4, 0xDC, 0x67, 0x10, 0x19, 0x8D, 0xE7, 0x21, 0x01, 0x8C, 0xD0, 0x65, 0x31,
0x9A, 0xE1, 0xD1, 0x03, 0xB1, 0x86, 0x1A, 0x6F, 0x3A, 0xC1, 0x4C, 0x66, 0xF3, 0x69,
0xC0, 0xE4, 0x65, 0x39, 0x9C, 0xCD, 0x26, 0xF3, 0x74, 0x20, 0xD8, 0x67, 0x89, 0x9A,
0x4E, 0x86, 0x83, 0x69, 0xCC, 0x5D, 0x01,
];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Lzw::default().write_to(&mut writer, TEST_DATA).unwrap();
assert_eq!(EXPECTED_COMPRESSED_DATA, compressed_data.as_slice());
}
}

60
vendor/tiff/src/encoder/compression/mod.rs vendored Normal file
View File

@ -0,0 +1,60 @@
use crate::tags::CompressionMethod;
use std::io::{self, Write};
mod deflate;
mod lzw;
mod packbits;
mod uncompressed;
pub use self::deflate::{Deflate, DeflateLevel};
pub use self::lzw::Lzw;
pub use self::packbits::Packbits;
pub use self::uncompressed::Uncompressed;
/// An algorithm used for compression
pub trait CompressionAlgorithm {
/// The algorithm writes data directly into the writer.
/// It returns the total number of bytes written.
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error>;
}
/// An algorithm used for compression with associated enums and optional configurations.
pub trait Compression: CompressionAlgorithm {
/// The corresponding tag to the algorithm.
const COMPRESSION_METHOD: CompressionMethod;
/// Method to obtain a type that can store each variant of compression algorithm.
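///
/// A minimal sketch (using the `Lzw` algorithm exported from this module):
/// ```
/// use tiff::encoder::compression::{Compression, Compressor, Lzw};
/// let _type_erased: Compressor = Lzw.get_algorithm();
/// ```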
fn get_algorithm(&self) -> Compressor;
}
/// An enum to store each compression algorithm.
pub enum Compressor {
Uncompressed(Uncompressed),
Lzw(Lzw),
Deflate(Deflate),
Packbits(Packbits),
}
impl Default for Compressor {
/// The default compression strategy does not apply any compression.
fn default() -> Self {
Compressor::Uncompressed(Uncompressed::default())
}
}
impl CompressionAlgorithm for Compressor {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
match self {
Compressor::Uncompressed(algorithm) => algorithm.write_to(writer, bytes),
Compressor::Lzw(algorithm) => algorithm.write_to(writer, bytes),
Compressor::Deflate(algorithm) => algorithm.write_to(writer, bytes),
Compressor::Packbits(algorithm) => algorithm.write_to(writer, bytes),
}
}
}
#[cfg(test)]
mod tests {
pub const TEST_DATA: &'static [u8] =
b"This is a string for checking various compression algorithms.";
}

214
vendor/tiff/src/encoder/compression/packbits.rs vendored Normal file
View File

@ -0,0 +1,214 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use std::io::{BufWriter, Error, ErrorKind, Write};
/// Compressor that uses the Packbits[^note] algorithm to compress bytes.
///
/// [^note]: PackBits is often ineffective on continuous tone images,
/// including many grayscale images. In such cases, it is better
/// to leave the image uncompressed.
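///
/// A minimal sketch mirroring the single-byte unit test below (a lone byte
/// becomes a two-byte literal chunk):
/// ```
/// use tiff::encoder::compression::{CompressionAlgorithm, Packbits};
/// let mut out = Vec::new();
/// let n = Packbits::default().write_to(&mut out, &[0x3F]).unwrap();
/// assert_eq!(n, 2);
/// assert_eq!(out, vec![0x00, 0x3F]);
/// ```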
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Packbits;
impl Compression for Packbits {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::PackBits;
fn get_algorithm(&self) -> Compressor {
Compressor::Packbits(*self)
}
}
impl CompressionAlgorithm for Packbits {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
// Inspired by https://github.com/skirridsystems/packbits
const MIN_REPT: u8 = 3; // Minimum run length worth compressing between differing blocks
const MAX_BYTES: u8 = 128; // Maximum number of bytes that can be encoded in a header byte
// Encoding for header byte based on number of bytes represented.
fn encode_diff(n: u8) -> u8 {
n - 1
}
fn encode_rept(n: u8) -> u8 {
let var = 256 - (n - 1) as u16;
var as u8
}
fn write_u8<W: Write>(writer: &mut W, byte: u8) -> Result<u64, Error> {
writer.write(&[byte]).map(|byte_count| byte_count as u64)
}
let mut bufwriter = BufWriter::new(writer);
let mut bytes_written = 0u64; // The number of bytes written into the writer
let mut offset: Option<u64> = None; // The index of the first byte written into the writer
let mut src_index: usize = 0; // Index of the current byte
let mut src_count = bytes.len(); // The number of bytes remaining to be compressed
let mut in_run = false; // Indication whether counting of similar bytes is performed
let mut run_index = 0u8; // Distance into pending bytes that a run starts
let mut bytes_pending = 0u8; // Bytes looked at but not yet output
let mut pending_index = 0usize; // Index of the first pending byte
let mut curr_byte: u8; // Byte currently being considered
let mut last_byte: u8; // Previous byte
// Need at least one byte to compress
if src_count == 0 {
return Err(Error::new(ErrorKind::WriteZero, "write zero"));
}
// Prime compressor with first character.
last_byte = bytes[src_index];
src_index += 1;
bytes_pending += 1;
while src_count - 1 != 0 {
src_count -= 1;
curr_byte = bytes[src_index];
src_index += 1;
bytes_pending += 1;
if in_run {
if (curr_byte != last_byte) || (bytes_pending > MAX_BYTES) {
offset.get_or_insert(write_u8(&mut bufwriter, encode_rept(bytes_pending - 1))?);
write_u8(&mut bufwriter, last_byte)?;
bytes_written += 2;
bytes_pending = 1;
pending_index = src_index - 1;
run_index = 0;
in_run = false;
}
} else {
if bytes_pending > MAX_BYTES {
// We have as much differing data as we can output in one chunk.
// Output MAX_BYTES leaving one byte.
offset.get_or_insert(write_u8(&mut bufwriter, encode_diff(MAX_BYTES))?);
bufwriter.write(&bytes[pending_index..pending_index + MAX_BYTES as usize])?;
bytes_written += 1 + MAX_BYTES as u64;
pending_index += MAX_BYTES as usize;
bytes_pending -= MAX_BYTES;
run_index = bytes_pending - 1; // A run could start here
} else if curr_byte == last_byte {
if (bytes_pending - run_index >= MIN_REPT) || (run_index == 0) {
// This is a worthwhile run
if run_index != 0 {
// Flush differing data out of input buffer
offset.get_or_insert(write_u8(&mut bufwriter, encode_diff(run_index))?);
bufwriter
.write(&bytes[pending_index..pending_index + run_index as usize])?;
bytes_written += 1 + run_index as u64;
}
bytes_pending -= run_index; // Length of run
in_run = true;
}
} else {
run_index = bytes_pending - 1; // A run could start here
}
}
last_byte = curr_byte;
}
// Output the remainder
if in_run {
bytes_written += 2;
offset.get_or_insert(write_u8(&mut bufwriter, encode_rept(bytes_pending))?);
write_u8(&mut bufwriter, last_byte)?;
} else {
bytes_written += 1 + bytes_pending as u64;
offset.get_or_insert(write_u8(&mut bufwriter, encode_diff(bytes_pending))?);
bufwriter.write(&bytes[pending_index..pending_index + bytes_pending as usize])?;
}
bufwriter.flush()?;
Ok(bytes_written)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_packbits_single_byte() {
// compress single byte
const UNCOMPRESSED_DATA: [u8; 1] = [0x3F];
const EXPECTED_COMPRESSED_DATA: [u8; 2] = [0x00, 0x3F];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, &UNCOMPRESSED_DATA)
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
#[test]
fn test_packbits_rept() {
// compress buffer with repetitive sequence
const UNCOMPRESSED_DATA: &'static [u8] =
b"This strrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrring hangs.";
const EXPECTED_COMPRESSED_DATA: &'static [u8] = b"\x06This st\xD1r\x09ing hangs.";
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, UNCOMPRESSED_DATA)
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
#[test]
fn test_packbits_large_rept_nonrept() {
// compress buffer with large repetitive and non-repetitive sequence
let mut data = b"This st".to_vec();
for _i in 0..158 {
data.push(b'r');
}
data.extend_from_slice(b"ing hangs.");
for i in 0..158 {
data.push(i);
}
const EXPECTED_COMPRESSED_DATA: [u8; 182] = [
0x06, 0x54, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74, 0x81, 0x72, 0xE3, 0x72, 0x7F, 0x69,
0x6E, 0x67, 0x20, 0x68, 0x61, 0x6E, 0x67, 0x73, 0x2E, 0x00, 0x01, 0x02, 0x03, 0x04,
0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12,
0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E,
0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C,
0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74,
0x75, 0x27, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81,
0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, data.as_slice())
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
#[test]
fn test_packbits() {
// compress teststring
const EXPECTED_COMPRESSED_DATA: &'static [u8] =
b"\x3CThis is a string for checking various compression algorithms.";
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, TEST_DATA)
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
}

37
vendor/tiff/src/encoder/compression/uncompressed.rs vendored Normal file
View File

@ -0,0 +1,37 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use std::io::Write;
/// The default algorithm which does not compress at all.
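///
/// A minimal sketch of the pass-through behaviour (the returned count equals
/// the number of input bytes):
/// ```
/// use tiff::encoder::compression::{CompressionAlgorithm, Uncompressed};
/// let mut out = Vec::new();
/// let n = Uncompressed::default().write_to(&mut out, b"raw strip").unwrap();
/// assert_eq!(n, 9);
/// assert_eq!(out, b"raw strip".to_vec());
/// ```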
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Uncompressed;
impl Compression for Uncompressed {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::None;
fn get_algorithm(&self) -> Compressor {
Compressor::Uncompressed(*self)
}
}
impl CompressionAlgorithm for Uncompressed {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
writer.write(bytes).map(|byte_count| byte_count as u64)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_no_compression() {
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Uncompressed::default()
.write_to(&mut writer, TEST_DATA)
.unwrap();
assert_eq!(TEST_DATA, compressed_data);
}
}

681
vendor/tiff/src/encoder/mod.rs vendored Normal file
View File

@ -0,0 +1,681 @@
pub use tiff_value::*;
use std::{
cmp,
collections::BTreeMap,
convert::{TryFrom, TryInto},
io::{self, Seek, Write},
marker::PhantomData,
mem,
num::TryFromIntError,
};
use crate::{
error::TiffResult,
tags::{CompressionMethod, ResolutionUnit, Tag},
TiffError, TiffFormatError,
};
pub mod colortype;
pub mod compression;
mod tiff_value;
mod writer;
use self::colortype::*;
use self::compression::*;
use self::writer::*;
/// Encoder for Tiff and BigTiff files.
///
/// With this type you can get a `DirectoryEncoder` or an `ImageEncoder`
/// to encode Tiff/BigTiff ifd directories with images.
///
/// See `DirectoryEncoder` and `ImageEncoder`.
///
/// # Examples
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// # let image_data = vec![0; 100*100*3];
/// use tiff::encoder::*;
///
/// // create a standard Tiff file
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// tiff.write_image::<colortype::RGB8>(100, 100, &image_data).unwrap();
///
/// // create a BigTiff file
/// let mut bigtiff = TiffEncoder::new_big(&mut file).unwrap();
/// bigtiff.write_image::<colortype::RGB8>(100, 100, &image_data).unwrap();
///
/// # }
/// ```
pub struct TiffEncoder<W, K: TiffKind = TiffKindStandard> {
writer: TiffWriter<W>,
kind: PhantomData<K>,
}
/// Constructor functions to create standard Tiff files.
impl<W: Write + Seek> TiffEncoder<W> {
/// Creates a new encoder for standard Tiff files.
///
/// To create BigTiff files, use [`new_big`][TiffEncoder::new_big] or
/// [`new_generic`][TiffEncoder::new_generic].
pub fn new(writer: W) -> TiffResult<TiffEncoder<W, TiffKindStandard>> {
TiffEncoder::new_generic(writer)
}
}
/// Constructor functions to create BigTiff files.
impl<W: Write + Seek> TiffEncoder<W, TiffKindBig> {
/// Creates a new encoder for BigTiff files.
///
/// To create standard Tiff files, use [`new`][TiffEncoder::new] or
/// [`new_generic`][TiffEncoder::new_generic].
pub fn new_big(writer: W) -> TiffResult<Self> {
TiffEncoder::new_generic(writer)
}
}
/// Generic functions that are available for both Tiff and BigTiff encoders.
impl<W: Write + Seek, K: TiffKind> TiffEncoder<W, K> {
/// Creates a new Tiff or BigTiff encoder, inferred from the return type.
pub fn new_generic(writer: W) -> TiffResult<Self> {
let mut encoder = TiffEncoder {
writer: TiffWriter::new(writer),
kind: PhantomData,
};
K::write_header(&mut encoder.writer)?;
Ok(encoder)
}
/// Create a [`DirectoryEncoder`] to encode an ifd directory.
pub fn new_directory(&mut self) -> TiffResult<DirectoryEncoder<W, K>> {
DirectoryEncoder::new(&mut self.writer)
}
/// Create an [`ImageEncoder`] to encode an image one slice at a time.
pub fn new_image<C: ColorType>(
&mut self,
width: u32,
height: u32,
) -> TiffResult<ImageEncoder<W, C, K, Uncompressed>> {
let encoder = DirectoryEncoder::new(&mut self.writer)?;
ImageEncoder::new(encoder, width, height)
}
/// Create an [`ImageEncoder`] to encode an image one slice at a time, using the given compression.
pub fn new_image_with_compression<C: ColorType, D: Compression>(
&mut self,
width: u32,
height: u32,
compression: D,
) -> TiffResult<ImageEncoder<W, C, K, D>> {
let encoder = DirectoryEncoder::new(&mut self.writer)?;
ImageEncoder::with_compression(encoder, width, height, compression)
}
/// Convenience function to write an entire image from memory.
pub fn write_image<C: ColorType>(
&mut self,
width: u32,
height: u32,
data: &[C::Inner],
) -> TiffResult<()>
where
[C::Inner]: TiffValue,
{
let encoder = DirectoryEncoder::new(&mut self.writer)?;
let image: ImageEncoder<W, C, K> = ImageEncoder::new(encoder, width, height)?;
image.write_data(data)
}
/// Convenience function to write an entire image from memory with a given compression.
pub fn write_image_with_compression<C: ColorType, D: Compression>(
&mut self,
width: u32,
height: u32,
compression: D,
data: &[C::Inner],
) -> TiffResult<()>
where
[C::Inner]: TiffValue,
{
let encoder = DirectoryEncoder::new(&mut self.writer)?;
let image: ImageEncoder<W, C, K, D> =
ImageEncoder::with_compression(encoder, width, height, compression)?;
image.write_data(data)
}
}
/// Low level interface to encode ifd directories.
///
/// You should call `finish` on this when you are finished with it.
/// Encoding can silently fail while this is dropping.
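///
/// # Examples
/// A minimal sketch of writing a directory by hand (only names from this
/// crate are used):
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// use tiff::encoder::*;
/// use tiff::tags::Tag;
///
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// let mut dir = tiff.new_directory().unwrap();
/// dir.write_tag(Tag::Software, "image-tiff").unwrap();
/// dir.finish().unwrap();
/// # }
/// ```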
pub struct DirectoryEncoder<'a, W: 'a + Write + Seek, K: TiffKind> {
writer: &'a mut TiffWriter<W>,
dropped: bool,
// We use BTreeMap to make sure tags are written in correct order
ifd_pointer_pos: u64,
ifd: BTreeMap<u16, DirectoryEntry<K::OffsetType>>,
}
impl<'a, W: 'a + Write + Seek, K: TiffKind> DirectoryEncoder<'a, W, K> {
fn new(writer: &'a mut TiffWriter<W>) -> TiffResult<Self> {
// the previous word is the IFD offset position
let ifd_pointer_pos = writer.offset() - mem::size_of::<K::OffsetType>() as u64;
writer.pad_word_boundary()?; // TODO: Do we need to adjust this for BigTiff?
Ok(DirectoryEncoder {
writer,
dropped: false,
ifd_pointer_pos,
ifd: BTreeMap::new(),
})
}
/// Write a single ifd tag.
pub fn write_tag<T: TiffValue>(&mut self, tag: Tag, value: T) -> TiffResult<()> {
let mut bytes = Vec::with_capacity(value.bytes());
{
let mut writer = TiffWriter::new(&mut bytes);
value.write(&mut writer)?;
}
self.ifd.insert(
tag.to_u16(),
DirectoryEntry {
data_type: <T>::FIELD_TYPE.to_u16(),
count: value.count().try_into()?,
data: bytes,
},
);
Ok(())
}
fn write_directory(&mut self) -> TiffResult<u64> {
// Start by writing out all values
for &mut DirectoryEntry {
data: ref mut bytes,
..
} in self.ifd.values_mut()
{
let data_bytes = mem::size_of::<K::OffsetType>();
if bytes.len() > data_bytes {
let offset = self.writer.offset();
self.writer.write_bytes(bytes)?;
*bytes = vec![0; data_bytes];
let mut writer = TiffWriter::new(bytes as &mut [u8]);
K::write_offset(&mut writer, offset)?;
} else {
while bytes.len() < data_bytes {
bytes.push(0);
}
}
}
let offset = self.writer.offset();
K::write_entry_count(&mut self.writer, self.ifd.len())?;
for (
tag,
&DirectoryEntry {
data_type: ref field_type,
ref count,
data: ref offset,
},
) in self.ifd.iter()
{
self.writer.write_u16(*tag)?;
self.writer.write_u16(*field_type)?;
(*count).write(&mut self.writer)?;
self.writer.write_bytes(offset)?;
}
Ok(offset)
}
/// Write some data to the tiff file; the offset of the data is returned.
///
/// This could be used to write tiff strips.
pub fn write_data<T: TiffValue>(&mut self, value: T) -> TiffResult<u64> {
let offset = self.writer.offset();
value.write(&mut self.writer)?;
Ok(offset)
}
/// Provides the number of bytes written by the underlying TiffWriter during the last call.
fn last_written(&self) -> u64 {
self.writer.last_written()
}
fn finish_internal(&mut self) -> TiffResult<()> {
let ifd_pointer = self.write_directory()?;
let curr_pos = self.writer.offset();
self.writer.goto_offset(self.ifd_pointer_pos)?;
K::write_offset(&mut self.writer, ifd_pointer)?;
self.writer.goto_offset(curr_pos)?;
K::write_offset(&mut self.writer, 0)?;
self.dropped = true;
Ok(())
}
/// Write out the ifd directory.
pub fn finish(mut self) -> TiffResult<()> {
self.finish_internal()
}
}
impl<'a, W: Write + Seek, K: TiffKind> Drop for DirectoryEncoder<'a, W, K> {
fn drop(&mut self) {
if !self.dropped {
let _ = self.finish_internal();
}
}
}
/// Type to encode images strip by strip.
///
/// You should call `finish` on this when you are finished with it.
/// Encoding can silently fail while this is dropping.
///
/// # Examples
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// # let image_data = vec![0; 100*100*3];
/// use tiff::encoder::*;
/// use tiff::tags::Tag;
///
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// let mut image = tiff.new_image::<colortype::RGB8>(100, 100).unwrap();
///
/// // You can encode tags here
/// image.encoder().write_tag(Tag::Artist, "Image-tiff").unwrap();
///
/// // Strip size can be configured before writing data
/// image.rows_per_strip(2).unwrap();
///
/// let mut idx = 0;
/// while image.next_strip_sample_count() > 0 {
/// let sample_count = image.next_strip_sample_count() as usize;
/// image.write_strip(&image_data[idx..idx+sample_count]).unwrap();
/// idx += sample_count;
/// }
/// image.finish().unwrap();
/// # }
/// ```
/// You can also call the `write_data` function, which will encode strip by strip and finish the image.
pub struct ImageEncoder<
'a,
W: 'a + Write + Seek,
C: ColorType,
K: TiffKind,
D: Compression = Uncompressed,
> {
encoder: DirectoryEncoder<'a, W, K>,
strip_idx: u64,
strip_count: u64,
row_samples: u64,
width: u32,
height: u32,
rows_per_strip: u64,
strip_offsets: Vec<K::OffsetType>,
strip_byte_count: Vec<K::OffsetType>,
dropped: bool,
compression: D,
_phantom: ::std::marker::PhantomData<C>,
}
impl<'a, W: 'a + Write + Seek, T: ColorType, K: TiffKind, D: Compression>
ImageEncoder<'a, W, T, K, D>
{
fn new(encoder: DirectoryEncoder<'a, W, K>, width: u32, height: u32) -> TiffResult<Self>
where
D: Default,
{
Self::with_compression(encoder, width, height, D::default())
}
fn with_compression(
mut encoder: DirectoryEncoder<'a, W, K>,
width: u32,
height: u32,
compression: D,
) -> TiffResult<Self> {
if width == 0 || height == 0 {
return Err(TiffError::FormatError(TiffFormatError::InvalidDimensions(
width, height,
)));
}
let row_samples = u64::from(width) * u64::try_from(<T>::BITS_PER_SAMPLE.len())?;
let row_bytes = row_samples * u64::from(<T::Inner>::BYTE_LEN);
// Limit the strip size to prevent potential memory and security issues.
// Also keep the multiple strip handling 'oiled'
let rows_per_strip = {
match D::COMPRESSION_METHOD {
CompressionMethod::PackBits => 1, // Each row must be packed separately. Do not compress across row boundaries
_ => (1_000_000 + row_bytes - 1) / row_bytes,
}
};
let strip_count = (u64::from(height) + rows_per_strip - 1) / rows_per_strip;
encoder.write_tag(Tag::ImageWidth, width)?;
encoder.write_tag(Tag::ImageLength, height)?;
encoder.write_tag(Tag::Compression, D::COMPRESSION_METHOD.to_u16())?;
encoder.write_tag(Tag::BitsPerSample, <T>::BITS_PER_SAMPLE)?;
let sample_format: Vec<_> = <T>::SAMPLE_FORMAT.iter().map(|s| s.to_u16()).collect();
encoder.write_tag(Tag::SampleFormat, &sample_format[..])?;
encoder.write_tag(Tag::PhotometricInterpretation, <T>::TIFF_VALUE.to_u16())?;
encoder.write_tag(Tag::RowsPerStrip, u32::try_from(rows_per_strip)?)?;
encoder.write_tag(
Tag::SamplesPerPixel,
u16::try_from(<T>::BITS_PER_SAMPLE.len())?,
)?;
encoder.write_tag(Tag::XResolution, Rational { n: 1, d: 1 })?;
encoder.write_tag(Tag::YResolution, Rational { n: 1, d: 1 })?;
encoder.write_tag(Tag::ResolutionUnit, ResolutionUnit::None.to_u16())?;
Ok(ImageEncoder {
encoder,
strip_count,
strip_idx: 0,
row_samples,
rows_per_strip,
width,
height,
strip_offsets: Vec::new(),
strip_byte_count: Vec::new(),
dropped: false,
compression: compression,
_phantom: ::std::marker::PhantomData,
})
}
/// Number of samples the next strip should have.
pub fn next_strip_sample_count(&self) -> u64 {
if self.strip_idx >= self.strip_count {
return 0;
}
let raw_start_row = self.strip_idx * self.rows_per_strip;
let start_row = cmp::min(u64::from(self.height), raw_start_row);
let end_row = cmp::min(u64::from(self.height), raw_start_row + self.rows_per_strip);
(end_row - start_row) * self.row_samples
}
/// Write a single strip.
pub fn write_strip(&mut self, value: &[T::Inner]) -> TiffResult<()>
where
[T::Inner]: TiffValue,
{
let samples = self.next_strip_sample_count();
if u64::try_from(value.len())? != samples {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Slice is wrong size for strip",
)
.into());
}
// Write the (possibly compressed) data to the encoder.
let offset = self.encoder.write_data(value)?;
let byte_count = self.encoder.last_written() as usize;
self.strip_offsets.push(K::convert_offset(offset)?);
self.strip_byte_count.push(byte_count.try_into()?);
self.strip_idx += 1;
Ok(())
}
/// Write strips from data
pub fn write_data(mut self, data: &[T::Inner]) -> TiffResult<()>
where
[T::Inner]: TiffValue,
{
let num_pix = usize::try_from(self.width)?
.checked_mul(usize::try_from(self.height)?)
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Image width * height exceeds usize",
)
})?;
if data.len() < num_pix {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Input data slice is undersized for provided dimensions",
)
.into());
}
self.encoder
.writer
.set_compression(self.compression.get_algorithm());
let mut idx = 0;
while self.next_strip_sample_count() > 0 {
let sample_count = usize::try_from(self.next_strip_sample_count())?;
self.write_strip(&data[idx..idx + sample_count])?;
idx += sample_count;
}
self.encoder.writer.reset_compression();
self.finish()?;
Ok(())
}
/// Set image resolution
pub fn resolution(&mut self, unit: ResolutionUnit, value: Rational) {
self.encoder
.write_tag(Tag::ResolutionUnit, unit.to_u16())
.unwrap();
self.encoder
.write_tag(Tag::XResolution, value.clone())
.unwrap();
self.encoder.write_tag(Tag::YResolution, value).unwrap();
}
/// Set image resolution unit
pub fn resolution_unit(&mut self, unit: ResolutionUnit) {
self.encoder
.write_tag(Tag::ResolutionUnit, unit.to_u16())
.unwrap();
}
/// Set image x-resolution
pub fn x_resolution(&mut self, value: Rational) {
self.encoder.write_tag(Tag::XResolution, value).unwrap();
}
/// Set image y-resolution
pub fn y_resolution(&mut self, value: Rational) {
self.encoder.write_tag(Tag::YResolution, value).unwrap();
}
/// Set image number of lines per strip
///
/// This function needs to be called before any calls to `write_data` or
/// `write_strip` and will return an error otherwise.
pub fn rows_per_strip(&mut self, value: u32) -> TiffResult<()> {
if self.strip_idx != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot change strip size after data was written",
)
.into());
}
// Write tag as 32 bits
self.encoder.write_tag(Tag::RowsPerStrip, value)?;
let value: u64 = value as u64;
self.strip_count = (self.height as u64 + value - 1) / value;
self.rows_per_strip = value;
Ok(())
}
fn finish_internal(&mut self) -> TiffResult<()> {
self.encoder
.write_tag(Tag::StripOffsets, K::convert_slice(&self.strip_offsets))?;
self.encoder.write_tag(
Tag::StripByteCounts,
K::convert_slice(&self.strip_byte_count),
)?;
self.dropped = true;
self.encoder.finish_internal()
}
/// Get a mutable reference to the underlying `DirectoryEncoder`
pub fn encoder(&mut self) -> &mut DirectoryEncoder<'a, W, K> {
&mut self.encoder
}
/// Write out image and ifd directory.
pub fn finish(mut self) -> TiffResult<()> {
self.finish_internal()
}
}
impl<'a, W: Write + Seek, C: ColorType, K: TiffKind, D: Compression> Drop
for ImageEncoder<'a, W, C, K, D>
{
fn drop(&mut self) {
if !self.dropped {
let _ = self.finish_internal();
}
}
}
struct DirectoryEntry<S> {
data_type: u16,
count: S,
data: Vec<u8>,
}
/// Trait to abstract over Tiff/BigTiff differences.
///
/// Implemented for [`TiffKindStandard`] and [`TiffKindBig`].
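///
/// A small sketch of the offset-conversion difference (classic Tiff offsets
/// are limited to `u32`):
/// ```
/// use tiff::encoder::{TiffKind, TiffKindBig, TiffKindStandard};
/// let too_big = u64::from(u32::MAX) + 1;
/// assert!(TiffKindStandard::convert_offset(too_big).is_err());
/// assert!(TiffKindBig::convert_offset(too_big).is_ok());
/// ```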
pub trait TiffKind {
/// The type of offset fields, `u32` for normal Tiff, `u64` for BigTiff.
type OffsetType: TryFrom<usize, Error = TryFromIntError> + Into<u64> + TiffValue;
/// Needed for the `convert_slice` method.
type OffsetArrayType: ?Sized + TiffValue;
/// Write the (Big)Tiff header.
fn write_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()>;
/// Convert a file offset to `Self::OffsetType`.
///
/// This returns an error for normal Tiff if the offset is larger than `u32::MAX`.
fn convert_offset(offset: u64) -> TiffResult<Self::OffsetType>;
/// Write an offset value to the given writer.
///
/// Like `convert_offset`, this errors if `offset > u32::MAX` for normal Tiff.
fn write_offset<W: Write>(writer: &mut TiffWriter<W>, offset: u64) -> TiffResult<()>;
/// Write the IFD entry count field with the given `count` value.
///
/// The entry count field is an `u16` for normal Tiff and `u64` for BigTiff. Errors
/// if the given `usize` is larger than the representable values.
fn write_entry_count<W: Write>(writer: &mut TiffWriter<W>, count: usize) -> TiffResult<()>;
/// Internal helper method for satisfying Rust's type checker.
///
/// The `TiffValue` trait is implemented for both primitive values (e.g. `u8`, `u32`) and
/// slices of primitive values (e.g. `[u8]`, `[u32]`). However, this is not represented in
/// the type system, so there is no guarantee that for all `T: TiffValue` there is also
/// an implementation of `TiffValue` for `[T]`. This method works around that problem by
/// providing a conversion from `[T]` to some value that implements `TiffValue`, thereby
/// making all slices of `OffsetType` usable with `write_tag` and similar methods.
///
/// Implementations of this trait should always set `OffsetArrayType` to `[OffsetType]`.
fn convert_slice(slice: &[Self::OffsetType]) -> &Self::OffsetArrayType;
}
/// Create a standard Tiff file.
pub struct TiffKindStandard;
impl TiffKind for TiffKindStandard {
type OffsetType = u32;
type OffsetArrayType = [u32];
fn write_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
write_tiff_header(writer)?;
// blank the IFD offset location
writer.write_u32(0)?;
Ok(())
}
fn convert_offset(offset: u64) -> TiffResult<Self::OffsetType> {
Ok(Self::OffsetType::try_from(offset)?)
}
fn write_offset<W: Write>(writer: &mut TiffWriter<W>, offset: u64) -> TiffResult<()> {
writer.write_u32(u32::try_from(offset)?)?;
Ok(())
}
fn write_entry_count<W: Write>(writer: &mut TiffWriter<W>, count: usize) -> TiffResult<()> {
writer.write_u16(u16::try_from(count)?)?;
Ok(())
}
fn convert_slice(slice: &[Self::OffsetType]) -> &Self::OffsetArrayType {
slice
}
}
/// Create a BigTiff file.
pub struct TiffKindBig;
impl TiffKind for TiffKindBig {
type OffsetType = u64;
type OffsetArrayType = [u64];
fn write_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
write_bigtiff_header(writer)?;
// blank the IFD offset location
writer.write_u64(0)?;
Ok(())
}
fn convert_offset(offset: u64) -> TiffResult<Self::OffsetType> {
Ok(offset)
}
fn write_offset<W: Write>(writer: &mut TiffWriter<W>, offset: u64) -> TiffResult<()> {
writer.write_u64(offset)?;
Ok(())
}
fn write_entry_count<W: Write>(writer: &mut TiffWriter<W>, count: usize) -> TiffResult<()> {
writer.write_u64(u64::try_from(count)?)?;
Ok(())
}
fn convert_slice(slice: &[Self::OffsetType]) -> &Self::OffsetArrayType {
slice
}
}

523
vendor/tiff/src/encoder/tiff_value.rs vendored Normal file
View File

@ -0,0 +1,523 @@
use std::{borrow::Cow, io::Write, slice::from_ref};
use crate::{bytecast, tags::Type, TiffError, TiffFormatError, TiffResult};
use super::writer::TiffWriter;
/// Trait for types that can be encoded in a tiff file
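///
/// A minimal sketch of the size bookkeeping (a `SHORT` sample is two bytes):
/// ```
/// use tiff::encoder::TiffValue;
/// let samples: &[u16] = &[1, 2, 3];
/// assert_eq!(samples.count(), 3);
/// assert_eq!(samples.bytes(), 6);
/// ```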
pub trait TiffValue {
const BYTE_LEN: u8;
const FIELD_TYPE: Type;
fn count(&self) -> usize;
fn bytes(&self) -> usize {
self.count() * usize::from(Self::BYTE_LEN)
}
/// Access this value as a contiguous sequence of bytes.
/// If there is no trivial representation, it is allocated on the heap.
fn data(&self) -> Cow<[u8]>;
/// Write this value to a TiffWriter.
/// While the default implementation will work in all cases, it may require unnecessary allocations.
/// The written bytes of any custom implementation MUST be the same as yielded by `self.data()`.
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_bytes(&self.data())?;
Ok(())
}
}
impl TiffValue for [u8] {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::BYTE;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(self)
}
}
impl TiffValue for [i8] {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::SBYTE;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i8_as_ne_bytes(self))
}
}
impl TiffValue for [u16] {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SHORT;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u16_as_ne_bytes(self))
}
}
impl TiffValue for [i16] {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SSHORT;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i16_as_ne_bytes(self))
}
}
impl TiffValue for [u32] {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::LONG;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u32_as_ne_bytes(self))
}
}
impl TiffValue for [i32] {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::SLONG;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i32_as_ne_bytes(self))
}
}
impl TiffValue for [u64] {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::LONG8;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u64_as_ne_bytes(self))
}
}
impl TiffValue for [i64] {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::SLONG8;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i64_as_ne_bytes(self))
}
}
impl TiffValue for [f32] {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::FLOAT;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
// We write using native endian so this should be safe
Cow::Borrowed(bytecast::f32_as_ne_bytes(self))
}
}
impl TiffValue for [f64] {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::DOUBLE;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
// We write using native endian so this should be safe
Cow::Borrowed(bytecast::f64_as_ne_bytes(self))
}
}
impl TiffValue for u8 {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::BYTE;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u8(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(from_ref(self))
}
}
impl TiffValue for i8 {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::SBYTE;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i8(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i8_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for u16 {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SHORT;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u16(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u16_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for i16 {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SSHORT;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i16(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i16_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for u32 {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::LONG;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u32(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u32_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for i32 {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::SLONG;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i32(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i32_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for u64 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::LONG8;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u64(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u64_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for i64 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::SLONG8;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i64(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i64_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for f32 {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::FLOAT;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_f32(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::f32_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for f64 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::DOUBLE;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_f64(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::f64_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for Ifd {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::IFD;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u32(self.0)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u32_as_ne_bytes(from_ref(&self.0)))
}
}
impl TiffValue for Ifd8 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::IFD8;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u64(self.0)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u64_as_ne_bytes(from_ref(&self.0)))
}
}
impl TiffValue for Rational {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::RATIONAL;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u32(self.n)?;
writer.write_u32(self.d)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Owned({
let first_dword = bytecast::u32_as_ne_bytes(from_ref(&self.n));
let second_dword = bytecast::u32_as_ne_bytes(from_ref(&self.d));
[first_dword, second_dword].concat()
})
}
}
impl TiffValue for SRational {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::SRATIONAL;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i32(self.n)?;
writer.write_i32(self.d)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Owned({
let first_dword = bytecast::i32_as_ne_bytes(from_ref(&self.n));
let second_dword = bytecast::i32_as_ne_bytes(from_ref(&self.d));
[first_dword, second_dword].concat()
})
}
}
impl TiffValue for str {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::ASCII;
fn count(&self) -> usize {
self.len() + 1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
if self.is_ascii() && !self.bytes().any(|b| b == 0) {
writer.write_bytes(self.as_bytes())?;
writer.write_u8(0)?;
Ok(())
} else {
Err(TiffError::FormatError(TiffFormatError::InvalidTag))
}
}
fn data(&self) -> Cow<[u8]> {
Cow::Owned({
if self.is_ascii() && !self.bytes().any(|b| b == 0) {
let bytes: &[u8] = self.as_bytes();
[bytes, &[0]].concat()
} else {
vec![]
}
})
}
}
impl<'a, T: TiffValue + ?Sized> TiffValue for &'a T {
const BYTE_LEN: u8 = T::BYTE_LEN;
const FIELD_TYPE: Type = T::FIELD_TYPE;
fn count(&self) -> usize {
(*self).count()
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
(*self).write(writer)
}
fn data(&self) -> Cow<[u8]> {
T::data(self)
}
}
macro_rules! impl_tiff_value_for_contiguous_sequence {
($inner_type:ty; $bytes:expr; $field_type:expr) => {
impl $crate::encoder::TiffValue for [$inner_type] {
const BYTE_LEN: u8 = $bytes;
const FIELD_TYPE: Type = $field_type;
fn count(&self) -> usize {
self.len()
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
for x in self {
x.write(writer)?;
}
Ok(())
}
fn data(&self) -> Cow<[u8]> {
let mut buf: Vec<u8> = Vec::with_capacity(Self::BYTE_LEN as usize * self.len());
for x in self {
buf.extend_from_slice(&x.data());
}
Cow::Owned(buf)
}
}
};
}
impl_tiff_value_for_contiguous_sequence!(Ifd; 4; Type::IFD);
impl_tiff_value_for_contiguous_sequence!(Ifd8; 8; Type::IFD8);
impl_tiff_value_for_contiguous_sequence!(Rational; 8; Type::RATIONAL);
impl_tiff_value_for_contiguous_sequence!(SRational; 8; Type::SRATIONAL);
/// Type to represent tiff values of type `IFD`
#[derive(Clone)]
pub struct Ifd(pub u32);
/// Type to represent tiff values of type `IFD8`
#[derive(Clone)]
pub struct Ifd8(pub u64);
/// Type to represent tiff values of type `RATIONAL`
#[derive(Clone)]
pub struct Rational {
pub n: u32,
pub d: u32,
}
/// Type to represent tiff values of type `SRATIONAL`
#[derive(Clone)]
pub struct SRational {
pub n: i32,
pub d: i32,
}

188
vendor/tiff/src/encoder/writer.rs vendored Normal file
View File

@ -0,0 +1,188 @@
use crate::encoder::compression::*;
use crate::error::TiffResult;
use std::io::{self, Seek, SeekFrom, Write};
pub fn write_tiff_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
#[cfg(target_endian = "little")]
let boi: u8 = 0x49; // b'I': "II" marks a little-endian file
#[cfg(not(target_endian = "little"))]
let boi: u8 = 0x4d; // b'M': "MM" marks a big-endian file
// byte order indication
writer.writer.write_all(&[boi, boi])?;
// magic number 42 identifying a classic TIFF file
writer.writer.write_all(&42u16.to_ne_bytes())?;
// we wrote 4 bytes, so set the internal offset accordingly
writer.offset += 4;
Ok(())
}
/// Writes a BigTiff header, excluding the IFD offset field.
///
/// Writes the byte order, version number, offset byte size, and zero constant fields. Does
/// _not_ write the offset to the first IFD; this should be done by the caller.
pub fn write_bigtiff_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
#[cfg(target_endian = "little")]
let boi: u8 = 0x49;
#[cfg(not(target_endian = "little"))]
let boi: u8 = 0x4d;
// byte order indication
writer.writer.write_all(&[boi, boi])?;
// version number
writer.writer.write_all(&43u16.to_ne_bytes())?;
// bytesize of offsets (pointer size)
writer.writer.write_all(&8u16.to_ne_bytes())?;
// always 0
writer.writer.write_all(&0u16.to_ne_bytes())?;
// we wrote 8 bytes, so set the internal offset accordingly
writer.offset += 8;
Ok(())
}
pub struct TiffWriter<W> {
writer: W,
offset: u64,
byte_count: u64,
compressor: Compressor,
}
impl<W: Write> TiffWriter<W> {
pub fn new(writer: W) -> Self {
Self {
writer,
offset: 0,
byte_count: 0,
compressor: Compressor::default(),
}
}
pub fn set_compression(&mut self, compressor: Compressor) {
self.compressor = compressor;
}
pub fn reset_compression(&mut self) {
self.compressor = Compressor::default();
}
pub fn offset(&self) -> u64 {
self.offset
}
pub fn last_written(&self) -> u64 {
self.byte_count
}
pub fn write_bytes(&mut self, bytes: &[u8]) -> Result<(), io::Error> {
self.byte_count = self.compressor.write_to(&mut self.writer, bytes)?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u8(&mut self, n: u8) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i8(&mut self, n: i8) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u16(&mut self, n: u16) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i16(&mut self, n: i16) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u32(&mut self, n: u32) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i32(&mut self, n: i32) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u64(&mut self, n: u64) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i64(&mut self, n: i64) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_f32(&mut self, n: f32) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &u32::to_ne_bytes(n.to_bits()))?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_f64(&mut self, n: f64) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &u64::to_ne_bytes(n.to_bits()))?;
self.offset += self.byte_count;
Ok(())
}
pub fn pad_word_boundary(&mut self) -> Result<(), io::Error> {
if self.offset % 4 != 0 {
let padding = [0, 0, 0];
let padd_len = 4 - (self.offset % 4);
self.writer.write_all(&padding[..padd_len as usize])?;
self.offset += padd_len;
}
Ok(())
}
}
impl<W: Seek> TiffWriter<W> {
pub fn goto_offset(&mut self, offset: u64) -> Result<(), io::Error> {
self.offset = offset;
self.writer.seek(SeekFrom::Start(offset as u64))?;
Ok(())
}
}

369
vendor/tiff/src/error.rs vendored Normal file
View File

@ -0,0 +1,369 @@
use std::error::Error;
use std::fmt;
use std::fmt::Display;
use std::io;
use std::str;
use std::string;
use std::sync::Arc;
use jpeg::UnsupportedFeature;
use crate::decoder::{ifd::Value, ChunkType};
use crate::tags::{
CompressionMethod, PhotometricInterpretation, PlanarConfiguration, SampleFormat, Tag,
};
use crate::ColorType;
use crate::weezl::LzwError;
/// Tiff error kinds.
#[derive(Debug)]
pub enum TiffError {
/// The Image is not formatted properly.
FormatError(TiffFormatError),
/// The Decoder does not support features required by the image.
UnsupportedError(TiffUnsupportedError),
/// An I/O Error occurred while decoding the image.
IoError(io::Error),
/// The limits of the Decoder are exceeded.
LimitsExceeded,
/// An integer conversion to or from a platform size failed, either due to
/// limits of the platform size or limits of the format.
IntSizeError,
/// The image does not support the requested operation
UsageError(UsageError),
}
/// The image is not formatted properly.
///
/// This indicates that the encoder producing the image might behave incorrectly or that the input
/// file has been corrupted.
///
/// The list of variants may grow to incorporate errors of future features. Matching against this
/// exhaustively is not covered by interface stability guarantees.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum TiffFormatError {
TiffSignatureNotFound,
TiffSignatureInvalid,
ImageFileDirectoryNotFound,
InconsistentSizesEncountered,
UnexpectedCompressedData {
actual_bytes: usize,
required_bytes: usize,
},
InconsistentStripSamples {
actual_samples: usize,
required_samples: usize,
},
InvalidDimensions(u32, u32),
InvalidTag,
InvalidTagValueType(Tag),
RequiredTagNotFound(Tag),
UnknownPredictor(u16),
ByteExpected(Value),
UnsignedIntegerExpected(Value),
SignedIntegerExpected(Value),
Format(String),
RequiredTagEmpty(Tag),
StripTileTagConflict,
CycleInOffsets,
JpegDecoder(JpegDecoderError),
}
impl fmt::Display for TiffFormatError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::TiffFormatError::*;
match *self {
TiffSignatureNotFound => write!(fmt, "TIFF signature not found."),
TiffSignatureInvalid => write!(fmt, "TIFF signature invalid."),
ImageFileDirectoryNotFound => write!(fmt, "Image file directory not found."),
InconsistentSizesEncountered => write!(fmt, "Inconsistent sizes encountered."),
UnexpectedCompressedData {
actual_bytes,
required_bytes,
} => {
write!(
fmt,
"Decompression returned different amount of bytes than expected: got {}, expected {}.",
actual_bytes, required_bytes
)
}
InconsistentStripSamples {
actual_samples,
required_samples,
} => {
write!(
fmt,
"Inconsistent elements in strip: got {}, expected {}.",
actual_samples, required_samples
)
}
InvalidDimensions(width, height) => write!(fmt, "Invalid dimensions: {}x{}.", width, height),
InvalidTag => write!(fmt, "Image contains invalid tag."),
InvalidTagValueType(ref tag) => {
write!(fmt, "Tag `{:?}` did not have the expected value type.", tag)
}
RequiredTagNotFound(ref tag) => write!(fmt, "Required tag `{:?}` not found.", tag),
UnknownPredictor(ref predictor) => {
write!(fmt, "Unknown predictor “{}” encountered", predictor)
}
ByteExpected(ref val) => write!(fmt, "Expected byte, {:?} found.", val),
UnsignedIntegerExpected(ref val) => {
write!(fmt, "Expected unsigned integer, {:?} found.", val)
}
SignedIntegerExpected(ref val) => {
write!(fmt, "Expected signed integer, {:?} found.", val)
}
Format(ref val) => write!(fmt, "Invalid format: {:?}.", val),
RequiredTagEmpty(ref val) => write!(fmt, "Required tag {:?} was empty.", val),
StripTileTagConflict => write!(fmt, "File should contain either (StripByteCounts and StripOffsets) or (TileByteCounts and TileOffsets), other combination was found."),
CycleInOffsets => write!(fmt, "File contained a cycle in the list of IFDs"),
JpegDecoder(ref error) => write!(fmt, "{}", error),
}
}
}
/// The Decoder does not support features required by the image.
///
/// This only captures known failures for which the standard either does not require support or an
/// implementation has been planned but not yet completed. Some variants may become unused over
/// time and will then get deprecated before being removed.
///
/// The list of variants may grow. Matching against this exhaustively is not covered by interface
/// stability guarantees.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum TiffUnsupportedError {
FloatingPointPredictor(ColorType),
HorizontalPredictor(ColorType),
InterpretationWithBits(PhotometricInterpretation, Vec<u8>),
UnknownInterpretation,
UnknownCompressionMethod,
UnsupportedCompressionMethod(CompressionMethod),
UnsupportedSampleDepth(u8),
UnsupportedSampleFormat(Vec<SampleFormat>),
UnsupportedColorType(ColorType),
UnsupportedBitsPerChannel(u8),
UnsupportedPlanarConfig(Option<PlanarConfiguration>),
UnsupportedDataType,
UnsupportedInterpretation(PhotometricInterpretation),
UnsupportedJpegFeature(UnsupportedFeature),
}
impl fmt::Display for TiffUnsupportedError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::TiffUnsupportedError::*;
match *self {
FloatingPointPredictor(color_type) => write!(
fmt,
"Floating point predictor for {:?} is unsupported.",
color_type
),
HorizontalPredictor(color_type) => write!(
fmt,
"Horizontal predictor for {:?} is unsupported.",
color_type
),
InterpretationWithBits(ref photometric_interpretation, ref bits_per_sample) => write!(
fmt,
"{:?} with {:?} bits per sample is unsupported",
photometric_interpretation, bits_per_sample
),
UnknownInterpretation => write!(
fmt,
"The image is using an unknown photometric interpretation."
),
UnknownCompressionMethod => write!(fmt, "Unknown compression method."),
UnsupportedCompressionMethod(method) => {
write!(fmt, "Compression method {:?} is unsupported", method)
}
UnsupportedSampleDepth(samples) => {
write!(fmt, "{} samples per pixel is unsupported.", samples)
}
UnsupportedSampleFormat(ref formats) => {
write!(fmt, "Sample format {:?} is unsupported.", formats)
}
UnsupportedColorType(color_type) => {
write!(fmt, "Color type {:?} is unsupported", color_type)
}
UnsupportedBitsPerChannel(bits) => {
write!(fmt, "{} bits per channel not supported", bits)
}
UnsupportedPlanarConfig(config) => {
write!(fmt, "Unsupported planar configuration “{:?}”.", config)
}
UnsupportedDataType => write!(fmt, "Unsupported data type."),
UnsupportedInterpretation(interpretation) => {
write!(
fmt,
"Unsupported photometric interpretation \"{:?}\".",
interpretation
)
}
UnsupportedJpegFeature(ref unsupported_feature) => {
write!(fmt, "Unsupported JPEG feature {:?}", unsupported_feature)
}
}
}
}
/// User attempted to use the Decoder in a way that is incompatible with a specific image.
///
/// For example: attempting to read a tile from a stripped image.
#[derive(Debug)]
pub enum UsageError {
InvalidChunkType(ChunkType, ChunkType),
InvalidChunkIndex(u32),
}
impl fmt::Display for UsageError {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::UsageError::*;
match *self {
InvalidChunkType(expected, actual) => {
write!(
fmt,
"Requested operation is only valid for images with chunk encoding of type: {:?}, got {:?}.",
expected, actual
)
}
InvalidChunkIndex(index) => write!(fmt, "Image chunk index ({}) requested.", index),
}
}
}
impl fmt::Display for TiffError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
TiffError::FormatError(ref e) => write!(fmt, "Format error: {}", e),
TiffError::UnsupportedError(ref f) => write!(
fmt,
"The Decoder does not support the \
image format `{}`",
f
),
TiffError::IoError(ref e) => e.fmt(fmt),
TiffError::LimitsExceeded => write!(fmt, "The Decoder limits are exceeded"),
TiffError::IntSizeError => write!(fmt, "Platform or format size limits exceeded"),
TiffError::UsageError(ref e) => write!(fmt, "Usage error: {}", e),
}
}
}
impl Error for TiffError {
fn description(&self) -> &str {
match *self {
TiffError::FormatError(..) => "Format error",
TiffError::UnsupportedError(..) => "Unsupported error",
TiffError::IoError(..) => "IO error",
TiffError::LimitsExceeded => "Decoder limits exceeded",
TiffError::IntSizeError => "Platform or format size limits exceeded",
TiffError::UsageError(..) => "Invalid usage",
}
}
fn cause(&self) -> Option<&dyn Error> {
match *self {
TiffError::IoError(ref e) => Some(e),
_ => None,
}
}
}
impl From<io::Error> for TiffError {
fn from(err: io::Error) -> TiffError {
TiffError::IoError(err)
}
}
impl From<str::Utf8Error> for TiffError {
fn from(_err: str::Utf8Error) -> TiffError {
TiffError::FormatError(TiffFormatError::InvalidTag)
}
}
impl From<string::FromUtf8Error> for TiffError {
fn from(_err: string::FromUtf8Error) -> TiffError {
TiffError::FormatError(TiffFormatError::InvalidTag)
}
}
impl From<TiffFormatError> for TiffError {
fn from(err: TiffFormatError) -> TiffError {
TiffError::FormatError(err)
}
}
impl From<TiffUnsupportedError> for TiffError {
fn from(err: TiffUnsupportedError) -> TiffError {
TiffError::UnsupportedError(err)
}
}
impl From<UsageError> for TiffError {
fn from(err: UsageError) -> TiffError {
TiffError::UsageError(err)
}
}
impl From<std::num::TryFromIntError> for TiffError {
fn from(_err: std::num::TryFromIntError) -> TiffError {
TiffError::IntSizeError
}
}
impl From<LzwError> for TiffError {
fn from(err: LzwError) -> TiffError {
match err {
LzwError::InvalidCode => TiffError::FormatError(TiffFormatError::Format(String::from(
"LZW compressed data corrupted",
))),
}
}
}
#[derive(Debug, Clone)]
pub struct JpegDecoderError {
inner: Arc<jpeg::Error>,
}
impl JpegDecoderError {
fn new(error: jpeg::Error) -> Self {
Self {
inner: Arc::new(error),
}
}
}
impl PartialEq for JpegDecoderError {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.inner, &other.inner)
}
}
impl Display for JpegDecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.inner.fmt(f)
}
}
impl From<JpegDecoderError> for TiffError {
fn from(error: JpegDecoderError) -> Self {
TiffError::FormatError(TiffFormatError::JpegDecoder(error))
}
}
impl From<jpeg::Error> for TiffError {
fn from(error: jpeg::Error) -> Self {
JpegDecoderError::new(error).into()
}
}
/// Result of an image decoding/encoding process
pub type TiffResult<T> = Result<T, TiffError>;

43
vendor/tiff/src/lib.rs vendored Normal file
View File

@ -0,0 +1,43 @@
//! Decoding and Encoding of TIFF Images
//!
//! TIFF (Tagged Image File Format) is a versatile image format that supports
//! lossless and lossy compression.
//!
//! # Related Links
//! * <https://web.archive.org/web/20210108073850/https://www.adobe.io/open/standards/TIFF.html> - The TIFF specification
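//!
//! # Examples
//!
//! A minimal encoding sketch, mirroring the example in the `encoder` module:
//!
//! ```
//! use tiff::encoder::{colortype, TiffEncoder};
//!
//! let mut file = std::io::Cursor::new(Vec::new());
//! let image_data = vec![0u8; 100 * 100 * 3];
//! let mut tiff = TiffEncoder::new(&mut file).unwrap();
//! tiff.write_image::<colortype::RGB8>(100, 100, &image_data).unwrap();
//! ```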
extern crate jpeg;
extern crate weezl;
mod bytecast;
pub mod decoder;
pub mod encoder;
mod error;
pub mod tags;
pub use self::error::{TiffError, TiffFormatError, TiffResult, TiffUnsupportedError, UsageError};
/// An enumeration over supported color types and their bit depths
#[derive(Copy, PartialEq, Eq, Debug, Clone, Hash)]
pub enum ColorType {
/// Pixel is grayscale
Gray(u8),
/// Pixel contains R, G and B channels
RGB(u8),
/// Pixel is an index into a color palette
Palette(u8),
/// Pixel is grayscale with an alpha channel
GrayA(u8),
/// Pixel is RGB with an alpha channel
RGBA(u8),
/// Pixel is CMYK
CMYK(u8),
/// Pixel is YCbCr
YCbCr(u8),
}

234
vendor/tiff/src/tags.rs vendored Normal file
View File

@ -0,0 +1,234 @@
macro_rules! tags {
{
// Permit arbitrary meta items, which include documentation.
$( #[$enum_attr:meta] )*
$vis:vis enum $name:ident($ty:tt) $(unknown($unknown_doc:literal))* {
// Each of the `Name = Val,` permitting documentation.
$($(#[$ident_attr:meta])* $tag:ident = $val:expr,)*
}
} => {
$( #[$enum_attr] )*
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
#[non_exhaustive]
pub enum $name {
$($(#[$ident_attr])* $tag,)*
$(
#[doc = $unknown_doc]
Unknown($ty),
)*
}
impl $name {
#[inline(always)]
fn __from_inner_type(n: $ty) -> Result<Self, $ty> {
match n {
$( $val => Ok($name::$tag), )*
n => Err(n),
}
}
#[inline(always)]
fn __to_inner_type(&self) -> $ty {
match *self {
$( $name::$tag => $val, )*
$( $name::Unknown(n) => { $unknown_doc; n }, )*
}
}
}
tags!($name, $ty, $($unknown_doc)*);
};
// For u16 tags, provide direct inherent primitive conversion methods.
($name:tt, u16, $($unknown_doc:literal)*) => {
impl $name {
#[inline(always)]
pub fn from_u16(val: u16) -> Option<Self> {
Self::__from_inner_type(val).ok()
}
$(
#[inline(always)]
pub fn from_u16_exhaustive(val: u16) -> Self {
$unknown_doc;
Self::__from_inner_type(val).unwrap_or_else(|_| $name::Unknown(val))
}
)*
#[inline(always)]
pub fn to_u16(&self) -> u16 {
Self::__to_inner_type(self)
}
}
};
// For other tag types, do nothing for now. With concat_idents one could
// provide inherent conversion methods for all types.
($name:tt, $ty:tt, $($unknown_doc:literal)*) => {};
}
// Note: These tags appear in the order they are mentioned in the TIFF reference
tags! {
/// TIFF tags
pub enum Tag(u16) unknown("A private or extension tag") {
// Baseline tags:
Artist = 315,
// grayscale and palette-color images (PhotometricInterpretation 1 or 3)
BitsPerSample = 258,
CellLength = 265, // TODO add support
CellWidth = 264, // TODO add support
// palette-color images (PhotometricInterpretation 3)
ColorMap = 320, // TODO add support
Compression = 259, // TODO add support for 2 and 32773
Copyright = 33_432,
DateTime = 306,
ExtraSamples = 338, // TODO add support
FillOrder = 266, // TODO add support
FreeByteCounts = 289, // TODO add support
FreeOffsets = 288, // TODO add support
GrayResponseCurve = 291, // TODO add support
GrayResponseUnit = 290, // TODO add support
HostComputer = 316,
ImageDescription = 270,
ImageLength = 257,
ImageWidth = 256,
Make = 271,
MaxSampleValue = 281, // TODO add support
MinSampleValue = 280, // TODO add support
Model = 272,
NewSubfileType = 254, // TODO add support
Orientation = 274, // TODO add support
PhotometricInterpretation = 262,
PlanarConfiguration = 284,
ResolutionUnit = 296, // TODO add support
RowsPerStrip = 278,
SamplesPerPixel = 277,
Software = 305,
StripByteCounts = 279,
StripOffsets = 273,
SubfileType = 255, // TODO add support
Threshholding = 263, // TODO add support
XResolution = 282,
YResolution = 283,
// Advanced tags
Predictor = 317,
TileWidth = 322,
TileLength = 323,
TileOffsets = 324,
TileByteCounts = 325,
// Data Sample Format
SampleFormat = 339,
SMinSampleValue = 340, // TODO add support
SMaxSampleValue = 341, // TODO add support
// JPEG
JPEGTables = 347,
// GeoTIFF
ModelPixelScaleTag = 33550, // (SoftDesk)
ModelTransformationTag = 34264, // (JPL Carto Group)
ModelTiepointTag = 33922, // (Intergraph)
GeoKeyDirectoryTag = 34735, // (SPOT)
GeoDoubleParamsTag = 34736, // (SPOT)
GeoAsciiParamsTag = 34737, // (SPOT)
GdalNodata = 42113, // Marks the value used for pixels with missing data (GDAL extension)
}
}
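// A small usage sketch, not part of the vendored file, of the conversions that the
// `tags!` macro generates for `u16`-backed enums: known values round-trip through
// `from_u16`/`to_u16`, and `from_u16_exhaustive` (generated because of the
// `unknown(..)` arm) falls back to `Tag::Unknown` instead of failing.
#[cfg(test)]
mod tag_conversion_sketch {
    use super::Tag;

    #[test]
    fn known_and_unknown_tags() {
        assert_eq!(Tag::from_u16(256), Some(Tag::ImageWidth));
        assert_eq!(Tag::ImageWidth.to_u16(), 256);
        assert_eq!(Tag::from_u16(65000), None);
        assert_eq!(Tag::from_u16_exhaustive(65000), Tag::Unknown(65000));
    }
}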
tags! {
/// The type of an IFD entry (a 2 byte field).
pub enum Type(u16) {
/// 8-bit unsigned integer
BYTE = 1,
/// 8-bit byte that contains a 7-bit ASCII code; the last byte must be zero
ASCII = 2,
/// 16-bit unsigned integer
SHORT = 3,
/// 32-bit unsigned integer
LONG = 4,
/// Fraction stored as two 32-bit unsigned integers
RATIONAL = 5,
/// 8-bit signed integer
SBYTE = 6,
/// 8-bit byte that may contain anything, depending on the field
UNDEFINED = 7,
/// 16-bit signed integer
SSHORT = 8,
/// 32-bit signed integer
SLONG = 9,
/// Fraction stored as two 32-bit signed integers
SRATIONAL = 10,
/// 32-bit IEEE floating point
FLOAT = 11,
/// 64-bit IEEE floating point
DOUBLE = 12,
/// 32-bit unsigned integer (offset)
IFD = 13,
/// BigTIFF 64-bit unsigned integer
LONG8 = 16,
/// BigTIFF 64-bit signed integer
SLONG8 = 17,
/// BigTIFF 64-bit unsigned integer (offset)
IFD8 = 18,
}
}
tags! {
/// See [TIFF compression tags](https://www.awaresystems.be/imaging/tiff/tifftags/compression.html)
/// for reference.
pub enum CompressionMethod(u16) {
None = 1,
Huffman = 2,
Fax3 = 3,
Fax4 = 4,
LZW = 5,
JPEG = 6,
// "Extended JPEG" or "new JPEG" style
ModernJPEG = 7,
Deflate = 8,
OldDeflate = 0x80B2,
PackBits = 0x8005,
}
}
tags! {
pub enum PhotometricInterpretation(u16) {
WhiteIsZero = 0,
BlackIsZero = 1,
RGB = 2,
RGBPalette = 3,
TransparencyMask = 4,
CMYK = 5,
YCbCr = 6,
CIELab = 8,
}
}
tags! {
pub enum PlanarConfiguration(u16) {
Chunky = 1,
Planar = 2,
}
}
tags! {
pub enum Predictor(u16) {
None = 1,
Horizontal = 2,
FloatingPoint = 3,
}
}
tags! {
/// Type to represent resolution units
pub enum ResolutionUnit(u16) {
None = 1,
Inch = 2,
Centimeter = 3,
}
}
tags! {
pub enum SampleFormat(u16) unknown("An unknown extension sample format") {
Uint = 1,
Int = 2,
IEEEFP = 3,
Void = 4,
}
}
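// Hedged usage sketch, not part of the upstream source: `SampleFormat` also uses the
// `unknown(..)` arm, so unrecognised extension values are preserved as
// `SampleFormat::Unknown(n)`, while enums without that arm (e.g. `CompressionMethod`)
// only get the fallible `from_u16`.
#[cfg(test)]
mod sample_format_sketch {
    use super::{CompressionMethod, SampleFormat};

    #[test]
    fn extension_sample_formats_are_preserved() {
        assert_eq!(SampleFormat::from_u16(3), Some(SampleFormat::IEEEFP));
        assert_eq!(SampleFormat::from_u16_exhaustive(200), SampleFormat::Unknown(200));
        assert_eq!(CompressionMethod::from_u16(5), Some(CompressionMethod::LZW));
    }
}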