Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

1
vendor/tiff/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGES.md":"f9bd3690a3ce0c64595af40b6baacc64aabd10d34468bc307d5d67250045063d","Cargo.toml":"519d88ebdcbce493d4cec3bf0b5c7add95529e9b6e462493f71109ae19519839","LICENSE":"20c097d0de27e2adc179140a3d542091951a030699c7e1c78655f2a2d08d4e3e","README.md":"1c17af2aab3dcd95685672aecc5e5b7d1d608d41be9a8b22d327d06ebad0b72a","benches/lzw.rs":"ae9bd8a35ad6826ac204bb03bc6c07fc6070eb69c8f1baf20ed3222e515ca0a5","src/bytecast.rs":"fe162944a473d48d2502eab08f2fdb56e5b546d11cb25833454dd8d6395396ae","src/decoder/ifd.rs":"688e32504179ff53ac28cc5951e114d77a8771f276ec9c5007a861fbe07a77a3","src/decoder/image.rs":"2e22caf9401c84768e2527753fa9d9813886aea158daf4dd53fdf5c142672d1a","src/decoder/mod.rs":"e7cd83c9f5fec43036b9f576748adb76d654fdda42eb0fa64b7e0dce1a907575","src/decoder/stream.rs":"538dca03462c5a8351b558509766e8b9581d9de06571df1dd1fa95ce45918952","src/decoder/tag_reader.rs":"e95db23244da757588f0d885ebce18e189fafc52ac841682fb39371197b6f9d3","src/encoder/colortype.rs":"801eb6111d5c9fece5537a171b4aa3300d3d3f0f6242610ee819595146ea6928","src/encoder/compression/deflate.rs":"634692eb4290659a38f69c8a3635619333779d308d4f1583b0cb9e5974901bc0","src/encoder/compression/lzw.rs":"73076a9f332b9a48be65d19bcb16d25089d1a2812a2bcfe197223f805c34ef5d","src/encoder/compression/mod.rs":"817aceeb78331464e41713d3da97e9b7aabd7f34b34fb8173e2942d7ee039e5b","src/encoder/compression/packbits.rs":"a3b2e93b18ab69d82011ed78c17da050a2271e2253ee6903ec34b0b196577824","src/encoder/compression/uncompressed.rs":"296b865315b9fb0a8927a2862bdff64fcd01f92b38f7120795c2f68e92187f29","src/encoder/mod.rs":"83f91088019d1318edae70d594dae288edbf7e2f1cd4e48d8bffc22ea1f786a7","src/encoder/tiff_value.rs":"df212aa92b53c2e8bdbe0638bce6e0ac477ade8d8b3494f287aceb4dda621ff1","src/encoder/writer.rs":"b9abdc9095ffb5160ac758534e5ff282d90c801fe4a6f4e157c081d646894ee6","src/error.rs":"c7a021b3d30930abfb03e021734f04acb43dc003d817d9301c03e9954875f0d3","src/lib.rs":"6ec15b1e8f824b346520ec94dddb0604ee1a636c20dd96ece326f31351fd0c3c","src/tags.rs":"847cdb026212b7e45d42840b26eb43713f1579d32f51fe3e22140a352e5f1758","tests/benches/README.md":"61c5580a4c667cfe0cb2e63597f84b84e34a3424202d6e3e53954d4c273189f7","tests/benches/Transparency-lzw.tif":"b15297094106a33374672a6971ed1aaa5d2fbf3f3731c61af1107ba3e88af156","tests/benches/kodim02-lzw.tif":"9a9e3cf1c53e13a38f9d6409b0aae3fe214fea1b004ac3e848766b08bde38f66","tests/benches/kodim07-lzw.tif":"068beb46a3d186ed2fa65d5afbdbf1b34a02287b8d6c9e9c858728295ab277eb","tests/decode_bigtiff_images.rs":"5460b4c0be1588a6281c00a23ec1cb2a9b4cf3a5df5434da464ec30b888f6cd2","tests/decode_images.rs":"c1f673e844d43c5a1be3159a1523308b2a665126478ae78ffa7c1313fe81b9ef","tests/decodedata-rgb-3c-8b.tiff":"88fb7908c4562ad3f3a1180c02115da71efc34a6d3b4f0573360113385cef33c","tests/encode_images.rs":"30e49f5aa2169fcb1ec59ffda95bcff4dd567ebb33eff3d81fae606d147cee59","tests/encode_images_with_compression.rs":"0a351c0096ee1db74214d12f1b1669b73419ddd915d4850400774ce2598c53cd","tests/fuzz_tests.rs":"742f4283cdc6c3f5d1ccf320ea6c0eb7f3f517cc831de801206c077a0518ec99"},"package":"6d172b0f4d3fba17ba89811858b9d3d97f928aece846475bbda076ca46736211"}

196
vendor/tiff/CHANGES.md vendored Normal file

@@ -0,0 +1,196 @@
# Version 0.9.0
New features:
* Added support for photometric interpretation `YCbCr` and added related
`ColorType`.
Fixes:
* Decoding tiled images calculates padding correctly when image width or height
is a multiple of tile size. It could previously corrupt the last tile per row
by skipping over data.
# Version 0.8.1
Changes:
* The jpeg decoder gained the ability to utilize the Photometric Interpretation
directly instead of relying on a custom APP segment.
Fixes:
* A spurious error within the PackBits decoder led to incorrect results
(wrong bits or errors), depending on the maximum size of reads from the
underlying reader.
* Removed a panic path in jpeg decoding, when a feature such as photometric
interpretation is not supported. An error is returned instead.
# Version 0.8.0
Changes:
* The minimum supported rust version is now indicated in `Cargo.toml`.
* The enums `TiffFormatError` and `TiffUnsupportedError` are now
marked with the `#[non_exhaustive]` attribute.
* Additionally, tag related enums `Value`, `Tags`, `Type`, `CompressionMethod`,
`PhotometricInterpretation`, `PlanarConfiguration`, `Predictor`,
`ResolutionUnit`, `SampleFormat` are also changed.
Removals:
* Removed deprecated methods of `Decoder`: `init`, `read_jpeg`,
`read_strip_to_buffer`, `read_strip`, `read_tile`. The implicit chunk (row or
tile) index order could not be easily tracked by the caller. New separate
utility interfaces may be introduced at a later point; for now, callers are
obligated to choose their own.
Fixes:
* Update to `jpeg_decoder = 0.3`.
# Version 0.7.4
New features:
* Creating an encoder for invalid, zero-sized images is now rejected.
Fixes:
* Fix a panic in a case where the decoding of jpeg encoded images did not expect
the jpeg decoder to return an error.
* Fix panic by validating `rows_per_strip` better, fixing a division-by-zero.
# Version 0.7.3
New features:
* Allow decoder to access specific tiles by index.
* Add support for floating point predictor.
* Tiled jpeg file support.
Changes:
* Various refactoring and performance improvements.
# Version 0.7.2
New features:
* Encoding with `ImageEncoder` now takes an optional compressor argument,
allowing compressed encoding. See the methods
`TiffEncoder::{new_image,write_image}_with_compression`.
* `jpeg_decoder` has been upgraded, now supports lossless JPEG.
Changes:
* Decoding now more consistently reads and interprets the initial IFD, instead
of performing _some_ interpretation lazily. (This change prepares fully lazy
and backwards seeking.)
# Version 0.7.1
New features:
* Encoding signed integer formats is now supported.
* Extensive fuzzing with `cargo fuzz`.
Changes:
* Tile decoding should be a little faster, requiring one less intermediate buffer.
* Images whose IFDs form a cycle due to their offsets will now raise an error
when the cycle would be entered (jumping back should still be supported).
Fixes:
* Fixed a regression that caused conflict between strips and tile images,
causing errors in decoding some images.
* Use checked integer arithmetic in limit calculations, fixes overflows.
* IFD Tags are now always cleared between images.
* Found by fuzzing: Several memory limit overflows; JPEG now correctly
validates offsets and a minimum size of its table; Check upper limit of strip
byte size correctly;
Notes:
Our CI has warned that this version no longer builds on `1.34.2` out of the
box. We are still committed to the MSRV on this major version, but one
dependency, `flate2`, has already bumped it in a SemVer-compatible release of
its own. This is out of our control (cargo's dependency resolution does not
allow us to address it in a reasonable manner).
You can work around this by pinning the version of `flate2` to `1.0.21` in your
own files. However, be aware that this puts you in considerable maintenance
debt: you will no longer receive any updates for this dependency, and any
package that _requires_ a newer version of the `1.0` series will be
incompatible with this pin (cargo might yell at you very loudly).
# Version 0.7.0
New features:
* Support for encoding BigTiff ([#122](https://github.com/image-rs/image-tiff/pull/122))
* _Breaking:_ Encoder types now have a generic parameter to differentiate BigTiff and standard Tiff encoding. Defaults to standard Tiff.
* Basic tile decoding ([#125](https://github.com/image-rs/image-tiff/pull/125))
* _Breaking:_ There is a new `TiffError::UsageError` variant.
* Support for datatypes `Int8` and `Int16` ([#114](https://github.com/image-rs/image-tiff/pull/114))
* _Breaking:_ `DecodingResult` and `DecodingBuffer` have the two new variants `I8` and `I16`.
* Support for `i32` arrays ([#118](https://github.com/image-rs/image-tiff/pull/118/files))
* _Breaking:_ `DecodingResult` and `DecodingBuffer` have a new `I32` variant.
* Support for `Ifd` and `IfdBig` tag types and `I64` data type ([#119](https://github.com/image-rs/image-tiff/pull/119))
* _Breaking:_ `DecodingResult` and `DecodingBuffer` have a new `I64` variant.
* Add `SMinSampleValue` and `SMaxSampleValue` ([#123](https://github.com/image-rs/image-tiff/pull/123))
Changes:
* Improve deflate support ([#132](https://github.com/image-rs/image-tiff/pull/132))
* Switch to streaming decompression via `flate2`. Aside from performance improvements and lower RAM consumption, this fixes a bug where `max_uncompressed_length` was precalculated for a single tile but then used as a hard limit on the whole data, failing to decompress any tiled images.
* Add support for new `Deflate` tag in addition to `OldDeflate`.
* _Breaking:_ Remove `InflateError`, which is no longer needed with `flate2` ([#134](https://github.com/image-rs/image-tiff/pull/134))
* _Breaking:_ Support for `MinIsWhite` is restricted to unsigned and floating
point values. This is expected to be re-added once some contradictory
interpretation regarding semantics for signed values is resolved.
Fixes:
* Validate that ASCII tags are valid ASCII and end with a null byte ([#121](https://github.com/image-rs/image-tiff/pull/121))
Internal:
* Simplify decompression logic ([#126](https://github.com/image-rs/image-tiff/pull/126))
* Simplify `expand_strip` ([#128](https://github.com/image-rs/image-tiff/pull/128))
# Version 0.6.1
New features:
* Support for reading `u16` and ascii string tags.
* Added `Limits::unlimited` for disabling all limits.
* Added `ImageEncoder::rows_per_strip` to overwrite the default.
Changes:
* The default strip size for chunked encoding is now 1MB, up from 8KB. This
should lead to more efficient decoding and compression.
Fixes:
* Fixed a bug where LZW compressed strips could not be decoded, instead
returning an error `Inconsistent sizes encountered`.
* Reading a tag with a complex type and a single value returns the proper Value
variant, instead of a vector with one entry.
# Version 0.6.0
New features:
* Support for decoding BigTIFF with 64-bit offsets
* The value types byte, `f32`, `f64` are now recognized
* Support for Modern JPEG encoded images
Improvements:
* Better support for adding auxiliary tags before encoding image data
* Switched to lzw decoder library `weezl` for performance
* The `ColorType` trait now supports `SAMPLE_ENCODING` hints
Fixes:
* Fixed decoding of inline ASCII in tags
* Fixed handling after null terminator in ASCII data
* Recognize tile and sample format tags
# Version 0.5.0
* Added support for 32-bit and 64-bit decoded values.
* Added CMYK(16|32|64) color type support.
* Check many internal integer conversions to increase stability. This should
only lead to images being reported as faulty that would previously silently
break platform limits. If there are any false positives, please report them.
* Remove an erroneous check of decoded length in lzw compressed images.
# Version 0.4.0
* Several enumerations are now non_exhaustive for future extensions.
These are `Tag`, `Type`, `Value`, `PhotometricInterpretation`,
`CompressionMethod`, `Predictor`.
* Enums gained a dedicated method to convert to their TIFF variant value with
the specified type. Performing these conversions by casting the discriminant
with `as` is not guaranteed to be stable, except where documented explicitly.
* Removed the num-derive and num dependencies.
* Added support for decoding `deflate` compressed images.
* Make the decoder `Limits` customizable by exposing members.
* Fixed multi-page TIFF encoding writing incorrect offsets.

48
vendor/tiff/Cargo.toml vendored Normal file

@@ -0,0 +1,48 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.61.0"
name = "tiff"
version = "0.9.0"
authors = ["The image-rs Developers"]
exclude = [
"tests/images/*",
"tests/fuzz_images/*",
]
description = "TIFF decoding and encoding library in pure Rust"
readme = "README.md"
categories = [
"multimedia::images",
"multimedia::encoding",
]
license = "MIT"
repository = "https://github.com/image-rs/image-tiff"
resolver = "2"
[[bench]]
name = "lzw"
harness = false
[dependencies.flate2]
version = "1.0.20"
[dependencies.jpeg]
version = "0.3.0"
default-features = false
package = "jpeg-decoder"
[dependencies.weezl]
version = "0.1.0"
[dev-dependencies.criterion]
version = "0.3.1"

21
vendor/tiff/LICENSE vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 PistonDevelopers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

54
vendor/tiff/README.md vendored Normal file

@@ -0,0 +1,54 @@
# image-tiff
[![Build Status](https://github.com/image-rs/image-tiff/workflows/Rust%20CI/badge.svg)](https://github.com/image-rs/image-tiff/actions)
[![Documentation](https://docs.rs/tiff/badge.svg)](https://docs.rs/tiff)
[![Further crate info](https://img.shields.io/crates/v/tiff.svg)](https://crates.io/crates/tiff)
TIFF decoding and encoding library in pure Rust
## Supported
### Features
- Baseline spec (other than formats and tags listed below as not supported)
- Multipage
- BigTIFF
- Incremental decoding
### Formats
This table lists the photometric interpretations and sample formats which are supported for encoding and decoding. The entries are `ColorType` variants together with the sample bit depths (in parentheses) that are supported for them. Only images in which all samples share the same bit depth are currently supported. For example, `RGB(8)` means that the bit depth [8, 8, 8] is supported and will be interpreted as an RGB color type with 8 bits per channel.
| `PhotometricInterpretation` | UINT Format | IEEEFP Format |
| --------------------------- | --------------------------------------- | ------------------------- |
| `WhiteIsZero` | Gray(8\|16\|32\|64) | Gray(32\|64) |
| `BlackIsZero` | Gray(8\|16\|32\|64) | Gray(32\|64) |
| `RGB` | RGB(8\|16\|32\|64), RGBA(8\|16\|32\|64) | RGB(32\|64), RGBA(32\|64) |
| `RGBPalette` | | |
| `Mask` | | |
| `CMYK` | CMYK(8\|16\|32\|64) | CMYK(32\|64) |
| `YCbCr` | | |
| `CIELab` | | |
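
For orientation, here is a minimal decoding sketch; it is illustrative only and not part of this crate's sources. `Decoder::new` and `read_image` are the calls used in the vendored benches, while the `dimensions` and `colortype` accessors, the `DecodingResult` variants, and the input path are assumptions made for this example.

```rust
use std::fs::File;
use tiff::decoder::{Decoder, DecodingResult};

fn main() {
    // Hypothetical input path; any baseline TIFF covered by the table above should work.
    let file = File::open("image.tiff").expect("open input file");
    let mut decoder = Decoder::new(file).expect("create TIFF decoder");

    let (width, height) = decoder.dimensions().expect("read dimensions");
    // The color type corresponds to a row of the table, e.g. `RGB(8)` for 8-bit RGB.
    let colortype = decoder.colortype().expect("read color type");
    println!("{}x{} pixels, color type {:?}", width, height, colortype);

    // The buffer variant follows the sample format and bit depth of the image.
    match decoder.read_image().expect("decode first image") {
        DecodingResult::U8(buf) => println!("decoded {} 8-bit samples", buf.len()),
        DecodingResult::U16(buf) => println!("decoded {} 16-bit samples", buf.len()),
        _ => println!("decoded into another sample buffer"),
    }
}
```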
### Compressions
| | Decoding | Encoding |
| -------- | -------- | -------- |
| None | ✓ | ✓ |
| LZW | ✓ | ✓ |
| Deflate | ✓ | ✓ |
| PackBits | ✓ | ✓ |
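
Compressed encoding goes through `TiffEncoder::write_image_with_compression`, the method named in `CHANGES.md` above. The sketch below is likewise illustrative only; the module paths (`encoder::colortype::RGB8`, `encoder::compression::Lzw`) and the `Default` constructor for the compressor are assumptions based on the vendored `src/encoder` layout.

```rust
use std::io::Cursor;
use tiff::encoder::{colortype, compression::Lzw, TiffEncoder};

fn main() {
    // A 2x2 RGB8 image: interleaved samples in row-major order (12 bytes total).
    let data: Vec<u8> = vec![255u8; 2 * 2 * 3];
    let mut out = Cursor::new(Vec::new());
    {
        // `TiffEncoder::new` accepts any `Write + Seek` target.
        let mut encoder = TiffEncoder::new(&mut out).expect("create TIFF encoder");
        encoder
            .write_image_with_compression::<colortype::RGB8, _>(2, 2, Lzw::default(), &data)
            .expect("write LZW-compressed image");
    }
    println!("wrote {} bytes of TIFF output", out.get_ref().len());
}
```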
## Not yet supported
Formats and interpretations not listed above or with empty entries are unsupported.
- Baseline tags
- `ExtraSamples`
- Extension tags
## Fuzzing
This crate uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) in order to test the image parser.
After installing it with `cargo install cargo-fuzz` on a nightly rustc, the
fuzzing harness can be run with recommended settings using
`cargo fuzz run decode_image -snone -- -timeout=5`.

82
vendor/tiff/benches/lzw.rs vendored Normal file

@@ -0,0 +1,82 @@
extern crate criterion;
extern crate tiff;
use criterion::{
black_box, measurement::Measurement, BenchmarkGroup, BenchmarkId, Criterion, Throughput,
};
use tiff::decoder::Decoder;
fn read_image(image: &[u8]) {
let image = std::io::Cursor::new(image);
let decoder = Decoder::new(black_box(image));
let mut reader = decoder.unwrap();
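// `while { body; condition } {}` is a do-while loop: decode the current image first,
// then continue as long as `more_images()` reports further images in the file.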
while {
reader.read_image().unwrap();
reader.more_images()
} {}
}
fn main() {
struct BenchDef {
data: &'static [u8],
id: &'static str,
sample_size: usize,
}
fn run_bench_def<M: Measurement>(group: &mut BenchmarkGroup<M>, def: BenchDef) {
group
.sample_size(def.sample_size)
.throughput(Throughput::Bytes(def.data.len() as u64))
.bench_with_input(
BenchmarkId::new(def.id, def.data.len()),
def.data,
|b, input| b.iter(|| read_image(input)),
);
}
let mut c = Criterion::default().configure_from_args();
let mut group = c.benchmark_group("tiff-lzw");
macro_rules! data_file {
($name:literal) => {
include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), $name))
};
}
run_bench_def(
&mut group,
BenchDef {
data: data_file!("/tests/images/issue_69_lzw.tiff"),
id: "issue-69-lzw.tif",
sample_size: 500,
},
);
run_bench_def(
&mut group,
BenchDef {
data: data_file!("/tests/benches/kodim02-lzw.tif"),
id: "kodim02-lzw.tif",
sample_size: 20,
},
);
run_bench_def(
&mut group,
BenchDef {
data: data_file!("/tests/benches/kodim07-lzw.tif"),
id: "kodim07-lzw.tif",
sample_size: 20,
},
);
run_bench_def(
&mut group,
BenchDef {
data: data_file!("/tests/benches/Transparency-lzw.tif"),
id: "Transparency-lzw.tif",
sample_size: 20,
},
);
}

34
vendor/tiff/src/bytecast.rs vendored Normal file

@@ -0,0 +1,34 @@
//! Trivial, internal byte transmutation.
//!
//! A dependency like bytemuck would give us extra assurance of the safety but overall would not
//! reduce the amount of total unsafety. We don't use it in the interface where the traits would
//! really become useful.
//!
//! SAFETY: These are benign casts as we apply them to fixed size integer types only. All of them
//! are naturally aligned, valid for all bit patterns and their alignment is surely at most their
//! size (we assert the latter fact since it is 'implementation defined' if following the letter of
//! the unsafe code guidelines).
//!
//! TODO: Would like to use std-lib here.
use std::{mem, slice};
macro_rules! integral_slice_as_bytes{($int:ty, $const:ident $(,$mut:ident)*) => {
pub(crate) fn $const(slice: &[$int]) -> &[u8] {
assert!(mem::align_of::<$int>() <= mem::size_of::<$int>());
unsafe { slice::from_raw_parts(slice.as_ptr() as *const u8, mem::size_of_val(slice)) }
}
$(pub(crate) fn $mut(slice: &mut [$int]) -> &mut [u8] {
assert!(mem::align_of::<$int>() <= mem::size_of::<$int>());
unsafe { slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u8, mem::size_of_val(slice)) }
})*
}}
integral_slice_as_bytes!(i8, i8_as_ne_bytes, i8_as_ne_mut_bytes);
integral_slice_as_bytes!(u16, u16_as_ne_bytes, u16_as_ne_mut_bytes);
integral_slice_as_bytes!(i16, i16_as_ne_bytes, i16_as_ne_mut_bytes);
integral_slice_as_bytes!(u32, u32_as_ne_bytes, u32_as_ne_mut_bytes);
integral_slice_as_bytes!(i32, i32_as_ne_bytes, i32_as_ne_mut_bytes);
integral_slice_as_bytes!(u64, u64_as_ne_bytes, u64_as_ne_mut_bytes);
integral_slice_as_bytes!(i64, i64_as_ne_bytes, i64_as_ne_mut_bytes);
integral_slice_as_bytes!(f32, f32_as_ne_bytes, f32_as_ne_mut_bytes);
integral_slice_as_bytes!(f64, f64_as_ne_bytes, f64_as_ne_mut_bytes);

670
vendor/tiff/src/decoder/ifd.rs vendored Normal file

@@ -0,0 +1,670 @@
//! Function for reading TIFF tags
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::io::{self, Read, Seek};
use std::mem;
use std::str;
use super::stream::{ByteOrder, EndianReader, SmartReader};
use crate::tags::{Tag, Type};
use crate::{TiffError, TiffFormatError, TiffResult};
use self::Value::{
Ascii, Byte, Double, Float, Ifd, IfdBig, List, Rational, RationalBig, SRational, SRationalBig,
Short, Signed, SignedBig, Unsigned, UnsignedBig,
};
#[allow(unused_qualifications)]
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum Value {
Byte(u8),
Short(u16),
Signed(i32),
SignedBig(i64),
Unsigned(u32),
UnsignedBig(u64),
Float(f32),
Double(f64),
List(Vec<Value>),
Rational(u32, u32),
RationalBig(u64, u64),
SRational(i32, i32),
SRationalBig(i64, i64),
Ascii(String),
Ifd(u32),
IfdBig(u64),
}
impl Value {
pub fn into_u8(self) -> TiffResult<u8> {
match self {
Byte(val) => Ok(val),
val => Err(TiffError::FormatError(TiffFormatError::ByteExpected(val))),
}
}
pub fn into_u16(self) -> TiffResult<u16> {
match self {
Short(val) => Ok(val),
Unsigned(val) => Ok(u16::try_from(val)?),
UnsignedBig(val) => Ok(u16::try_from(val)?),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u32(self) -> TiffResult<u32> {
match self {
Short(val) => Ok(val.into()),
Unsigned(val) => Ok(val),
UnsignedBig(val) => Ok(u32::try_from(val)?),
Ifd(val) => Ok(val),
IfdBig(val) => Ok(u32::try_from(val)?),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i32(self) -> TiffResult<i32> {
match self {
Signed(val) => Ok(val),
SignedBig(val) => Ok(i32::try_from(val)?),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_u64(self) -> TiffResult<u64> {
match self {
Short(val) => Ok(val.into()),
Unsigned(val) => Ok(val.into()),
UnsignedBig(val) => Ok(val),
Ifd(val) => Ok(val.into()),
IfdBig(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i64(self) -> TiffResult<i64> {
match self {
Signed(val) => Ok(val.into()),
SignedBig(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_f32(self) -> TiffResult<f32> {
match self {
Float(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_f64(self) -> TiffResult<f64> {
match self {
Double(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_string(self) -> TiffResult<String> {
match self {
Ascii(val) => Ok(val),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_u32_vec(self) -> TiffResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u32()?)
}
Ok(new_vec)
}
Unsigned(val) => Ok(vec![val]),
UnsignedBig(val) => Ok(vec![u32::try_from(val)?]),
Rational(numerator, denominator) => Ok(vec![numerator, denominator]),
RationalBig(numerator, denominator) => {
Ok(vec![u32::try_from(numerator)?, u32::try_from(denominator)?])
}
Ifd(val) => Ok(vec![val]),
IfdBig(val) => Ok(vec![u32::try_from(val)?]),
Ascii(val) => Ok(val.chars().map(u32::from).collect()),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u8_vec(self) -> TiffResult<Vec<u8>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u8()?)
}
Ok(new_vec)
}
Byte(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u16_vec(self) -> TiffResult<Vec<u16>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u16()?)
}
Ok(new_vec)
}
Short(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i32_vec(self) -> TiffResult<Vec<i32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
match v {
SRational(numerator, denominator) => {
new_vec.push(numerator);
new_vec.push(denominator);
}
SRationalBig(numerator, denominator) => {
new_vec.push(i32::try_from(numerator)?);
new_vec.push(i32::try_from(denominator)?);
}
_ => new_vec.push(v.into_i32()?),
}
}
Ok(new_vec)
}
Signed(val) => Ok(vec![val]),
SignedBig(val) => Ok(vec![i32::try_from(val)?]),
SRational(numerator, denominator) => Ok(vec![numerator, denominator]),
SRationalBig(numerator, denominator) => {
Ok(vec![i32::try_from(numerator)?, i32::try_from(denominator)?])
}
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
pub fn into_f32_vec(self) -> TiffResult<Vec<f32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_f32()?)
}
Ok(new_vec)
}
Float(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_f64_vec(self) -> TiffResult<Vec<f64>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_f64()?)
}
Ok(new_vec)
}
Double(val) => Ok(vec![val]),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_u64_vec(self) -> TiffResult<Vec<u64>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
new_vec.push(v.into_u64()?)
}
Ok(new_vec)
}
Unsigned(val) => Ok(vec![val.into()]),
UnsignedBig(val) => Ok(vec![val]),
Rational(numerator, denominator) => Ok(vec![numerator.into(), denominator.into()]),
RationalBig(numerator, denominator) => Ok(vec![numerator, denominator]),
Ifd(val) => Ok(vec![val.into()]),
IfdBig(val) => Ok(vec![val]),
Ascii(val) => Ok(val.chars().map(u32::from).map(u64::from).collect()),
val => Err(TiffError::FormatError(
TiffFormatError::UnsignedIntegerExpected(val),
)),
}
}
pub fn into_i64_vec(self) -> TiffResult<Vec<i64>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec {
match v {
SRational(numerator, denominator) => {
new_vec.push(numerator.into());
new_vec.push(denominator.into());
}
SRationalBig(numerator, denominator) => {
new_vec.push(numerator);
new_vec.push(denominator);
}
_ => new_vec.push(v.into_i64()?),
}
}
Ok(new_vec)
}
Signed(val) => Ok(vec![val.into()]),
SignedBig(val) => Ok(vec![val]),
SRational(numerator, denominator) => Ok(vec![numerator.into(), denominator.into()]),
SRationalBig(numerator, denominator) => Ok(vec![numerator, denominator]),
val => Err(TiffError::FormatError(
TiffFormatError::SignedIntegerExpected(val),
)),
}
}
}
#[derive(Clone)]
pub struct Entry {
type_: Type,
count: u64,
offset: [u8; 8],
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!(
"Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_, self.count, &self.offset
))
}
}
impl Entry {
pub fn new(type_: Type, count: u32, offset: [u8; 4]) -> Entry {
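// Classic TIFF entries carry a 4-byte value/offset field; zero-extend it to the
// 8-byte layout used for BigTIFF so both variants share one representation.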
let mut offset = offset.to_vec();
offset.append(&mut vec![0; 4]);
Entry::new_u64(type_, count.into(), offset[..].try_into().unwrap())
}
pub fn new_u64(type_: Type, count: u64, offset: [u8; 8]) -> Entry {
Entry {
type_,
count,
offset,
}
}
/// Returns a mem_reader for the offset/value field
fn r(&self, byte_order: ByteOrder) -> SmartReader<io::Cursor<Vec<u8>>> {
SmartReader::wrap(io::Cursor::new(self.offset.to_vec()), byte_order)
}
pub fn val<R: Read + Seek>(
&self,
limits: &super::Limits,
bigtiff: bool,
reader: &mut SmartReader<R>,
) -> TiffResult<Value> {
// Case 1: there are no values so we can return immediately.
if self.count == 0 {
return Ok(List(Vec::new()));
}
let bo = reader.byte_order();
let tag_size = match self.type_ {
Type::BYTE | Type::SBYTE | Type::ASCII | Type::UNDEFINED => 1,
Type::SHORT | Type::SSHORT => 2,
Type::LONG | Type::SLONG | Type::FLOAT | Type::IFD => 4,
Type::LONG8
| Type::SLONG8
| Type::DOUBLE
| Type::RATIONAL
| Type::SRATIONAL
| Type::IFD8 => 8,
};
let value_bytes = match self.count.checked_mul(tag_size) {
Some(n) => n,
None => {
return Err(TiffError::LimitsExceeded);
}
};
// Case 2: there is one value.
if self.count == 1 {
// 2a: the value is 5-8 bytes and we're in BigTiff mode.
if bigtiff && value_bytes > 4 && value_bytes <= 8 {
return Ok(match self.type_ {
Type::LONG8 => UnsignedBig(self.r(bo).read_u64()?),
Type::SLONG8 => SignedBig(self.r(bo).read_i64()?),
Type::DOUBLE => Double(self.r(bo).read_f64()?),
Type::RATIONAL => {
let mut r = self.r(bo);
Rational(r.read_u32()?, r.read_u32()?)
}
Type::SRATIONAL => {
let mut r = self.r(bo);
SRational(r.read_i32()?, r.read_i32()?)
}
Type::IFD8 => IfdBig(self.r(bo).read_u64()?),
Type::BYTE
| Type::SBYTE
| Type::ASCII
| Type::UNDEFINED
| Type::SHORT
| Type::SSHORT
| Type::LONG
| Type::SLONG
| Type::FLOAT
| Type::IFD => unreachable!(),
});
}
// 2b: the value is at most 4 bytes or doesn't fit in the offset field.
return Ok(match self.type_ {
Type::BYTE => Unsigned(u32::from(self.offset[0])),
Type::SBYTE => Signed(i32::from(self.offset[0] as i8)),
Type::UNDEFINED => Byte(self.offset[0]),
Type::SHORT => Unsigned(u32::from(self.r(bo).read_u16()?)),
Type::SSHORT => Signed(i32::from(self.r(bo).read_i16()?)),
Type::LONG => Unsigned(self.r(bo).read_u32()?),
Type::SLONG => Signed(self.r(bo).read_i32()?),
Type::FLOAT => Float(self.r(bo).read_f32()?),
Type::ASCII => {
if self.offset[0] == 0 {
Ascii("".to_string())
} else {
return Err(TiffError::FormatError(TiffFormatError::InvalidTag));
}
}
Type::LONG8 => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
UnsignedBig(reader.read_u64()?)
}
Type::SLONG8 => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
SignedBig(reader.read_i64()?)
}
Type::DOUBLE => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
Double(reader.read_f64()?)
}
Type::RATIONAL => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
Rational(reader.read_u32()?, reader.read_u32()?)
}
Type::SRATIONAL => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
SRational(reader.read_i32()?, reader.read_i32()?)
}
Type::IFD => Ifd(self.r(bo).read_u32()?),
Type::IFD8 => {
reader.goto_offset(self.r(bo).read_u32()?.into())?;
IfdBig(reader.read_u64()?)
}
});
}
// Case 3: There is more than one value, but it fits in the offset field.
if value_bytes <= 4 || bigtiff && value_bytes <= 8 {
match self.type_ {
Type::BYTE => return offset_to_bytes(self.count as usize, self),
Type::SBYTE => return offset_to_sbytes(self.count as usize, self),
Type::ASCII => {
let mut buf = vec![0; self.count as usize];
self.r(bo).read_exact(&mut buf)?;
if buf.is_ascii() && buf.ends_with(&[0]) {
let v = str::from_utf8(&buf)?;
let v = v.trim_matches(char::from(0));
return Ok(Ascii(v.into()));
} else {
return Err(TiffError::FormatError(TiffFormatError::InvalidTag));
}
}
Type::UNDEFINED => {
return Ok(List(
self.offset[0..self.count as usize]
.iter()
.map(|&b| Byte(b))
.collect(),
));
}
Type::SHORT => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Short(r.read_u16()?));
}
return Ok(List(v));
}
Type::SSHORT => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Signed(i32::from(r.read_i16()?)));
}
return Ok(List(v));
}
Type::LONG => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Unsigned(r.read_u32()?));
}
return Ok(List(v));
}
Type::SLONG => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Signed(r.read_i32()?));
}
return Ok(List(v));
}
Type::FLOAT => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Float(r.read_f32()?));
}
return Ok(List(v));
}
Type::IFD => {
let mut r = self.r(bo);
let mut v = Vec::new();
for _ in 0..self.count {
v.push(Ifd(r.read_u32()?));
}
return Ok(List(v));
}
Type::LONG8
| Type::SLONG8
| Type::RATIONAL
| Type::SRATIONAL
| Type::DOUBLE
| Type::IFD8 => {
unreachable!()
}
}
}
// Case 4: there is more than one value, and it doesn't fit in the offset field.
match self.type_ {
// TODO check if this could give wrong results
// at a different endianness of file/computer.
Type::BYTE => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
let mut buf = [0; 1];
reader.read_exact(&mut buf)?;
Ok(UnsignedBig(u64::from(buf[0])))
}),
Type::SBYTE => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SignedBig(i64::from(reader.read_i8()? as i8)))
}),
Type::SHORT => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(UnsignedBig(u64::from(reader.read_u16()?)))
}),
Type::SSHORT => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SignedBig(i64::from(reader.read_i16()?)))
}),
Type::LONG => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Unsigned(reader.read_u32()?))
}),
Type::SLONG => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Signed(reader.read_i32()?))
}),
Type::FLOAT => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Float(reader.read_f32()?))
}),
Type::DOUBLE => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Double(reader.read_f64()?))
}),
Type::RATIONAL => {
self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Rational(reader.read_u32()?, reader.read_u32()?))
})
}
Type::SRATIONAL => {
self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SRational(reader.read_i32()?, reader.read_i32()?))
})
}
Type::LONG8 => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(UnsignedBig(reader.read_u64()?))
}),
Type::SLONG8 => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(SignedBig(reader.read_i64()?))
}),
Type::IFD => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(Ifd(reader.read_u32()?))
}),
Type::IFD8 => self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
Ok(IfdBig(reader.read_u64()?))
}),
Type::UNDEFINED => {
self.decode_offset(self.count, bo, bigtiff, limits, reader, |reader| {
let mut buf = [0; 1];
reader.read_exact(&mut buf)?;
Ok(Byte(buf[0]))
})
}
Type::ASCII => {
let n = usize::try_from(self.count)?;
if n > limits.decoding_buffer_size {
return Err(TiffError::LimitsExceeded);
}
if bigtiff {
reader.goto_offset(self.r(bo).read_u64()?)?
} else {
reader.goto_offset(self.r(bo).read_u32()?.into())?
}
let mut out = vec![0; n];
reader.read_exact(&mut out)?;
// Strings may be null-terminated, so we trim anything downstream of the null byte
if let Some(first) = out.iter().position(|&b| b == 0) {
out.truncate(first);
}
Ok(Ascii(String::from_utf8(out)?))
}
}
}
#[inline]
fn decode_offset<R, F>(
&self,
value_count: u64,
bo: ByteOrder,
bigtiff: bool,
limits: &super::Limits,
reader: &mut SmartReader<R>,
decode_fn: F,
) -> TiffResult<Value>
where
R: Read + Seek,
F: Fn(&mut SmartReader<R>) -> TiffResult<Value>,
{
let value_count = usize::try_from(value_count)?;
if value_count > limits.decoding_buffer_size / mem::size_of::<Value>() {
return Err(TiffError::LimitsExceeded);
}
let mut v = Vec::with_capacity(value_count);
let offset = if bigtiff {
self.r(bo).read_u64()?
} else {
self.r(bo).read_u32()?.into()
};
reader.goto_offset(offset)?;
for _ in 0..value_count {
v.push(decode_fn(reader)?)
}
Ok(List(v))
}
}
/// Extracts a list of BYTE tags stored in an offset
#[inline]
fn offset_to_bytes(n: usize, entry: &Entry) -> TiffResult<Value> {
Ok(List(
entry.offset[0..n]
.iter()
.map(|&e| Unsigned(u32::from(e)))
.collect(),
))
}
/// Extracts a list of SBYTE tags stored in an offset
#[inline]
fn offset_to_sbytes(n: usize, entry: &Entry) -> TiffResult<Value> {
Ok(List(
entry.offset[0..n]
.iter()
.map(|&e| Signed(i32::from(e as i8)))
.collect(),
))
}
/// Type representing an Image File Directory
pub type Directory = HashMap<Tag, Entry>;

601
vendor/tiff/src/decoder/image.rs vendored Normal file

@@ -0,0 +1,601 @@
use super::ifd::{Directory, Value};
use super::stream::{ByteOrder, DeflateReader, JpegReader, LZWReader, PackBitsReader};
use super::tag_reader::TagReader;
use super::{fp_predict_f32, fp_predict_f64, DecodingBuffer, Limits};
use super::{stream::SmartReader, ChunkType};
use crate::tags::{CompressionMethod, PhotometricInterpretation, Predictor, SampleFormat, Tag};
use crate::{ColorType, TiffError, TiffFormatError, TiffResult, TiffUnsupportedError, UsageError};
use std::convert::{TryFrom, TryInto};
use std::io::{self, Cursor, Read, Seek};
use std::sync::Arc;
#[derive(Debug)]
pub(crate) struct StripDecodeState {
pub rows_per_strip: u32,
}
#[derive(Debug)]
/// Computed values useful for tile decoding
pub(crate) struct TileAttributes {
pub image_width: usize,
pub image_height: usize,
pub tile_width: usize,
pub tile_length: usize,
}
impl TileAttributes {
pub fn tiles_across(&self) -> usize {
(self.image_width + self.tile_width - 1) / self.tile_width
}
pub fn tiles_down(&self) -> usize {
(self.image_height + self.tile_length - 1) / self.tile_length
}
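// Horizontal padding of the rightmost tile column; the outer modulo makes this zero
// when the image width is an exact multiple of the tile width.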
fn padding_right(&self) -> usize {
(self.tile_width - self.image_width % self.tile_width) % self.tile_width
}
fn padding_down(&self) -> usize {
(self.tile_length - self.image_height % self.tile_length) % self.tile_length
}
pub fn get_padding(&self, tile: usize) -> (usize, usize) {
let row = tile / self.tiles_across();
let column = tile % self.tiles_across();
let padding_right = if column == self.tiles_across() - 1 {
self.padding_right()
} else {
0
};
let padding_down = if row == self.tiles_down() - 1 {
self.padding_down()
} else {
0
};
(padding_right, padding_down)
}
}
#[derive(Debug)]
pub(crate) struct Image {
pub ifd: Option<Directory>,
pub width: u32,
pub height: u32,
pub bits_per_sample: Vec<u8>,
#[allow(unused)]
pub samples: u8,
pub sample_format: Vec<SampleFormat>,
pub photometric_interpretation: PhotometricInterpretation,
pub compression_method: CompressionMethod,
pub predictor: Predictor,
pub jpeg_tables: Option<Arc<Vec<u8>>>,
pub chunk_type: ChunkType,
pub strip_decoder: Option<StripDecodeState>,
pub tile_attributes: Option<TileAttributes>,
pub chunk_offsets: Vec<u64>,
pub chunk_bytes: Vec<u64>,
}
impl Image {
pub fn from_reader<R: Read + Seek>(
reader: &mut SmartReader<R>,
ifd: Directory,
limits: &Limits,
bigtiff: bool,
) -> TiffResult<Image> {
let mut tag_reader = TagReader {
reader,
limits,
ifd: &ifd,
bigtiff,
};
let width = tag_reader.require_tag(Tag::ImageWidth)?.into_u32()?;
let height = tag_reader.require_tag(Tag::ImageLength)?.into_u32()?;
if width == 0 || height == 0 {
return Err(TiffError::FormatError(TiffFormatError::InvalidDimensions(
width, height,
)));
}
let photometric_interpretation = tag_reader
.find_tag(Tag::PhotometricInterpretation)?
.map(Value::into_u16)
.transpose()?
.and_then(PhotometricInterpretation::from_u16)
.ok_or(TiffUnsupportedError::UnknownInterpretation)?;
// Try to parse both the compression method and the number, format, and bits of the included samples.
// If they are not explicitly specified, those tags are reset to their default values and not carried from previous images.
let compression_method = match tag_reader.find_tag(Tag::Compression)? {
Some(val) => CompressionMethod::from_u16(val.into_u16()?)
.ok_or(TiffUnsupportedError::UnknownCompressionMethod)?,
None => CompressionMethod::None,
};
let jpeg_tables = if compression_method == CompressionMethod::ModernJPEG
&& ifd.contains_key(&Tag::JPEGTables)
{
let vec = tag_reader
.find_tag(Tag::JPEGTables)?
.unwrap()
.into_u8_vec()?;
if vec.len() < 2 {
return Err(TiffError::FormatError(
TiffFormatError::InvalidTagValueType(Tag::JPEGTables),
));
}
Some(Arc::new(vec))
} else {
None
};
let samples = tag_reader
.find_tag(Tag::SamplesPerPixel)?
.map(Value::into_u16)
.transpose()?
.unwrap_or(1)
.try_into()?;
let sample_format = match tag_reader.find_tag_uint_vec(Tag::SampleFormat)? {
Some(vals) => {
let sample_format: Vec<_> = vals
.into_iter()
.map(SampleFormat::from_u16_exhaustive)
.collect();
// TODO: for now, only homogenous formats across samples are supported.
if !sample_format.windows(2).all(|s| s[0] == s[1]) {
return Err(TiffUnsupportedError::UnsupportedSampleFormat(sample_format).into());
}
sample_format
}
None => vec![SampleFormat::Uint],
};
let bits_per_sample = match samples {
1 | 3 | 4 => tag_reader
.find_tag_uint_vec(Tag::BitsPerSample)?
.unwrap_or_else(|| vec![1]),
_ => return Err(TiffUnsupportedError::UnsupportedSampleDepth(samples).into()),
};
let predictor = tag_reader
.find_tag(Tag::Predictor)?
.map(Value::into_u16)
.transpose()?
.map(|p| {
Predictor::from_u16(p)
.ok_or(TiffError::FormatError(TiffFormatError::UnknownPredictor(p)))
})
.transpose()?
.unwrap_or(Predictor::None);
let chunk_type;
let chunk_offsets;
let chunk_bytes;
let strip_decoder;
let tile_attributes;
match (
ifd.contains_key(&Tag::StripByteCounts),
ifd.contains_key(&Tag::StripOffsets),
ifd.contains_key(&Tag::TileByteCounts),
ifd.contains_key(&Tag::TileOffsets),
) {
(true, true, false, false) => {
chunk_type = ChunkType::Strip;
chunk_offsets = tag_reader
.find_tag(Tag::StripOffsets)?
.unwrap()
.into_u64_vec()?;
chunk_bytes = tag_reader
.find_tag(Tag::StripByteCounts)?
.unwrap()
.into_u64_vec()?;
let rows_per_strip = tag_reader
.find_tag(Tag::RowsPerStrip)?
.map(Value::into_u32)
.transpose()?
.unwrap_or(height);
strip_decoder = Some(StripDecodeState { rows_per_strip });
tile_attributes = None;
if chunk_offsets.len() != chunk_bytes.len()
|| rows_per_strip == 0
|| u32::try_from(chunk_offsets.len())?
!= height.saturating_sub(1) / rows_per_strip + 1
{
return Err(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
));
}
}
(false, false, true, true) => {
chunk_type = ChunkType::Tile;
let tile_width =
usize::try_from(tag_reader.require_tag(Tag::TileWidth)?.into_u32()?)?;
let tile_length =
usize::try_from(tag_reader.require_tag(Tag::TileLength)?.into_u32()?)?;
if tile_width == 0 {
return Err(TiffFormatError::InvalidTagValueType(Tag::TileWidth).into());
} else if tile_length == 0 {
return Err(TiffFormatError::InvalidTagValueType(Tag::TileLength).into());
}
strip_decoder = None;
tile_attributes = Some(TileAttributes {
image_width: usize::try_from(width)?,
image_height: usize::try_from(height)?,
tile_width,
tile_length,
});
chunk_offsets = tag_reader
.find_tag(Tag::TileOffsets)?
.unwrap()
.into_u64_vec()?;
chunk_bytes = tag_reader
.find_tag(Tag::TileByteCounts)?
.unwrap()
.into_u64_vec()?;
let tile = tile_attributes.as_ref().unwrap();
if chunk_offsets.len() != chunk_bytes.len()
|| chunk_offsets.len() != tile.tiles_down() * tile.tiles_across()
{
return Err(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
));
}
}
(_, _, _, _) => {
return Err(TiffError::FormatError(
TiffFormatError::StripTileTagConflict,
))
}
};
Ok(Image {
ifd: Some(ifd),
width,
height,
bits_per_sample,
samples,
sample_format,
photometric_interpretation,
compression_method,
jpeg_tables,
predictor,
chunk_type,
strip_decoder,
tile_attributes,
chunk_offsets,
chunk_bytes,
})
}
pub(crate) fn colortype(&self) -> TiffResult<ColorType> {
match self.photometric_interpretation {
PhotometricInterpretation::RGB => match self.bits_per_sample[..] {
[r, g, b] if [r, r] == [g, b] => Ok(ColorType::RGB(r)),
[r, g, b, a] if [r, r, r] == [g, b, a] => Ok(ColorType::RGBA(r)),
// FIXME: We should _ignore_ other components. In particular:
// > Beware of extra components. Some TIFF files may have more components per pixel
// than you think. A Baseline TIFF reader must skip over them gracefully, using the
// values of the SamplesPerPixel and BitsPerSample fields.
// > -- TIFF 6.0 Specification, Section 7, Additional Baseline requirements.
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
},
PhotometricInterpretation::CMYK => match self.bits_per_sample[..] {
[c, m, y, k] if [c, c, c] == [m, y, k] => Ok(ColorType::CMYK(c)),
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
},
PhotometricInterpretation::YCbCr => match self.bits_per_sample[..] {
[y, cb, cr] if [y, y] == [cb, cr] => Ok(ColorType::YCbCr(y)),
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
},
PhotometricInterpretation::BlackIsZero | PhotometricInterpretation::WhiteIsZero
if self.bits_per_sample.len() == 1 =>
{
Ok(ColorType::Gray(self.bits_per_sample[0]))
}
// TODO: this is bad we should not fail at this point
_ => Err(TiffError::UnsupportedError(
TiffUnsupportedError::InterpretationWithBits(
self.photometric_interpretation,
self.bits_per_sample.clone(),
),
)),
}
}
fn create_reader<'r, R: 'r + Read>(
reader: R,
photometric_interpretation: PhotometricInterpretation,
compression_method: CompressionMethod,
compressed_length: u64,
jpeg_tables: Option<Arc<Vec<u8>>>,
) -> TiffResult<Box<dyn Read + 'r>> {
Ok(match compression_method {
CompressionMethod::None => Box::new(reader),
CompressionMethod::LZW => {
Box::new(LZWReader::new(reader, usize::try_from(compressed_length)?))
}
CompressionMethod::PackBits => Box::new(PackBitsReader::new(reader, compressed_length)),
CompressionMethod::Deflate | CompressionMethod::OldDeflate => {
Box::new(DeflateReader::new(reader))
}
CompressionMethod::ModernJPEG => {
if jpeg_tables.is_some() && compressed_length < 2 {
return Err(TiffError::FormatError(
TiffFormatError::InvalidTagValueType(Tag::JPEGTables),
));
}
let jpeg_reader = JpegReader::new(reader, compressed_length, jpeg_tables)?;
let mut decoder = jpeg::Decoder::new(jpeg_reader);
match photometric_interpretation {
PhotometricInterpretation::RGB => {
decoder.set_color_transform(jpeg::ColorTransform::RGB)
}
PhotometricInterpretation::WhiteIsZero => {
decoder.set_color_transform(jpeg::ColorTransform::None)
}
PhotometricInterpretation::BlackIsZero => {
decoder.set_color_transform(jpeg::ColorTransform::None)
}
PhotometricInterpretation::TransparencyMask => {
decoder.set_color_transform(jpeg::ColorTransform::None)
}
PhotometricInterpretation::CMYK => {
decoder.set_color_transform(jpeg::ColorTransform::CMYK)
}
PhotometricInterpretation::YCbCr => {
decoder.set_color_transform(jpeg::ColorTransform::YCbCr)
}
photometric_interpretation => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::UnsupportedInterpretation(
photometric_interpretation,
),
));
}
}
let data = decoder.decode()?;
Box::new(Cursor::new(data))
}
method => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::UnsupportedCompressionMethod(method),
))
}
})
}
pub(crate) fn chunk_file_range(&self, chunk: u32) -> TiffResult<(u64, u64)> {
let file_offset = self
.chunk_offsets
.get(chunk as usize)
.ok_or(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
))?;
let compressed_bytes =
self.chunk_bytes
.get(chunk as usize)
.ok_or(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
))?;
Ok((*file_offset, *compressed_bytes))
}
pub(crate) fn chunk_dimensions(&self) -> TiffResult<(u32, u32)> {
match self.chunk_type {
ChunkType::Strip => {
let strip_attrs = self.strip_decoder.as_ref().unwrap();
Ok((self.width, strip_attrs.rows_per_strip))
}
ChunkType::Tile => {
let tile_attrs = self.tile_attributes.as_ref().unwrap();
Ok((
u32::try_from(tile_attrs.tile_width)?,
u32::try_from(tile_attrs.tile_length)?,
))
}
}
}
pub(crate) fn chunk_data_dimensions(&self, chunk_index: u32) -> TiffResult<(u32, u32)> {
let dims = self.chunk_dimensions()?;
match self.chunk_type {
ChunkType::Strip => {
let strip_height_without_padding = chunk_index
.checked_mul(dims.1)
.and_then(|x| self.height.checked_sub(x))
.ok_or(TiffError::UsageError(UsageError::InvalidChunkIndex(
chunk_index,
)))?;
// Ignore potential vertical padding on the bottommost strip
let strip_height = dims.1.min(strip_height_without_padding);
Ok((dims.0, strip_height))
}
ChunkType::Tile => {
let tile_attrs = self.tile_attributes.as_ref().unwrap();
let (padding_right, padding_down) = tile_attrs.get_padding(chunk_index as usize);
let tile_width = tile_attrs.tile_width - padding_right;
let tile_length = tile_attrs.tile_length - padding_down;
Ok((u32::try_from(tile_width)?, u32::try_from(tile_length)?))
}
}
}
pub(crate) fn expand_chunk(
&self,
reader: impl Read,
mut buffer: DecodingBuffer,
output_width: usize,
byte_order: ByteOrder,
chunk_index: u32,
) -> TiffResult<()> {
// Validate that the provided buffer is of the expected type.
let color_type = self.colortype()?;
match (color_type, &buffer) {
(ColorType::RGB(n), _)
| (ColorType::RGBA(n), _)
| (ColorType::CMYK(n), _)
| (ColorType::YCbCr(n), _)
| (ColorType::Gray(n), _)
if usize::from(n) == buffer.byte_len() * 8 => {}
(ColorType::Gray(n), DecodingBuffer::U8(_)) if n < 8 => match self.predictor {
Predictor::None => {}
Predictor::Horizontal => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::HorizontalPredictor(color_type),
))
}
Predictor::FloatingPoint => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::FloatingPointPredictor(color_type),
));
}
},
(type_, _) => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::UnsupportedColorType(type_),
))
}
}
// Validate that the predictor is supported for the sample type.
match (self.predictor, &buffer) {
(Predictor::Horizontal, DecodingBuffer::F32(_))
| (Predictor::Horizontal, DecodingBuffer::F64(_)) => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::HorizontalPredictor(color_type),
));
}
(Predictor::FloatingPoint, DecodingBuffer::F32(_))
| (Predictor::FloatingPoint, DecodingBuffer::F64(_)) => {}
(Predictor::FloatingPoint, _) => {
return Err(TiffError::UnsupportedError(
TiffUnsupportedError::FloatingPointPredictor(color_type),
));
}
_ => {}
}
let compressed_bytes =
self.chunk_bytes
.get(chunk_index as usize)
.ok_or(TiffError::FormatError(
TiffFormatError::InconsistentSizesEncountered,
))?;
let byte_len = buffer.byte_len();
let compression_method = self.compression_method;
let photometric_interpretation = self.photometric_interpretation;
let predictor = self.predictor;
let samples = self.bits_per_sample.len();
let chunk_dims = self.chunk_dimensions()?;
let data_dims = self.chunk_data_dimensions(chunk_index)?;
let padding_right = chunk_dims.0 - data_dims.0;
let jpeg_tables = self.jpeg_tables.clone();
let mut reader = Self::create_reader(
reader,
photometric_interpretation,
compression_method,
*compressed_bytes,
jpeg_tables,
)?;
if output_width == data_dims.0 as usize && padding_right == 0 {
let total_samples = data_dims.0 as usize * data_dims.1 as usize * samples;
let tile = &mut buffer.as_bytes_mut()[..total_samples * byte_len];
reader.read_exact(tile)?;
for row in 0..data_dims.1 as usize {
let row_start = row as usize * output_width as usize * samples;
let row_end = (row + 1) * output_width as usize * samples;
let row = buffer.subrange(row_start..row_end);
super::fix_endianness_and_predict(row, samples, byte_order, predictor);
}
if photometric_interpretation == PhotometricInterpretation::WhiteIsZero {
super::invert_colors(&mut buffer.subrange(0..total_samples), color_type);
}
} else if padding_right > 0 && self.predictor == Predictor::FloatingPoint {
// The floating point predictor shuffles the padding bytes into the encoded output, so
// this case is handled specially when needed.
let mut encoded = vec![0u8; chunk_dims.0 as usize * samples * byte_len];
for row in 0..data_dims.1 as usize {
let row_start = row * output_width as usize * samples;
let row_end = row_start + data_dims.0 as usize * samples;
reader.read_exact(&mut encoded)?;
match buffer.subrange(row_start..row_end) {
DecodingBuffer::F32(buf) => fp_predict_f32(&mut encoded, buf, samples),
DecodingBuffer::F64(buf) => fp_predict_f64(&mut encoded, buf, samples),
_ => unreachable!(),
}
if photometric_interpretation == PhotometricInterpretation::WhiteIsZero {
super::invert_colors(&mut buffer.subrange(row_start..row_end), color_type);
}
}
} else {
for row in 0..data_dims.1 as usize {
let row_start = row * output_width as usize * samples;
let row_end = row_start + data_dims.0 as usize * samples;
let row = &mut buffer.as_bytes_mut()[(row_start * byte_len)..(row_end * byte_len)];
reader.read_exact(row)?;
// Skip horizontal padding
if padding_right > 0 {
let len = u64::try_from(padding_right as usize * samples * byte_len)?;
io::copy(&mut reader.by_ref().take(len), &mut io::sink())?;
}
let mut row = buffer.subrange(row_start..row_end);
super::fix_endianness_and_predict(row.copy(), samples, byte_order, predictor);
if photometric_interpretation == PhotometricInterpretation::WhiteIsZero {
super::invert_colors(&mut row, color_type);
}
}
}
Ok(())
}
}

1176
vendor/tiff/src/decoder/mod.rs vendored Normal file

File diff suppressed because it is too large

435
vendor/tiff/src/decoder/stream.rs vendored Normal file

@@ -0,0 +1,435 @@
//! All IO functionality needed for TIFF decoding
use std::convert::TryFrom;
use std::io::{self, BufRead, BufReader, Read, Seek, SeekFrom, Take};
use std::sync::Arc;
/// Byte order of the TIFF file.
#[derive(Clone, Copy, Debug)]
pub enum ByteOrder {
/// little endian byte order
LittleEndian,
/// big endian byte order
BigEndian,
}
/// Reader that is aware of the byte order.
pub trait EndianReader: Read {
/// Byte order that should be adhered to
fn byte_order(&self) -> ByteOrder;
/// Reads an u16
#[inline(always)]
fn read_u16(&mut self) -> Result<u16, io::Error> {
let mut n = [0u8; 2];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => u16::from_le_bytes(n),
ByteOrder::BigEndian => u16::from_be_bytes(n),
})
}
/// Reads an i8
#[inline(always)]
fn read_i8(&mut self) -> Result<i8, io::Error> {
let mut n = [0u8; 1];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i8::from_le_bytes(n),
ByteOrder::BigEndian => i8::from_be_bytes(n),
})
}
/// Reads an i16
#[inline(always)]
fn read_i16(&mut self) -> Result<i16, io::Error> {
let mut n = [0u8; 2];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i16::from_le_bytes(n),
ByteOrder::BigEndian => i16::from_be_bytes(n),
})
}
/// Reads an u32
#[inline(always)]
fn read_u32(&mut self) -> Result<u32, io::Error> {
let mut n = [0u8; 4];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => u32::from_le_bytes(n),
ByteOrder::BigEndian => u32::from_be_bytes(n),
})
}
/// Reads an i32
#[inline(always)]
fn read_i32(&mut self) -> Result<i32, io::Error> {
let mut n = [0u8; 4];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i32::from_le_bytes(n),
ByteOrder::BigEndian => i32::from_be_bytes(n),
})
}
/// Reads an u64
#[inline(always)]
fn read_u64(&mut self) -> Result<u64, io::Error> {
let mut n = [0u8; 8];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => u64::from_le_bytes(n),
ByteOrder::BigEndian => u64::from_be_bytes(n),
})
}
/// Reads an i64
#[inline(always)]
fn read_i64(&mut self) -> Result<i64, io::Error> {
let mut n = [0u8; 8];
self.read_exact(&mut n)?;
Ok(match self.byte_order() {
ByteOrder::LittleEndian => i64::from_le_bytes(n),
ByteOrder::BigEndian => i64::from_be_bytes(n),
})
}
/// Reads an f32
#[inline(always)]
fn read_f32(&mut self) -> Result<f32, io::Error> {
let mut n = [0u8; 4];
self.read_exact(&mut n)?;
Ok(f32::from_bits(match self.byte_order() {
ByteOrder::LittleEndian => u32::from_le_bytes(n),
ByteOrder::BigEndian => u32::from_be_bytes(n),
}))
}
/// Reads an f64
#[inline(always)]
fn read_f64(&mut self) -> Result<f64, io::Error> {
let mut n = [0u8; 8];
self.read_exact(&mut n)?;
Ok(f64::from_bits(match self.byte_order() {
ByteOrder::LittleEndian => u64::from_le_bytes(n),
ByteOrder::BigEndian => u64::from_be_bytes(n),
}))
}
}
///
/// # READERS
///
///
/// ## Deflate Reader
///
pub type DeflateReader<R> = flate2::read::ZlibDecoder<R>;
///
/// ## LZW Reader
///
/// Reader that decompresses LZW streams
pub struct LZWReader<R: Read> {
reader: BufReader<Take<R>>,
decoder: weezl::decode::Decoder,
}
impl<R: Read> LZWReader<R> {
/// Wraps a reader
pub fn new(reader: R, compressed_length: usize) -> LZWReader<R> {
Self {
reader: BufReader::with_capacity(
(32 * 1024).min(compressed_length),
reader.take(u64::try_from(compressed_length).unwrap()),
),
decoder: weezl::decode::Decoder::with_tiff_size_switch(weezl::BitOrder::Msb, 8),
}
}
}
impl<R: Read> Read for LZWReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
loop {
let result = self.decoder.decode_bytes(self.reader.fill_buf()?, buf);
self.reader.consume(result.consumed_in);
match result.status {
Ok(weezl::LzwStatus::Ok) => {
if result.consumed_out == 0 {
continue;
} else {
return Ok(result.consumed_out);
}
}
Ok(weezl::LzwStatus::NoProgress) => {
assert_eq!(result.consumed_in, 0);
assert_eq!(result.consumed_out, 0);
assert!(self.reader.buffer().is_empty());
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"no lzw end code found",
));
}
Ok(weezl::LzwStatus::Done) => {
return Ok(result.consumed_out);
}
Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidData, err)),
}
}
}
}
///
/// ## JPEG Reader (for the "new-style" JPEG format, TIFF compression tag 7)
///
pub(crate) struct JpegReader {
jpeg_tables: Option<Arc<Vec<u8>>>,
buffer: io::Cursor<Vec<u8>>,
offset: usize,
}
impl JpegReader {
/// Constructs a new JpegReader wrapping the raw strip or tile reader.
/// Because JPEG compression in TIFF allows quantization and/or Huffman tables to be
/// stored in one central location, the constructor accepts this data as `jpeg_tables`,
/// containing either or both kinds of tables.
/// These `jpeg_tables` are simply prepended to the remaining JPEG image data.
/// Because `jpeg_tables` starts with an `SOI` (hex `0xFFD8`, __start of image__) marker,
/// which is also at the beginning of the remaining JPEG image data and would confuse
/// the JPEG decoder, one of the two has to be dropped. Here the first two bytes of the
/// remaining JPEG data are skipped, since that data follows `jpeg_tables`.
/// Similarly, `jpeg_tables` ends with an `EOI` (hex `0xFFD9`, __end of image__) marker,
/// which has to be removed as well (the last two bytes of `jpeg_tables`).
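/// A schematic of the byte stream handed to the JPEG decoder (illustrative, marker
/// names only):
///
/// ```text
/// jpeg_tables: FFD8 (SOI) | DQT/DHT segments     | FFD9 (EOI)
/// strip data:  FFD8 (SOI) | frame + entropy data | FFD9 (EOI)
/// presented:   FFD8 (SOI) | DQT/DHT segments | frame + entropy data | FFD9 (EOI)
/// ```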
pub fn new<R: Read>(
mut reader: R,
length: u64,
jpeg_tables: Option<Arc<Vec<u8>>>,
) -> io::Result<JpegReader> {
// Read jpeg image data
let mut segment = vec![0; length as usize];
reader.read_exact(&mut segment[..])?;
match jpeg_tables {
Some(jpeg_tables) => {
assert!(
jpeg_tables.len() >= 2,
"jpeg_tables, if given, must be at least 2 bytes long. Got {:?}",
jpeg_tables
);
assert!(
length >= 2,
"if jpeg_tables is given, length must be at least 2 bytes long, got {}",
length
);
let mut buffer = io::Cursor::new(segment);
// Skip the first two bytes (marker bytes)
buffer.seek(SeekFrom::Start(2))?;
Ok(JpegReader {
buffer,
jpeg_tables: Some(jpeg_tables),
offset: 0,
})
}
None => Ok(JpegReader {
buffer: io::Cursor::new(segment),
jpeg_tables: None,
offset: 0,
}),
}
}
}
impl Read for JpegReader {
// #[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut start = 0;
if let Some(jpeg_tables) = &self.jpeg_tables {
if jpeg_tables.len() - 2 > self.offset {
// Read (rest of) jpeg_tables to buf (without the last two bytes)
let size_remaining = jpeg_tables.len() - self.offset - 2;
let to_copy = size_remaining.min(buf.len());
buf[start..start + to_copy]
.copy_from_slice(&jpeg_tables[self.offset..self.offset + to_copy]);
self.offset += to_copy;
if to_copy == buf.len() {
return Ok(to_copy);
}
start += to_copy;
}
}
let read = self.buffer.read(&mut buf[start..])?;
self.offset += read;
Ok(read + start)
}
}
///
/// ## PackBits Reader
///
enum PackBitsReaderState {
Header,
Literal,
Repeat { value: u8 },
}
/// Reader that unpacks Apple's `PackBits` format
pub struct PackBitsReader<R: Read> {
reader: Take<R>,
state: PackBitsReaderState,
count: usize,
}
impl<R: Read> PackBitsReader<R> {
/// Wraps a reader
pub fn new(reader: R, length: u64) -> Self {
Self {
reader: reader.take(length),
state: PackBitsReaderState::Header,
count: 0,
}
}
}
impl<R: Read> Read for PackBitsReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
while let PackBitsReaderState::Header = self.state {
if self.reader.limit() == 0 {
return Ok(0);
}
let mut header: [u8; 1] = [0];
self.reader.read_exact(&mut header)?;
let h = header[0] as i8;
if h >= -127 && h <= -1 {
let mut data: [u8; 1] = [0];
self.reader.read_exact(&mut data)?;
self.state = PackBitsReaderState::Repeat { value: data[0] };
self.count = (1 - h as isize) as usize;
} else if h >= 0 {
self.state = PackBitsReaderState::Literal;
self.count = h as usize + 1;
} else {
// h = -128 is a no-op.
}
}
let length = buf.len().min(self.count);
let actual = match self.state {
PackBitsReaderState::Literal => self.reader.read(&mut buf[..length])?,
PackBitsReaderState::Repeat { value } => {
for b in &mut buf[..length] {
*b = value;
}
length
}
PackBitsReaderState::Header => unreachable!(),
};
self.count -= actual;
if self.count == 0 {
self.state = PackBitsReaderState::Header;
}
return Ok(actual);
}
}
///
/// ## SmartReader
///
/// Reader that is aware of the byte order.
#[derive(Debug)]
pub struct SmartReader<R>
where
R: Read,
{
reader: R,
pub byte_order: ByteOrder,
}
impl<R> SmartReader<R>
where
R: Read,
{
/// Wraps a reader
pub fn wrap(reader: R, byte_order: ByteOrder) -> SmartReader<R> {
SmartReader { reader, byte_order }
}
pub fn into_inner(self) -> R {
self.reader
}
}
impl<R: Read + Seek> SmartReader<R> {
pub fn goto_offset(&mut self, offset: u64) -> io::Result<()> {
self.seek(io::SeekFrom::Start(offset)).map(|_| ())
}
}
impl<R> EndianReader for SmartReader<R>
where
R: Read,
{
#[inline(always)]
fn byte_order(&self) -> ByteOrder {
self.byte_order
}
}
impl<R: Read> Read for SmartReader<R> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
}
impl<R: Read + Seek> Seek for SmartReader<R> {
#[inline]
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.reader.seek(pos)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_packbits() {
let encoded = vec![
0xFE, 0xAA, 0x02, 0x80, 0x00, 0x2A, 0xFD, 0xAA, 0x03, 0x80, 0x00, 0x2A, 0x22, 0xF7,
0xAA,
];
let encoded_len = encoded.len();
let buff = io::Cursor::new(encoded);
let mut decoder = PackBitsReader::new(buff, encoded_len as u64);
let mut decoded = Vec::new();
decoder.read_to_end(&mut decoded).unwrap();
let expected = vec![
0xAA, 0xAA, 0xAA, 0x80, 0x00, 0x2A, 0xAA, 0xAA, 0xAA, 0xAA, 0x80, 0x00, 0x2A, 0x22,
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
];
assert_eq!(decoded, expected);
}
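// Added illustrative tests: the first exercises the byte-order-aware reads of
// `EndianReader` through `SmartReader`, the second round-trips a small buffer
// through weezl's TIFF-variant LZW encoder and the `LZWReader` above. The input
// bytes are arbitrary sample values.
#[test]
fn test_smart_reader_byte_order() {
let data = [0x01u8, 0x02, 0x03, 0x04];
let mut le = SmartReader::wrap(io::Cursor::new(&data[..]), ByteOrder::LittleEndian);
assert_eq!(le.read_u16().unwrap(), 0x0201);
assert_eq!(le.read_u16().unwrap(), 0x0403);
let mut be = SmartReader::wrap(io::Cursor::new(&data[..]), ByteOrder::BigEndian);
assert_eq!(be.read_u32().unwrap(), 0x0102_0304);
}
#[test]
fn test_lzw_reader_roundtrip() {
let data = b"hello hello hello tiff";
let mut compressed = Vec::new();
// Compress with the same TIFF size-switch variant the decoder expects.
let mut encoder = weezl::encode::Encoder::with_tiff_size_switch(weezl::BitOrder::Msb, 8);
let result = encoder.into_stream(&mut compressed).encode_all(&data[..]);
result.status.unwrap();
let mut decoder = LZWReader::new(io::Cursor::new(&compressed[..]), compressed.len());
let mut decoded = Vec::new();
decoder.read_to_end(&mut decoded).unwrap();
assert_eq!(decoded, &data[..]);
}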
}

45
vendor/tiff/src/decoder/tag_reader.rs vendored Normal file
View File

@ -0,0 +1,45 @@
use std::convert::TryFrom;
use std::io::{Read, Seek};
use crate::tags::Tag;
use crate::{TiffError, TiffFormatError, TiffResult};
use super::ifd::{Directory, Value};
use super::stream::SmartReader;
use super::Limits;
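/// Convenience wrapper that looks up tags in an already parsed IFD and decodes
/// their values on demand through the underlying reader.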
pub(crate) struct TagReader<'a, R: Read + Seek> {
pub reader: &'a mut SmartReader<R>,
pub ifd: &'a Directory,
pub limits: &'a Limits,
pub bigtiff: bool,
}
impl<'a, R: Read + Seek> TagReader<'a, R> {
pub(crate) fn find_tag(&mut self, tag: Tag) -> TiffResult<Option<Value>> {
Ok(match self.ifd.get(&tag) {
Some(entry) => Some(entry.clone().val(self.limits, self.bigtiff, self.reader)?),
None => None,
})
}
pub(crate) fn require_tag(&mut self, tag: Tag) -> TiffResult<Value> {
match self.find_tag(tag)? {
Some(val) => Ok(val),
None => Err(TiffError::FormatError(
TiffFormatError::RequiredTagNotFound(tag),
)),
}
}
pub fn find_tag_uint_vec<T: TryFrom<u64>>(&mut self, tag: Tag) -> TiffResult<Option<Vec<T>>> {
self.find_tag(tag)?
.map(|v| v.into_u64_vec())
.transpose()?
.map(|v| {
v.into_iter()
.map(|u| {
T::try_from(u).map_err(|_| TiffFormatError::InvalidTagValueType(tag).into())
})
.collect()
})
.transpose()
}
}

245
vendor/tiff/src/encoder/colortype.rs vendored Normal file
View File

@ -0,0 +1,245 @@
use crate::tags::{PhotometricInterpretation, SampleFormat};
/// Trait for different colortypes that can be encoded.
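///
/// The associated constants of each implementor describe the sample layout; for
/// example `RGB8` below uses three `u8` samples per pixel, with `BITS_PER_SAMPLE`
/// equal to `[8, 8, 8]` and an unsigned-integer `SAMPLE_FORMAT`.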
pub trait ColorType {
/// The type of each sample of this colortype
type Inner: super::TiffValue;
/// The value of the tiff tag `PhotometricInterpretation`
const TIFF_VALUE: PhotometricInterpretation;
/// The value of the tiff tag `BitsPerSample`
const BITS_PER_SAMPLE: &'static [u16];
/// The value of the tiff tag `SampleFormat`
const SAMPLE_FORMAT: &'static [SampleFormat];
}
pub struct Gray8;
impl ColorType for Gray8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI8;
impl ColorType for GrayI8 {
type Inner = i8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray16;
impl ColorType for Gray16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI16;
impl ColorType for GrayI16 {
type Inner = i16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray32;
impl ColorType for Gray32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI32;
impl ColorType for GrayI32 {
type Inner = i32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray32Float;
impl ColorType for Gray32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP];
}
pub struct Gray64;
impl ColorType for Gray64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint];
}
pub struct GrayI64;
impl ColorType for GrayI64 {
type Inner = i64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Int];
}
pub struct Gray64Float;
impl ColorType for Gray64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::BlackIsZero;
const BITS_PER_SAMPLE: &'static [u16] = &[64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP];
}
pub struct RGB8;
impl ColorType for RGB8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB16;
impl ColorType for RGB16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[16, 16, 16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB32;
impl ColorType for RGB32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB32Float;
impl ColorType for RGB32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 3];
}
pub struct RGB64;
impl ColorType for RGB64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}
pub struct RGB64Float;
impl ColorType for RGB64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 3];
}
pub struct RGBA8;
impl ColorType for RGBA8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA16;
impl ColorType for RGBA16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[16, 16, 16, 16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA32;
impl ColorType for RGBA32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA32Float;
impl ColorType for RGBA32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct RGBA64;
impl ColorType for RGBA64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct RGBA64Float;
impl ColorType for RGBA64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::RGB;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct CMYK8;
impl ColorType for CMYK8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK16;
impl ColorType for CMYK16 {
type Inner = u16;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[16, 16, 16, 16];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK32;
impl ColorType for CMYK32 {
type Inner = u32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK32Float;
impl ColorType for CMYK32Float {
type Inner = f32;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[32, 32, 32, 32];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct CMYK64;
impl ColorType for CMYK64 {
type Inner = u64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 4];
}
pub struct CMYK64Float;
impl ColorType for CMYK64Float {
type Inner = f64;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::CMYK;
const BITS_PER_SAMPLE: &'static [u16] = &[64, 64, 64, 64];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::IEEEFP; 4];
}
pub struct YCbCr8;
impl ColorType for YCbCr8 {
type Inner = u8;
const TIFF_VALUE: PhotometricInterpretation = PhotometricInterpretation::YCbCr;
const BITS_PER_SAMPLE: &'static [u16] = &[8, 8, 8];
const SAMPLE_FORMAT: &'static [SampleFormat] = &[SampleFormat::Uint; 3];
}

83
vendor/tiff/src/encoder/compression/deflate.rs vendored Normal file
View File

@ -0,0 +1,83 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use flate2::{write::ZlibEncoder, Compression as FlateCompression};
use std::io::Write;
/// The Deflate algorithm used to compress image data in TIFF files.
#[derive(Debug, Clone, Copy)]
pub struct Deflate {
level: FlateCompression,
}
/// The level of compression used by the Deflate algorithm.
/// It allows trading compression ratio for compression speed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum DeflateLevel {
/// The fastest possible compression mode.
Fast = 1,
/// The conservative choice between speed and ratio.
Balanced = 6,
/// The best compression available with Deflate.
Best = 9,
}
impl Default for DeflateLevel {
fn default() -> Self {
DeflateLevel::Balanced
}
}
impl Deflate {
/// Create a new deflate compressor with a specific level of compression.
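///
/// Minimal construction sketch (the chosen level is arbitrary):
///
/// ```
/// use tiff::encoder::compression::{Deflate, DeflateLevel};
/// let _compressor = Deflate::with_level(DeflateLevel::Best);
/// ```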
pub fn with_level(level: DeflateLevel) -> Self {
Self {
level: FlateCompression::new(level as u32),
}
}
}
impl Default for Deflate {
fn default() -> Self {
Self::with_level(DeflateLevel::default())
}
}
impl Compression for Deflate {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::Deflate;
fn get_algorithm(&self) -> Compressor {
Compressor::Deflate(*self)
}
}
impl CompressionAlgorithm for Deflate {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
let mut encoder = ZlibEncoder::new(writer, self.level);
encoder.write_all(bytes)?;
encoder.try_finish()?;
Ok(encoder.total_out())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_deflate() {
const EXPECTED_COMPRESSED_DATA: [u8; 64] = [
0x78, 0x9C, 0x15, 0xC7, 0xD1, 0x0D, 0x80, 0x20, 0x0C, 0x04, 0xD0, 0x55, 0x6E, 0x02,
0xA7, 0x71, 0x81, 0xA6, 0x41, 0xDA, 0x28, 0xD4, 0xF4, 0xD0, 0xF9, 0x81, 0xE4, 0xFD,
0xBC, 0xD3, 0x9C, 0x58, 0x04, 0x1C, 0xE9, 0xBD, 0xE2, 0x8A, 0x84, 0x5A, 0xD1, 0x7B,
0xE7, 0x97, 0xF4, 0xF8, 0x08, 0x8D, 0xF6, 0x66, 0x21, 0x3D, 0x3A, 0xE4, 0xA9, 0x91,
0x3E, 0xAC, 0xF1, 0x98, 0xB9, 0x70, 0x17, 0x13,
];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Deflate::default().write_to(&mut writer, TEST_DATA).unwrap();
assert_eq!(EXPECTED_COMPRESSED_DATA, compressed_data.as_slice());
}
}

47
vendor/tiff/src/encoder/compression/lzw.rs vendored Normal file
View File

@ -0,0 +1,47 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use std::io::Write;
use weezl::encode::Encoder as LZWEncoder;
/// The LZW algorithm used to compress image data in TIFF files.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Lzw;
impl Compression for Lzw {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::LZW;
fn get_algorithm(&self) -> Compressor {
Compressor::Lzw(*self)
}
}
impl CompressionAlgorithm for Lzw {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
let mut encoder = LZWEncoder::with_tiff_size_switch(weezl::BitOrder::Msb, 8);
let result = encoder.into_stream(writer).encode_all(bytes);
let byte_count = result.bytes_written as u64;
result.status.map(|_| byte_count)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_lzw() {
const EXPECTED_COMPRESSED_DATA: [u8; 63] = [
0x80, 0x15, 0x0D, 0x06, 0x93, 0x98, 0x82, 0x08, 0x20, 0x30, 0x88, 0x0E, 0x67, 0x43,
0x91, 0xA4, 0xDC, 0x67, 0x10, 0x19, 0x8D, 0xE7, 0x21, 0x01, 0x8C, 0xD0, 0x65, 0x31,
0x9A, 0xE1, 0xD1, 0x03, 0xB1, 0x86, 0x1A, 0x6F, 0x3A, 0xC1, 0x4C, 0x66, 0xF3, 0x69,
0xC0, 0xE4, 0x65, 0x39, 0x9C, 0xCD, 0x26, 0xF3, 0x74, 0x20, 0xD8, 0x67, 0x89, 0x9A,
0x4E, 0x86, 0x83, 0x69, 0xCC, 0x5D, 0x01,
];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Lzw::default().write_to(&mut writer, TEST_DATA).unwrap();
assert_eq!(EXPECTED_COMPRESSED_DATA, compressed_data.as_slice());
}
}

60
vendor/tiff/src/encoder/compression/mod.rs vendored Normal file
View File

@ -0,0 +1,60 @@
use crate::tags::CompressionMethod;
use std::io::{self, Write};
mod deflate;
mod lzw;
mod packbits;
mod uncompressed;
pub use self::deflate::{Deflate, DeflateLevel};
pub use self::lzw::Lzw;
pub use self::packbits::Packbits;
pub use self::uncompressed::Uncompressed;
/// An algorithm used for compression
pub trait CompressionAlgorithm {
/// The algorithm writes data directly into the writer.
/// It returns the total number of bytes written.
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error>;
}
/// An algorithm used for compression with associated enums and optional configurations.
pub trait Compression: CompressionAlgorithm {
/// The corresponding tag to the algorithm.
const COMPRESSION_METHOD: CompressionMethod;
/// Method to obtain a type that can store each variant of compression algorithm.
fn get_algorithm(&self) -> Compressor;
}
/// An enum to store each compression algorithm.
pub enum Compressor {
Uncompressed(Uncompressed),
Lzw(Lzw),
Deflate(Deflate),
Packbits(Packbits),
}
impl Default for Compressor {
/// The default compression strategy does not apply any compression.
fn default() -> Self {
Compressor::Uncompressed(Uncompressed::default())
}
}
impl CompressionAlgorithm for Compressor {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
match self {
Compressor::Uncompressed(algorithm) => algorithm.write_to(writer, bytes),
Compressor::Lzw(algorithm) => algorithm.write_to(writer, bytes),
Compressor::Deflate(algorithm) => algorithm.write_to(writer, bytes),
Compressor::Packbits(algorithm) => algorithm.write_to(writer, bytes),
}
}
}
#[cfg(test)]
mod tests {
pub const TEST_DATA: &'static [u8] =
b"This is a string for checking various compression algorithms.";
}

214
vendor/tiff/src/encoder/compression/packbits.rs vendored Normal file
View File

@ -0,0 +1,214 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use std::io::{BufWriter, Error, ErrorKind, Write};
/// Compressor that uses the Packbits[^note] algorithm to compress bytes.
///
/// [^note]: PackBits is often ineffective on continuous tone images,
/// including many grayscale images. In such cases, it is better
/// to leave the image uncompressed.
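///
/// Encoded form (illustrative summary, matching the decoder in this crate): a header
/// byte `n` in `0..=127` is followed by `n + 1` literal bytes, a header byte in
/// `-127..=-1` is followed by a single byte that is repeated `1 - n` times, and
/// `-128` is a no-op. For example, `b"aaab"` packs to `[0xFE, b'a', 0x00, b'b']`
/// (a run of three `a`s, then one literal `b`).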
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Packbits;
impl Compression for Packbits {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::PackBits;
fn get_algorithm(&self) -> Compressor {
Compressor::Packbits(*self)
}
}
impl CompressionAlgorithm for Packbits {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
// Inspired by https://github.com/skirridsystems/packbits
const MIN_REPT: u8 = 3; // Minimum run length worth compressing between differing blocks
const MAX_BYTES: u8 = 128; // Maximum number of bytes that can be encoded in a header byte
// Encoding for header byte based on number of bytes represented.
fn encode_diff(n: u8) -> u8 {
n - 1
}
fn encode_rept(n: u8) -> u8 {
let var = 256 - (n - 1) as u16;
var as u8
}
fn write_u8<W: Write>(writer: &mut W, byte: u8) -> Result<u64, Error> {
writer.write(&[byte]).map(|byte_count| byte_count as u64)
}
let mut bufwriter = BufWriter::new(writer);
let mut bytes_written = 0u64; // The number of bytes written into the writer
let mut offset: Option<u64> = None; // The index of the first byte written into the writer
let mut src_index: usize = 0; // Index of the current byte
let mut src_count = bytes.len(); // The number of bytes remaining to be compressed
let mut in_run = false; // Indication whether counting of similar bytes is performed
let mut run_index = 0u8; // Distance into pending bytes that a run starts
let mut bytes_pending = 0u8; // Bytes looked at but not yet output
let mut pending_index = 0usize; // Index of the first pending byte
let mut curr_byte: u8; // Byte currently being considered
let mut last_byte: u8; // Previous byte
// Need at least one byte to compress
if src_count == 0 {
return Err(Error::new(ErrorKind::WriteZero, "write zero"));
}
// Prime compressor with first character.
last_byte = bytes[src_index];
src_index += 1;
bytes_pending += 1;
while src_count - 1 != 0 {
src_count -= 1;
curr_byte = bytes[src_index];
src_index += 1;
bytes_pending += 1;
if in_run {
if (curr_byte != last_byte) || (bytes_pending > MAX_BYTES) {
offset.get_or_insert(write_u8(&mut bufwriter, encode_rept(bytes_pending - 1))?);
write_u8(&mut bufwriter, last_byte)?;
bytes_written += 2;
bytes_pending = 1;
pending_index = src_index - 1;
run_index = 0;
in_run = false;
}
} else {
if bytes_pending > MAX_BYTES {
// We have as much differing data as we can output in one chunk.
// Output MAX_BYTES leaving one byte.
offset.get_or_insert(write_u8(&mut bufwriter, encode_diff(MAX_BYTES))?);
bufwriter.write(&bytes[pending_index..pending_index + MAX_BYTES as usize])?;
bytes_written += 1 + MAX_BYTES as u64;
pending_index += MAX_BYTES as usize;
bytes_pending -= MAX_BYTES;
run_index = bytes_pending - 1; // A run could start here
} else if curr_byte == last_byte {
if (bytes_pending - run_index >= MIN_REPT) || (run_index == 0) {
// This is a worthwhile run
if run_index != 0 {
// Flush differing data out of input buffer
offset.get_or_insert(write_u8(&mut bufwriter, encode_diff(run_index))?);
bufwriter
.write(&bytes[pending_index..pending_index + run_index as usize])?;
bytes_written += 1 + run_index as u64;
}
bytes_pending -= run_index; // Length of run
in_run = true;
}
} else {
run_index = bytes_pending - 1; // A run could start here
}
}
last_byte = curr_byte;
}
// Output the remainder
if in_run {
bytes_written += 2;
offset.get_or_insert(write_u8(&mut bufwriter, encode_rept(bytes_pending))?);
write_u8(&mut bufwriter, last_byte)?;
} else {
bytes_written += 1 + bytes_pending as u64;
offset.get_or_insert(write_u8(&mut bufwriter, encode_diff(bytes_pending))?);
bufwriter.write(&bytes[pending_index..pending_index + bytes_pending as usize])?;
}
bufwriter.flush()?;
Ok(bytes_written)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_packbits_single_byte() {
// compress single byte
const UNCOMPRESSED_DATA: [u8; 1] = [0x3F];
const EXPECTED_COMPRESSED_DATA: [u8; 2] = [0x00, 0x3F];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, &UNCOMPRESSED_DATA)
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
#[test]
fn test_packbits_rept() {
// compress buffer with repetitive sequence
const UNCOMPRESSED_DATA: &'static [u8] =
b"This strrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrring hangs.";
const EXPECTED_COMPRESSED_DATA: &'static [u8] = b"\x06This st\xD1r\x09ing hangs.";
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, UNCOMPRESSED_DATA)
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
#[test]
fn test_packbits_large_rept_nonrept() {
// compress buffer with large repetitive and non-repetitive sequence
let mut data = b"This st".to_vec();
for _i in 0..158 {
data.push(b'r');
}
data.extend_from_slice(b"ing hangs.");
for i in 0..158 {
data.push(i);
}
const EXPECTED_COMPRESSED_DATA: [u8; 182] = [
0x06, 0x54, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74, 0x81, 0x72, 0xE3, 0x72, 0x7F, 0x69,
0x6E, 0x67, 0x20, 0x68, 0x61, 0x6E, 0x67, 0x73, 0x2E, 0x00, 0x01, 0x02, 0x03, 0x04,
0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12,
0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E,
0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C,
0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74,
0x75, 0x27, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81,
0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
];
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, data.as_slice())
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
#[test]
fn test_packbits() {
// compress teststring
const EXPECTED_COMPRESSED_DATA: &'static [u8] =
b"\x3CThis is a string for checking various compression algorithms.";
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Packbits::default()
.write_to(&mut writer, TEST_DATA)
.unwrap();
assert_eq!(compressed_data, EXPECTED_COMPRESSED_DATA);
}
}

37
vendor/tiff/src/encoder/compression/uncompressed.rs vendored Normal file
View File

@ -0,0 +1,37 @@
use crate::{encoder::compression::*, tags::CompressionMethod};
use std::io::Write;
/// The default algorithm which does not compress at all.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Uncompressed;
impl Compression for Uncompressed {
const COMPRESSION_METHOD: CompressionMethod = CompressionMethod::None;
fn get_algorithm(&self) -> Compressor {
Compressor::Uncompressed(*self)
}
}
impl CompressionAlgorithm for Uncompressed {
fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
writer.write(bytes).map(|byte_count| byte_count as u64)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::encoder::compression::tests::TEST_DATA;
use std::io::Cursor;
#[test]
fn test_no_compression() {
let mut compressed_data = Vec::<u8>::new();
let mut writer = Cursor::new(&mut compressed_data);
Uncompressed::default()
.write_to(&mut writer, TEST_DATA)
.unwrap();
assert_eq!(TEST_DATA, compressed_data);
}
}

681
vendor/tiff/src/encoder/mod.rs vendored Normal file
View File

@ -0,0 +1,681 @@
pub use tiff_value::*;
use std::{
cmp,
collections::BTreeMap,
convert::{TryFrom, TryInto},
io::{self, Seek, Write},
marker::PhantomData,
mem,
num::TryFromIntError,
};
use crate::{
error::TiffResult,
tags::{CompressionMethod, ResolutionUnit, Tag},
TiffError, TiffFormatError,
};
pub mod colortype;
pub mod compression;
mod tiff_value;
mod writer;
use self::colortype::*;
use self::compression::*;
use self::writer::*;
/// Encoder for Tiff and BigTiff files.
///
/// With this type you can get a `DirectoryEncoder` or an `ImageEncoder`
/// to encode Tiff/BigTiff ifd directories with images.
///
/// See `DirectoryEncoder` and `ImageEncoder`.
///
/// # Examples
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// # let image_data = vec![0; 100*100*3];
/// use tiff::encoder::*;
///
/// // create a standard Tiff file
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// tiff.write_image::<colortype::RGB8>(100, 100, &image_data).unwrap();
///
/// // create a BigTiff file
/// let mut bigtiff = TiffEncoder::new_big(&mut file).unwrap();
/// bigtiff.write_image::<colortype::RGB8>(100, 100, &image_data).unwrap();
///
/// # }
/// ```
pub struct TiffEncoder<W, K: TiffKind = TiffKindStandard> {
writer: TiffWriter<W>,
kind: PhantomData<K>,
}
/// Constructor functions to create standard Tiff files.
impl<W: Write + Seek> TiffEncoder<W> {
/// Creates a new encoder for standard Tiff files.
///
/// To create BigTiff files, use [`new_big`][TiffEncoder::new_big] or
/// [`new_generic`][TiffEncoder::new_generic].
pub fn new(writer: W) -> TiffResult<TiffEncoder<W, TiffKindStandard>> {
TiffEncoder::new_generic(writer)
}
}
/// Constructor functions to create BigTiff files.
impl<W: Write + Seek> TiffEncoder<W, TiffKindBig> {
/// Creates a new encoder for BigTiff files.
///
/// To create standard Tiff files, use [`new`][TiffEncoder::new] or
/// [`new_generic`][TiffEncoder::new_generic].
pub fn new_big(writer: W) -> TiffResult<Self> {
TiffEncoder::new_generic(writer)
}
}
/// Generic functions that are available for both Tiff and BigTiff encoders.
impl<W: Write + Seek, K: TiffKind> TiffEncoder<W, K> {
/// Creates a new Tiff or BigTiff encoder, inferred from the return type.
pub fn new_generic(writer: W) -> TiffResult<Self> {
let mut encoder = TiffEncoder {
writer: TiffWriter::new(writer),
kind: PhantomData,
};
K::write_header(&mut encoder.writer)?;
Ok(encoder)
}
/// Create a [`DirectoryEncoder`] to encode an ifd directory.
pub fn new_directory(&mut self) -> TiffResult<DirectoryEncoder<W, K>> {
DirectoryEncoder::new(&mut self.writer)
}
/// Create an [`ImageEncoder`] to encode an image one slice at a time.
pub fn new_image<C: ColorType>(
&mut self,
width: u32,
height: u32,
) -> TiffResult<ImageEncoder<W, C, K, Uncompressed>> {
let encoder = DirectoryEncoder::new(&mut self.writer)?;
ImageEncoder::new(encoder, width, height)
}
/// Create an [`ImageEncoder`] to encode an image one slice at a time.
pub fn new_image_with_compression<C: ColorType, D: Compression>(
&mut self,
width: u32,
height: u32,
compression: D,
) -> TiffResult<ImageEncoder<W, C, K, D>> {
let encoder = DirectoryEncoder::new(&mut self.writer)?;
ImageEncoder::with_compression(encoder, width, height, compression)
}
/// Convenience function to write an entire image from memory.
pub fn write_image<C: ColorType>(
&mut self,
width: u32,
height: u32,
data: &[C::Inner],
) -> TiffResult<()>
where
[C::Inner]: TiffValue,
{
let encoder = DirectoryEncoder::new(&mut self.writer)?;
let image: ImageEncoder<W, C, K> = ImageEncoder::new(encoder, width, height)?;
image.write_data(data)
}
/// Convenience function to write an entire image from memory with a given compression.
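///
/// A usage sketch with an in-memory buffer and LZW compression (buffer contents
/// are placeholders):
///
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// # let image_data = vec![0; 100*100*3];
/// use tiff::encoder::*;
///
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// tiff.write_image_with_compression::<colortype::RGB8, _>(100, 100, compression::Lzw, &image_data).unwrap();
/// # }
/// ```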
pub fn write_image_with_compression<C: ColorType, D: Compression>(
&mut self,
width: u32,
height: u32,
compression: D,
data: &[C::Inner],
) -> TiffResult<()>
where
[C::Inner]: TiffValue,
{
let encoder = DirectoryEncoder::new(&mut self.writer)?;
let image: ImageEncoder<W, C, K, D> =
ImageEncoder::with_compression(encoder, width, height, compression)?;
image.write_data(data)
}
}
/// Low level interface to encode ifd directories.
///
/// You should call `finish` on this when you are finished with it.
/// Encoding can silently fail while this is dropping.
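///
/// A minimal sketch writing one custom directory (the tag and value are arbitrary):
///
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// use tiff::encoder::TiffEncoder;
/// use tiff::tags::Tag;
///
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// let mut dir = tiff.new_directory().unwrap();
/// dir.write_tag(Tag::Artist, "image-tiff").unwrap();
/// dir.finish().unwrap();
/// # }
/// ```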
pub struct DirectoryEncoder<'a, W: 'a + Write + Seek, K: TiffKind> {
writer: &'a mut TiffWriter<W>,
dropped: bool,
// We use BTreeMap to make sure tags are written in correct order
ifd_pointer_pos: u64,
ifd: BTreeMap<u16, DirectoryEntry<K::OffsetType>>,
}
impl<'a, W: 'a + Write + Seek, K: TiffKind> DirectoryEncoder<'a, W, K> {
fn new(writer: &'a mut TiffWriter<W>) -> TiffResult<Self> {
// the previous word is the IFD offset position
let ifd_pointer_pos = writer.offset() - mem::size_of::<K::OffsetType>() as u64;
writer.pad_word_boundary()?; // TODO: Do we need to adjust this for BigTiff?
Ok(DirectoryEncoder {
writer,
dropped: false,
ifd_pointer_pos,
ifd: BTreeMap::new(),
})
}
/// Write a single ifd tag.
pub fn write_tag<T: TiffValue>(&mut self, tag: Tag, value: T) -> TiffResult<()> {
let mut bytes = Vec::with_capacity(value.bytes());
{
let mut writer = TiffWriter::new(&mut bytes);
value.write(&mut writer)?;
}
self.ifd.insert(
tag.to_u16(),
DirectoryEntry {
data_type: <T>::FIELD_TYPE.to_u16(),
count: value.count().try_into()?,
data: bytes,
},
);
Ok(())
}
fn write_directory(&mut self) -> TiffResult<u64> {
// Start by writing out all values
for &mut DirectoryEntry {
data: ref mut bytes,
..
} in self.ifd.values_mut()
{
let data_bytes = mem::size_of::<K::OffsetType>();
if bytes.len() > data_bytes {
let offset = self.writer.offset();
self.writer.write_bytes(bytes)?;
*bytes = vec![0; data_bytes];
let mut writer = TiffWriter::new(bytes as &mut [u8]);
K::write_offset(&mut writer, offset)?;
} else {
while bytes.len() < data_bytes {
bytes.push(0);
}
}
}
let offset = self.writer.offset();
K::write_entry_count(&mut self.writer, self.ifd.len())?;
for (
tag,
&DirectoryEntry {
data_type: ref field_type,
ref count,
data: ref offset,
},
) in self.ifd.iter()
{
self.writer.write_u16(*tag)?;
self.writer.write_u16(*field_type)?;
(*count).write(&mut self.writer)?;
self.writer.write_bytes(offset)?;
}
Ok(offset)
}
/// Write some data to the tiff file, the offset of the data is returned.
///
/// This could be used to write tiff strips.
pub fn write_data<T: TiffValue>(&mut self, value: T) -> TiffResult<u64> {
let offset = self.writer.offset();
value.write(&mut self.writer)?;
Ok(offset)
}
/// Provides the number of bytes written by the underlying TiffWriter during the last call.
fn last_written(&self) -> u64 {
self.writer.last_written()
}
fn finish_internal(&mut self) -> TiffResult<()> {
let ifd_pointer = self.write_directory()?;
let curr_pos = self.writer.offset();
self.writer.goto_offset(self.ifd_pointer_pos)?;
K::write_offset(&mut self.writer, ifd_pointer)?;
self.writer.goto_offset(curr_pos)?;
K::write_offset(&mut self.writer, 0)?;
self.dropped = true;
Ok(())
}
/// Write out the ifd directory.
pub fn finish(mut self) -> TiffResult<()> {
self.finish_internal()
}
}
impl<'a, W: Write + Seek, K: TiffKind> Drop for DirectoryEncoder<'a, W, K> {
fn drop(&mut self) {
if !self.dropped {
let _ = self.finish_internal();
}
}
}
/// Type to encode images strip by strip.
///
/// You should call `finish` on this when you are finished with it.
/// Encoding can silently fail while this is dropping.
///
/// # Examples
/// ```
/// # extern crate tiff;
/// # fn main() {
/// # let mut file = std::io::Cursor::new(Vec::new());
/// # let image_data = vec![0; 100*100*3];
/// use tiff::encoder::*;
/// use tiff::tags::Tag;
///
/// let mut tiff = TiffEncoder::new(&mut file).unwrap();
/// let mut image = tiff.new_image::<colortype::RGB8>(100, 100).unwrap();
///
/// // You can encode tags here
/// image.encoder().write_tag(Tag::Artist, "Image-tiff").unwrap();
///
/// // Strip size can be configured before writing data
/// image.rows_per_strip(2).unwrap();
///
/// let mut idx = 0;
/// while image.next_strip_sample_count() > 0 {
/// let sample_count = image.next_strip_sample_count() as usize;
/// image.write_strip(&image_data[idx..idx+sample_count]).unwrap();
/// idx += sample_count;
/// }
/// image.finish().unwrap();
/// # }
/// ```
/// You can also call the `write_data` function, which will encode strip by strip and finish the image.
pub struct ImageEncoder<
'a,
W: 'a + Write + Seek,
C: ColorType,
K: TiffKind,
D: Compression = Uncompressed,
> {
encoder: DirectoryEncoder<'a, W, K>,
strip_idx: u64,
strip_count: u64,
row_samples: u64,
width: u32,
height: u32,
rows_per_strip: u64,
strip_offsets: Vec<K::OffsetType>,
strip_byte_count: Vec<K::OffsetType>,
dropped: bool,
compression: D,
_phantom: ::std::marker::PhantomData<C>,
}
impl<'a, W: 'a + Write + Seek, T: ColorType, K: TiffKind, D: Compression>
ImageEncoder<'a, W, T, K, D>
{
fn new(encoder: DirectoryEncoder<'a, W, K>, width: u32, height: u32) -> TiffResult<Self>
where
D: Default,
{
Self::with_compression(encoder, width, height, D::default())
}
fn with_compression(
mut encoder: DirectoryEncoder<'a, W, K>,
width: u32,
height: u32,
compression: D,
) -> TiffResult<Self> {
if width == 0 || height == 0 {
return Err(TiffError::FormatError(TiffFormatError::InvalidDimensions(
width, height,
)));
}
let row_samples = u64::from(width) * u64::try_from(<T>::BITS_PER_SAMPLE.len())?;
let row_bytes = row_samples * u64::from(<T::Inner>::BYTE_LEN);
// Limit the strip size to prevent potential memory and security issues.
// Also keep the multiple-strip code path exercised.
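// For example (illustrative): RGB8 at width 100 gives row_bytes = 300, so the
// default below is rows_per_strip = ceil(1_000_000 / 300) = 3334 for everything
// except PackBits.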
let rows_per_strip = {
match D::COMPRESSION_METHOD {
CompressionMethod::PackBits => 1, // Each row must be packed separately. Do not compress across row boundaries
_ => (1_000_000 + row_bytes - 1) / row_bytes,
}
};
let strip_count = (u64::from(height) + rows_per_strip - 1) / rows_per_strip;
encoder.write_tag(Tag::ImageWidth, width)?;
encoder.write_tag(Tag::ImageLength, height)?;
encoder.write_tag(Tag::Compression, D::COMPRESSION_METHOD.to_u16())?;
encoder.write_tag(Tag::BitsPerSample, <T>::BITS_PER_SAMPLE)?;
let sample_format: Vec<_> = <T>::SAMPLE_FORMAT.iter().map(|s| s.to_u16()).collect();
encoder.write_tag(Tag::SampleFormat, &sample_format[..])?;
encoder.write_tag(Tag::PhotometricInterpretation, <T>::TIFF_VALUE.to_u16())?;
encoder.write_tag(Tag::RowsPerStrip, u32::try_from(rows_per_strip)?)?;
encoder.write_tag(
Tag::SamplesPerPixel,
u16::try_from(<T>::BITS_PER_SAMPLE.len())?,
)?;
encoder.write_tag(Tag::XResolution, Rational { n: 1, d: 1 })?;
encoder.write_tag(Tag::YResolution, Rational { n: 1, d: 1 })?;
encoder.write_tag(Tag::ResolutionUnit, ResolutionUnit::None.to_u16())?;
Ok(ImageEncoder {
encoder,
strip_count,
strip_idx: 0,
row_samples,
rows_per_strip,
width,
height,
strip_offsets: Vec::new(),
strip_byte_count: Vec::new(),
dropped: false,
compression,
_phantom: ::std::marker::PhantomData,
})
}
/// Number of samples the next strip should have.
pub fn next_strip_sample_count(&self) -> u64 {
if self.strip_idx >= self.strip_count {
return 0;
}
let raw_start_row = self.strip_idx * self.rows_per_strip;
let start_row = cmp::min(u64::from(self.height), raw_start_row);
let end_row = cmp::min(u64::from(self.height), raw_start_row + self.rows_per_strip);
(end_row - start_row) * self.row_samples
}
/// Write a single strip.
pub fn write_strip(&mut self, value: &[T::Inner]) -> TiffResult<()>
where
[T::Inner]: TiffValue,
{
let samples = self.next_strip_sample_count();
if u64::try_from(value.len())? != samples {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Slice is wrong size for strip",
)
.into());
}
// Write the (possibly compressed) data to the encoder.
let offset = self.encoder.write_data(value)?;
let byte_count = self.encoder.last_written() as usize;
self.strip_offsets.push(K::convert_offset(offset)?);
self.strip_byte_count.push(byte_count.try_into()?);
self.strip_idx += 1;
Ok(())
}
/// Write strips from data
pub fn write_data(mut self, data: &[T::Inner]) -> TiffResult<()>
where
[T::Inner]: TiffValue,
{
let num_pix = usize::try_from(self.width)?
.checked_mul(usize::try_from(self.height)?)
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Image width * height exceeds usize",
)
})?;
if data.len() < num_pix {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Input data slice is undersized for provided dimensions",
)
.into());
}
self.encoder
.writer
.set_compression(self.compression.get_algorithm());
let mut idx = 0;
while self.next_strip_sample_count() > 0 {
let sample_count = usize::try_from(self.next_strip_sample_count())?;
self.write_strip(&data[idx..idx + sample_count])?;
idx += sample_count;
}
self.encoder.writer.reset_compression();
self.finish()?;
Ok(())
}
/// Set image resolution
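///
/// For example, `image.resolution(ResolutionUnit::Inch, Rational { n: 300, d: 1 })`
/// records a resolution of 300 dots per inch (illustrative values).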
pub fn resolution(&mut self, unit: ResolutionUnit, value: Rational) {
self.encoder
.write_tag(Tag::ResolutionUnit, unit.to_u16())
.unwrap();
self.encoder
.write_tag(Tag::XResolution, value.clone())
.unwrap();
self.encoder.write_tag(Tag::YResolution, value).unwrap();
}
/// Set image resolution unit
pub fn resolution_unit(&mut self, unit: ResolutionUnit) {
self.encoder
.write_tag(Tag::ResolutionUnit, unit.to_u16())
.unwrap();
}
/// Set image x-resolution
pub fn x_resolution(&mut self, value: Rational) {
self.encoder.write_tag(Tag::XResolution, value).unwrap();
}
/// Set image y-resolution
pub fn y_resolution(&mut self, value: Rational) {
self.encoder.write_tag(Tag::YResolution, value).unwrap();
}
/// Set image number of lines per strip
///
/// This function needs to be called before any calls to `write_data` or
/// `write_strip` and will return an error otherwise.
pub fn rows_per_strip(&mut self, value: u32) -> TiffResult<()> {
if self.strip_idx != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot change strip size after data was written",
)
.into());
}
// Write tag as 32 bits
self.encoder.write_tag(Tag::RowsPerStrip, value)?;
let value: u64 = value as u64;
self.strip_count = (self.height as u64 + value - 1) / value;
self.rows_per_strip = value;
Ok(())
}
fn finish_internal(&mut self) -> TiffResult<()> {
self.encoder
.write_tag(Tag::StripOffsets, K::convert_slice(&self.strip_offsets))?;
self.encoder.write_tag(
Tag::StripByteCounts,
K::convert_slice(&self.strip_byte_count),
)?;
self.dropped = true;
self.encoder.finish_internal()
}
/// Get a reference of the underlying `DirectoryEncoder`
pub fn encoder(&mut self) -> &mut DirectoryEncoder<'a, W, K> {
&mut self.encoder
}
/// Write out image and ifd directory.
pub fn finish(mut self) -> TiffResult<()> {
self.finish_internal()
}
}
impl<'a, W: Write + Seek, C: ColorType, K: TiffKind, D: Compression> Drop
for ImageEncoder<'a, W, C, K, D>
{
fn drop(&mut self) {
if !self.dropped {
let _ = self.finish_internal();
}
}
}
struct DirectoryEntry<S> {
data_type: u16,
count: S,
data: Vec<u8>,
}
/// Trait to abstract over Tiff/BigTiff differences.
///
/// Implemented for [`TiffKindStandard`] and [`TiffKindBig`].
pub trait TiffKind {
/// The type of offset fields, `u32` for normal Tiff, `u64` for BigTiff.
type OffsetType: TryFrom<usize, Error = TryFromIntError> + Into<u64> + TiffValue;
/// Needed for the `convert_slice` method.
type OffsetArrayType: ?Sized + TiffValue;
/// Write the (Big)Tiff header.
fn write_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()>;
/// Convert a file offset to `Self::OffsetType`.
///
/// This returns an error for normal Tiff if the offset is larger than `u32::MAX`.
fn convert_offset(offset: u64) -> TiffResult<Self::OffsetType>;
/// Write an offset value to the given writer.
///
/// Like `convert_offset`, this errors if `offset > u32::MAX` for normal Tiff.
fn write_offset<W: Write>(writer: &mut TiffWriter<W>, offset: u64) -> TiffResult<()>;
/// Write the IFD entry count field with the given `count` value.
///
/// The entry count field is an `u16` for normal Tiff and `u64` for BigTiff. Errors
/// if the given `usize` is larger than the representable values.
fn write_entry_count<W: Write>(writer: &mut TiffWriter<W>, count: usize) -> TiffResult<()>;
/// Internal helper method for satisfying Rust's type checker.
///
/// The `TiffValue` trait is implemented for both primitive values (e.g. `u8`, `u32`) and
/// slices of primitive values (e.g. `[u8]`, `[u32]`). However, this is not represented in
/// the type system, so there is no guarantee that for all `T: TiffValue` there is also
/// an implementation of `TiffValue` for `[T]`. This method works around that problem by
/// providing a conversion from `[T]` to some value that implements `TiffValue`, thereby
/// making all slices of `OffsetType` usable with `write_tag` and similar methods.
///
/// Implementations of this trait should always set `OffsetArrayType` to `[OffsetType]`.
fn convert_slice(slice: &[Self::OffsetType]) -> &Self::OffsetArrayType;
}
/// Create a standard Tiff file.
pub struct TiffKindStandard;
impl TiffKind for TiffKindStandard {
type OffsetType = u32;
type OffsetArrayType = [u32];
fn write_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
write_tiff_header(writer)?;
// blank the IFD offset location
writer.write_u32(0)?;
Ok(())
}
fn convert_offset(offset: u64) -> TiffResult<Self::OffsetType> {
Ok(Self::OffsetType::try_from(offset)?)
}
fn write_offset<W: Write>(writer: &mut TiffWriter<W>, offset: u64) -> TiffResult<()> {
writer.write_u32(u32::try_from(offset)?)?;
Ok(())
}
fn write_entry_count<W: Write>(writer: &mut TiffWriter<W>, count: usize) -> TiffResult<()> {
writer.write_u16(u16::try_from(count)?)?;
Ok(())
}
fn convert_slice(slice: &[Self::OffsetType]) -> &Self::OffsetArrayType {
slice
}
}
/// Create a BigTiff file.
pub struct TiffKindBig;
impl TiffKind for TiffKindBig {
type OffsetType = u64;
type OffsetArrayType = [u64];
fn write_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
write_bigtiff_header(writer)?;
// blank the IFD offset location
writer.write_u64(0)?;
Ok(())
}
fn convert_offset(offset: u64) -> TiffResult<Self::OffsetType> {
Ok(offset)
}
fn write_offset<W: Write>(writer: &mut TiffWriter<W>, offset: u64) -> TiffResult<()> {
writer.write_u64(offset)?;
Ok(())
}
fn write_entry_count<W: Write>(writer: &mut TiffWriter<W>, count: usize) -> TiffResult<()> {
writer.write_u64(u64::try_from(count)?)?;
Ok(())
}
fn convert_slice(slice: &[Self::OffsetType]) -> &Self::OffsetArrayType {
slice
}
}

523
vendor/tiff/src/encoder/tiff_value.rs vendored Normal file
View File

@ -0,0 +1,523 @@
use std::{borrow::Cow, io::Write, slice::from_ref};
use crate::{bytecast, tags::Type, TiffError, TiffFormatError, TiffResult};
use super::writer::TiffWriter;
/// Trait for types that can be encoded in a tiff file
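///
/// It is implemented for the primitive integer and float types, slices of them,
/// `str`, and the wrapper types `Ifd`, `Ifd8`, `Rational` and `SRational` defined
/// at the bottom of this module.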
pub trait TiffValue {
const BYTE_LEN: u8;
const FIELD_TYPE: Type;
fn count(&self) -> usize;
fn bytes(&self) -> usize {
self.count() * usize::from(Self::BYTE_LEN)
}
/// Access this value as a contiguous sequence of bytes.
/// If there is no trivial representation, allocate it on the heap.
fn data(&self) -> Cow<[u8]>;
/// Write this value to a TiffWriter.
/// While the default implementation will work in all cases, it may require unnecessary allocations.
/// The written bytes of any custom implementation MUST be the same as yielded by `self.data()`.
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_bytes(&self.data())?;
Ok(())
}
}
impl TiffValue for [u8] {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::BYTE;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(self)
}
}
impl TiffValue for [i8] {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::SBYTE;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i8_as_ne_bytes(self))
}
}
impl TiffValue for [u16] {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SHORT;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u16_as_ne_bytes(self))
}
}
impl TiffValue for [i16] {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SSHORT;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i16_as_ne_bytes(self))
}
}
impl TiffValue for [u32] {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::LONG;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u32_as_ne_bytes(self))
}
}
impl TiffValue for [i32] {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::SLONG;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i32_as_ne_bytes(self))
}
}
impl TiffValue for [u64] {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::LONG8;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u64_as_ne_bytes(self))
}
}
impl TiffValue for [i64] {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::SLONG8;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i64_as_ne_bytes(self))
}
}
impl TiffValue for [f32] {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::FLOAT;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
// We write using native endian so this should be safe
Cow::Borrowed(bytecast::f32_as_ne_bytes(self))
}
}
impl TiffValue for [f64] {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::DOUBLE;
fn count(&self) -> usize {
self.len()
}
fn data(&self) -> Cow<[u8]> {
// We write using native endian so this should be safe
Cow::Borrowed(bytecast::f64_as_ne_bytes(self))
}
}
impl TiffValue for u8 {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::BYTE;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u8(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(from_ref(self))
}
}
impl TiffValue for i8 {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::SBYTE;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i8(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i8_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for u16 {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SHORT;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u16(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u16_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for i16 {
const BYTE_LEN: u8 = 2;
const FIELD_TYPE: Type = Type::SSHORT;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i16(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i16_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for u32 {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::LONG;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u32(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u32_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for i32 {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::SLONG;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i32(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i32_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for u64 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::LONG8;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u64(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u64_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for i64 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::SLONG8;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i64(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::i64_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for f32 {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::FLOAT;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_f32(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::f32_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for f64 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::DOUBLE;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_f64(*self)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::f64_as_ne_bytes(from_ref(self)))
}
}
impl TiffValue for Ifd {
const BYTE_LEN: u8 = 4;
const FIELD_TYPE: Type = Type::IFD;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u32(self.0)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u32_as_ne_bytes(from_ref(&self.0)))
}
}
impl TiffValue for Ifd8 {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::IFD8;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u64(self.0)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Borrowed(bytecast::u64_as_ne_bytes(from_ref(&self.0)))
}
}
impl TiffValue for Rational {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::RATIONAL;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_u32(self.n)?;
writer.write_u32(self.d)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Owned({
let first_dword = bytecast::u32_as_ne_bytes(from_ref(&self.n));
let second_dword = bytecast::u32_as_ne_bytes(from_ref(&self.d));
[first_dword, second_dword].concat()
})
}
}
impl TiffValue for SRational {
const BYTE_LEN: u8 = 8;
const FIELD_TYPE: Type = Type::SRATIONAL;
fn count(&self) -> usize {
1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
writer.write_i32(self.n)?;
writer.write_i32(self.d)?;
Ok(())
}
fn data(&self) -> Cow<[u8]> {
Cow::Owned({
let first_dword = bytecast::i32_as_ne_bytes(from_ref(&self.n));
let second_dword = bytecast::i32_as_ne_bytes(from_ref(&self.d));
[first_dword, second_dword].concat()
})
}
}
impl TiffValue for str {
const BYTE_LEN: u8 = 1;
const FIELD_TYPE: Type = Type::ASCII;
fn count(&self) -> usize {
self.len() + 1
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
if self.is_ascii() && !self.bytes().any(|b| b == 0) {
writer.write_bytes(self.as_bytes())?;
writer.write_u8(0)?;
Ok(())
} else {
Err(TiffError::FormatError(TiffFormatError::InvalidTag))
}
}
fn data(&self) -> Cow<[u8]> {
Cow::Owned({
if self.is_ascii() && !self.bytes().any(|b| b == 0) {
let bytes: &[u8] = self.as_bytes();
[bytes, &[0]].concat()
} else {
vec![]
}
})
}
}
impl<'a, T: TiffValue + ?Sized> TiffValue for &'a T {
const BYTE_LEN: u8 = T::BYTE_LEN;
const FIELD_TYPE: Type = T::FIELD_TYPE;
fn count(&self) -> usize {
(*self).count()
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
(*self).write(writer)
}
fn data(&self) -> Cow<[u8]> {
T::data(self)
}
}
macro_rules! impl_tiff_value_for_contiguous_sequence {
($inner_type:ty; $bytes:expr; $field_type:expr) => {
impl $crate::encoder::TiffValue for [$inner_type] {
const BYTE_LEN: u8 = $bytes;
const FIELD_TYPE: Type = $field_type;
fn count(&self) -> usize {
self.len()
}
fn write<W: Write>(&self, writer: &mut TiffWriter<W>) -> TiffResult<()> {
for x in self {
x.write(writer)?;
}
Ok(())
}
fn data(&self) -> Cow<[u8]> {
let mut buf: Vec<u8> = Vec::with_capacity(Self::BYTE_LEN as usize * self.len());
for x in self {
buf.extend_from_slice(&x.data());
}
Cow::Owned(buf)
}
}
};
}
impl_tiff_value_for_contiguous_sequence!(Ifd; 4; Type::IFD);
impl_tiff_value_for_contiguous_sequence!(Ifd8; 8; Type::IFD8);
impl_tiff_value_for_contiguous_sequence!(Rational; 8; Type::RATIONAL);
impl_tiff_value_for_contiguous_sequence!(SRational; 8; Type::SRATIONAL);
/// Type to represent tiff values of type `IFD`
#[derive(Clone)]
pub struct Ifd(pub u32);
/// Type to represent tiff values of type `IFD8`
#[derive(Clone)]
pub struct Ifd8(pub u64);
/// Type to represent tiff values of type `RATIONAL`
#[derive(Clone)]
pub struct Rational {
pub n: u32,
pub d: u32,
}
/// Type to represent tiff values of type `SRATIONAL`
#[derive(Clone)]
pub struct SRational {
pub n: i32,
pub d: i32,
}
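The `TiffValue` impls in this file are what the encoder's `write_tag` consumes: each impl fixes the TIFF field type and the byte layout produced by `data()`/`write()`. A minimal sketch of using the rational wrappers defined above through the public encoder API, assuming `Rational` is re-exported from `tiff::encoder` alongside `SRational`; the tag numbers 65000/65001 are arbitrary private tags chosen only for illustration:

use std::io::Cursor;
use tiff::encoder::{colortype, Rational, SRational, TiffEncoder};
use tiff::tags::Tag;

fn main() -> tiff::TiffResult<()> {
    let mut buf = Cursor::new(Vec::new());
    let mut tiff = TiffEncoder::new(&mut buf)?;
    // A 1x1 grayscale image gives us an IFD to attach the extra tags to.
    let mut image = tiff.new_image::<colortype::Gray8>(1, 1)?;
    image.write_strip(&[0u8])?;
    // RATIONAL is written as two u32 words (numerator, denominator),
    // SRATIONAL as two i32 words, matching the `data()` impls above.
    image.encoder().write_tag(Tag::Unknown(65000), Rational { n: 1, d: 3 })?;
    image.encoder().write_tag(Tag::Unknown(65001), SRational { n: -1, d: 100 })?;
    image.finish()?;
    Ok(())
}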

188
vendor/tiff/src/encoder/writer.rs vendored Normal file
View File

@ -0,0 +1,188 @@
use crate::encoder::compression::*;
use crate::error::TiffResult;
use std::io::{self, Seek, SeekFrom, Write};
pub fn write_tiff_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
#[cfg(target_endian = "little")]
let boi: u8 = 0x49;
#[cfg(not(target_endian = "little"))]
let boi: u8 = 0x4d;
writer.writer.write_all(&[boi, boi])?;
writer.writer.write_all(&42u16.to_ne_bytes())?;
writer.offset += 4;
Ok(())
}
/// Writes a BigTiff header, excluding the IFD offset field.
///
/// Writes the byte order, version number, offset byte size, and zero constant fields. Does
/// _not_ write the offset to the first IFD; this should be done by the caller.
pub fn write_bigtiff_header<W: Write>(writer: &mut TiffWriter<W>) -> TiffResult<()> {
#[cfg(target_endian = "little")]
let boi: u8 = 0x49;
#[cfg(not(target_endian = "little"))]
let boi: u8 = 0x4d;
// byte order indication
writer.writer.write_all(&[boi, boi])?;
// version number
writer.writer.write_all(&43u16.to_ne_bytes())?;
// bytesize of offsets (pointer size)
writer.writer.write_all(&8u16.to_ne_bytes())?;
// always 0
writer.writer.write_all(&0u16.to_ne_bytes())?;
// we wrote 8 bytes, so set the internal offset accordingly
writer.offset += 8;
Ok(())
}
pub struct TiffWriter<W> {
writer: W,
offset: u64,
byte_count: u64,
compressor: Compressor,
}
impl<W: Write> TiffWriter<W> {
pub fn new(writer: W) -> Self {
Self {
writer,
offset: 0,
byte_count: 0,
compressor: Compressor::default(),
}
}
pub fn set_compression(&mut self, compressor: Compressor) {
self.compressor = compressor;
}
pub fn reset_compression(&mut self) {
self.compressor = Compressor::default();
}
pub fn offset(&self) -> u64 {
self.offset
}
pub fn last_written(&self) -> u64 {
self.byte_count
}
pub fn write_bytes(&mut self, bytes: &[u8]) -> Result<(), io::Error> {
self.byte_count = self.compressor.write_to(&mut self.writer, bytes)?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u8(&mut self, n: u8) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i8(&mut self, n: i8) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u16(&mut self, n: u16) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i16(&mut self, n: i16) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u32(&mut self, n: u32) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i32(&mut self, n: i32) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_u64(&mut self, n: u64) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_i64(&mut self, n: i64) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &n.to_ne_bytes())?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_f32(&mut self, n: f32) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &u32::to_ne_bytes(n.to_bits()))?;
self.offset += self.byte_count;
Ok(())
}
pub fn write_f64(&mut self, n: f64) -> Result<(), io::Error> {
self.byte_count = self
.compressor
.write_to(&mut self.writer, &u64::to_ne_bytes(n.to_bits()))?;
self.offset += self.byte_count;
Ok(())
}
pub fn pad_word_boundary(&mut self) -> Result<(), io::Error> {
if self.offset % 4 != 0 {
let padding = [0, 0, 0];
let padd_len = 4 - (self.offset % 4);
self.writer.write_all(&padding[..padd_len as usize])?;
self.offset += padd_len;
}
Ok(())
}
}
impl<W: Seek> TiffWriter<W> {
pub fn goto_offset(&mut self, offset: u64) -> Result<(), io::Error> {
self.offset = offset;
self.writer.seek(SeekFrom::Start(offset as u64))?;
Ok(())
}
}
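`pad_word_boundary` above keeps subsequent writes aligned by rounding the running `offset` up to the next multiple of four. A standalone sketch of just that arithmetic, independent of the crate and purely illustrative:

/// Number of zero bytes pad_word_boundary would emit for a given offset.
fn padding_for(offset: u64) -> u64 {
    if offset % 4 == 0 {
        0
    } else {
        4 - (offset % 4)
    }
}

fn main() {
    assert_eq!(padding_for(8), 0); // already on a word boundary
    assert_eq!(padding_for(9), 3); // 9 is padded up to 12
    assert_eq!(padding_for(11), 1); // 11 is padded up to 12
}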

369
vendor/tiff/src/error.rs vendored Normal file
View File

@ -0,0 +1,369 @@
use std::error::Error;
use std::fmt;
use std::fmt::Display;
use std::io;
use std::str;
use std::string;
use std::sync::Arc;
use jpeg::UnsupportedFeature;
use crate::decoder::{ifd::Value, ChunkType};
use crate::tags::{
CompressionMethod, PhotometricInterpretation, PlanarConfiguration, SampleFormat, Tag,
};
use crate::ColorType;
use crate::weezl::LzwError;
/// Tiff error kinds.
#[derive(Debug)]
pub enum TiffError {
/// The Image is not formatted properly.
FormatError(TiffFormatError),
/// The Decoder does not support features required by the image.
UnsupportedError(TiffUnsupportedError),
/// An I/O Error occurred while decoding the image.
IoError(io::Error),
/// The limits of the Decoder are exceeded.
LimitsExceeded,
/// An integer conversion to or from a platform size failed, either due to
/// limits of the platform size or limits of the format.
IntSizeError,
/// The image does not support the requested operation
UsageError(UsageError),
}
/// The image is not formatted properly.
///
/// This indicates that the encoder producing the image might behave incorrectly or that the input
/// file has been corrupted.
///
/// The list of variants may grow to incorporate errors of future features. Matching against this
/// exhaustively is not covered by interface stability guarantees.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum TiffFormatError {
TiffSignatureNotFound,
TiffSignatureInvalid,
ImageFileDirectoryNotFound,
InconsistentSizesEncountered,
UnexpectedCompressedData {
actual_bytes: usize,
required_bytes: usize,
},
InconsistentStripSamples {
actual_samples: usize,
required_samples: usize,
},
InvalidDimensions(u32, u32),
InvalidTag,
InvalidTagValueType(Tag),
RequiredTagNotFound(Tag),
UnknownPredictor(u16),
ByteExpected(Value),
UnsignedIntegerExpected(Value),
SignedIntegerExpected(Value),
Format(String),
RequiredTagEmpty(Tag),
StripTileTagConflict,
CycleInOffsets,
JpegDecoder(JpegDecoderError),
}
impl fmt::Display for TiffFormatError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::TiffFormatError::*;
match *self {
TiffSignatureNotFound => write!(fmt, "TIFF signature not found."),
TiffSignatureInvalid => write!(fmt, "TIFF signature invalid."),
ImageFileDirectoryNotFound => write!(fmt, "Image file directory not found."),
InconsistentSizesEncountered => write!(fmt, "Inconsistent sizes encountered."),
UnexpectedCompressedData {
actual_bytes,
required_bytes,
} => {
write!(
fmt,
"Decompression returned different amount of bytes than expected: got {}, expected {}.",
actual_bytes, required_bytes
)
}
InconsistentStripSamples {
actual_samples,
required_samples,
} => {
write!(
fmt,
"Inconsistent elements in strip: got {}, expected {}.",
actual_samples, required_samples
)
}
InvalidDimensions(width, height) => write!(fmt, "Invalid dimensions: {}x{}.", width, height),
InvalidTag => write!(fmt, "Image contains invalid tag."),
InvalidTagValueType(ref tag) => {
write!(fmt, "Tag `{:?}` did not have the expected value type.", tag)
}
RequiredTagNotFound(ref tag) => write!(fmt, "Required tag `{:?}` not found.", tag),
UnknownPredictor(ref predictor) => {
write!(fmt, "Unknown predictor “{}” encountered", predictor)
}
ByteExpected(ref val) => write!(fmt, "Expected byte, {:?} found.", val),
UnsignedIntegerExpected(ref val) => {
write!(fmt, "Expected unsigned integer, {:?} found.", val)
}
SignedIntegerExpected(ref val) => {
write!(fmt, "Expected signed integer, {:?} found.", val)
}
Format(ref val) => write!(fmt, "Invalid format: {:?}.", val),
RequiredTagEmpty(ref val) => write!(fmt, "Required tag {:?} was empty.", val),
StripTileTagConflict => write!(fmt, "File should contain either (StripByteCounts and StripOffsets) or (TileByteCounts and TileOffsets); a different combination was found."),
CycleInOffsets => write!(fmt, "File contained a cycle in the list of IFDs"),
JpegDecoder(ref error) => write!(fmt, "{}", error),
}
}
}
/// The Decoder does not support features required by the image.
///
/// This only captures known failures for which the standard does not require support, or for
/// which an implementation is planned but not yet completed. Some variants may become unused over
/// time and will then get deprecated before being removed.
///
/// The list of variants may grow. Matching against this exhaustively is not covered by interface
/// stability guarantees.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum TiffUnsupportedError {
FloatingPointPredictor(ColorType),
HorizontalPredictor(ColorType),
InterpretationWithBits(PhotometricInterpretation, Vec<u8>),
UnknownInterpretation,
UnknownCompressionMethod,
UnsupportedCompressionMethod(CompressionMethod),
UnsupportedSampleDepth(u8),
UnsupportedSampleFormat(Vec<SampleFormat>),
UnsupportedColorType(ColorType),
UnsupportedBitsPerChannel(u8),
UnsupportedPlanarConfig(Option<PlanarConfiguration>),
UnsupportedDataType,
UnsupportedInterpretation(PhotometricInterpretation),
UnsupportedJpegFeature(UnsupportedFeature),
}
impl fmt::Display for TiffUnsupportedError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::TiffUnsupportedError::*;
match *self {
FloatingPointPredictor(color_type) => write!(
fmt,
"Floating point predictor for {:?} is unsupported.",
color_type
),
HorizontalPredictor(color_type) => write!(
fmt,
"Horizontal predictor for {:?} is unsupported.",
color_type
),
InterpretationWithBits(ref photometric_interpretation, ref bits_per_sample) => write!(
fmt,
"{:?} with {:?} bits per sample is unsupported",
photometric_interpretation, bits_per_sample
),
UnknownInterpretation => write!(
fmt,
"The image is using an unknown photometric interpretation."
),
UnknownCompressionMethod => write!(fmt, "Unknown compression method."),
UnsupportedCompressionMethod(method) => {
write!(fmt, "Compression method {:?} is unsupported", method)
}
UnsupportedSampleDepth(samples) => {
write!(fmt, "{} samples per pixel is unsupported.", samples)
}
UnsupportedSampleFormat(ref formats) => {
write!(fmt, "Sample format {:?} is unsupported.", formats)
}
UnsupportedColorType(color_type) => {
write!(fmt, "Color type {:?} is unsupported", color_type)
}
UnsupportedBitsPerChannel(bits) => {
write!(fmt, "{} bits per channel not supported", bits)
}
UnsupportedPlanarConfig(config) => {
write!(fmt, "Unsupported planar configuration “{:?}”.", config)
}
UnsupportedDataType => write!(fmt, "Unsupported data type."),
UnsupportedInterpretation(interpretation) => {
write!(
fmt,
"Unsupported photometric interpretation \"{:?}\".",
interpretation
)
}
UnsupportedJpegFeature(ref unsupported_feature) => {
write!(fmt, "Unsupported JPEG feature {:?}", unsupported_feature)
}
}
}
}
/// User attempted to use the Decoder in a way that is incompatible with a specific image.
///
/// For example: attempting to read a tile from a stripped image.
#[derive(Debug)]
pub enum UsageError {
InvalidChunkType(ChunkType, ChunkType),
InvalidChunkIndex(u32),
}
impl fmt::Display for UsageError {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::UsageError::*;
match *self {
InvalidChunkType(expected, actual) => {
write!(
fmt,
"Requested operation is only valid for images with chunk encoding of type: {:?}, got {:?}.",
expected, actual
)
}
InvalidChunkIndex(index) => write!(fmt, "Image chunk index ({}) requested, but the image does not contain that many chunks.", index),
}
}
}
impl fmt::Display for TiffError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
TiffError::FormatError(ref e) => write!(fmt, "Format error: {}", e),
TiffError::UnsupportedError(ref f) => write!(
fmt,
"The Decoder does not support the \
image format `{}`",
f
),
TiffError::IoError(ref e) => e.fmt(fmt),
TiffError::LimitsExceeded => write!(fmt, "The Decoder limits are exceeded"),
TiffError::IntSizeError => write!(fmt, "Platform or format size limits exceeded"),
TiffError::UsageError(ref e) => write!(fmt, "Usage error: {}", e),
}
}
}
impl Error for TiffError {
fn description(&self) -> &str {
match *self {
TiffError::FormatError(..) => "Format error",
TiffError::UnsupportedError(..) => "Unsupported error",
TiffError::IoError(..) => "IO error",
TiffError::LimitsExceeded => "Decoder limits exceeded",
TiffError::IntSizeError => "Platform or format size limits exceeded",
TiffError::UsageError(..) => "Invalid usage",
}
}
fn cause(&self) -> Option<&dyn Error> {
match *self {
TiffError::IoError(ref e) => Some(e),
_ => None,
}
}
}
impl From<io::Error> for TiffError {
fn from(err: io::Error) -> TiffError {
TiffError::IoError(err)
}
}
impl From<str::Utf8Error> for TiffError {
fn from(_err: str::Utf8Error) -> TiffError {
TiffError::FormatError(TiffFormatError::InvalidTag)
}
}
impl From<string::FromUtf8Error> for TiffError {
fn from(_err: string::FromUtf8Error) -> TiffError {
TiffError::FormatError(TiffFormatError::InvalidTag)
}
}
impl From<TiffFormatError> for TiffError {
fn from(err: TiffFormatError) -> TiffError {
TiffError::FormatError(err)
}
}
impl From<TiffUnsupportedError> for TiffError {
fn from(err: TiffUnsupportedError) -> TiffError {
TiffError::UnsupportedError(err)
}
}
impl From<UsageError> for TiffError {
fn from(err: UsageError) -> TiffError {
TiffError::UsageError(err)
}
}
impl From<std::num::TryFromIntError> for TiffError {
fn from(_err: std::num::TryFromIntError) -> TiffError {
TiffError::IntSizeError
}
}
impl From<LzwError> for TiffError {
fn from(err: LzwError) -> TiffError {
match err {
LzwError::InvalidCode => TiffError::FormatError(TiffFormatError::Format(String::from(
"LZW compressed data corrupted",
))),
}
}
}
#[derive(Debug, Clone)]
pub struct JpegDecoderError {
inner: Arc<jpeg::Error>,
}
impl JpegDecoderError {
fn new(error: jpeg::Error) -> Self {
Self {
inner: Arc::new(error),
}
}
}
impl PartialEq for JpegDecoderError {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.inner, &other.inner)
}
}
impl Display for JpegDecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.inner.fmt(f)
}
}
impl From<JpegDecoderError> for TiffError {
fn from(error: JpegDecoderError) -> Self {
TiffError::FormatError(TiffFormatError::JpegDecoder(error))
}
}
impl From<jpeg::Error> for TiffError {
fn from(error: jpeg::Error) -> Self {
JpegDecoderError::new(error).into()
}
}
/// Result of an image decoding/encoding process
pub type TiffResult<T> = Result<T, TiffError>;
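Since `TiffError` has `From` impls for `io::Error` and the error enums above, fallible helpers can propagate everything with `?` and callers can match on the variant. A small sketch (the path is only an example):

use std::fs::File;
use tiff::decoder::Decoder;
use tiff::{TiffError, TiffResult};

// `?` converts the io::Error from File::open into TiffError::IoError via the
// From impl above; Decoder::new already returns a TiffResult.
fn open_decoder(path: &str) -> TiffResult<Decoder<File>> {
    let file = File::open(path)?;
    Decoder::new(file)
}

fn main() {
    match open_decoder("does-not-exist.tif") {
        Ok(_) => println!("decoder ready"),
        Err(TiffError::IoError(e)) => println!("I/O problem: {}", e),
        Err(other) => println!("TIFF problem: {}", other),
    }
}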

43
vendor/tiff/src/lib.rs vendored Normal file
View File

@ -0,0 +1,43 @@
//! Decoding and Encoding of TIFF Images
//!
//! TIFF (Tagged Image File Format) is a versatile image format that supports
//! lossless and lossy compression.
//!
//! # Related Links
//! * <https://web.archive.org/web/20210108073850/https://www.adobe.io/open/standards/TIFF.html> - The TIFF specification
extern crate jpeg;
extern crate weezl;
mod bytecast;
pub mod decoder;
pub mod encoder;
mod error;
pub mod tags;
pub use self::error::{TiffError, TiffFormatError, TiffResult, TiffUnsupportedError, UsageError};
/// An enumeration over supported color types and their bit depths
#[derive(Copy, PartialEq, Eq, Debug, Clone, Hash)]
pub enum ColorType {
/// Pixel is grayscale
Gray(u8),
/// Pixel contains R, G and B channels
RGB(u8),
/// Pixel is an index into a color palette
Palette(u8),
/// Pixel is grayscale with an alpha channel
GrayA(u8),
/// Pixel is RGB with an alpha channel
RGBA(u8),
/// Pixel is CMYK
CMYK(u8),
/// Pixel is YCbCr
YCbCr(u8),
}
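A small illustration of consuming `ColorType`: the helper below is not part of the crate, it only shows that the wrapped value carries bits per sample while the variant determines the number of samples per pixel.

use tiff::ColorType;

// Samples per pixel for each variant; the wrapped value is bits per sample.
// Illustrative helper only, not a crate API.
fn samples_per_pixel(ct: ColorType) -> u8 {
    match ct {
        ColorType::Gray(_) | ColorType::Palette(_) => 1,
        ColorType::GrayA(_) => 2,
        ColorType::RGB(_) | ColorType::YCbCr(_) => 3,
        ColorType::RGBA(_) | ColorType::CMYK(_) => 4,
    }
}

fn main() {
    assert_eq!(samples_per_pixel(ColorType::RGB(8)), 3);
    assert_eq!(samples_per_pixel(ColorType::CMYK(16)), 4);
}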

234
vendor/tiff/src/tags.rs vendored Normal file
View File

@ -0,0 +1,234 @@
macro_rules! tags {
{
// Permit arbitrary meta items, which include documentation.
$( #[$enum_attr:meta] )*
$vis:vis enum $name:ident($ty:tt) $(unknown($unknown_doc:literal))* {
// Each of the `Name = Val,` permitting documentation.
$($(#[$ident_attr:meta])* $tag:ident = $val:expr,)*
}
} => {
$( #[$enum_attr] )*
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
#[non_exhaustive]
pub enum $name {
$($(#[$ident_attr])* $tag,)*
$(
#[doc = $unknown_doc]
Unknown($ty),
)*
}
impl $name {
#[inline(always)]
fn __from_inner_type(n: $ty) -> Result<Self, $ty> {
match n {
$( $val => Ok($name::$tag), )*
n => Err(n),
}
}
#[inline(always)]
fn __to_inner_type(&self) -> $ty {
match *self {
$( $name::$tag => $val, )*
$( $name::Unknown(n) => { $unknown_doc; n }, )*
}
}
}
tags!($name, $ty, $($unknown_doc)*);
};
// For u16 tags, provide direct inherent primitive conversion methods.
($name:tt, u16, $($unknown_doc:literal)*) => {
impl $name {
#[inline(always)]
pub fn from_u16(val: u16) -> Option<Self> {
Self::__from_inner_type(val).ok()
}
$(
#[inline(always)]
pub fn from_u16_exhaustive(val: u16) -> Self {
$unknown_doc;
Self::__from_inner_type(val).unwrap_or_else(|_| $name::Unknown(val))
}
)*
#[inline(always)]
pub fn to_u16(&self) -> u16 {
Self::__to_inner_type(self)
}
}
};
// For other tag types, do nothing for now. With concat_idents one could
// provide inherent conversion methods for all types.
($name:tt, $ty:tt, $($unknown_doc:literal)*) => {};
}
// Note: These tags appear in the order they are mentioned in the TIFF reference
tags! {
/// TIFF tags
pub enum Tag(u16) unknown("A private or extension tag") {
// Baseline tags:
Artist = 315,
// grayscale images PhotometricInterpretation 1 or 3
BitsPerSample = 258,
CellLength = 265, // TODO add support
CellWidth = 264, // TODO add support
// palette-color images (PhotometricInterpretation 3)
ColorMap = 320, // TODO add support
Compression = 259, // TODO add support for 2 and 32773
Copyright = 33_432,
DateTime = 306,
ExtraSamples = 338, // TODO add support
FillOrder = 266, // TODO add support
FreeByteCounts = 289, // TODO add support
FreeOffsets = 288, // TODO add support
GrayResponseCurve = 291, // TODO add support
GrayResponseUnit = 290, // TODO add support
HostComputer = 316,
ImageDescription = 270,
ImageLength = 257,
ImageWidth = 256,
Make = 271,
MaxSampleValue = 281, // TODO add support
MinSampleValue = 280, // TODO add support
Model = 272,
NewSubfileType = 254, // TODO add support
Orientation = 274, // TODO add support
PhotometricInterpretation = 262,
PlanarConfiguration = 284,
ResolutionUnit = 296, // TODO add support
RowsPerStrip = 278,
SamplesPerPixel = 277,
Software = 305,
StripByteCounts = 279,
StripOffsets = 273,
SubfileType = 255, // TODO add support
Threshholding = 263, // TODO add support
XResolution = 282,
YResolution = 283,
// Advanced tags
Predictor = 317,
TileWidth = 322,
TileLength = 323,
TileOffsets = 324,
TileByteCounts = 325,
// Data Sample Format
SampleFormat = 339,
SMinSampleValue = 340, // TODO add support
SMaxSampleValue = 341, // TODO add support
// JPEG
JPEGTables = 347,
// GeoTIFF
ModelPixelScaleTag = 33550, // (SoftDesk)
ModelTransformationTag = 34264, // (JPL Carto Group)
ModelTiepointTag = 33922, // (Intergraph)
GeoKeyDirectoryTag = 34735, // (SPOT)
GeoDoubleParamsTag = 34736, // (SPOT)
GeoAsciiParamsTag = 34737, // (SPOT)
GdalNodata = 42113, // Contains areas with missing data
}
}
tags! {
/// The type of an IFD entry (a 2 byte field).
pub enum Type(u16) {
/// 8-bit unsigned integer
BYTE = 1,
/// 8-bit byte that contains a 7-bit ASCII code; the last byte must be zero
ASCII = 2,
/// 16-bit unsigned integer
SHORT = 3,
/// 32-bit unsigned integer
LONG = 4,
/// Fraction stored as two 32-bit unsigned integers
RATIONAL = 5,
/// 8-bit signed integer
SBYTE = 6,
/// 8-bit byte that may contain anything, depending on the field
UNDEFINED = 7,
/// 16-bit signed integer
SSHORT = 8,
/// 32-bit signed integer
SLONG = 9,
/// Fraction stored as two 32-bit signed integers
SRATIONAL = 10,
/// 32-bit IEEE floating point
FLOAT = 11,
/// 64-bit IEEE floating point
DOUBLE = 12,
/// 32-bit unsigned integer (offset)
IFD = 13,
/// BigTIFF 64-bit unsigned integer
LONG8 = 16,
/// BigTIFF 64-bit signed integer
SLONG8 = 17,
/// BigTIFF 64-bit unsigned integer (offset)
IFD8 = 18,
}
}
tags! {
/// See [TIFF compression tags](https://www.awaresystems.be/imaging/tiff/tifftags/compression.html)
/// for reference.
pub enum CompressionMethod(u16) {
None = 1,
Huffman = 2,
Fax3 = 3,
Fax4 = 4,
LZW = 5,
JPEG = 6,
// "Extended JPEG" or "new JPEG" style
ModernJPEG = 7,
Deflate = 8,
OldDeflate = 0x80B2,
PackBits = 0x8005,
}
}
tags! {
pub enum PhotometricInterpretation(u16) {
WhiteIsZero = 0,
BlackIsZero = 1,
RGB = 2,
RGBPalette = 3,
TransparencyMask = 4,
CMYK = 5,
YCbCr = 6,
CIELab = 8,
}
}
tags! {
pub enum PlanarConfiguration(u16) {
Chunky = 1,
Planar = 2,
}
}
tags! {
pub enum Predictor(u16) {
None = 1,
Horizontal = 2,
FloatingPoint = 3,
}
}
tags! {
/// Type to represent resolution units
pub enum ResolutionUnit(u16) {
None = 1,
Inch = 2,
Centimeter = 3,
}
}
tags! {
pub enum SampleFormat(u16) unknown("An unknown extension sample format") {
Uint = 1,
Int = 2,
IEEEFP = 3,
Void = 4,
}
}
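The `tags!` macro above generates `from_u16`/`to_u16` for every `u16`-backed enum, plus `from_u16_exhaustive` for the ones declared with an `unknown(...)` catch-all. A short sketch of the generated conversions:

use tiff::tags::{CompressionMethod, Tag};

fn main() {
    // Known values round-trip through the generated methods.
    assert_eq!(Tag::from_u16(256), Some(Tag::ImageWidth));
    assert_eq!(Tag::ImageWidth.to_u16(), 256);
    // Tag has an `unknown(...)` clause, so unrecognised values are preserved.
    assert_eq!(Tag::from_u16_exhaustive(65000), Tag::Unknown(65000));
    // CompressionMethod has no catch-all; unknown values simply yield None.
    assert_eq!(CompressionMethod::from_u16(9999), None);
    assert_eq!(CompressionMethod::LZW.to_u16(), 5);
}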

4
vendor/tiff/tests/benches/README.md vendored Normal file
View File

@ -0,0 +1,4 @@
Copyrights:
kodim*.png: Eastman Kodak Company, released for unrestricted use
Transparency.png: Public Domain, according to Wikimedia

Binary file not shown.

Binary file not shown.

Binary file not shown.

46
vendor/tiff/tests/decode_bigtiff_images.rs vendored Normal file
View File

@ -0,0 +1,46 @@
extern crate tiff;
use tiff::decoder::Decoder;
use tiff::tags::Tag;
use tiff::ColorType;
use std::fs::File;
use std::path::PathBuf;
const TEST_IMAGE_DIR: &str = "./tests/images/bigtiff";
#[test]
fn test_big_tiff() {
let filenames = ["BigTIFF.tif", "BigTIFFMotorola.tif", "BigTIFFLong.tif"];
for filename in filenames.iter() {
let path = PathBuf::from(TEST_IMAGE_DIR).join(filename);
let img_file = File::open(path).expect("Cannot find test image!");
let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
assert_eq!(
decoder.dimensions().expect("Cannot get dimensions"),
(64, 64)
);
assert_eq!(
decoder.colortype().expect("Cannot get colortype"),
ColorType::RGB(8)
);
assert_eq!(
decoder
.get_tag_u64(Tag::StripOffsets)
.expect("Cannot get StripOffsets"),
16
);
assert_eq!(
decoder
.get_tag_u64(Tag::RowsPerStrip)
.expect("Cannot get RowsPerStrip"),
64
);
assert_eq!(
decoder
.get_tag_u64(Tag::StripByteCounts)
.expect("Cannot get StripByteCounts"),
12288
)
}
}
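The BigTIFF fixtures above are recognised by their header: as the header writers earlier in this commit show, classic TIFF stores version 42 after the `II`/`MM` byte-order mark while BigTIFF stores 43. A standalone sketch of sniffing that header, independent of the crate:

#[derive(Debug, PartialEq)]
enum Flavor {
    Classic,
    Big,
}

// Inspect the first four bytes: "II"/"MM" byte-order mark, then the version
// word, 42 for classic TIFF and 43 for BigTIFF.
fn sniff(header: &[u8; 4]) -> Option<Flavor> {
    let le = if &header[..2] == b"II" {
        true
    } else if &header[..2] == b"MM" {
        false
    } else {
        return None;
    };
    let version = if le {
        u16::from_le_bytes([header[2], header[3]])
    } else {
        u16::from_be_bytes([header[2], header[3]])
    };
    match version {
        42 => Some(Flavor::Classic),
        43 => Some(Flavor::Big),
        _ => None,
    }
}

fn main() {
    assert_eq!(sniff(b"II\x2a\x00"), Some(Flavor::Classic)); // 42, little endian
    assert_eq!(sniff(b"MM\x00\x2b"), Some(Flavor::Big)); // 43, big endian
}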

479
vendor/tiff/tests/decode_images.rs vendored Normal file
View File

@ -0,0 +1,479 @@
extern crate tiff;
use tiff::decoder::{ifd, Decoder, DecodingResult};
use tiff::ColorType;
use std::fs::File;
use std::path::PathBuf;
const TEST_IMAGE_DIR: &str = "./tests/images/";
macro_rules! test_image_sum {
($name:ident, $buffer:ident, $sum_ty:ty) => {
fn $name(file: &str, expected_type: ColorType, expected_sum: $sum_ty) {
let path = PathBuf::from(TEST_IMAGE_DIR).join(file);
let img_file = File::open(path).expect("Cannot find test image!");
let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
assert_eq!(decoder.colortype().unwrap(), expected_type);
let img_res = decoder.read_image().unwrap();
match img_res {
DecodingResult::$buffer(res) => {
let sum: $sum_ty = res.into_iter().map(<$sum_ty>::from).sum();
assert_eq!(sum, expected_sum);
}
_ => panic!("Wrong bit depth"),
}
}
};
}
test_image_sum!(test_image_sum_u8, U8, u64);
test_image_sum!(test_image_sum_i8, I8, i64);
test_image_sum!(test_image_sum_u16, U16, u64);
test_image_sum!(test_image_sum_i16, I16, i64);
test_image_sum!(test_image_sum_u32, U32, u64);
test_image_sum!(test_image_sum_u64, U64, u64);
test_image_sum!(test_image_sum_f32, F32, f32);
test_image_sum!(test_image_sum_f64, F64, f64);
/// Tests that a decoder can be constructed for an image and the color type
/// read from the IFD and is of the appropriate type, but the type is
/// unsupported.
fn test_image_color_type_unsupported(file: &str, expected_type: ColorType) {
let path = PathBuf::from(TEST_IMAGE_DIR).join(file);
let img_file = File::open(path).expect("Cannot find test image!");
let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
assert_eq!(decoder.colortype().unwrap(), expected_type);
assert!(match decoder.read_image() {
Err(tiff::TiffError::UnsupportedError(
tiff::TiffUnsupportedError::UnsupportedColorType(_),
)) => true,
_ => false,
});
}
#[test]
fn test_cmyk_u8() {
test_image_sum_u8("cmyk-3c-8b.tiff", ColorType::CMYK(8), 8522658);
}
#[test]
fn test_cmyk_u16() {
test_image_sum_u16("cmyk-3c-16b.tiff", ColorType::CMYK(16), 2181426827);
}
#[test]
fn test_cmyk_f32() {
test_image_sum_f32("cmyk-3c-32b-float.tiff", ColorType::CMYK(32), 496.0405);
}
#[test]
fn test_gray_u8() {
test_image_sum_u8("minisblack-1c-8b.tiff", ColorType::Gray(8), 2840893);
}
#[test]
fn test_gray_u12() {
test_image_color_type_unsupported("12bit.cropped.tiff", ColorType::Gray(12));
}
#[test]
fn test_gray_u16() {
test_image_sum_u16("minisblack-1c-16b.tiff", ColorType::Gray(16), 733126239);
}
#[test]
fn test_gray_u32() {
test_image_sum_u32("gradient-1c-32b.tiff", ColorType::Gray(32), 549892913787);
}
#[test]
fn test_gray_u64() {
test_image_sum_u64("gradient-1c-64b.tiff", ColorType::Gray(64), 549892913787);
}
#[test]
fn test_gray_f32() {
test_image_sum_f32("gradient-1c-32b-float.tiff", ColorType::Gray(32), 128.03194);
}
#[test]
fn test_gray_f64() {
test_image_sum_f64(
"gradient-1c-64b-float.tiff",
ColorType::Gray(64),
128.0319210877642,
);
}
#[test]
fn test_rgb_u8() {
test_image_sum_u8("rgb-3c-8b.tiff", ColorType::RGB(8), 7842108);
}
#[test]
fn test_rgb_u12() {
test_image_color_type_unsupported("12bit.cropped.rgb.tiff", ColorType::RGB(12));
}
#[test]
fn test_rgb_u16() {
test_image_sum_u16("rgb-3c-16b.tiff", ColorType::RGB(16), 2024349944);
}
#[test]
fn test_rgb_u32() {
test_image_sum_u32("gradient-3c-32b.tiff", ColorType::RGB(32), 2030834111716);
}
#[test]
fn test_rgb_u64() {
test_image_sum_u64("gradient-3c-64b.tiff", ColorType::RGB(64), 2030834111716);
}
#[test]
fn test_rgb_f32() {
test_image_sum_f32("gradient-3c-32b-float.tiff", ColorType::RGB(32), 472.8405);
}
#[test]
fn test_int8() {
test_image_sum_i8("int8.tif", ColorType::Gray(8), 3111)
}
#[test]
fn test_int8_rgb() {
test_image_sum_i8("int8_rgb.tif", ColorType::RGB(8), -10344)
}
#[test]
fn test_int16() {
test_image_sum_i16("int16.tif", ColorType::Gray(16), 354396);
}
#[test]
fn test_int16_rgb() {
test_image_sum_i16("int16_rgb.tif", ColorType::RGB(16), 1063188);
}
#[test]
fn test_string_tags() {
// these files have null-terminated strings for their Software tag. One has extra bytes after
// the null byte, so we check both to ensure that we're truncating properly
let filenames = ["minisblack-1c-16b.tiff", "rgb-3c-16b.tiff"];
for filename in filenames.iter() {
let path = PathBuf::from(TEST_IMAGE_DIR).join(filename);
let img_file = File::open(path).expect("Cannot find test image!");
let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
let software = decoder.get_tag(tiff::tags::Tag::Software).unwrap();
match software {
ifd::Value::Ascii(s) => assert_eq!(
&s,
"GraphicsMagick 1.2 unreleased Q16 http://www.GraphicsMagick.org/"
),
_ => assert!(false),
};
}
}
#[test]
fn test_decode_data() {
let mut image_data = Vec::new();
for x in 0..100 {
for y in 0..100u8 {
let val = x + y;
image_data.push(val);
image_data.push(val);
image_data.push(val);
}
}
let file = File::open("./tests/decodedata-rgb-3c-8b.tiff").unwrap();
let mut decoder = Decoder::new(file).unwrap();
assert_eq!(decoder.colortype().unwrap(), ColorType::RGB(8));
assert_eq!(decoder.dimensions().unwrap(), (100, 100));
if let DecodingResult::U8(img_res) = decoder.read_image().unwrap() {
assert_eq!(image_data, img_res);
} else {
panic!("Wrong data type");
}
}
#[test]
fn issue_69() {
test_image_sum_u16("issue_69_lzw.tiff", ColorType::Gray(16), 1015486);
test_image_sum_u16("issue_69_packbits.tiff", ColorType::Gray(16), 1015486);
}
// TODO: GrayA support
//#[test]
//fn test_gray_alpha_u8()
//{
//let img_file = File::open("./tests/images/minisblack-2c-8b-alpha.tiff").expect("Cannot find test image!");
//let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
//assert_eq!(decoder.colortype().unwrap(), ColorType::GrayA(8));
//let img_res = decoder.read_image();
//assert!(img_res.is_ok());
//}
#[test]
fn test_tiled_rgb_u8() {
test_image_sum_u8("tiled-rgb-u8.tif", ColorType::RGB(8), 39528948);
}
#[test]
fn test_tiled_rect_rgb_u8() {
test_image_sum_u8("tiled-rect-rgb-u8.tif", ColorType::RGB(8), 62081032);
}
/* #[test]
fn test_tiled_jpeg_rgb_u8() {
test_image_sum_u8("tiled-jpeg-rgb-u8.tif", ColorType::RGB(8), 93031606);
} */
#[test]
fn test_tiled_oversize_gray_i8() {
test_image_sum_i8("tiled-oversize-gray-i8.tif", ColorType::Gray(8), 1214996);
}
#[test]
fn test_tiled_cmyk_i8() {
test_image_sum_i8("tiled-cmyk-i8.tif", ColorType::CMYK(8), 1759101);
}
#[test]
fn test_tiled_incremental() {
let file = "tiled-rgb-u8.tif";
let expected_type = ColorType::RGB(8);
let sums = [
188760, 195639, 108148, 81986, 665088, 366140, 705317, 423366, 172033, 324455, 244102,
81853, 181258, 247971, 129486, 55600, 565625, 422102, 730888, 379271, 232142, 292549,
244045, 86866, 188141, 115036, 150785, 84389, 353170, 459325, 719619, 329594, 278663,
220474, 243048, 113563, 189152, 109684, 179391, 122188, 279651, 622093, 724682, 302459,
268428, 204499, 224255, 124674, 170668, 121868, 192768, 183367, 378029, 585651, 657712,
296790, 241444, 197083, 198429, 134869, 182318, 86034, 203655, 182338, 297255, 601284,
633813, 242531, 228578, 206441, 193552, 125412, 181527, 165439, 202531, 159538, 268388,
565790, 611382, 272967, 236497, 215154, 158881, 90806, 106114, 182342, 191824, 186138,
215174, 393193, 701228, 198866, 227944, 193830, 166330, 49008, 55719, 122820, 197316,
161969, 203152, 170986, 624427, 188605, 186187, 111064, 115192, 39538, 48626, 163929,
144682, 135796, 194141, 154198, 584125, 180255, 153524, 121433, 132641, 35743, 47798,
152343, 162874, 167664, 160175, 133038, 659882, 138339, 166470, 124173, 118929, 51317,
45267, 155776, 161331, 161006, 130052, 137618, 337291, 106481, 161999, 127343, 87724,
59540, 63907, 155677, 140668, 141523, 108061, 168657, 186482, 98599, 147614, 139963, 90444,
56602, 92547, 125644, 134212, 126569, 144153, 179800, 174516, 133969, 129399, 117681,
83305, 55075, 110737, 115108, 128572, 128911, 130922, 179986, 143288, 145884, 155856,
96683, 94057, 56238, 79649, 71651, 70182, 75010, 77009, 98855, 78979, 74341, 83482, 53403,
59842, 30305,
];
let path = PathBuf::from(TEST_IMAGE_DIR).join(file);
let img_file = File::open(path).expect("Cannot find test image!");
let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
assert_eq!(decoder.colortype().unwrap(), expected_type);
let tiles = decoder.tile_count().unwrap();
assert_eq!(tiles as usize, sums.len());
for tile in 0..tiles {
match decoder.read_chunk(tile).unwrap() {
DecodingResult::U8(res) => {
let sum: u64 = res.into_iter().map(<u64>::from).sum();
assert_eq!(sum, sums[tile as usize]);
}
_ => panic!("Wrong bit depth"),
}
}
}
#[test]
fn test_div_zero() {
use tiff::{TiffError, TiffFormatError};
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 40, 1, 0, 0,
0, 158, 0, 0, 251, 3, 1, 3, 0, 1, 0, 0, 0, 1, 0, 0, 39, 6, 1, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0,
17, 1, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3, 0, 1, 0, 0, 0, 158, 0, 0, 251, 67, 1, 3, 0,
1, 0, 0, 0, 40, 0, 0, 0, 66, 1, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 178, 178, 178, 178,
178, 178, 178,
];
let err = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
match err {
TiffError::FormatError(TiffFormatError::StripTileTagConflict) => {}
unexpected => panic!("Unexpected error {}", unexpected),
}
}
#[test]
fn test_too_many_value_bytes() {
let image = [
73, 73, 43, 0, 8, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 8, 0, 0, 0,
23, 0, 12, 0, 0, 65, 4, 0, 1, 6, 0, 0, 1, 16, 0, 1, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0,
0, 0, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, 59, 73, 84, 186, 202, 83, 240, 66, 1, 53, 22, 56, 47,
0, 0, 0, 0, 0, 0, 1, 222, 4, 0, 58, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 4, 0, 0, 100, 0,
0, 89, 89, 89, 89, 89, 89, 89, 89, 96, 1, 20, 89, 89, 89, 89, 18,
];
let error = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
match error {
tiff::TiffError::LimitsExceeded => {}
unexpected => panic!("Unexpected error {}", unexpected),
}
}
#[test]
fn fuzzer_testcase5() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 100, 0, 0, 0, 1, 1, 4, 0, 1, 0, 0,
0, 158, 0, 0, 251, 3, 1, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0,
17, 1, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 0, 0, 0, 0, 0, 246, 16, 0, 0, 22, 1, 4, 0, 1,
0, 0, 0, 40, 0, 251, 255, 23, 1, 4, 0, 1, 0, 0, 0, 48, 178, 178, 178, 178, 178, 178, 178,
178, 178, 178,
];
let mut decoder = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap();
let _ = decoder.read_image().unwrap_err();
}
#[test]
fn fuzzer_testcase1() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 99, 255, 255, 254, 1, 1, 4, 0, 1,
0, 0, 0, 158, 0, 0, 251, 3, 1, 3, 255, 254, 255, 255, 0, 1, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0,
0, 0, 0, 0, 0, 17, 1, 4, 0, 9, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 0, 2, 0, 0, 0, 63, 0, 0, 0,
22, 1, 4, 0, 1, 0, 0, 0, 44, 0, 0, 0, 23, 1, 4, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 1, 0, 178,
178,
];
let _ = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
}
#[test]
fn test_stripped_image_overflow() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 100, 0, 0, 148, 1, 1, 4, 0, 1, 0,
0, 0, 158, 0, 0, 251, 3, 1, 3, 255, 254, 255, 255, 0, 1, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0, 0,
0, 0, 0, 0, 17, 1, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 0, 2, 0, 0, 0, 63, 0, 0, 0, 22,
1, 4, 0, 1, 0, 0, 0, 44, 0, 248, 255, 23, 1, 4, 0, 1, 0, 0, 0, 178, 178, 178, 0, 1, 178,
178, 178,
];
let mut decoder = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap();
let err = decoder.read_image().unwrap_err();
match err {
tiff::TiffError::LimitsExceeded => {}
unexpected => panic!("Unexpected error {}", unexpected),
}
}
#[test]
fn oom() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 40, 1, 0, 0,
0, 158, 0, 0, 251, 3, 1, 3, 0, 1, 0, 0, 0, 7, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0,
17, 1, 4, 0, 1, 0, 0, 0, 3, 77, 0, 0, 1, 1, 3, 0, 1, 0, 0, 0, 3, 128, 0, 0, 22, 1, 4, 0, 1,
0, 0, 0, 40, 0, 0, 0, 23, 1, 4, 0, 1, 0, 0, 0, 178, 48, 178, 178, 178, 178, 162, 178,
];
let _ = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
}
#[test]
fn fuzzer_testcase4() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 40, 1, 0, 0,
0, 158, 0, 0, 251, 3, 1, 3, 0, 1, 0, 0, 0, 5, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0,
17, 1, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3, 0, 1, 0, 0, 0, 3, 128, 0, 0, 22, 1, 4, 0, 1,
0, 0, 0, 40, 0, 0, 0, 23, 1, 4, 0, 1, 0, 0, 0, 48, 178, 178, 178, 0, 1, 0, 13, 13,
];
let _ = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
}
#[test]
fn fuzzer_testcase2() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 15, 0, 0, 254, 44, 1, 0, 0, 0, 0, 0, 32, 0, 0, 0, 1, 4, 0, 1, 0,
0, 0, 0, 1, 0, 0, 91, 1, 1, 0, 0, 0, 0, 0, 242, 4, 0, 0, 0, 22, 0, 56, 77, 0, 77, 1, 0, 0,
73, 42, 0, 1, 4, 0, 1, 0, 0, 0, 4, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 158, 0, 0, 251, 3, 1,
3, 0, 1, 0, 0, 0, 7, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, 17, 1, 4, 0, 1, 0, 0, 0,
0, 0, 0, 0, 1, 1, 3, 0, 1, 0, 0, 0, 0, 0, 0, 4, 61, 1, 18, 0, 1, 0, 0, 0, 202, 0, 0, 0, 17,
1, 100, 0, 129, 0, 0, 0, 0, 0, 0, 0, 232, 254, 252, 255, 254, 255, 255, 255, 1, 29, 0, 0,
22, 1, 3, 0, 1, 0, 0, 0, 16, 0, 0, 0, 23, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 123, 73, 254, 0,
73,
];
let _ = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
}
#[test]
fn invalid_jpeg_tag_2() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 16, 0, 254, 0, 4, 0, 1, 0, 0, 0, 0, 0, 0, 242, 0, 1, 4, 0, 1, 0,
0, 0, 0, 129, 16, 0, 1, 1, 4, 0, 1, 0, 0, 0, 214, 0, 0, 248, 253, 1, 3, 0, 1, 0, 0, 0, 64,
0, 0, 0, 3, 1, 3, 0, 1, 0, 0, 0, 7, 0, 0, 0, 6, 1, 3, 0, 1, 0, 0, 0, 1, 0, 0, 64, 14, 1, 0,
2, 0, 0, 148, 0, 206, 0, 0, 0, 17, 1, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
1, 0, 0, 0, 22, 1, 4, 0, 17, 0, 0, 201, 1, 0, 0, 0, 23, 1, 2, 0, 20, 0, 0, 0, 194, 0, 0, 0,
91, 1, 7, 0, 5, 0, 0, 0, 64, 0, 0, 0, 237, 254, 65, 255, 255, 255, 255, 255, 1, 0, 0, 0,
22, 1, 4, 0, 1, 0, 0, 0, 42, 0, 0, 0, 23, 1, 255, 255, 255, 255, 255, 36, 36, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 36, 73, 73, 0, 42, 36, 36, 36, 36, 0, 0, 8, 0,
];
let _ = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
}
#[test]
fn fuzzer_testcase3() {
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 2, 0, 0, 0, 61, 1, 9, 0, 46, 22,
128, 0, 0, 0, 0, 1, 6, 1, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, 17, 1, 4, 0, 27, 0, 0, 0, 0, 0, 0,
0, 1, 1, 3, 0, 1, 0, 0, 0, 17, 1, 0, 231, 22, 1, 1, 0, 1, 0, 0, 0, 130, 0, 0, 0, 23, 1, 4,
0, 14, 0, 0, 0, 0, 0, 0, 0, 133, 133, 133, 77, 77, 77, 0, 0, 22, 128, 0, 255, 255, 255,
255, 255,
];
let _ = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
}
#[test]
fn timeout() {
use tiff::{TiffError, TiffFormatError};
let image = [
73, 73, 42, 0, 8, 0, 0, 0, 16, 0, 254, 0, 4, 0, 1, 68, 0, 0, 0, 2, 0, 32, 254, 252, 0, 109,
0, 129, 0, 0, 0, 32, 0, 58, 0, 1, 4, 0, 1, 0, 6, 0, 0, 0, 8, 0, 0, 1, 73, 73, 42, 0, 8, 0,
0, 0, 8, 0, 0, 1, 4, 0, 1, 0, 0, 0, 21, 0, 0, 0, 61, 1, 255, 128, 9, 0, 0, 8, 0, 1, 113, 2,
3, 1, 3, 0, 1, 0, 0, 0, 5, 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 112, 0, 0, 36, 0, 0,
0, 112, 56, 200, 0, 5, 0, 0, 64, 0, 0, 1, 0, 4, 0, 0, 0, 2, 0, 6, 1, 3, 0, 1, 0, 0, 0, 0,
0, 0, 4, 17, 1, 1, 0, 93, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 3, 6, 0, 231, 22, 1,
1, 0, 1, 0, 0, 0, 2, 64, 118, 36, 23, 1, 1, 0, 43, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 4, 0, 8,
0, 0, 73, 73, 42, 0, 8, 0, 0, 0, 0, 0, 32,
];
let error = tiff::decoder::Decoder::new(std::io::Cursor::new(&image)).unwrap_err();
match error {
TiffError::FormatError(TiffFormatError::CycleInOffsets) => {}
e => panic!("Unexpected error {:?}", e),
}
}
#[test]
fn test_no_rows_per_strip() {
test_image_sum_u8("no_rows_per_strip.tiff", ColorType::RGB(8), 99448840);
}
#[test]
fn test_predictor_3_rgb_f32() {
test_image_sum_f32("predictor-3-rgb-f32.tif", ColorType::RGB(32), 54004.33);
}
#[test]
fn test_predictor_3_gray_f32() {
test_image_sum_f32("predictor-3-gray-f32.tif", ColorType::Gray(32), 20008.275);
}
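The `test_image_sum!` macro above expects a single `DecodingResult` variant per fixture; code that does not know the bit depth up front can match over the variants instead. A minimal sketch (only the 8- and 16-bit cases handled, everything else rejected; not a crate API):

use tiff::decoder::DecodingResult;

// Sum the samples of an 8- or 16-bit buffer; other depths return None.
fn sum_samples(result: DecodingResult) -> Option<u64> {
    match result {
        DecodingResult::U8(buf) => Some(buf.into_iter().map(u64::from).sum()),
        DecodingResult::U16(buf) => Some(buf.into_iter().map(u64::from).sum()),
        _ => None,
    }
}

fn main() {
    assert_eq!(sum_samples(DecodingResult::U8(vec![1, 2, 3])), Some(6));
    assert_eq!(sum_samples(DecodingResult::U16(vec![256, 256])), Some(512));
}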

Binary file not shown.

529
vendor/tiff/tests/encode_images.rs vendored Normal file
View File

@ -0,0 +1,529 @@
extern crate tiff;
use tiff::decoder::{ifd, Decoder, DecodingResult};
use tiff::encoder::{colortype, Ifd, Ifd8, SRational, TiffEncoder};
use tiff::tags::Tag;
use tiff::ColorType;
use std::fs::File;
use std::io::{Cursor, Seek, SeekFrom};
use std::path::PathBuf;
#[test]
fn encode_decode() {
let mut image_data = Vec::new();
for x in 0..100 {
for y in 0..100u8 {
let val = x + y;
image_data.push(val);
image_data.push(val);
image_data.push(val);
}
}
let mut file = Cursor::new(Vec::new());
{
let mut tiff = TiffEncoder::new(&mut file).unwrap();
let mut image = tiff.new_image::<colortype::RGB8>(100, 100).unwrap();
image
.encoder()
.write_tag(Tag::Artist, "Image-tiff")
.unwrap();
image.write_data(&image_data).unwrap();
}
{
file.seek(SeekFrom::Start(0)).unwrap();
let mut decoder = Decoder::new(&mut file).unwrap();
assert_eq!(decoder.colortype().unwrap(), ColorType::RGB(8));
assert_eq!(decoder.dimensions().unwrap(), (100, 100));
assert_eq!(
decoder.get_tag(Tag::Artist).unwrap(),
ifd::Value::Ascii("Image-tiff".into())
);
if let DecodingResult::U8(img_res) = decoder.read_image().unwrap() {
assert_eq!(image_data, img_res);
} else {
panic!("Wrong data type");
}
}
}
#[test]
fn encode_decode_big() {
let mut image_data = Vec::new();
for x in 0..100 {
for y in 0..100u8 {
let val = x + y;
image_data.push(val);
image_data.push(val);
image_data.push(val);
}
}
let mut file = Cursor::new(Vec::new());
{
let mut tiff = TiffEncoder::new_big(&mut file).unwrap();
let mut image = tiff.new_image::<colortype::RGB8>(100, 100).unwrap();
image
.encoder()
.write_tag(Tag::Artist, "Image-tiff")
.unwrap();
image.write_data(&image_data).unwrap();
}
{
file.seek(SeekFrom::Start(0)).unwrap();
let mut decoder = Decoder::new(&mut file).unwrap();
assert_eq!(decoder.colortype().unwrap(), ColorType::RGB(8));
assert_eq!(decoder.dimensions().unwrap(), (100, 100));
assert_eq!(
decoder.get_tag(Tag::Artist).unwrap(),
ifd::Value::Ascii("Image-tiff".into())
);
if let DecodingResult::U8(img_res) = decoder.read_image().unwrap() {
assert_eq!(image_data, img_res);
} else {
panic!("Wrong data type");
}
}
}
#[test]
fn test_encode_ifd() {
let mut data = Cursor::new(Vec::new());
{
let mut tiff = TiffEncoder::new(&mut data).unwrap();
let mut image_encoder = tiff.new_image::<colortype::Gray8>(1, 1).unwrap();
image_encoder.write_strip(&[1]).unwrap();
let encoder = image_encoder.encoder();
// Use the "reusable" tags section as per the TIFF6 spec
encoder.write_tag(Tag::Unknown(65000), Ifd(42u32)).unwrap();
encoder
.write_tag(Tag::Unknown(65001), &[Ifd(100u32)][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65002), &[Ifd(1u32), Ifd(2u32), Ifd(3u32)][..])
.unwrap();
encoder.write_tag(Tag::Unknown(65010), Ifd8(43u64)).unwrap();
encoder
.write_tag(Tag::Unknown(65011), &[Ifd8(100u64)][..])
.unwrap();
encoder
.write_tag(
Tag::Unknown(65012),
&[Ifd8(1u64), Ifd8(2u64), Ifd8(3u64)][..],
)
.unwrap();
}
// Rewind the cursor for reading
data.set_position(0);
{
let mut decoder = Decoder::new(&mut data).unwrap();
assert_eq!(decoder.assert_tag_u32(65000), 42);
assert_eq!(decoder.assert_tag_u32_vec(65000), [42]);
assert_eq!(decoder.assert_tag_u32_vec(65001), [100]);
assert_eq!(decoder.assert_tag_u32_vec(65002), [1, 2, 3]);
assert_eq!(decoder.assert_tag_u64(65010), 43);
assert_eq!(decoder.assert_tag_u64_vec(65010), [43]);
assert_eq!(decoder.assert_tag_u64_vec(65011), [100]);
assert_eq!(decoder.assert_tag_u64_vec(65012), [1, 2, 3]);
}
}
#[test]
/// Test that attempting to encode when the input buffer is undersized returns
/// an error rather than panicking.
/// See: https://github.com/PistonDevelopers/image-tiff/issues/35
fn test_encode_undersized_buffer() {
let input_data = vec![1, 2, 3];
let output = Vec::new();
let mut output_stream = Cursor::new(output);
if let Ok(mut tiff) = TiffEncoder::new(&mut output_stream) {
let res = tiff.write_image::<colortype::RGB8>(50, 50, &input_data);
assert!(res.is_err());
}
}
const TEST_IMAGE_DIR: &str = "./tests/images/";
macro_rules! test_roundtrip {
($name:ident, $buffer:ident, $buffer_ty:ty) => {
fn $name<C: colortype::ColorType<Inner = $buffer_ty>>(
file: &str,
expected_type: ColorType,
) {
let path = PathBuf::from(TEST_IMAGE_DIR).join(file);
let img_file = File::open(path).expect("Cannot find test image!");
let mut decoder = Decoder::new(img_file).expect("Cannot create decoder");
assert_eq!(decoder.colortype().unwrap(), expected_type);
let image_data = match decoder.read_image().unwrap() {
DecodingResult::$buffer(res) => res,
_ => panic!("Wrong data type"),
};
let mut file = Cursor::new(Vec::new());
{
let mut tiff = TiffEncoder::new(&mut file).unwrap();
let (width, height) = decoder.dimensions().unwrap();
tiff.write_image::<C>(width, height, &image_data).unwrap();
}
file.seek(SeekFrom::Start(0)).unwrap();
{
let mut decoder = Decoder::new(&mut file).unwrap();
if let DecodingResult::$buffer(img_res) = decoder.read_image().unwrap() {
assert_eq!(image_data, img_res);
} else {
panic!("Wrong data type");
}
}
}
};
}
test_roundtrip!(test_u8_roundtrip, U8, u8);
test_roundtrip!(test_u16_roundtrip, U16, u16);
test_roundtrip!(test_u32_roundtrip, U32, u32);
test_roundtrip!(test_u64_roundtrip, U64, u64);
test_roundtrip!(test_f32_roundtrip, F32, f32);
test_roundtrip!(test_f64_roundtrip, F64, f64);
#[test]
fn test_gray_u8_roundtrip() {
test_u8_roundtrip::<colortype::Gray8>("minisblack-1c-8b.tiff", ColorType::Gray(8));
}
#[test]
fn test_rgb_u8_roundtrip() {
test_u8_roundtrip::<colortype::RGB8>("rgb-3c-8b.tiff", ColorType::RGB(8));
}
#[test]
fn test_cmyk_u8_roundtrip() {
test_u8_roundtrip::<colortype::CMYK8>("cmyk-3c-8b.tiff", ColorType::CMYK(8));
}
#[test]
fn test_gray_u16_roundtrip() {
test_u16_roundtrip::<colortype::Gray16>("minisblack-1c-16b.tiff", ColorType::Gray(16));
}
#[test]
fn test_rgb_u16_roundtrip() {
test_u16_roundtrip::<colortype::RGB16>("rgb-3c-16b.tiff", ColorType::RGB(16));
}
#[test]
fn test_cmyk_u16_roundtrip() {
test_u16_roundtrip::<colortype::CMYK16>("cmyk-3c-16b.tiff", ColorType::CMYK(16));
}
#[test]
fn test_gray_u32_roundtrip() {
test_u32_roundtrip::<colortype::Gray32>("gradient-1c-32b.tiff", ColorType::Gray(32));
}
#[test]
fn test_rgb_u32_roundtrip() {
test_u32_roundtrip::<colortype::RGB32>("gradient-3c-32b.tiff", ColorType::RGB(32));
}
#[test]
fn test_gray_u64_roundtrip() {
test_u64_roundtrip::<colortype::Gray64>("gradient-1c-64b.tiff", ColorType::Gray(64));
}
#[test]
fn test_rgb_u64_roundtrip() {
test_u64_roundtrip::<colortype::RGB64>("gradient-3c-64b.tiff", ColorType::RGB(64));
}
#[test]
fn test_gray_f32_roundtrip() {
test_f32_roundtrip::<colortype::Gray32Float>("gradient-1c-32b-float.tiff", ColorType::Gray(32));
}
#[test]
fn test_rgb_f32_roundtrip() {
test_f32_roundtrip::<colortype::RGB32Float>("gradient-3c-32b-float.tiff", ColorType::RGB(32));
}
#[test]
fn test_cmyk_f32_roundtrip() {
test_f32_roundtrip::<colortype::CMYK32Float>("cmyk-3c-32b-float.tiff", ColorType::CMYK(32));
}
#[test]
fn test_gray_f64_roundtrip() {
test_f64_roundtrip::<colortype::Gray64Float>("gradient-1c-64b-float.tiff", ColorType::Gray(64));
}
#[test]
fn test_ycbcr_u8_roundtrip() {
test_u8_roundtrip::<colortype::YCbCr8>("tiled-jpeg-ycbcr.tif", ColorType::YCbCr(8));
}
trait AssertDecode {
fn assert_tag_u32(&mut self, tag: u16) -> u32;
fn assert_tag_u32_vec(&mut self, tag: u16) -> Vec<u32>;
fn assert_tag_i32(&mut self, tag: u16) -> i32;
fn assert_tag_i32_vec(&mut self, tag: u16) -> Vec<i32>;
fn assert_tag_u64(&mut self, tag: u16) -> u64;
fn assert_tag_u64_vec(&mut self, tag: u16) -> Vec<u64>;
fn assert_tag_i64(&mut self, tag: u16) -> i64;
fn assert_tag_i64_vec(&mut self, tag: u16) -> Vec<i64>;
}
impl<R: std::io::Read + std::io::Seek> AssertDecode for Decoder<R> {
fn assert_tag_u32(&mut self, tag: u16) -> u32 {
self.get_tag(Tag::Unknown(tag)).unwrap().into_u32().unwrap()
}
fn assert_tag_u32_vec(&mut self, tag: u16) -> Vec<u32> {
self.get_tag(Tag::Unknown(tag))
.unwrap()
.into_u32_vec()
.unwrap()
}
fn assert_tag_i32(&mut self, tag: u16) -> i32 {
self.get_tag(Tag::Unknown(tag)).unwrap().into_i32().unwrap()
}
fn assert_tag_i32_vec(&mut self, tag: u16) -> Vec<i32> {
self.get_tag(Tag::Unknown(tag))
.unwrap()
.into_i32_vec()
.unwrap()
}
fn assert_tag_u64(&mut self, tag: u16) -> u64 {
self.get_tag(Tag::Unknown(tag)).unwrap().into_u64().unwrap()
}
fn assert_tag_u64_vec(&mut self, tag: u16) -> Vec<u64> {
self.get_tag(Tag::Unknown(tag))
.unwrap()
.into_u64_vec()
.unwrap()
}
fn assert_tag_i64(&mut self, tag: u16) -> i64 {
self.get_tag(Tag::Unknown(tag)).unwrap().into_i64().unwrap()
}
fn assert_tag_i64_vec(&mut self, tag: u16) -> Vec<i64> {
self.get_tag(Tag::Unknown(tag))
.unwrap()
.into_i64_vec()
.unwrap()
}
}
#[test]
fn test_multiple_byte() {
let mut data = Cursor::new(Vec::new());
{
let mut tiff = TiffEncoder::new(&mut data).unwrap();
let mut image_encoder = tiff.new_image::<colortype::Gray8>(1, 1).unwrap();
image_encoder.write_strip(&[1]).unwrap();
let encoder = image_encoder.encoder();
encoder.write_tag(Tag::Unknown(65000), &[1_u8][..]).unwrap();
encoder
.write_tag(Tag::Unknown(65001), &[1_u8, 2][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65002), &[1_u8, 2, 3][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65003), &[1_u8, 2, 3, 4][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65004), &[1_u8, 2, 3, 4, 5][..])
.unwrap();
}
data.set_position(0);
{
let mut decoder = Decoder::new(&mut data).unwrap();
assert_eq!(decoder.assert_tag_u32_vec(65000), [1]);
assert_eq!(decoder.assert_tag_u32_vec(65001), [1, 2]);
assert_eq!(decoder.assert_tag_u32_vec(65002), [1, 2, 3]);
assert_eq!(decoder.assert_tag_u32_vec(65003), [1, 2, 3, 4]);
assert_eq!(decoder.assert_tag_u32_vec(65004), [1, 2, 3, 4, 5]);
}
}
#[test]
/// Test writing signed tags from TIFF 6.0
fn test_signed() {
let mut data = Cursor::new(Vec::new());
fn make_srational(i: i32) -> SRational {
SRational { n: i, d: 100 }
}
{
let mut tiff = TiffEncoder::new(&mut data).unwrap();
let mut image_encoder = tiff.new_image::<colortype::Gray8>(1, 1).unwrap();
image_encoder.write_strip(&[1]).unwrap();
let encoder = image_encoder.encoder();
// Use the "reusable" tags section as per the TIFF6 spec
encoder.write_tag(Tag::Unknown(65000), -1_i8).unwrap();
encoder
.write_tag(Tag::Unknown(65001), &[-1_i8][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65002), &[-1_i8, 2][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65003), &[-1_i8, 2, -3][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65004), &[-1_i8, 2, -3, 4][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65005), &[-1_i8, 2, -3, 4, -5][..])
.unwrap();
encoder.write_tag(Tag::Unknown(65010), -1_i16).unwrap();
encoder.write_tag(Tag::Unknown(65011), -1_i16).unwrap();
encoder
.write_tag(Tag::Unknown(65012), &[-1_i16, 2][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65013), &[-1_i16, 2, -3][..])
.unwrap();
encoder.write_tag(Tag::Unknown(65020), -1_i32).unwrap();
encoder
.write_tag(Tag::Unknown(65021), &[-1_i32][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65022), &[-1_i32, 2][..])
.unwrap();
encoder.write_tag(Tag::Unknown(65030), -1_i64).unwrap();
encoder
.write_tag(Tag::Unknown(65031), &[-1_i64][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65032), &[-1_i64, 2][..])
.unwrap();
encoder
.write_tag(Tag::Unknown(65040), make_srational(-1))
.unwrap();
encoder
.write_tag(
Tag::Unknown(65041),
&[make_srational(-1), make_srational(2)][..],
)
.unwrap();
}
// Rewind the cursor for reading
data.set_position(0);
{
let mut decoder = Decoder::new(&mut data).unwrap();
assert_eq!(decoder.assert_tag_i32(65000), -1);
assert_eq!(decoder.assert_tag_i32_vec(65001), [-1]);
assert_eq!(decoder.assert_tag_i32_vec(65002), [-1, 2]);
assert_eq!(decoder.assert_tag_i32_vec(65003), [-1, 2, -3]);
assert_eq!(decoder.assert_tag_i32_vec(65004), [-1, 2, -3, 4]);
assert_eq!(decoder.assert_tag_i32_vec(65005), [-1, 2, -3, 4, -5],);
assert_eq!(decoder.assert_tag_i32(65010), -1);
assert_eq!(decoder.assert_tag_i32_vec(65011), [-1]);
assert_eq!(decoder.assert_tag_i32_vec(65012), [-1, 2]);
assert_eq!(decoder.assert_tag_i32_vec(65013), [-1, 2, -3]);
assert_eq!(decoder.assert_tag_i32(65020), -1);
assert_eq!(decoder.assert_tag_i32_vec(65021), [-1]);
assert_eq!(decoder.assert_tag_i32_vec(65022), [-1, 2]);
assert_eq!(decoder.assert_tag_i64(65030), -1);
assert_eq!(decoder.assert_tag_i64_vec(65031), [-1]);
assert_eq!(decoder.assert_tag_i64_vec(65032), [-1, 2]);
assert_eq!(decoder.assert_tag_i32_vec(65040), [-1, 100]);
assert_eq!(decoder.assert_tag_i32_vec(65041), [-1_i32, 100, 2, 100]);
}
}
#[test]
/// check multipage image handling
fn test_multipage_image() {
let mut img_file = Cursor::new(Vec::new());
{
// first create a multipage image with 2 images
let mut img_encoder = TiffEncoder::new(&mut img_file).unwrap();
// write first grayscale image (2x2 16-bit)
let img1: Vec<u16> = [1, 2, 3, 4].to_vec();
img_encoder
.write_image::<colortype::Gray16>(2, 2, &img1[..])
.unwrap();
// write second grayscale image (3x3 8-bit)
let img2: Vec<u8> = [9, 8, 7, 6, 5, 4, 3, 2, 1].to_vec();
img_encoder
.write_image::<colortype::Gray8>(3, 3, &img2[..])
.unwrap();
}
// seek to the beginning of the file, so that it can be decoded
img_file.seek(SeekFrom::Start(0)).unwrap();
{
let mut img_decoder = Decoder::new(&mut img_file).unwrap();
// check the dimensions of the image in the first page
assert_eq!(img_decoder.dimensions().unwrap(), (2, 2));
img_decoder.next_image().unwrap();
// check the dimensions of the image in the second page
assert_eq!(img_decoder.dimensions().unwrap(), (3, 3));
}
}
#[test]
/// verify rows per strip setting
fn test_rows_per_strip() {
let mut file = Cursor::new(Vec::new());
{
let mut img_encoder = TiffEncoder::new(&mut file).unwrap();
let mut image = img_encoder.new_image::<colortype::Gray8>(100, 100).unwrap();
assert_eq!(image.next_strip_sample_count(), 100 * 100);
image.rows_per_strip(2).unwrap();
assert_eq!(image.next_strip_sample_count(), 2 * 100);
let img2: Vec<u8> = vec![0; 2 * 100];
image.write_strip(&img2[..]).unwrap();
assert!(image.rows_per_strip(5).is_err());
for i in 1..50 {
let img2: Vec<u8> = vec![i; 2 * 100];
image.write_strip(&img2[..]).unwrap();
}
assert!(image.write_strip(&img2[..]).is_err());
image.finish().unwrap();
}
file.seek(SeekFrom::Start(0)).unwrap();
{
let mut decoder = Decoder::new(&mut file).unwrap();
assert_eq!(decoder.get_tag_u64(Tag::RowsPerStrip).unwrap(), 2);
assert_eq!(decoder.strip_count().unwrap(), 50);
for i in 0..50 {
let img2 = [i; 2 * 100];
match decoder.read_chunk(i as u32).unwrap() {
DecodingResult::U8(data) => assert_eq!(&img2[..], &data[..]),
other => panic!("Incorrect strip type {:?}", other),
}
}
}
}

157
vendor/tiff/tests/encode_images_with_compression.rs vendored Normal file
View File

@ -0,0 +1,157 @@
extern crate tiff;
use std::io::{Cursor, Seek, Write};
use tiff::{
decoder::{Decoder, DecodingResult},
encoder::{
colortype::{self, ColorType},
compression::*,
TiffEncoder, TiffValue,
},
};
trait TestImage<const NUM_CHANNELS: usize>: From<Vec<<Self::Color as ColorType>::Inner>> {
const WIDTH: u32;
const HEIGHT: u32;
type Color: ColorType;
fn reference_data(&self) -> &[<Self::Color as ColorType>::Inner];
fn generate_pixel(x: u32, y: u32) -> [<Self::Color as ColorType>::Inner; NUM_CHANNELS];
fn compress<C: Compression, W: Write + Seek>(
&self,
encoder: &mut TiffEncoder<W>,
compression: C,
) where
[<Self::Color as ColorType>::Inner]: TiffValue,
{
let image = encoder
.new_image_with_compression::<Self::Color, C>(Self::WIDTH, Self::HEIGHT, compression)
.unwrap();
image.write_data(self.reference_data()).unwrap();
}
fn generate() -> Self {
assert_eq!(
Self::Color::BITS_PER_SAMPLE.len(),
NUM_CHANNELS,
"Incompatible color type"
);
let mut data = Vec::with_capacity((Self::WIDTH * Self::HEIGHT) as usize * NUM_CHANNELS);
for x in 0..Self::WIDTH {
for y in 0..Self::HEIGHT {
data.extend(IntoIterator::into_iter(Self::generate_pixel(x, y)));
}
}
Self::from(data)
}
}
struct TestImageColor(Vec<u16>);
impl From<Vec<u16>> for TestImageColor {
fn from(value: Vec<u16>) -> Self {
Self(value)
}
}
impl TestImage<3> for TestImageColor {
const WIDTH: u32 = 1;
const HEIGHT: u32 = 7;
type Color = colortype::RGB16;
fn reference_data(&self) -> &[u16] {
&self.0
}
fn generate_pixel(x: u32, y: u32) -> [<Self::Color as ColorType>::Inner; 3] {
let val = (x + y) % <Self::Color as ColorType>::Inner::MAX as u32;
[val as <Self::Color as ColorType>::Inner; 3]
}
}
struct TestImageGrayscale(Vec<u8>);
impl From<Vec<u8>> for TestImageGrayscale {
fn from(value: Vec<u8>) -> Self {
Self(value)
}
}
impl TestImage<1> for TestImageGrayscale {
const WIDTH: u32 = 21;
const HEIGHT: u32 = 10;
type Color = colortype::Gray8;
fn reference_data(&self) -> &[u8] {
&self.0
}
fn generate_pixel(x: u32, y: u32) -> [<Self::Color as ColorType>::Inner; 1] {
let val = (x + y) % <Self::Color as ColorType>::Inner::MAX as u32;
[val as <Self::Color as ColorType>::Inner]
}
}
fn encode_decode_with_compression<C: Compression + Clone>(compression: C) {
let mut data = Cursor::new(Vec::new());
let image_rgb = TestImageColor::generate();
let image_grayscale = TestImageGrayscale::generate();
// Encode tiff with compression
{
// Create a multipage image with 2 images
let mut encoder = TiffEncoder::new(&mut data).unwrap();
image_rgb.compress(&mut encoder, compression.clone());
image_grayscale.compress(&mut encoder, compression);
}
// Decode tiff
data.set_position(0);
{
let mut decoder = Decoder::new(data).unwrap();
// Check the RGB image
assert_eq!(
match decoder.read_image() {
Ok(DecodingResult::U16(image_data)) => image_data,
unexpected => panic!("Descoding RGB failed: {:?}", unexpected),
},
image_rgb.reference_data()
);
// Check the grayscale image
decoder.next_image().unwrap();
assert_eq!(
match decoder.read_image() {
Ok(DecodingResult::U8(image_data)) => image_data,
unexpected => panic!("Decoding grayscale failed: {:?}", unexpected),
},
image_grayscale.reference_data()
);
}
}
#[test]
fn encode_decode_without_compression() {
encode_decode_with_compression(Uncompressed::default());
}
#[test]
fn encode_decode_with_lzw() {
encode_decode_with_compression(Lzw::default());
}
#[test]
fn encode_decode_with_deflate() {
encode_decode_with_compression(Deflate::with_level(DeflateLevel::Fast));
encode_decode_with_compression(Deflate::with_level(DeflateLevel::Balanced));
encode_decode_with_compression(Deflate::with_level(DeflateLevel::Best));
}
#[test]
fn encode_decode_with_packbits() {
encode_decode_with_compression(Packbits::default());
}
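Outside the test harness, enabling a codec is just a matter of handing one of the `compression` types to `new_image_with_compression`. A minimal sketch writing a single LZW-compressed grayscale image (the pixel data is arbitrary):

use std::io::Cursor;
use tiff::encoder::{colortype, compression::Lzw, TiffEncoder};

fn main() -> tiff::TiffResult<()> {
    // 16x16 gradient; the contents are arbitrary, only the length matters.
    let data: Vec<u8> = (0u8..=255).collect();
    let mut file = Cursor::new(Vec::new());
    let mut encoder = TiffEncoder::new(&mut file)?;
    let image = encoder.new_image_with_compression::<colortype::Gray8, Lzw>(16, 16, Lzw::default())?;
    image.write_data(&data)?;
    Ok(())
}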

51
vendor/tiff/tests/fuzz_tests.rs vendored Normal file
View File

@ -0,0 +1,51 @@
extern crate tiff;
use tiff::decoder::Decoder;
use tiff::TiffResult;
use std::fs::File;
fn test_directory<F: Fn(File) -> bool>(path: &str, f: F) {
for entry in std::fs::read_dir(path).unwrap() {
let file = File::open(entry.unwrap().path()).unwrap();
assert!(f(file));
}
}
fn decode_tiff(file: File) -> TiffResult<()> {
let mut decoder = Decoder::new(file)?;
decoder.read_image()?;
Ok(())
}
#[test]
fn oor_panic() {
test_directory("./tests/fuzz_images/oor_panic", |file| {
let _ = decode_tiff(file);
true
});
}
#[test]
fn oom_crash() {
test_directory("./tests/fuzz_images/oom_crash", |file| {
decode_tiff(file).is_err()
});
}
#[test]
fn inf_loop() {
test_directory("./tests/fuzz_images/inf_loop", |file| {
let _ = decode_tiff(file);
true
});
}
// https://github.com/image-rs/image-tiff/issues/33
#[test]
fn divide_by_zero() {
test_directory("./tests/fuzz_images/divide_by_zero", |file| {
let _ = decode_tiff(file);
true
});
}