Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

1
vendor/smawk/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"4581638d3c628d22826bde37114048c825ffb354f17f21645d8d49f9ebd64689","LICENSE":"0173035e025d60b1d19197840a93a887f6da8b075c01dd10601fcb6414a0043b","README.md":"c27297df61be8dd14e47dc30a80ae1d443f5acea82932139637543bc6d860631","dprint.json":"aacd5ec32db8741fbdea4ac916e61f0011485a51e8ec7a660f849be60cc7b512","rustfmt.toml":"6819baea67831b8a8b2a7ad33af1128dd2774a900c804635c912bb6545a4e922","src/brute_force.rs":"02edda18441ea5d6cc89d2fdfb9ab32a361e2598de74a71fb930fb630288ce35","src/lib.rs":"b312e4855945cfe27f4b1e9949b1c6ffea8f248ad80ac8fc49e72f0cc38df219","src/monge.rs":"f6c475f4d094b70b5e45d0c8a94112d42eaafa0ab41b2d3d96d06a38f1bac32d","src/recursive.rs":"e585286fe6c885dcac8001d0f484718aa8f73f3f85a452f8b4c1cb36d4fbfcf6","tests/agreement.rs":"764406a5d8c9a322bab8787764d780832cfc3962722ed01efda99684a619d543","tests/complexity.rs":"e2e850d38529f171eb6005807c2a86a3f95a907052253eaa8e24a834200cda0b","tests/monge.rs":"fe418373f89904cd40e2ed1d539bccd2d9be50c1f3f9ab2d93806ff3bce6b7ea","tests/random_monge/mod.rs":"83cf1dd0c7b0b511ad754c19857a5d830ed54e8fef3c31235cd70b709687534b","tests/version-numbers.rs":"73301b7bfe500eada5ede66f0dce89bd3e354af50a8e7a123b02931cd5eb8e16"},"package":"b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c"}

53
vendor/smawk/Cargo.toml vendored Normal file

@@ -0,0 +1,53 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "smawk"
version = "0.3.2"
authors = ["Martin Geisler <martin@geisler.net>"]
exclude = [
".github/",
".gitignore",
"benches/",
"examples/",
]
description = "Functions for finding row-minima in a totally monotone matrix."
readme = "README.md"
keywords = [
"smawk",
"matrix",
"optimization",
"dynamic-programming",
]
categories = [
"algorithms",
"mathematics",
"science",
]
license = "MIT"
repository = "https://github.com/mgeisler/smawk"
[dependencies.ndarray]
version = "0.15.4"
optional = true
[dev-dependencies.num-traits]
version = "0.2.14"
[dev-dependencies.rand]
version = "0.8.4"
[dev-dependencies.rand_chacha]
version = "0.3.1"
[dev-dependencies.version-sync]
version = "0.9.4"

21
vendor/smawk/LICENSE vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Martin Geisler
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

151
vendor/smawk/README.md vendored Normal file

@@ -0,0 +1,151 @@
# SMAWK Algorithm in Rust
[![](https://github.com/mgeisler/smawk/workflows/build/badge.svg)][build-status]
[![](https://codecov.io/gh/mgeisler/smawk/branch/master/graph/badge.svg)][codecov]
[![](https://img.shields.io/crates/v/smawk.svg)][crates-io]
[![](https://docs.rs/smawk/badge.svg)][api-docs]
This crate contains an implementation of the [SMAWK algorithm][smawk] for
finding the smallest element per row in a totally monotone matrix.
The SMAWK algorithm allows you to lower the running time of some algorithms from
O(_n_²) to just O(_n_). In other words, you can turn a quadratic time complexity
(which is often too expensive) into linear time complexity.
Finding optimal line breaks in a paragraph of text is an example of an algorithm
which would normally take O(_n_²) time for _n_ words. With this crate, the
running time becomes linear. Please see the [textwrap crate][textwrap] for an
example of this.
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
smawk = "0.3"
```
You can now efficiently find row and column minima. Here is an example where we
find the column minima:
```rust
use smawk::Matrix;
let matrix = vec![
vec![3, 2, 4, 5, 6],
vec![2, 1, 3, 3, 4],
vec![2, 1, 3, 3, 4],
vec![3, 2, 4, 3, 4],
vec![4, 3, 2, 1, 1],
];
let minima = vec![1, 1, 4, 4, 4];
assert_eq!(smawk::column_minima(&matrix), minima);
```
The `minima` vector gives the index of the minimum value per column, so
`minima[0] == 1` since the minimum value in the first column is 2 (row 1). Note
that the smallest row index is returned.
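Row minima work the same way. Here is a minimal sketch for the same
matrix; the expected indices match the crate's own test suite:
```rust
use smawk::Matrix;
let matrix = vec![
    vec![3, 2, 4, 5, 6],
    vec![2, 1, 3, 3, 4],
    vec![2, 1, 3, 3, 4],
    vec![3, 2, 4, 3, 4],
    vec![4, 3, 2, 1, 1],
];
assert_eq!(smawk::row_minima(&matrix), vec![1, 1, 1, 1, 3]);
```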
### Cargo Features
This crate has an optional dependency on the
[`ndarray` crate](https://docs.rs/ndarray/), which provides an efficient matrix
implementation. Enable the `ndarray` Cargo feature to use it.
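For example, in your `Cargo.toml` (a sketch; adjust the version to the
one you use):
```toml
[dependencies]
smawk = { version = "0.3", features = ["ndarray"] }
```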
## Documentation
**[API documentation][api-docs]**
## Changelog
### Version 0.3.2 (2023-09-17)
This release adds more documentation and renames the top-level SMAWK functions.
The old names have been kept for now to ensure backwards compatibility, but they
will be removed in a future release.
- [#65](https://github.com/mgeisler/smawk/pull/65): Forbid the use of unsafe
code.
- [#69](https://github.com/mgeisler/smawk/pull/69): Migrate to the Rust 2021
edition.
- [#73](https://github.com/mgeisler/smawk/pull/73): Add examples to all
functions.
- [#74](https://github.com/mgeisler/smawk/pull/74): Add “mathematics” as a crate
category.
- [#75](https://github.com/mgeisler/smawk/pull/75): Remove `smawk_` prefix from
optimized functions.
### Version 0.3.1 (2021-01-30)
This release relaxes the bounds on the `smawk_row_minima`,
`smawk_column_minima`, and `online_column_minima` functions so that they work on
matrices containing floating point numbers.
- [#55](https://github.com/mgeisler/smawk/pull/55): Relax bounds to `PartialOrd`
instead of `Ord`.
- [#56](https://github.com/mgeisler/smawk/pull/56): Update dependencies to their
latest versions.
- [#59](https://github.com/mgeisler/smawk/pull/59): Give an example of what
SMAWK does in the README.
### Version 0.3.0 (2020-09-02)
This release slims down the crate significantly by making `ndarray` an optional
dependency.
- [#45](https://github.com/mgeisler/smawk/pull/45): Move non-SMAWK code and unit
tests out of lib and into separate modules.
- [#46](https://github.com/mgeisler/smawk/pull/46): Switch `smawk_row_minima`
and `smawk_column_minima` functions to a new `Matrix` trait.
- [#47](https://github.com/mgeisler/smawk/pull/47): Make the dependency on the
`ndarray` crate optional.
- [#48](https://github.com/mgeisler/smawk/pull/48): Let `is_monge` take a
`Matrix` argument instead of `ndarray::Array2`.
- [#50](https://github.com/mgeisler/smawk/pull/50): Remove mandatory
dependencies on `rand` and `num-traits` crates.
### Version 0.2.0 (2020-07-29)
This release updates the code to Rust 2018.
- [#18](https://github.com/mgeisler/smawk/pull/18): Make `online_column_minima`
generic in matrix type.
- [#23](https://github.com/mgeisler/smawk/pull/23): Switch to the
[Rust 2018][rust-2018] edition. We test against the latest stable and nightly
version of Rust.
- [#29](https://github.com/mgeisler/smawk/pull/29): Drop strict Rust 2018
compatibility by not testing with Rust 1.31.0.
- [#32](https://github.com/mgeisler/smawk/pull/32): Fix crash on overflow in
`is_monge`.
- [#33](https://github.com/mgeisler/smawk/pull/33): Update `rand` dependency to
latest version and get rid of `rand_derive`.
- [#34](https://github.com/mgeisler/smawk/pull/34): Bump `num-traits` and
`version-sync` dependencies to latest versions.
- [#35](https://github.com/mgeisler/smawk/pull/35): Drop unnecessary Windows
tests. The assumption is that the numeric computations we do are
cross-platform.
- [#36](https://github.com/mgeisler/smawk/pull/36): Update `ndarray` dependency
to the latest version.
- [#37](https://github.com/mgeisler/smawk/pull/37): Automate publishing new
releases to crates.io.
### Version 0.1.0 — August 7th, 2018
First release with the classical offline SMAWK algorithm as well as a newer
online version where the matrix entries can depend on previously computed column
minima.
## License
SMAWK can be distributed according to the [MIT license][mit]. Contributions will
be accepted under the same license.
[build-status]: https://github.com/mgeisler/smawk/actions?query=branch%3Amaster+workflow%3Abuild
[crates-io]: https://crates.io/crates/smawk
[codecov]: https://codecov.io/gh/mgeisler/smawk
[textwrap]: https://crates.io/crates/textwrap
[smawk]: https://en.wikipedia.org/wiki/SMAWK_algorithm
[api-docs]: https://docs.rs/smawk/
[rust-2018]: https://doc.rust-lang.org/edition-guide/rust-2018/
[mit]: LICENSE

19
vendor/smawk/dprint.json vendored Normal file

@@ -0,0 +1,19 @@
{
"markdown": {
"textWrap": "always"
},
"exec": {
"commands": [{
"command": "rustfmt",
"exts": ["rs"]
}]
},
"excludes": ["target/"],
"plugins": [
"https://plugins.dprint.dev/json-0.17.4.wasm",
"https://plugins.dprint.dev/markdown-0.16.1.wasm",
"https://plugins.dprint.dev/toml-0.5.4.wasm",
"https://plugins.dprint.dev/exec-0.4.3.json@42343548b8022c99b1d750be6b894fe6b6c7ee25f72ae9f9082226dd2e515072",
"https://plugins.dprint.dev/prettier-0.27.0.json@3557a62b4507c55a47d8cde0683195b14d13c41dda66d0f0b0e111aed107e2fe"
]
}

2
vendor/smawk/rustfmt.toml vendored Normal file

@@ -0,0 +1,2 @@
# Use rustfmt from the nightly channel for this:
imports_granularity = "Module"

150
vendor/smawk/src/brute_force.rs vendored Normal file

@@ -0,0 +1,150 @@
//! Brute-force algorithm for finding column minima.
//!
//! The functions here are mostly meant to be used for testing
//! correctness of the SMAWK implementation.
//!
//! **Note: this module is only available if you enable the `ndarray`
//! Cargo feature.**
use ndarray::{Array2, ArrayView1};
/// Compute lane minimum by brute force.
///
/// This does a simple scan through the lane (row or column).
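///
/// # Examples
///
/// A minimal sketch (like the rest of this module it requires the
/// `ndarray` Cargo feature):
///
/// ```
/// let lane = ndarray::arr1(&[3, 1, 2]);
/// assert_eq!(smawk::brute_force::lane_minimum(lane.view()), 1);
/// ```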
#[inline]
pub fn lane_minimum<T: Ord>(lane: ArrayView1<'_, T>) -> usize {
lane.iter()
.enumerate()
.min_by_key(|&(idx, elem)| (elem, idx))
.map(|(idx, _)| idx)
.expect("empty lane in matrix")
}
/// Compute row minima by brute force in O(*mn*) time.
///
/// This function implements a simple brute-force approach where each
/// matrix row is scanned completely. This means that the function
/// works on all matrices, not just Monge matrices.
///
/// # Examples
///
/// ```
/// let matrix = ndarray::arr2(&[[4, 2, 4, 3],
/// [5, 3, 5, 3],
/// [5, 3, 3, 1]]);
/// assert_eq!(smawk::brute_force::row_minima(&matrix),
/// vec![1, 1, 3]);
/// ```
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
pub fn row_minima<T: Ord>(matrix: &Array2<T>) -> Vec<usize> {
matrix.rows().into_iter().map(lane_minimum).collect()
}
/// Compute column minima by brute force in O(*mn*) time.
///
/// This function implements a simple brute-force approach where each
/// matrix column is scanned completely. This means that the function
/// works on all matrices, not just Monge matrices.
///
/// # Examples
///
/// ```
/// let matrix = ndarray::arr2(&[[4, 2, 4, 3],
/// [5, 3, 5, 3],
/// [5, 3, 3, 1]]);
/// assert_eq!(smawk::brute_force::column_minima(&matrix),
/// vec![0, 0, 2, 2]);
/// ```
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
pub fn column_minima<T: Ord>(matrix: &Array2<T>) -> Vec<usize> {
matrix.columns().into_iter().map(lane_minimum).collect()
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::arr2;
#[test]
fn brute_force_1x1() {
let matrix = arr2(&[[2]]);
let minima = vec![0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn brute_force_2x1() {
let matrix = arr2(&[
[3], //
[2],
]);
let minima = vec![0, 0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn brute_force_1x2() {
let matrix = arr2(&[[2, 1]]);
let minima = vec![1];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn brute_force_2x2() {
let matrix = arr2(&[
[3, 2], //
[2, 1],
]);
let minima = vec![1, 1];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn brute_force_3x3() {
let matrix = arr2(&[
[3, 4, 4], //
[3, 4, 4],
[2, 3, 3],
]);
let minima = vec![0, 0, 0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn brute_force_4x4() {
let matrix = arr2(&[
[4, 5, 5, 5], //
[2, 3, 3, 3],
[2, 3, 3, 3],
[2, 2, 2, 2],
]);
let minima = vec![0, 0, 0, 0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn brute_force_5x5() {
let matrix = arr2(&[
[3, 2, 4, 5, 6],
[2, 1, 3, 3, 4],
[2, 1, 3, 3, 4],
[3, 2, 4, 3, 4],
[4, 3, 2, 1, 1],
]);
let minima = vec![1, 1, 1, 1, 3];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
}

570
vendor/smawk/src/lib.rs vendored Normal file

@@ -0,0 +1,570 @@
//! This crate implements various functions that help speed up dynamic
//! programming, most importantly the SMAWK algorithm for finding row
//! or column minima in a totally monotone matrix with *m* rows and
//! *n* columns in time O(*m* + *n*). This is much better than the
//! brute force solution which would take O(*mn*). When *m* and *n*
//! are of the same order, this turns a quadratic function into a
//! linear function.
//!
//! # Examples
//!
//! Computing the column minima of an *m* ✕ *n* Monge matrix can be
//! done efficiently with `smawk::column_minima`:
//!
//! ```
//! use smawk::Matrix;
//!
//! let matrix = vec![
//! vec![3, 2, 4, 5, 6],
//! vec![2, 1, 3, 3, 4],
//! vec![2, 1, 3, 3, 4],
//! vec![3, 2, 4, 3, 4],
//! vec![4, 3, 2, 1, 1],
//! ];
//! let minima = vec![1, 1, 4, 4, 4];
//! assert_eq!(smawk::column_minima(&matrix), minima);
//! ```
//!
//! The `minima` vector gives the index of the minimum value per
//! column, so `minima[0] == 1` since the minimum value in the first
//! column is 2 (row 1). Note that the smallest row index is returned.
//!
//! # Definitions
//!
//! Some of the functions in this crate only work on matrices that are
//! *totally monotone*, which we will define below.
//!
//! ## Monotone Matrices
//!
//! We start with a helper definition. Given an *m* ✕ *n* matrix `M`,
//! we say that `M` is *monotone* when the minimum value of row `i` is
//! found to the left of the minimum value in row `i'` where `i < i'`.
//!
//! More formally, if we let `rm(i)` denote the column index of the
//! left-most minimum value in row `i`, then we have
//!
//! ```text
//! rm(0) ≤ rm(1) ≤ ... ≤ rm(m - 1)
//! ```
//!
//! This means that as you go down the rows from top to bottom, the
//! row-minima proceed from left to right.
//!
//! The algorithms in this crate deal with finding such row- and
//! column-minima.
//!
//! ## Totally Monotone Matrices
//!
//! We say that a matrix `M` is *totally monotone* when every
//! sub-matrix is monotone. A sub-matrix is formed by the intersection
//! of any two rows `i < i'` and any two columns `j < j'`.
//!
//! This is often expressed via the following equivalent condition:
//!
//! ```text
//! M[i, j] > M[i, j'] => M[i', j] > M[i', j']
//! ```
//!
//! for all `i < i'` and `j < j'`.
//!
//! ## Monge Property for Matrices
//!
//! A matrix `M` is said to fulfill the *Monge property* if
//!
//! ```text
//! M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j]
//! ```
//!
//! for all `i < i'` and `j < j'`. This says that given any rectangle
//! in the matrix, the sum of the top-left and bottom-right corners is
//! less than or equal to the sum of the bottom-left and upper-right
//! corners.
//!
//! All Monge matrices are totally monotone, so it is enough to
//! establish that the Monge property holds in order to use a matrix
//! with the functions in this crate. If your program is dealing with
//! unknown inputs, it can use [`monge::is_monge`] to verify that a
//! matrix is a Monge matrix.
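//!
//! A minimal sketch of such a check (the 2 ✕ 3 matrix below satisfies
//! the inequality, with equality, in every adjacent 2 ✕ 2 submatrix):
//!
//! ```
//! let matrix = vec![
//!     vec![1, 2, 3],
//!     vec![2, 3, 4],
//! ];
//! assert!(smawk::monge::is_monge(&matrix));
//! ```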
#![doc(html_root_url = "https://docs.rs/smawk/0.3.2")]
// The s! macro from ndarray uses unsafe internally, so we can only
// forbid unsafe code when building with the default features.
#![cfg_attr(not(feature = "ndarray"), forbid(unsafe_code))]
#[cfg(feature = "ndarray")]
pub mod brute_force;
pub mod monge;
#[cfg(feature = "ndarray")]
pub mod recursive;
/// Minimal matrix trait for two-dimensional arrays.
///
/// This provides the functionality needed to represent a read-only
/// numeric matrix. You can query the size of the matrix and access
/// elements. Modeled after [`ndarray::Array2`] from the [ndarray
/// crate](https://crates.io/crates/ndarray).
///
/// Enable the `ndarray` Cargo feature if you want to use it with
/// `ndarray::Array2`.
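///
/// # Examples
///
/// A minimal sketch of implementing the trait for a hypothetical
/// flat, row-major storage type (`FlatMatrix` is not part of this
/// crate):
///
/// ```
/// struct FlatMatrix {
///     data: Vec<i32>,
///     ncols: usize,
/// }
///
/// impl smawk::Matrix<i32> for FlatMatrix {
///     fn nrows(&self) -> usize {
///         self.data.len() / self.ncols
///     }
///     fn ncols(&self) -> usize {
///         self.ncols
///     }
///     fn index(&self, row: usize, column: usize) -> i32 {
///         self.data[row * self.ncols + column]
///     }
/// }
///
/// // Row-major data for the 2 ✕ 2 matrix [[3, 2], [2, 1]].
/// let matrix = FlatMatrix { data: vec![3, 2, 2, 1], ncols: 2 };
/// assert_eq!(smawk::row_minima(&matrix), vec![1, 1]);
/// ```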
pub trait Matrix<T: Copy> {
/// Return the number of rows.
fn nrows(&self) -> usize;
/// Return the number of columns.
fn ncols(&self) -> usize;
/// Return a matrix element.
fn index(&self, row: usize, column: usize) -> T;
}
/// Simple and inefficient matrix representation used for doctest
/// examples and simple unit tests.
///
/// You should prefer implementing it yourself, or you can enable the
/// `ndarray` Cargo feature and use the provided implementation for
/// [`ndarray::Array2`].
impl<T: Copy> Matrix<T> for Vec<Vec<T>> {
fn nrows(&self) -> usize {
self.len()
}
fn ncols(&self) -> usize {
self[0].len()
}
fn index(&self, row: usize, column: usize) -> T {
self[row][column]
}
}
/// Adapting [`ndarray::Array2`] to the `Matrix` trait.
///
/// **Note: this implementation is only available if you enable the
/// `ndarray` Cargo feature.**
#[cfg(feature = "ndarray")]
impl<T: Copy> Matrix<T> for ndarray::Array2<T> {
#[inline]
fn nrows(&self) -> usize {
self.nrows()
}
#[inline]
fn ncols(&self) -> usize {
self.ncols()
}
#[inline]
fn index(&self, row: usize, column: usize) -> T {
self[[row, column]]
}
}
/// Compute row minima in O(*m* + *n*) time.
///
/// This implements the [SMAWK algorithm] for efficiently finding row
/// minima in a totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Examples
///
/// ```
/// use smawk::Matrix;
/// let matrix = vec![vec![4, 2, 4, 3],
/// vec![5, 3, 5, 3],
/// vec![5, 3, 3, 1]];
/// assert_eq!(smawk::row_minima(&matrix),
/// vec![1, 1, 3]);
/// ```
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
/// [SMAWK algorithm]: https://en.wikipedia.org/wiki/SMAWK_algorithm
pub fn row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
// Benchmarking shows that SMAWK performs roughly the same on row-
// and column-major matrices.
let mut minima = vec![0; matrix.nrows()];
smawk_inner(
&|j, i| matrix.index(i, j),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
#[deprecated(since = "0.3.2", note = "Please use `row_minima` instead.")]
pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
row_minima(matrix)
}
/// Compute column minima in O(*m* + *n*) time.
///
/// This implements the [SMAWK algorithm] for efficiently finding
/// column minima in a totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Examples
///
/// ```
/// use smawk::Matrix;
/// let matrix = vec![vec![4, 2, 4, 3],
/// vec![5, 3, 5, 3],
/// vec![5, 3, 3, 1]];
/// assert_eq!(smawk::column_minima(&matrix),
/// vec![0, 0, 2, 2]);
/// ```
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
///
/// [SMAWK algorithm]: https://en.wikipedia.org/wiki/SMAWK_algorithm
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
pub fn column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
let mut minima = vec![0; matrix.ncols()];
smawk_inner(
&|i, j| matrix.index(i, j),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
#[deprecated(since = "0.3.2", note = "Please use `column_minima` instead.")]
pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
column_minima(matrix)
}
/// Compute column minima in the given area of the matrix. The
/// `minima` slice is updated inplace.
fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>(
matrix: &M,
rows: &[usize],
cols: &[usize],
minima: &mut [usize],
) {
if cols.is_empty() {
return;
}
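// REDUCE step of SMAWK: scan the rows once, keeping a stack of at most
// `cols.len()` candidate rows. Rows that total monotonicity rules out
// as the minimum of any remaining column are popped and discarded.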
let mut stack = Vec::with_capacity(cols.len());
for r in rows {
// TODO: use stack.last() instead of stack.is_empty() etc
while !stack.is_empty()
&& matrix(stack[stack.len() - 1], cols[stack.len() - 1])
> matrix(*r, cols[stack.len() - 1])
{
stack.pop();
}
if stack.len() != cols.len() {
stack.push(*r);
}
}
let rows = &stack;
let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2);
for (idx, c) in cols.iter().enumerate() {
if idx % 2 == 1 {
odd_cols.push(*c);
}
}
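// Recurse on the odd-indexed columns, then fill in the even-indexed
// columns: the minimum row of an even column lies between the minimum
// rows of its odd neighbours, so only that slice of candidate rows
// needs to be scanned.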
smawk_inner(matrix, rows, &odd_cols, minima);
let mut r = 0;
for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) {
let mut row = rows[r];
let last_row = if c == cols.len() - 1 {
rows[rows.len() - 1]
} else {
minima[cols[c + 1]]
};
let mut pair = (matrix(row, col), row);
while row != last_row {
r += 1;
row = rows[r];
if (matrix(row, col), row) < pair {
pair = (matrix(row, col), row);
}
}
minima[col] = pair.1;
}
}
/// Compute upper-right column minima in O(*m* + *n*) time.
///
/// The input matrix must be totally monotone.
///
/// The function returns a vector of `(usize, T)`. The `usize` in the
/// tuple at index `j` tells you the row of the minimum value in
/// column `j`, and the `T` value is the minimum value itself.
///
/// The algorithm only considers values above the main diagonal, which
/// means that it computes values `v(j)` where:
///
/// ```text
/// v(0) = initial
/// v(j) = min { M[i, j] | i < j } for j > 0
/// ```
///
/// If we let `r(j)` denote the row index of the minimum value in
/// column `j`, the tuples in the result vector become `(r(j), M[r(j),
/// j])`.
///
/// The algorithm is an *online* algorithm, in the sense that `matrix`
/// function can refer back to previously computed column minima when
/// determining an entry in the matrix. The guarantee is that we only
/// call `matrix(i, j)` after having computed `v(i)`. This is
/// reflected in the `&[(usize, T)]` argument to `matrix`, which grows
/// as more and more values are computed.
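///
/// # Examples
///
/// A minimal sketch mirroring this crate's own unit tests; the
/// closure here ignores the previously computed minima it is given:
///
/// ```
/// let matrix = vec![
///     vec![0, 4, 4],
///     vec![0, 0, 4],
///     vec![0, 0, 0],
/// ];
/// let minima = smawk::online_column_minima(0, 3, |_, i, j| matrix[i][j]);
/// assert_eq!(minima, vec![(0, 0), (0, 4), (0, 4)]);
/// ```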
pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>(
initial: T,
size: usize,
matrix: M,
) -> Vec<(usize, T)> {
let mut result = vec![(0, initial)];
// State used by the algorithm.
let mut finished = 0;
let mut base = 0;
let mut tentative = 0;
// Shorthand for evaluating the matrix. We need a macro here since
// we don't want to borrow the result vector.
macro_rules! m {
($i:expr, $j:expr) => {{
assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j);
assert!(
$i < size && $j < size,
"(i, j) out of bounds: ({}, {}), size: {}",
$i,
$j,
size
);
matrix(&result[..finished + 1], $i, $j)
}};
}
// Keep going until we have finished all size columns. Since the
// columns are zero-indexed, we're done when finished == size - 1.
while finished < size - 1 {
// First case: we have already advanced past the previous
// tentative value. We make a new tentative value by applying
// smawk_inner to the largest square submatrix that fits under
// the base.
let i = finished + 1;
if i > tentative {
let rows = (base..finished + 1).collect::<Vec<_>>();
tentative = std::cmp::min(finished + rows.len(), size - 1);
let cols = (finished + 1..tentative + 1).collect::<Vec<_>>();
let mut minima = vec![0; tentative + 1];
smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima);
for col in cols {
let row = minima[col];
let v = m![row, col];
if col >= result.len() {
result.push((row, v));
} else if v < result[col].1 {
result[col] = (row, v);
}
}
finished = i;
continue;
}
// Second case: the new column minimum is on the diagonal. All
// subsequent ones will be at least as low, so we can clear
// out all our work from higher rows. As in the fourth case,
// the loss of tentative is amortized against the increase in
// base.
let diag = m![i - 1, i];
if diag < result[i].1 {
result[i] = (i - 1, diag);
base = i - 1;
tentative = i;
finished = i;
continue;
}
// Third case: row i-1 does not supply a column minimum in any
// column up to tentative. We simply advance finished while
// maintaining the invariant.
if m![i - 1, tentative] >= result[tentative].1 {
finished = i;
continue;
}
// Fourth and final case: a new column minimum at tentative.
// This allows us to make progress by incorporating rows prior
// to finished into the base. The base invariant holds because
// these rows cannot supply any later column minima. The work
// done when we last advanced tentative (and undone by this
// step) can be amortized against the increase in base.
base = i - 1;
tentative = i;
finished = i;
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smawk_1x1() {
let matrix = vec![vec![2]];
assert_eq!(row_minima(&matrix), vec![0]);
assert_eq!(column_minima(&matrix), vec![0]);
}
#[test]
fn smawk_2x1() {
let matrix = vec![
vec![3], //
vec![2],
];
assert_eq!(row_minima(&matrix), vec![0, 0]);
assert_eq!(column_minima(&matrix), vec![1]);
}
#[test]
fn smawk_1x2() {
let matrix = vec![vec![2, 1]];
assert_eq!(row_minima(&matrix), vec![1]);
assert_eq!(column_minima(&matrix), vec![0, 0]);
}
#[test]
fn smawk_2x2() {
let matrix = vec![
vec![3, 2], //
vec![2, 1],
];
assert_eq!(row_minima(&matrix), vec![1, 1]);
assert_eq!(column_minima(&matrix), vec![1, 1]);
}
#[test]
fn smawk_3x3() {
let matrix = vec![
vec![3, 4, 4], //
vec![3, 4, 4],
vec![2, 3, 3],
];
assert_eq!(row_minima(&matrix), vec![0, 0, 0]);
assert_eq!(column_minima(&matrix), vec![2, 2, 2]);
}
#[test]
fn smawk_4x4() {
let matrix = vec![
vec![4, 5, 5, 5], //
vec![2, 3, 3, 3],
vec![2, 3, 3, 3],
vec![2, 2, 2, 2],
];
assert_eq!(row_minima(&matrix), vec![0, 0, 0, 0]);
assert_eq!(column_minima(&matrix), vec![1, 3, 3, 3]);
}
#[test]
fn smawk_5x5() {
let matrix = vec![
vec![3, 2, 4, 5, 6],
vec![2, 1, 3, 3, 4],
vec![2, 1, 3, 3, 4],
vec![3, 2, 4, 3, 4],
vec![4, 3, 2, 1, 1],
];
assert_eq!(row_minima(&matrix), vec![1, 1, 1, 1, 3]);
assert_eq!(column_minima(&matrix), vec![1, 1, 4, 4, 4]);
}
#[test]
fn online_1x1() {
let matrix = vec![vec![0]];
let minima = vec![(0, 0)];
assert_eq!(online_column_minima(0, 1, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_2x2() {
let matrix = vec![
vec![0, 2], //
vec![0, 0],
];
let minima = vec![(0, 0), (0, 2)];
assert_eq!(online_column_minima(0, 2, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_3x3() {
let matrix = vec![
vec![0, 4, 4], //
vec![0, 0, 4],
vec![0, 0, 0],
];
let minima = vec![(0, 0), (0, 4), (0, 4)];
assert_eq!(online_column_minima(0, 3, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_4x4() {
let matrix = vec![
vec![0, 5, 5, 5], //
vec![0, 0, 3, 3],
vec![0, 0, 0, 3],
vec![0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 5), (1, 3), (1, 3)];
assert_eq!(online_column_minima(0, 4, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_5x5() {
let matrix = vec![
vec![0, 2, 4, 6, 7],
vec![0, 0, 3, 4, 5],
vec![0, 0, 0, 3, 4],
vec![0, 0, 0, 0, 4],
vec![0, 0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 2), (1, 3), (2, 3), (2, 4)];
assert_eq!(online_column_minima(0, 5, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn smawk_works_with_partial_ord() {
let matrix = vec![
vec![3.0, 2.0], //
vec![2.0, 1.0],
];
assert_eq!(row_minima(&matrix), vec![1, 1]);
assert_eq!(column_minima(&matrix), vec![1, 1]);
}
#[test]
fn online_works_with_partial_ord() {
let matrix = vec![
vec![0.0, 2.0], //
vec![0.0, 0.0],
];
let minima = vec![(0, 0.0), (0, 2.0)];
assert_eq!(
online_column_minima(0.0, 2, |_, i: usize, j: usize| matrix[i][j]),
minima
);
}
}

121
vendor/smawk/src/monge.rs vendored Normal file

@@ -0,0 +1,121 @@
//! Functions for generating and checking Monge arrays.
//!
//! The functions here are mostly meant to be used for testing
//! correctness of the SMAWK implementation.
use crate::Matrix;
use std::num::Wrapping;
use std::ops::Add;
/// Verify that a matrix is a Monge matrix.
///
/// A [Monge matrix] \(or array) is a matrix where the following
/// inequality holds:
///
/// ```text
/// M[i, j] + M[i', j'] <= M[i, j'] + M[i', j] for all i < i', j < j'
/// ```
///
/// The inequality says that the sum of the main diagonal is less than
/// or equal to the sum of the antidiagonal. Checking this condition is
/// done by checking all adjacent 2 ✕ 2 submatrices, so the running
/// time is O(*mn*).
///
/// [Monge matrix]: https://en.wikipedia.org/wiki/Monge_array
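///
/// # Examples
///
/// A small sketch using a constant-rows matrix taken from this
/// crate's unit tests:
///
/// ```
/// use smawk::monge::is_monge;
///
/// let matrix = vec![
///     vec![42, 42, 42, 42],
///     vec![0, 0, 0, 0],
///     vec![100, 100, 100, 100],
///     vec![1000, 1000, 1000, 1000],
/// ];
/// assert!(is_monge(&matrix));
/// ```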
pub fn is_monge<T: Ord + Copy, M: Matrix<T>>(matrix: &M) -> bool
where
Wrapping<T>: Add<Output = Wrapping<T>>,
{
/// Returns `Ok(a + b)` if the computation can be done without
/// overflow, otherwise `Err(a + b - T::MAX - 1)` is returned.
fn checked_add<T: Ord + Copy>(a: Wrapping<T>, b: Wrapping<T>) -> Result<T, T>
where
Wrapping<T>: Add<Output = Wrapping<T>>,
{
let sum = a + b;
if sum < a {
Err(sum.0)
} else {
Ok(sum.0)
}
}
(0..matrix.nrows() - 1)
.flat_map(|row| (0..matrix.ncols() - 1).map(move |col| (row, col)))
.all(|(row, col)| {
let top_left = Wrapping(matrix.index(row, col));
let top_right = Wrapping(matrix.index(row, col + 1));
let bot_left = Wrapping(matrix.index(row + 1, col));
let bot_right = Wrapping(matrix.index(row + 1, col + 1));
match (
checked_add(top_left, bot_right),
checked_add(bot_left, top_right),
) {
(Ok(a), Ok(b)) => a <= b, // No overflow.
(Err(a), Err(b)) => a <= b, // Double overflow.
(Ok(_), Err(_)) => true, // Anti-diagonal overflow.
(Err(_), Ok(_)) => false, // Main diagonal overflow.
}
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn is_monge_handles_overflow() {
// The x + y <= z + w computations will overflow for an u8
// matrix unless is_monge is careful.
let matrix: Vec<Vec<u8>> = vec![
vec![200, 200, 200, 200],
vec![200, 200, 200, 200],
vec![200, 200, 200, 200],
];
assert!(is_monge(&matrix));
}
#[test]
fn monge_constant_rows() {
let matrix = vec![
vec![42, 42, 42, 42],
vec![0, 0, 0, 0],
vec![100, 100, 100, 100],
vec![1000, 1000, 1000, 1000],
];
assert!(is_monge(&matrix));
}
#[test]
fn monge_constant_cols() {
let matrix = vec![
vec![42, 0, 100, 1000],
vec![42, 0, 100, 1000],
vec![42, 0, 100, 1000],
vec![42, 0, 100, 1000],
];
assert!(is_monge(&matrix));
}
#[test]
fn monge_upper_right() {
let matrix = vec![
vec![10, 10, 42, 42, 42],
vec![10, 10, 42, 42, 42],
vec![10, 10, 10, 10, 10],
vec![10, 10, 10, 10, 10],
];
assert!(is_monge(&matrix));
}
#[test]
fn monge_lower_left() {
let matrix = vec![
vec![10, 10, 10, 10, 10],
vec![10, 10, 10, 10, 10],
vec![42, 42, 42, 10, 10],
vec![42, 42, 42, 10, 10],
];
assert!(is_monge(&matrix));
}
}

191
vendor/smawk/src/recursive.rs vendored Normal file

@@ -0,0 +1,191 @@
//! Recursive algorithm for finding column minima.
//!
//! The functions here are mostly meant to be used for testing
//! correctness of the SMAWK implementation.
//!
//! **Note: this module is only available if you enable the `ndarray`
//! Cargo feature.**
use ndarray::{s, Array2, ArrayView2, Axis};
/// Compute row minima in O(*m* + *n* log *m*) time.
///
/// This function computes row minima in a totally monotone matrix
/// using a recursive algorithm.
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n* log *m*).
///
/// # Examples
///
/// ```
/// let matrix = ndarray::arr2(&[[4, 2, 4, 3],
/// [5, 3, 5, 3],
/// [5, 3, 3, 1]]);
/// assert_eq!(smawk::recursive::row_minima(&matrix),
/// vec![1, 1, 3]);
/// ```
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
pub fn row_minima<T: Ord>(matrix: &Array2<T>) -> Vec<usize> {
let mut minima = vec![0; matrix.nrows()];
recursive_inner(matrix.view(), &|| Direction::Row, 0, &mut minima);
minima
}
/// Compute column minima in O(*n* + *m* log *n*) time.
///
/// This function computes column minima in a totally monotone matrix
/// using a recursive algorithm.
///
/// Running time on an *m* ✕ *n* matrix: O(*n* + *m* log *n*).
///
/// # Examples
///
/// ```
/// let matrix = ndarray::arr2(&[[4, 2, 4, 3],
/// [5, 3, 5, 3],
/// [5, 3, 3, 1]]);
/// assert_eq!(smawk::recursive::column_minima(&matrix),
/// vec![0, 0, 2, 2]);
/// ```
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
pub fn column_minima<T: Ord>(matrix: &Array2<T>) -> Vec<usize> {
let mut minima = vec![0; matrix.ncols()];
recursive_inner(matrix.view(), &|| Direction::Column, 0, &mut minima);
minima
}
/// The type of minima (row or column) we compute.
enum Direction {
Row,
Column,
}
/// Compute the minima along the given direction (`Direction::Row` for
/// row minima and `Direction::Column` for column minima).
///
/// The direction is given as a generic function argument to allow
/// monomorphization to kick in. The function calls will be inlined
/// and optimized away, and the result is that the compiler generates
/// different code for finding row and column minima.
fn recursive_inner<T: Ord, F: Fn() -> Direction>(
matrix: ArrayView2<'_, T>,
dir: &F,
offset: usize,
minima: &mut [usize],
) {
if matrix.is_empty() {
return;
}
let axis = match dir() {
Direction::Row => Axis(0),
Direction::Column => Axis(1),
};
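// Find the leftmost minimum of the middle lane by brute force. By
// monotonicity, earlier lanes have their minima at or before
// `min_idx` and later lanes at or after it, so it is enough to
// recurse on the two corner submatrices sliced out below.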
let mid = matrix.len_of(axis) / 2;
let min_idx = crate::brute_force::lane_minimum(matrix.index_axis(axis, mid));
minima[mid] = offset + min_idx;
if mid == 0 {
return; // Matrix has a single row or column, so we're done.
}
let top_left = match dir() {
Direction::Row => matrix.slice(s![..mid, ..(min_idx + 1)]),
Direction::Column => matrix.slice(s![..(min_idx + 1), ..mid]),
};
let bot_right = match dir() {
Direction::Row => matrix.slice(s![(mid + 1).., min_idx..]),
Direction::Column => matrix.slice(s![min_idx.., (mid + 1)..]),
};
recursive_inner(top_left, dir, offset, &mut minima[..mid]);
recursive_inner(bot_right, dir, offset + min_idx, &mut minima[mid + 1..]);
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::arr2;
#[test]
fn recursive_1x1() {
let matrix = arr2(&[[2]]);
let minima = vec![0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn recursive_2x1() {
let matrix = arr2(&[
[3], //
[2],
]);
let minima = vec![0, 0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn recursive_1x2() {
let matrix = arr2(&[[2, 1]]);
let minima = vec![1];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn recursive_2x2() {
let matrix = arr2(&[
[3, 2], //
[2, 1],
]);
let minima = vec![1, 1];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn recursive_3x3() {
let matrix = arr2(&[
[3, 4, 4], //
[3, 4, 4],
[2, 3, 3],
]);
let minima = vec![0, 0, 0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn recursive_4x4() {
let matrix = arr2(&[
[4, 5, 5, 5], //
[2, 3, 3, 3],
[2, 3, 3, 3],
[2, 2, 2, 2],
]);
let minima = vec![0, 0, 0, 0];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
#[test]
fn recursive_5x5() {
let matrix = arr2(&[
[3, 2, 4, 5, 6],
[2, 1, 3, 3, 4],
[2, 1, 3, 3, 4],
[3, 2, 4, 3, 4],
[4, 3, 2, 1, 1],
]);
let minima = vec![1, 1, 1, 1, 3];
assert_eq!(row_minima(&matrix), minima);
assert_eq!(column_minima(&matrix.reversed_axes()), minima);
}
}

104
vendor/smawk/tests/agreement.rs vendored Normal file

@@ -0,0 +1,104 @@
#![cfg(feature = "ndarray")]
use ndarray::{s, Array2};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
use smawk::{brute_force, online_column_minima, recursive};
mod random_monge;
use random_monge::random_monge_matrix;
/// Check that the brute force, recursive, and SMAWK functions
/// give identical results on a large number of randomly generated
/// Monge matrices.
#[test]
fn column_minima_agree() {
let sizes = vec![1, 2, 3, 4, 5, 10, 15, 20, 30];
let mut rng = ChaCha20Rng::seed_from_u64(0);
for _ in 0..4 {
for m in sizes.clone().iter() {
for n in sizes.clone().iter() {
let matrix: Array2<i32> = random_monge_matrix(*m, *n, &mut rng);
// Compute and test row minima.
let brute_force = brute_force::row_minima(&matrix);
let recursive = recursive::row_minima(&matrix);
let smawk = smawk::row_minima(&matrix);
assert_eq!(
brute_force, recursive,
"recursive and brute force differs on:\n{:?}",
matrix
);
assert_eq!(
brute_force, smawk,
"SMAWK and brute force differs on:\n{:?}",
matrix
);
// Do the same for the column minima.
let brute_force = brute_force::column_minima(&matrix);
let recursive = recursive::column_minima(&matrix);
let smawk = smawk::column_minima(&matrix);
assert_eq!(
brute_force, recursive,
"recursive and brute force differs on:\n{:?}",
matrix
);
assert_eq!(
brute_force, smawk,
"SMAWK and brute force differs on:\n{:?}",
matrix
);
}
}
}
}
/// Check that the brute force and online SMAWK functions give
/// identical results on a large number of randomly generated
/// Monge matrices.
#[test]
fn online_agree() {
let sizes = vec![1, 2, 3, 4, 5, 10, 15, 20, 30, 50];
let mut rng = ChaCha20Rng::seed_from_u64(0);
for _ in 0..5 {
for &size in &sizes {
// Random totally monotone square matrix of the
// desired size.
let mut matrix: Array2<i32> = random_monge_matrix(size, size, &mut rng);
// Adjust matrix so the column minima are above the
// diagonal. The brute_force::column_minima will still
// work just fine on such a mangled Monge matrix.
let max = *matrix.iter().max().unwrap_or(&0);
for idx in 0..(size as isize) {
// Using the maximum value of the matrix instead
// of i32::max_value() makes for prettier matrices
// in case we want to print them.
matrix.slice_mut(s![idx..idx + 1, ..idx + 1]).fill(max);
}
// The online algorithm always returns the initial
// value for the left-most column -- without
// inspecting the column at all. So we fill the
// left-most column with this value to have the brute
// force algorithm do the same.
let initial = 42;
matrix.slice_mut(s![0.., ..1]).fill(initial);
// Brute-force computation of column minima, returned
// in the same form as online_column_minima.
let brute_force = brute_force::column_minima(&matrix)
.iter()
.enumerate()
.map(|(j, &i)| (i, matrix[[i, j]]))
.collect::<Vec<_>>();
let online = online_column_minima(initial, size, |_, i, j| matrix[[i, j]]);
assert_eq!(
brute_force, online,
"brute force and online differ on:\n{:3?}",
matrix
);
}
}
}

83
vendor/smawk/tests/complexity.rs vendored Normal file

@@ -0,0 +1,83 @@
#![cfg(feature = "ndarray")]
use ndarray::{Array1, Array2};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
use smawk::online_column_minima;
mod random_monge;
use random_monge::random_monge_matrix;
#[derive(Debug)]
struct LinRegression {
alpha: f64,
beta: f64,
r_squared: f64,
}
/// Square an expression. Works equally well for floats and matrices.
macro_rules! squared {
($x:expr) => {
$x * $x
};
}
/// Compute the mean of a 1-dimensional array.
macro_rules! mean {
($a:expr) => {
$a.mean().expect("Mean of empty array")
};
}
/// Compute a simple linear regression from the list of values.
///
/// See <https://en.wikipedia.org/wiki/Simple_linear_regression>.
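///
/// The usual closed-form least-squares estimates are used:
/// `beta = cov(x, y) / var(x)`, `alpha = mean(y) - beta * mean(x)`,
/// and `r_squared` is the squared sample correlation coefficient.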
fn linear_regression(values: &[(usize, i32)]) -> LinRegression {
let xs = values.iter().map(|&(x, _)| x as f64).collect::<Array1<_>>();
let ys = values.iter().map(|&(_, y)| y as f64).collect::<Array1<_>>();
let xs_mean = mean!(&xs);
let ys_mean = mean!(&ys);
let xs_ys_mean = mean!(&xs * &ys);
let cov_xs_ys = ((&xs - xs_mean) * (&ys - ys_mean)).sum();
let var_xs = squared!(&xs - xs_mean).sum();
let beta = cov_xs_ys / var_xs;
let alpha = ys_mean - beta * xs_mean;
let r_squared = squared!(xs_ys_mean - xs_mean * ys_mean)
/ ((mean!(&xs * &xs) - squared!(xs_mean)) * (mean!(&ys * &ys) - squared!(ys_mean)));
LinRegression {
alpha,
beta,
r_squared,
}
}
/// Check that the number of matrix accesses in `online_column_minima`
/// grows as O(*n*) for *n* ✕ *n* matrix.
#[test]
fn online_linear_complexity() {
let mut rng = ChaCha20Rng::seed_from_u64(0);
let mut data = vec![];
for &size in &[1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 90, 100] {
let matrix: Array2<i32> = random_monge_matrix(size, size, &mut rng);
let count = std::cell::RefCell::new(0);
online_column_minima(0, size, |_, i, j| {
*count.borrow_mut() += 1;
matrix[[i, j]]
});
data.push((size, count.into_inner()));
}
let lin_reg = linear_regression(&data);
assert!(
lin_reg.r_squared > 0.95,
"r² = {:.4} is lower than expected for a linear fit\nData points: {:?}\n{:?}",
lin_reg.r_squared,
data,
lin_reg
);
}

83
vendor/smawk/tests/monge.rs vendored Normal file

@@ -0,0 +1,83 @@
#![cfg(feature = "ndarray")]
use ndarray::{arr2, Array, Array2};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
use smawk::monge::is_monge;
mod random_monge;
use random_monge::{random_monge_matrix, MongePrim};
#[test]
fn random_monge() {
let mut rng = ChaCha20Rng::seed_from_u64(0);
let matrix: Array2<u8> = random_monge_matrix(5, 5, &mut rng);
assert!(is_monge(&matrix));
assert_eq!(
matrix,
arr2(&[
[2, 3, 4, 4, 5],
[5, 5, 6, 6, 7],
[3, 3, 4, 4, 5],
[5, 2, 3, 3, 4],
[5, 2, 3, 3, 4]
])
);
}
#[test]
fn monge_constant_rows() {
let mut rng = ChaCha20Rng::seed_from_u64(0);
let matrix: Array2<u8> = MongePrim::ConstantRows.to_matrix(5, 4, &mut rng);
assert!(is_monge(&matrix));
for row in matrix.rows() {
let elem = row[0];
assert_eq!(row, Array::from_elem(matrix.ncols(), elem));
}
}
#[test]
fn monge_constant_cols() {
let mut rng = ChaCha20Rng::seed_from_u64(0);
let matrix: Array2<u8> = MongePrim::ConstantCols.to_matrix(5, 4, &mut rng);
assert!(is_monge(&matrix));
for column in matrix.columns() {
let elem = column[0];
assert_eq!(column, Array::from_elem(matrix.nrows(), elem));
}
}
#[test]
fn monge_upper_right_ones() {
let mut rng = ChaCha20Rng::seed_from_u64(1);
let matrix: Array2<u8> = MongePrim::UpperRightOnes.to_matrix(5, 4, &mut rng);
assert!(is_monge(&matrix));
assert_eq!(
matrix,
arr2(&[
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
);
}
#[test]
fn monge_lower_left_ones() {
let mut rng = ChaCha20Rng::seed_from_u64(1);
let matrix: Array2<u8> = MongePrim::LowerLeftOnes.to_matrix(5, 4, &mut rng);
assert!(is_monge(&matrix));
assert_eq!(
matrix,
arr2(&[
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0]
])
);
}

83
vendor/smawk/tests/random_monge/mod.rs vendored Normal file

@@ -0,0 +1,83 @@
//! Test functionality for generating random Monge matrices.
// The code is put here so we can reuse it in different integration
// tests, without Cargo finding it when `cargo test` is run. See the
// section on "Submodules in Integration Tests" in
// https://doc.rust-lang.org/book/ch11-03-test-organization.html
use ndarray::{s, Array2};
use num_traits::PrimInt;
use rand::distributions::{Distribution, Standard};
use rand::Rng;
/// A Monge matrix can be decomposed into one of these primitive
/// building blocks.
#[derive(Copy, Clone)]
pub enum MongePrim {
ConstantRows,
ConstantCols,
UpperRightOnes,
LowerLeftOnes,
}
impl MongePrim {
/// Generate a Monge matrix from a primitive.
pub fn to_matrix<T: PrimInt, R: Rng>(&self, m: usize, n: usize, rng: &mut R) -> Array2<T>
where
Standard: Distribution<T>,
{
let mut matrix = Array2::from_elem((m, n), T::zero());
// Avoid panic in UpperRightOnes and LowerLeftOnes below.
if m == 0 || n == 0 {
return matrix;
}
match *self {
MongePrim::ConstantRows => {
for mut row in matrix.rows_mut() {
if rng.gen::<bool>() {
row.fill(T::one())
}
}
}
MongePrim::ConstantCols => {
for mut col in matrix.columns_mut() {
if rng.gen::<bool>() {
col.fill(T::one())
}
}
}
MongePrim::UpperRightOnes => {
let i = rng.gen_range(0..(m + 1) as isize);
let j = rng.gen_range(0..(n + 1) as isize);
matrix.slice_mut(s![..i, -j..]).fill(T::one());
}
MongePrim::LowerLeftOnes => {
let i = rng.gen_range(0..(m + 1) as isize);
let j = rng.gen_range(0..(n + 1) as isize);
matrix.slice_mut(s![-i.., ..j]).fill(T::one());
}
}
matrix
}
}
/// Generate a random Monge matrix.
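///
/// The matrix is built as a sum of `m + n` randomly chosen Monge
/// primitives; since the Monge inequality is linear, a sum of Monge
/// matrices is again Monge.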
pub fn random_monge_matrix<R: Rng, T: PrimInt>(m: usize, n: usize, rng: &mut R) -> Array2<T>
where
Standard: Distribution<T>,
{
let monge_primitives = [
MongePrim::ConstantRows,
MongePrim::ConstantCols,
MongePrim::LowerLeftOnes,
MongePrim::UpperRightOnes,
];
let mut matrix = Array2::from_elem((m, n), T::zero());
for _ in 0..(m + n) {
let monge = monge_primitives[rng.gen_range(0..monge_primitives.len())];
matrix = matrix + monge.to_matrix(m, n, rng);
}
matrix
}

9
vendor/smawk/tests/version-numbers.rs vendored Normal file

@@ -0,0 +1,9 @@
#[test]
fn test_readme_deps() {
version_sync::assert_markdown_deps_updated!("README.md");
}
#[test]
fn test_html_root_url() {
version_sync::assert_html_root_url_updated!("src/lib.rs");
}