Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

vendor/rayon/src/array.rs vendored Normal file

@@ -0,0 +1,85 @@
//! Parallel iterator types for [arrays] (`[T; N]`)
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [arrays]: https://doc.rust-lang.org/std/primitive.array.html
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::slice::{Iter, IterMut};
use crate::vec::DrainProducer;
use std::mem::ManuallyDrop;
impl<'data, T: Sync + 'data, const N: usize> IntoParallelIterator for &'data [T; N] {
type Item = &'data T;
type Iter = Iter<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&[T]>::into_par_iter(self)
}
}
impl<'data, T: Send + 'data, const N: usize> IntoParallelIterator for &'data mut [T; N] {
type Item = &'data mut T;
type Iter = IterMut<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&mut [T]>::into_par_iter(self)
}
}
impl<T: Send, const N: usize> IntoParallelIterator for [T; N] {
type Item = T;
type Iter = IntoIter<T, N>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { array: self }
}
}
/// Parallel iterator that moves out of an array.
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send, const N: usize> {
array: [T; N],
}
impl<T: Send, const N: usize> ParallelIterator for IntoIter<T, N> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(N)
}
}
impl<T: Send, const N: usize> IndexedParallelIterator for IntoIter<T, N> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
N
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
unsafe {
// Drain every item, and then the local array can just fall out of scope.
let mut array = ManuallyDrop::new(self.array);
let producer = DrainProducer::new(array.as_mut_slice());
callback.callback(producer)
}
}
}
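
Editor's sketch (not part of the vendored file): exercising the impls above, assuming only the `rayon` prelude.

use rayon::prelude::*;

fn main() {
    // Borrowing impl: an array iterates as a parallel slice.
    let squares: Vec<i32> = [1, 2, 3, 4].par_iter().map(|x| x * x).collect();
    assert_eq!(squares, vec![1, 4, 9, 16]);

    // By-value impl: `IntoIter` above moves the elements out of the array.
    let total: i32 = [1, 2, 3, 4].into_par_iter().sum();
    assert_eq!(total, 10);
}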

vendor/rayon/src/collections/binary_heap.rs vendored Normal file

@@ -0,0 +1,120 @@
//! This module contains the parallel iterator types for heaps
//! (`BinaryHeap<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::BinaryHeap;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a binary heap
#[derive(Debug, Clone)]
pub struct IntoIter<T: Ord + Send> {
inner: vec::IntoIter<T>,
}
impl<T: Ord + Send> IntoParallelIterator for BinaryHeap<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter {
inner: Vec::from(self).into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IntoIter<T> => T,
impl<T: Ord + Send>
}
/// Parallel iterator over an immutable reference to a binary heap
#[derive(Debug)]
pub struct Iter<'a, T: Ord + Sync> {
inner: vec::IntoIter<&'a T>,
}
impl<'a, T: Ord + Sync> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a BinaryHeap<T> => Iter<'a, T>,
impl<'a, T: Ord + Sync>
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Ord + Sync + 'a>
}
// `BinaryHeap` doesn't have a mutable `Iterator`
/// Draining parallel iterator that moves out of a binary heap,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, T: Ord + Send> {
heap: &'a mut BinaryHeap<T>,
}
impl<'a, T: Ord + Send> ParallelDrainFull for &'a mut BinaryHeap<T> {
type Iter = Drain<'a, T>;
type Item = T;
fn par_drain(self) -> Self::Iter {
Drain { heap: self }
}
}
impl<'a, T: Ord + Send> ParallelIterator for Drain<'a, T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'a, T: Ord + Send> IndexedParallelIterator for Drain<'a, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.heap.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
super::DrainGuard::new(self.heap)
.par_drain(..)
.with_producer(callback)
}
}
impl<'a, T: Ord + Send> Drop for Drain<'a, T> {
fn drop(&mut self) {
if !self.heap.is_empty() {
// We must not have produced, so just call a normal drain to remove the items.
self.heap.drain();
}
}
}
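
Editor's sketch: draining a heap in parallel; `par_drain` empties it through the temporary `Vec` machinery above while the allocation is retained.

use rayon::prelude::*;
use std::collections::BinaryHeap;

fn main() {
    let mut heap: BinaryHeap<i32> = (0..100).collect();
    let evens: Vec<i32> = heap.par_drain().filter(|x| x % 2 == 0).collect();
    assert_eq!(evens.len(), 50);
    assert!(heap.is_empty()); // drained, but capacity is kept
}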

vendor/rayon/src/collections/btree_map.rs vendored Normal file

@@ -0,0 +1,66 @@
//! This module contains the parallel iterator types for B-Tree maps
//! (`BTreeMap<K, V>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::BTreeMap;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a B-Tree map
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<K: Ord + Send, V: Send> {
inner: vec::IntoIter<(K, V)>,
}
into_par_vec! {
BTreeMap<K, V> => IntoIter<K, V>,
impl<K: Ord + Send, V: Send>
}
delegate_iterator! {
IntoIter<K, V> => (K, V),
impl<K: Ord + Send, V: Send>
}
/// Parallel iterator over an immutable reference to a B-Tree map
#[derive(Debug)]
pub struct Iter<'a, K: Ord + Sync, V: Sync> {
inner: vec::IntoIter<(&'a K, &'a V)>,
}
impl<'a, K: Ord + Sync, V: Sync> Clone for Iter<'a, K, V> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a BTreeMap<K, V> => Iter<'a, K, V>,
impl<'a, K: Ord + Sync, V: Sync>
}
delegate_iterator! {
Iter<'a, K, V> => (&'a K, &'a V),
impl<'a, K: Ord + Sync + 'a, V: Sync + 'a>
}
/// Parallel iterator over a mutable reference to a B-Tree map
#[derive(Debug)]
pub struct IterMut<'a, K: Ord + Sync, V: Send> {
inner: vec::IntoIter<(&'a K, &'a mut V)>,
}
into_par_vec! {
&'a mut BTreeMap<K, V> => IterMut<'a, K, V>,
impl<'a, K: Ord + Sync, V: Send>
}
delegate_iterator! {
IterMut<'a, K, V> => (&'a K, &'a mut V),
impl<'a, K: Ord + Sync + 'a, V: Send + 'a>
}
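
Editor's sketch: these iterators buffer into a `Vec` first, so parallel iteration over a `BTreeMap` still yields entries in key order.

use rayon::prelude::*;
use std::collections::BTreeMap;

fn main() {
    let map = BTreeMap::from([(2, "b"), (1, "a"), (3, "c")]);
    // `par_iter` goes through `Iter` above: entries are collected into a
    // `Vec` of references, then split across threads.
    let keys: Vec<i32> = map.par_iter().map(|(&k, _v)| k).collect();
    assert_eq!(keys, vec![1, 2, 3]);
}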

vendor/rayon/src/collections/btree_set.rs vendored Normal file

@@ -0,0 +1,52 @@
//! This module contains the parallel iterator types for B-Tree sets
//! (`BTreeSet<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::BTreeSet;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a B-Tree set
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<T: Ord + Send> {
inner: vec::IntoIter<T>,
}
into_par_vec! {
BTreeSet<T> => IntoIter<T>,
impl<T: Ord + Send>
}
delegate_iterator! {
IntoIter<T> => T,
impl<T: Ord + Send>
}
/// Parallel iterator over an immutable reference to a B-Tree set
#[derive(Debug)]
pub struct Iter<'a, T: Ord + Sync> {
inner: vec::IntoIter<&'a T>,
}
impl<'a, T: Ord + Sync + 'a> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a BTreeSet<T> => Iter<'a, T>,
impl<'a, T: Ord + Sync>
}
delegate_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Ord + Sync + 'a>
}
// `BTreeSet` doesn't have a mutable `Iterator`

vendor/rayon/src/collections/hash_map.rs vendored Normal file

@@ -0,0 +1,96 @@
//! This module contains the parallel iterator types for hash maps
//! (`HashMap<K, V>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::marker::PhantomData;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a hash map
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<K: Hash + Eq + Send, V: Send> {
inner: vec::IntoIter<(K, V)>,
}
into_par_vec! {
HashMap<K, V, S> => IntoIter<K, V>,
impl<K: Hash + Eq + Send, V: Send, S: BuildHasher>
}
delegate_iterator! {
IntoIter<K, V> => (K, V),
impl<K: Hash + Eq + Send, V: Send>
}
/// Parallel iterator over an immutable reference to a hash map
#[derive(Debug)]
pub struct Iter<'a, K: Hash + Eq + Sync, V: Sync> {
inner: vec::IntoIter<(&'a K, &'a V)>,
}
impl<'a, K: Hash + Eq + Sync, V: Sync> Clone for Iter<'a, K, V> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a HashMap<K, V, S> => Iter<'a, K, V>,
impl<'a, K: Hash + Eq + Sync, V: Sync, S: BuildHasher>
}
delegate_iterator! {
Iter<'a, K, V> => (&'a K, &'a V),
impl<'a, K: Hash + Eq + Sync + 'a, V: Sync + 'a>
}
/// Parallel iterator over a mutable reference to a hash map
#[derive(Debug)]
pub struct IterMut<'a, K: Hash + Eq + Sync, V: Send> {
inner: vec::IntoIter<(&'a K, &'a mut V)>,
}
into_par_vec! {
&'a mut HashMap<K, V, S> => IterMut<'a, K, V>,
impl<'a, K: Hash + Eq + Sync, V: Send, S: BuildHasher>
}
delegate_iterator! {
IterMut<'a, K, V> => (&'a K, &'a mut V),
impl<'a, K: Hash + Eq + Sync + 'a, V: Send + 'a>
}
/// Draining parallel iterator that moves out of a hash map,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, K: Hash + Eq + Send, V: Send> {
inner: vec::IntoIter<(K, V)>,
marker: PhantomData<&'a mut HashMap<K, V>>,
}
impl<'a, K: Hash + Eq + Send, V: Send, S: BuildHasher> ParallelDrainFull
for &'a mut HashMap<K, V, S>
{
type Iter = Drain<'a, K, V>;
type Item = (K, V);
fn par_drain(self) -> Self::Iter {
let vec: Vec<_> = self.drain().collect();
Drain {
inner: vec.into_par_iter(),
marker: PhantomData,
}
}
}
delegate_iterator! {
Drain<'_, K, V> => (K, V),
impl<K: Hash + Eq + Send, V: Send>
}
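
Editor's sketch: `par_drain` moves the pairs out through a temporary `Vec`, leaving the map empty but with its table still allocated.

use rayon::prelude::*;
use std::collections::HashMap;

fn main() {
    let mut map: HashMap<String, i32> = (0..4).map(|i| (i.to_string(), i)).collect();
    let total: i32 = map.par_drain().map(|(_k, v)| v).sum();
    assert_eq!(total, 6); // 0 + 1 + 2 + 3
    assert!(map.is_empty());
}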

vendor/rayon/src/collections/hash_set.rs vendored Normal file

@@ -0,0 +1,80 @@
//! This module contains the parallel iterator types for hash sets
//! (`HashSet<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::HashSet;
use std::hash::{BuildHasher, Hash};
use std::marker::PhantomData;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a hash set
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<T: Hash + Eq + Send> {
inner: vec::IntoIter<T>,
}
into_par_vec! {
HashSet<T, S> => IntoIter<T>,
impl<T: Hash + Eq + Send, S: BuildHasher>
}
delegate_iterator! {
IntoIter<T> => T,
impl<T: Hash + Eq + Send>
}
/// Parallel iterator over an immutable reference to a hash set
#[derive(Debug)]
pub struct Iter<'a, T: Hash + Eq + Sync> {
inner: vec::IntoIter<&'a T>,
}
impl<'a, T: Hash + Eq + Sync> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a HashSet<T, S> => Iter<'a, T>,
impl<'a, T: Hash + Eq + Sync, S: BuildHasher>
}
delegate_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Hash + Eq + Sync + 'a>
}
// `HashSet` doesn't have a mutable `Iterator`
/// Draining parallel iterator that moves out of a hash set,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, T: Hash + Eq + Send> {
inner: vec::IntoIter<T>,
marker: PhantomData<&'a mut HashSet<T>>,
}
impl<'a, T: Hash + Eq + Send, S: BuildHasher> ParallelDrainFull for &'a mut HashSet<T, S> {
type Iter = Drain<'a, T>;
type Item = T;
fn par_drain(self) -> Self::Iter {
let vec: Vec<_> = self.drain().collect();
Drain {
inner: vec.into_par_iter(),
marker: PhantomData,
}
}
}
delegate_iterator! {
Drain<'_, T> => T,
impl<T: Hash + Eq + Send>
}

vendor/rayon/src/collections/linked_list.rs vendored Normal file

@@ -0,0 +1,66 @@
//! This module contains the parallel iterator types for linked lists
//! (`LinkedList<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::LinkedList;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a linked list
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
inner: vec::IntoIter<T>,
}
into_par_vec! {
LinkedList<T> => IntoIter<T>,
impl<T: Send>
}
delegate_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a linked list
#[derive(Debug)]
pub struct Iter<'a, T: Sync> {
inner: vec::IntoIter<&'a T>,
}
impl<'a, T: Sync> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a LinkedList<T> => Iter<'a, T>,
impl<'a, T: Sync>
}
delegate_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync + 'a>
}
/// Parallel iterator over a mutable reference to a linked list
#[derive(Debug)]
pub struct IterMut<'a, T: Send> {
inner: vec::IntoIter<&'a mut T>,
}
into_par_vec! {
&'a mut LinkedList<T> => IterMut<'a, T>,
impl<'a, T: Send>
}
delegate_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send + 'a>
}

vendor/rayon/src/collections/mod.rs vendored Normal file

@@ -0,0 +1,84 @@
//! Parallel iterator types for [standard collections][std::collections]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::collections]: https://doc.rust-lang.org/stable/std/collections/
/// Convert an iterable collection into a parallel iterator by first
/// collecting into a temporary `Vec`, then iterating that.
macro_rules! into_par_vec {
($t:ty => $iter:ident<$($i:tt),*>, impl $($args:tt)*) => {
impl $($args)* IntoParallelIterator for $t {
type Item = <$t as IntoIterator>::Item;
type Iter = $iter<$($i),*>;
fn into_par_iter(self) -> Self::Iter {
use std::iter::FromIterator;
$iter { inner: Vec::from_iter(self).into_par_iter() }
}
}
};
}
pub mod binary_heap;
pub mod btree_map;
pub mod btree_set;
pub mod hash_map;
pub mod hash_set;
pub mod linked_list;
pub mod vec_deque;
use self::drain_guard::DrainGuard;
mod drain_guard {
use crate::iter::ParallelDrainRange;
use std::mem;
use std::ops::RangeBounds;
/// A proxy for draining a collection by converting to a `Vec` and back.
///
/// This is used for draining `BinaryHeap` and `VecDeque`, which both have
/// zero-allocation conversions to/from `Vec`, though not zero-cost:
/// - `BinaryHeap` will heapify from `Vec`, but at least that will be empty.
/// - `VecDeque` has to shift items to offset 0 when converting to `Vec`.
#[allow(missing_debug_implementations)]
pub(super) struct DrainGuard<'a, T, C: From<Vec<T>>> {
collection: &'a mut C,
vec: Vec<T>,
}
impl<'a, T, C> DrainGuard<'a, T, C>
where
C: Default + From<Vec<T>>,
Vec<T>: From<C>,
{
pub(super) fn new(collection: &'a mut C) -> Self {
Self {
// Temporarily steal the inner `Vec` so we can drain in place.
vec: Vec::from(mem::take(collection)),
collection,
}
}
}
impl<'a, T, C: From<Vec<T>>> Drop for DrainGuard<'a, T, C> {
fn drop(&mut self) {
// Restore the collection from the `Vec` with its original capacity.
*self.collection = C::from(mem::take(&mut self.vec));
}
}
impl<'a, T, C> ParallelDrainRange<usize> for &'a mut DrainGuard<'_, T, C>
where
T: Send,
C: From<Vec<T>>,
{
type Iter = crate::vec::Drain<'a, T>;
type Item = T;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
self.vec.par_drain(range)
}
}
}
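
Editor's sketch of the steal-and-restore idea behind `DrainGuard`, using a hypothetical helper name. Unlike the guard above, this closure version is not unwind-safe; the real guard restores the collection in `Drop` even if a panic occurs mid-drain.

use std::collections::BinaryHeap;
use std::mem;

// Hypothetical helper mirroring `DrainGuard::new` and its `Drop` impl:
// steal the heap's buffer as a `Vec`, mutate it, and put it back.
fn with_vec<T: Ord>(heap: &mut BinaryHeap<T>, f: impl FnOnce(&mut Vec<T>)) {
    let mut vec = Vec::from(mem::take(heap)); // zero-allocation conversion
    f(&mut vec);
    *heap = BinaryHeap::from(vec); // re-heapifies; cheap when `vec` is empty
}

fn main() {
    let mut heap: BinaryHeap<i32> = (0..10).collect();
    with_vec(&mut heap, |v| v.retain(|&x| x < 5));
    assert_eq!(heap.len(), 5);
}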

vendor/rayon/src/collections/vec_deque.rs vendored Normal file

@@ -0,0 +1,159 @@
//! This module contains the parallel iterator types for double-ended queues
//! (`VecDeque<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::VecDeque;
use std::ops::{Range, RangeBounds};
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::math::simplify_range;
use crate::slice;
use crate::vec;
/// Parallel iterator over a double-ended queue
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
inner: vec::IntoIter<T>,
}
impl<T: Send> IntoParallelIterator for VecDeque<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
// NOTE: requires data movement if the deque doesn't start at offset 0.
let inner = Vec::from(self).into_par_iter();
IntoIter { inner }
}
}
delegate_indexed_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a double-ended queue
#[derive(Debug)]
pub struct Iter<'a, T: Sync> {
inner: Chain<slice::Iter<'a, T>, slice::Iter<'a, T>>,
}
impl<'a, T: Sync> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync> IntoParallelIterator for &'a VecDeque<T> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
let (a, b) = self.as_slices();
Iter {
inner: a.into_par_iter().chain(b),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync + 'a>
}
/// Parallel iterator over a mutable reference to a double-ended queue
#[derive(Debug)]
pub struct IterMut<'a, T: Send> {
inner: Chain<slice::IterMut<'a, T>, slice::IterMut<'a, T>>,
}
impl<'a, T: Send> IntoParallelIterator for &'a mut VecDeque<T> {
type Item = &'a mut T;
type Iter = IterMut<'a, T>;
fn into_par_iter(self) -> Self::Iter {
let (a, b) = self.as_mut_slices();
IterMut {
inner: a.into_par_iter().chain(b),
}
}
}
delegate_indexed_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send + 'a>
}
/// Draining parallel iterator that moves a range out of a double-ended queue,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, T: Send> {
deque: &'a mut VecDeque<T>,
range: Range<usize>,
orig_len: usize,
}
impl<'a, T: Send> ParallelDrainRange<usize> for &'a mut VecDeque<T> {
type Iter = Drain<'a, T>;
type Item = T;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
Drain {
orig_len: self.len(),
range: simplify_range(range, self.len()),
deque: self,
}
}
}
impl<'a, T: Send> ParallelIterator for Drain<'a, T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'a, T: Send> IndexedParallelIterator for Drain<'a, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.range.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
// NOTE: requires data movement if the deque doesn't start at offset 0.
super::DrainGuard::new(self.deque)
.par_drain(self.range.clone())
.with_producer(callback)
}
}
impl<'a, T: Send> Drop for Drain<'a, T> {
fn drop(&mut self) {
if self.deque.len() != self.orig_len - self.range.len() {
// We must not have produced, so just call a normal drain to remove the items.
assert_eq!(self.deque.len(), self.orig_len);
self.deque.drain(self.range.clone());
}
}
}
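
Editor's sketch: borrowing iteration chains the deque's two internal slices, so nothing is copied even when the ring buffer wraps.

use rayon::prelude::*;
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = (0..8).collect();
    deque.rotate_left(3); // typically leaves the ring buffer wrapped
    let doubled: Vec<i32> = deque.par_iter().map(|x| x * 2).collect();
    assert_eq!(doubled, vec![6, 8, 10, 12, 14, 0, 2, 4]);
}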

vendor/rayon/src/compile_fail/cannot_collect_filtermap_data.rs vendored Normal file

@@ -0,0 +1,14 @@
/*! ```compile_fail,E0599
use rayon::prelude::*;
// collect_into_vec requires data of exact size, but filter_map yields only
// bounded size, so check that we cannot apply it.
let a: Vec<usize> = (0..1024).collect();
let mut v = vec![];
a.par_iter()
.filter_map(|&x| Some(x as f32))
.collect_into_vec(&mut v); //~ ERROR no method
``` */

vendor/rayon/src/compile_fail/cannot_zip_filtered_data.rs vendored Normal file

@@ -0,0 +1,14 @@
/*! ```compile_fail,E0277
use rayon::prelude::*;
// zip requires data of exact size, but filter yields only bounded
// size, so check that we cannot apply it.
let mut a: Vec<usize> = (0..1024).rev().collect();
let b: Vec<usize> = (0..1024).collect();
a.par_iter()
.zip(b.par_iter().filter(|&&x| x > 3)); //~ ERROR
``` */

vendor/rayon/src/compile_fail/cell_par_iter.rs vendored Normal file

@@ -0,0 +1,13 @@
/*! ```compile_fail,E0277
// Check that we can't use the par-iter API to access contents of a `Cell`.
use rayon::prelude::*;
use std::cell::Cell;
let c = Cell::new(42_i32);
(0_i32..1024).into_par_iter()
.map(|_| c.get()) //~ ERROR E0277
.min();
``` */

vendor/rayon/src/compile_fail/mod.rs vendored Normal file

@@ -0,0 +1,7 @@
// These modules contain `compile_fail` doc tests.
mod cannot_collect_filtermap_data;
mod cannot_zip_filtered_data;
mod cell_par_iter;
mod must_use;
mod no_send_par_iter;
mod rc_par_iter;

vendor/rayon/src/compile_fail/must_use.rs vendored Normal file

@@ -0,0 +1,69 @@
// Check that we are flagged for ignoring `must_use` parallel adaptors.
// (unfortunately there's no error code for `unused_must_use`)
macro_rules! must_use {
($( $name:ident #[$expr:meta] )*) => {$(
/// First sanity check that the expression is OK.
///
/// ```
/// #![deny(unused_must_use)]
///
/// use rayon::prelude::*;
///
/// let v: Vec<_> = (0..100).map(Some).collect();
/// let _ =
#[$expr]
/// ```
///
/// Now trigger the `must_use`.
///
/// ```compile_fail
/// #![deny(unused_must_use)]
///
/// use rayon::prelude::*;
///
/// let v: Vec<_> = (0..100).map(Some).collect();
#[$expr]
/// ```
mod $name {}
)*}
}
must_use! {
step_by /** v.par_iter().step_by(2); */
chain /** v.par_iter().chain(&v); */
chunks /** v.par_iter().chunks(2); */
fold_chunks /** v.par_iter().fold_chunks(2, || 0, |x, _| x); */
fold_chunks_with /** v.par_iter().fold_chunks_with(2, 0, |x, _| x); */
cloned /** v.par_iter().cloned(); */
copied /** v.par_iter().copied(); */
enumerate /** v.par_iter().enumerate(); */
filter /** v.par_iter().filter(|_| true); */
filter_map /** v.par_iter().filter_map(|x| *x); */
flat_map /** v.par_iter().flat_map(|x| *x); */
flat_map_iter /** v.par_iter().flat_map_iter(|x| *x); */
flatten /** v.par_iter().flatten(); */
flatten_iter /** v.par_iter().flatten_iter(); */
fold /** v.par_iter().fold(|| 0, |x, _| x); */
fold_with /** v.par_iter().fold_with(0, |x, _| x); */
try_fold /** v.par_iter().try_fold(|| 0, |x, _| Some(x)); */
try_fold_with /** v.par_iter().try_fold_with(0, |x, _| Some(x)); */
inspect /** v.par_iter().inspect(|_| {}); */
interleave /** v.par_iter().interleave(&v); */
interleave_shortest /** v.par_iter().interleave_shortest(&v); */
intersperse /** v.par_iter().intersperse(&None); */
map /** v.par_iter().map(|x| x); */
map_with /** v.par_iter().map_with(0, |_, x| x); */
map_init /** v.par_iter().map_init(|| 0, |_, x| x); */
panic_fuse /** v.par_iter().panic_fuse(); */
positions /** v.par_iter().positions(|_| true); */
rev /** v.par_iter().rev(); */
skip /** v.par_iter().skip(1); */
take /** v.par_iter().take(1); */
update /** v.par_iter().update(|_| {}); */
while_some /** v.par_iter().cloned().while_some(); */
with_max_len /** v.par_iter().with_max_len(1); */
with_min_len /** v.par_iter().with_min_len(1); */
zip /** v.par_iter().zip(&v); */
zip_eq /** v.par_iter().zip_eq(&v); */
}
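
Editor's sketch of what the generated tests verify: an adaptor alone builds a lazy pipeline and trips `unused_must_use`, while a terminal call actually drives it.

use rayon::prelude::*;

fn main() {
    let v: Vec<_> = (0..100).map(Some).collect();
    // Discarding the adaptor does no work; under `#![deny(unused_must_use)]`,
    // omitting the `let _ =` would fail to compile.
    let _ = v.par_iter().map(|x| x);
    // A terminal operation consumes the pipeline:
    assert_eq!(v.par_iter().filter_map(|x| *x).count(), 100);
}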

vendor/rayon/src/compile_fail/no_send_par_iter.rs vendored Normal file

@@ -0,0 +1,58 @@
// Check that `!Send` types fail early.
/** ```compile_fail,E0277
use rayon::prelude::*;
use std::ptr::null;
#[derive(Copy, Clone)]
struct NoSend(*const ());
unsafe impl Sync for NoSend {}
let x = Some(NoSend(null()));
x.par_iter()
.map(|&x| x) //~ ERROR
.count(); //~ ERROR
``` */
mod map {}
/** ```compile_fail,E0277
use rayon::prelude::*;
use std::ptr::null;
#[derive(Copy, Clone)]
struct NoSend(*const ());
unsafe impl Sync for NoSend {}
let x = Some(NoSend(null()));
x.par_iter()
.filter_map(|&x| Some(x)) //~ ERROR
.count(); //~ ERROR
``` */
mod filter_map {}
/** ```compile_fail,E0277
use rayon::prelude::*;
use std::ptr::null;
#[derive(Copy, Clone)]
struct NoSend(*const ());
unsafe impl Sync for NoSend {}
let x = Some(NoSend(null()));
x.par_iter()
.cloned() //~ ERROR
.count(); //~ ERROR
``` */
mod cloned {}

vendor/rayon/src/compile_fail/rc_par_iter.rs vendored Normal file

@@ -0,0 +1,15 @@
/*! ```compile_fail,E0599
// Check that we can't use the par-iter API to access contents of an
// `Rc`.
use rayon::prelude::*;
use std::rc::Rc;
let x = vec![Rc::new(22), Rc::new(23)];
let mut y = vec![];
x.into_par_iter() //~ ERROR no method named `into_par_iter`
.map(|rc| *rc)
.collect_into_vec(&mut y);
``` */

vendor/rayon/src/delegate.rs vendored Normal file

@@ -0,0 +1,109 @@
//! Macros for delegating newtype iterators to inner types.
// Note: these place `impl` bounds at the end, as token gobbling is the only way
// I know how to consume an arbitrary list of constraints, with `$($args:tt)*`.
/// Creates a parallel iterator implementation which simply wraps an inner type
/// and delegates all methods inward. The actual struct must already be
/// declared with an `inner` field.
///
/// The implementation of `IntoParallelIterator` should be added separately.
macro_rules! delegate_iterator {
($iter:ty => $item:ty ,
impl $( $args:tt )*
) => {
impl $( $args )* ParallelIterator for $iter {
type Item = $item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where C: UnindexedConsumer<Self::Item>
{
self.inner.drive_unindexed(consumer)
}
fn opt_len(&self) -> Option<usize> {
self.inner.opt_len()
}
}
}
}
/// Creates an indexed parallel iterator implementation which simply wraps an
/// inner type and delegates all methods inward. The actual struct must already
/// be declared with an `inner` field.
macro_rules! delegate_indexed_iterator {
($iter:ty => $item:ty ,
impl $( $args:tt )*
) => {
delegate_iterator!{
$iter => $item ,
impl $( $args )*
}
impl $( $args )* IndexedParallelIterator for $iter {
fn drive<C>(self, consumer: C) -> C::Result
where C: Consumer<Self::Item>
{
self.inner.drive(consumer)
}
fn len(&self) -> usize {
self.inner.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where CB: ProducerCallback<Self::Item>
{
self.inner.with_producer(callback)
}
}
}
}
#[test]
fn unindexed_example() {
use crate::collections::btree_map::IntoIter;
use crate::iter::plumbing::*;
use crate::prelude::*;
use std::collections::BTreeMap;
struct MyIntoIter<T: Ord + Send, U: Send> {
inner: IntoIter<T, U>,
}
delegate_iterator! {
MyIntoIter<T, U> => (T, U),
impl<T: Ord + Send, U: Send>
}
let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let iter = MyIntoIter {
inner: map.into_par_iter(),
};
let vec: Vec<_> = iter.map(|(k, _)| k).collect();
assert_eq!(vec, &[1, 2, 3]);
}
#[test]
fn indexed_example() {
use crate::iter::plumbing::*;
use crate::prelude::*;
use crate::vec::IntoIter;
struct MyIntoIter<T: Send> {
inner: IntoIter<T>,
}
delegate_indexed_iterator! {
MyIntoIter<T> => T,
impl<T: Send>
}
let iter = MyIntoIter {
inner: vec![1, 2, 3].into_par_iter(),
};
let mut vec = vec![];
iter.collect_into_vec(&mut vec);
assert_eq!(vec, &[1, 2, 3]);
}

vendor/rayon/src/iter/chain.rs vendored Normal file

@@ -0,0 +1,268 @@
use super::plumbing::*;
use super::*;
use rayon_core::join;
use std::cmp;
use std::iter;
/// `Chain` is an iterator that joins `b` after `a` in one continuous iterator.
/// This struct is created by the [`chain()`] method on [`ParallelIterator`].
///
/// [`chain()`]: trait.ParallelIterator.html#method.chain
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Chain<A, B>
where
A: ParallelIterator,
B: ParallelIterator<Item = A::Item>,
{
a: A,
b: B,
}
impl<A, B> Chain<A, B>
where
A: ParallelIterator,
B: ParallelIterator<Item = A::Item>,
{
/// Creates a new `Chain` iterator.
pub(super) fn new(a: A, b: B) -> Self {
Chain { a, b }
}
}
impl<A, B> ParallelIterator for Chain<A, B>
where
A: ParallelIterator,
B: ParallelIterator<Item = A::Item>,
{
type Item = A::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let Chain { a, b } = self;
// If we returned a value from our own `opt_len`, then the collect consumer in particular
// will balk at being treated like an actual `UnindexedConsumer`. But when we do know the
// length, we can use `Consumer::split_at` instead, and this is still harmless for other
// truly-unindexed consumers too.
let (left, right, reducer) = if let Some(len) = a.opt_len() {
consumer.split_at(len)
} else {
let reducer = consumer.to_reducer();
(consumer.split_off_left(), consumer, reducer)
};
let (a, b) = join(|| a.drive_unindexed(left), || b.drive_unindexed(right));
reducer.reduce(a, b)
}
fn opt_len(&self) -> Option<usize> {
self.a.opt_len()?.checked_add(self.b.opt_len()?)
}
}
impl<A, B> IndexedParallelIterator for Chain<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator<Item = A::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let Chain { a, b } = self;
let (left, right, reducer) = consumer.split_at(a.len());
let (a, b) = join(|| a.drive(left), || b.drive(right));
reducer.reduce(a, b)
}
fn len(&self) -> usize {
self.a.len().checked_add(self.b.len()).expect("overflow")
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let a_len = self.a.len();
return self.a.with_producer(CallbackA {
callback,
a_len,
b: self.b,
});
struct CallbackA<CB, B> {
callback: CB,
a_len: usize,
b: B,
}
impl<CB, B> ProducerCallback<B::Item> for CallbackA<CB, B>
where
B: IndexedParallelIterator,
CB: ProducerCallback<B::Item>,
{
type Output = CB::Output;
fn callback<A>(self, a_producer: A) -> Self::Output
where
A: Producer<Item = B::Item>,
{
self.b.with_producer(CallbackB {
callback: self.callback,
a_len: self.a_len,
a_producer,
})
}
}
struct CallbackB<CB, A> {
callback: CB,
a_len: usize,
a_producer: A,
}
impl<CB, A> ProducerCallback<A::Item> for CallbackB<CB, A>
where
A: Producer,
CB: ProducerCallback<A::Item>,
{
type Output = CB::Output;
fn callback<B>(self, b_producer: B) -> Self::Output
where
B: Producer<Item = A::Item>,
{
let producer = ChainProducer::new(self.a_len, self.a_producer, b_producer);
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct ChainProducer<A, B>
where
A: Producer,
B: Producer<Item = A::Item>,
{
a_len: usize,
a: A,
b: B,
}
impl<A, B> ChainProducer<A, B>
where
A: Producer,
B: Producer<Item = A::Item>,
{
fn new(a_len: usize, a: A, b: B) -> Self {
ChainProducer { a_len, a, b }
}
}
impl<A, B> Producer for ChainProducer<A, B>
where
A: Producer,
B: Producer<Item = A::Item>,
{
type Item = A::Item;
type IntoIter = ChainSeq<A::IntoIter, B::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
ChainSeq::new(self.a.into_iter(), self.b.into_iter())
}
fn min_len(&self) -> usize {
cmp::max(self.a.min_len(), self.b.min_len())
}
fn max_len(&self) -> usize {
cmp::min(self.a.max_len(), self.b.max_len())
}
fn split_at(self, index: usize) -> (Self, Self) {
if index <= self.a_len {
let a_rem = self.a_len - index;
let (a_left, a_right) = self.a.split_at(index);
let (b_left, b_right) = self.b.split_at(0);
(
ChainProducer::new(index, a_left, b_left),
ChainProducer::new(a_rem, a_right, b_right),
)
} else {
let (a_left, a_right) = self.a.split_at(self.a_len);
let (b_left, b_right) = self.b.split_at(index - self.a_len);
(
ChainProducer::new(self.a_len, a_left, b_left),
ChainProducer::new(0, a_right, b_right),
)
}
}
fn fold_with<F>(self, mut folder: F) -> F
where
F: Folder<A::Item>,
{
folder = self.a.fold_with(folder);
if folder.full() {
folder
} else {
self.b.fold_with(folder)
}
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Wrapper for Chain to implement ExactSizeIterator
struct ChainSeq<A, B> {
chain: iter::Chain<A, B>,
}
impl<A, B> ChainSeq<A, B> {
fn new(a: A, b: B) -> ChainSeq<A, B>
where
A: ExactSizeIterator,
B: ExactSizeIterator<Item = A::Item>,
{
ChainSeq { chain: a.chain(b) }
}
}
impl<A, B> Iterator for ChainSeq<A, B>
where
A: Iterator,
B: Iterator<Item = A::Item>,
{
type Item = A::Item;
fn next(&mut self) -> Option<Self::Item> {
self.chain.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.chain.size_hint()
}
}
impl<A, B> ExactSizeIterator for ChainSeq<A, B>
where
A: ExactSizeIterator,
B: ExactSizeIterator<Item = A::Item>,
{
}
impl<A, B> DoubleEndedIterator for ChainSeq<A, B>
where
A: DoubleEndedIterator,
B: DoubleEndedIterator<Item = A::Item>,
{
fn next_back(&mut self) -> Option<Self::Item> {
self.chain.next_back()
}
}
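
Editor's sketch: chaining two indexed iterators stays indexed, so element positions are exact across the seam.

use rayon::prelude::*;

fn main() {
    let a = vec![1, 2, 3];
    let b = vec![4, 5, 6];
    let items: Vec<(usize, i32)> = a
        .par_iter()
        .chain(&b)
        .copied()
        .enumerate()
        .collect();
    // Index 3 holds the first element contributed by `b`.
    assert_eq!(items[3], (3, 4));
}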

vendor/rayon/src/iter/chunks.rs vendored Normal file

@@ -0,0 +1,226 @@
use std::cmp::min;
use super::plumbing::*;
use super::*;
use crate::math::div_round_up;
/// `Chunks` is an iterator that groups elements of an underlying iterator.
///
/// This struct is created by the [`chunks()`] method on [`IndexedParallelIterator`].
///
/// [`chunks()`]: trait.IndexedParallelIterator.html#method.chunks
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Chunks<I>
where
I: IndexedParallelIterator,
{
size: usize,
i: I,
}
impl<I> Chunks<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Chunks` iterator
pub(super) fn new(i: I, size: usize) -> Self {
Chunks { i, size }
}
}
impl<I> ParallelIterator for Chunks<I>
where
I: IndexedParallelIterator,
{
type Item = Vec<I::Item>;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<Vec<I::Item>>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Chunks<I>
where
I: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
div_round_up(self.i.len(), self.size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.i.len();
return self.i.with_producer(Callback {
size: self.size,
len,
callback,
});
struct Callback<CB> {
size: usize,
len: usize,
callback: CB,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<Vec<T>>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = ChunkProducer::new(self.size, self.len, base, Vec::from_iter);
self.callback.callback(producer)
}
}
}
}
pub(super) struct ChunkProducer<P, F> {
chunk_size: usize,
len: usize,
base: P,
map: F,
}
impl<P, F> ChunkProducer<P, F> {
pub(super) fn new(chunk_size: usize, len: usize, base: P, map: F) -> Self {
Self {
chunk_size,
len,
base,
map,
}
}
}
impl<P, F, T> Producer for ChunkProducer<P, F>
where
P: Producer,
F: Fn(P::IntoIter) -> T + Send + Clone,
{
type Item = T;
type IntoIter = std::iter::Map<ChunkSeq<P>, F>;
fn into_iter(self) -> Self::IntoIter {
let chunks = ChunkSeq {
chunk_size: self.chunk_size,
len: self.len,
inner: if self.len > 0 { Some(self.base) } else { None },
};
chunks.map(self.map)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = min(index * self.chunk_size, self.len);
let (left, right) = self.base.split_at(elem_index);
(
ChunkProducer {
chunk_size: self.chunk_size,
len: elem_index,
base: left,
map: self.map.clone(),
},
ChunkProducer {
chunk_size: self.chunk_size,
len: self.len - elem_index,
base: right,
map: self.map,
},
)
}
fn min_len(&self) -> usize {
div_round_up(self.base.min_len(), self.chunk_size)
}
fn max_len(&self) -> usize {
self.base.max_len() / self.chunk_size
}
}
pub(super) struct ChunkSeq<P> {
chunk_size: usize,
len: usize,
inner: Option<P>,
}
impl<P> Iterator for ChunkSeq<P>
where
P: Producer,
{
type Item = P::IntoIter;
fn next(&mut self) -> Option<Self::Item> {
let producer = self.inner.take()?;
if self.len > self.chunk_size {
let (left, right) = producer.split_at(self.chunk_size);
self.inner = Some(right);
self.len -= self.chunk_size;
Some(left.into_iter())
} else {
debug_assert!(self.len > 0);
self.len = 0;
Some(producer.into_iter())
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<P> ExactSizeIterator for ChunkSeq<P>
where
P: Producer,
{
#[inline]
fn len(&self) -> usize {
div_round_up(self.len, self.chunk_size)
}
}
impl<P> DoubleEndedIterator for ChunkSeq<P>
where
P: Producer,
{
fn next_back(&mut self) -> Option<Self::Item> {
let producer = self.inner.take()?;
if self.len > self.chunk_size {
let mut size = self.len % self.chunk_size;
if size == 0 {
size = self.chunk_size;
}
let (left, right) = producer.split_at(self.len - size);
self.inner = Some(left);
self.len -= size;
Some(right.into_iter())
} else {
debug_assert!(self.len > 0);
self.len = 0;
Some(producer.into_iter())
}
}
}
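
Editor's sketch: ten elements in chunks of three yield `div_round_up(10, 3) == 4` chunks, the last one short.

use rayon::prelude::*;

fn main() {
    let v: Vec<i32> = (1..=10).collect();
    let sums: Vec<i32> = v
        .par_iter()
        .chunks(3)
        .map(|chunk| chunk.into_iter().sum())
        .collect();
    assert_eq!(sums, vec![6, 15, 24, 10]);
}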

vendor/rayon/src/iter/cloned.rs vendored Normal file

@@ -0,0 +1,223 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Cloned` is an iterator that clones the elements of an underlying iterator.
///
/// This struct is created by the [`cloned()`] method on [`ParallelIterator`].
///
/// [`cloned()`]: trait.ParallelIterator.html#method.cloned
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Cloned<I: ParallelIterator> {
base: I,
}
impl<I> Cloned<I>
where
I: ParallelIterator,
{
/// Creates a new `Cloned` iterator.
pub(super) fn new(base: I) -> Self {
Cloned { base }
}
}
impl<'a, T, I> ParallelIterator for Cloned<I>
where
I: ParallelIterator<Item = &'a T>,
T: 'a + Clone + Send + Sync,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = ClonedConsumer::new(consumer);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<'a, T, I> IndexedParallelIterator for Cloned<I>
where
I: IndexedParallelIterator<Item = &'a T>,
T: 'a + Clone + Send + Sync,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = ClonedConsumer::new(consumer);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<'a, T, CB> ProducerCallback<&'a T> for Callback<CB>
where
CB: ProducerCallback<T>,
T: 'a + Clone + Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = &'a T>,
{
let producer = ClonedProducer { base };
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct ClonedProducer<P> {
base: P,
}
impl<'a, T, P> Producer for ClonedProducer<P>
where
P: Producer<Item = &'a T>,
T: 'a + Clone,
{
type Item = T;
type IntoIter = iter::Cloned<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().cloned()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
ClonedProducer { base: left },
ClonedProducer { base: right },
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(ClonedFolder { base: folder }).base
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct ClonedConsumer<C> {
base: C,
}
impl<C> ClonedConsumer<C> {
fn new(base: C) -> Self {
ClonedConsumer { base }
}
}
impl<'a, T, C> Consumer<&'a T> for ClonedConsumer<C>
where
C: Consumer<T>,
T: 'a + Clone,
{
type Folder = ClonedFolder<C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
ClonedConsumer::new(left),
ClonedConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
ClonedFolder {
base: self.base.into_folder(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'a, T, C> UnindexedConsumer<&'a T> for ClonedConsumer<C>
where
C: UnindexedConsumer<T>,
T: 'a + Clone,
{
fn split_off_left(&self) -> Self {
ClonedConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct ClonedFolder<F> {
base: F,
}
impl<'a, T, F> Folder<&'a T> for ClonedFolder<F>
where
F: Folder<T>,
T: 'a + Clone,
{
type Result = F::Result;
fn consume(self, item: &'a T) -> Self {
ClonedFolder {
base: self.base.consume(item.clone()),
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = &'a T>,
{
self.base = self.base.consume_iter(iter.into_iter().cloned());
self
}
fn complete(self) -> F::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
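
Editor's sketch: `cloned()` turns `&T` items into owned values, with the clones performed inside the worker threads rather than up front.

use rayon::prelude::*;

fn main() {
    let words = vec!["alpha".to_string(), "beta".to_string()];
    let owned: Vec<String> = words.par_iter().cloned().collect();
    assert_eq!(owned, words);
}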

vendor/rayon/src/iter/collect/consumer.rs vendored Normal file

@@ -0,0 +1,186 @@
use super::super::plumbing::*;
use crate::SendPtr;
use std::marker::PhantomData;
use std::ptr;
use std::slice;
pub(super) struct CollectConsumer<'c, T: Send> {
/// See `CollectResult` for explanation of why this is not a slice
start: SendPtr<T>,
len: usize,
marker: PhantomData<&'c mut T>,
}
impl<T: Send> CollectConsumer<'_, T> {
/// Create a collector for `len` items in the unused capacity of the vector.
pub(super) fn appender(vec: &mut Vec<T>, len: usize) -> CollectConsumer<'_, T> {
let start = vec.len();
assert!(vec.capacity() - start >= len);
// SAFETY: We already made sure to have the additional space allocated.
// The pointer is derived from `Vec` directly, not through a `Deref`,
// so it has provenance over the whole allocation.
unsafe { CollectConsumer::new(vec.as_mut_ptr().add(start), len) }
}
}
impl<'c, T: Send + 'c> CollectConsumer<'c, T> {
/// The target memory is considered uninitialized, and will be
/// overwritten without reading or dropping existing values.
unsafe fn new(start: *mut T, len: usize) -> Self {
CollectConsumer {
start: SendPtr(start),
len,
marker: PhantomData,
}
}
}
/// CollectResult represents an initialized part of the target slice.
///
/// This is a proxy owner of the elements in the slice; when it drops,
/// the elements will be dropped, unless its ownership is released before then.
#[must_use]
pub(super) struct CollectResult<'c, T> {
/// This pointer and length has the same representation as a slice,
/// but retains the provenance of the entire array so that we can merge
/// these regions together in `CollectReducer`.
start: SendPtr<T>,
total_len: usize,
/// The current initialized length after `start`
initialized_len: usize,
/// Lifetime invariance guarantees that the data flows from consumer to result,
/// especially for the `scope_fn` callback in `Collect::with_consumer`.
invariant_lifetime: PhantomData<&'c mut &'c mut [T]>,
}
unsafe impl<'c, T> Send for CollectResult<'c, T> where T: Send {}
impl<'c, T> CollectResult<'c, T> {
/// The current length of the collect result
pub(super) fn len(&self) -> usize {
self.initialized_len
}
/// Release ownership of the slice of elements, and return the length
pub(super) fn release_ownership(mut self) -> usize {
let ret = self.initialized_len;
self.initialized_len = 0;
ret
}
}
impl<'c, T> Drop for CollectResult<'c, T> {
fn drop(&mut self) {
// Drop the first `self.initialized_len` elements, which have been recorded
// to be initialized by the folder.
unsafe {
ptr::drop_in_place(slice::from_raw_parts_mut(
self.start.0,
self.initialized_len,
));
}
}
}
impl<'c, T: Send + 'c> Consumer<T> for CollectConsumer<'c, T> {
type Folder = CollectResult<'c, T>;
type Reducer = CollectReducer;
type Result = CollectResult<'c, T>;
fn split_at(self, index: usize) -> (Self, Self, CollectReducer) {
let CollectConsumer { start, len, .. } = self;
// Produce new consumers.
// SAFETY: This assert checks that `index` is a valid offset for `start`
unsafe {
assert!(index <= len);
(
CollectConsumer::new(start.0, index),
CollectConsumer::new(start.0.add(index), len - index),
CollectReducer,
)
}
}
fn into_folder(self) -> Self::Folder {
// Create a result/folder that consumes values and writes them
// into the region after start. The initial result has length 0.
CollectResult {
start: self.start,
total_len: self.len,
initialized_len: 0,
invariant_lifetime: PhantomData,
}
}
fn full(&self) -> bool {
false
}
}
impl<'c, T: Send + 'c> Folder<T> for CollectResult<'c, T> {
type Result = Self;
fn consume(mut self, item: T) -> Self {
assert!(
self.initialized_len < self.total_len,
"too many values pushed to consumer"
);
// SAFETY: The assert above is a bounds check for this write, and we
// avoid assignment here so we do not drop an uninitialized T.
unsafe {
// Write item and increase the initialized length
self.start.0.add(self.initialized_len).write(item);
self.initialized_len += 1;
}
self
}
fn complete(self) -> Self::Result {
// NB: We don't explicitly check that the local writes were complete,
// but Collect will assert the total result length in the end.
self
}
fn full(&self) -> bool {
false
}
}
/// Pretend to be unindexed for `special_extend`,
/// but we should never actually get used that way...
impl<'c, T: Send + 'c> UnindexedConsumer<T> for CollectConsumer<'c, T> {
fn split_off_left(&self) -> Self {
unreachable!("CollectConsumer must be indexed!")
}
fn to_reducer(&self) -> Self::Reducer {
CollectReducer
}
}
/// CollectReducer combines adjacent chunks; the result must always
/// be contiguous so that it is one combined slice.
pub(super) struct CollectReducer;
impl<'c, T> Reducer<CollectResult<'c, T>> for CollectReducer {
fn reduce(
self,
mut left: CollectResult<'c, T>,
right: CollectResult<'c, T>,
) -> CollectResult<'c, T> {
// Merge if the CollectResults are adjacent and in left to right order
// else: drop the right piece now and total length will end up short in the end,
// when the correctness of the collected result is asserted.
unsafe {
let left_end = left.start.0.add(left.initialized_len);
if left_end == right.start.0 {
left.total_len += right.total_len;
left.initialized_len += right.release_ownership();
}
left
}
}
}

vendor/rayon/src/iter/collect/mod.rs vendored Normal file

@@ -0,0 +1,116 @@
use super::{IndexedParallelIterator, ParallelIterator};
mod consumer;
use self::consumer::CollectConsumer;
use self::consumer::CollectResult;
use super::unzip::unzip_indexed;
mod test;
/// Collects the results of the exact iterator into the specified vector.
///
/// This is called by `IndexedParallelIterator::collect_into_vec`.
pub(super) fn collect_into_vec<I, T>(pi: I, v: &mut Vec<T>)
where
I: IndexedParallelIterator<Item = T>,
T: Send,
{
v.truncate(0); // clear any old data
let len = pi.len();
collect_with_consumer(v, len, |consumer| pi.drive(consumer));
}
/// Collects the results of the iterator into the specified vector.
///
/// Technically, this only works for `IndexedParallelIterator`, but we're faking a
/// bit of specialization here until Rust can do that natively. Callers are
/// using `opt_len` to find the length before calling this, and only exact
/// iterators will return anything but `None` there.
///
/// Since the type system doesn't understand that contract, we have to allow
/// *any* `ParallelIterator` here, and `CollectConsumer` has to also implement
/// `UnindexedConsumer`. That implementation panics `unreachable!` in case
/// there's a bug where we actually do try to use this unindexed.
pub(super) fn special_extend<I, T>(pi: I, len: usize, v: &mut Vec<T>)
where
I: ParallelIterator<Item = T>,
T: Send,
{
collect_with_consumer(v, len, |consumer| pi.drive_unindexed(consumer));
}
/// Unzips the results of the exact iterator into the specified vectors.
///
/// This is called by `IndexedParallelIterator::unzip_into_vecs`.
pub(super) fn unzip_into_vecs<I, A, B>(pi: I, left: &mut Vec<A>, right: &mut Vec<B>)
where
I: IndexedParallelIterator<Item = (A, B)>,
A: Send,
B: Send,
{
// clear any old data
left.truncate(0);
right.truncate(0);
let len = pi.len();
collect_with_consumer(right, len, |right_consumer| {
let mut right_result = None;
collect_with_consumer(left, len, |left_consumer| {
let (left_r, right_r) = unzip_indexed(pi, left_consumer, right_consumer);
right_result = Some(right_r);
left_r
});
right_result.unwrap()
});
}
/// Create a consumer on the slice of memory we are collecting into.
///
/// The consumer needs to be used inside the scope function, and the
/// complete collect result passed back.
///
/// This method will verify the collect result, and panic if the slice
/// was not fully written into. Otherwise, in the successful case,
/// the vector is complete with the collected result.
fn collect_with_consumer<T, F>(vec: &mut Vec<T>, len: usize, scope_fn: F)
where
T: Send,
F: FnOnce(CollectConsumer<'_, T>) -> CollectResult<'_, T>,
{
// Reserve space for `len` more elements in the vector,
vec.reserve(len);
// Create the consumer and run the callback for collection.
let result = scope_fn(CollectConsumer::appender(vec, len));
// The `CollectResult` represents a contiguous part of the slice, that has
// been written to. On unwind here, the `CollectResult` will be dropped. If
// some producers on the way did not produce enough elements, partial
// `CollectResult`s may have been dropped without being reduced to the final
// result, and we will see that as the length coming up short.
//
// Here, we assert that added length is fully initialized. This is checked
// by the following assert, which verifies if a complete `CollectResult`
// was produced; if the length is correct, it is necessarily covering the
// target slice. Since we know that the consumer cannot have escaped from
// `drive` (by parametricity, essentially), we know that any stores that
// will happen, have happened. Unless some code is buggy, that means we
// should have seen `len` total writes.
let actual_writes = result.len();
assert!(
actual_writes == len,
"expected {} total writes, but got {}",
len,
actual_writes
);
// Release the result's mutable borrow and "proxy ownership"
// of the elements, before the vector takes it over.
result.release_ownership();
let new_len = vec.len() + len;
unsafe {
vec.set_len(new_len);
}
}
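
Editor's sketch: the public entry point for this machinery. `collect_into_vec` reserves exactly `len` slots and the consumer above writes them in place, reusing the vector's allocation across calls.

use rayon::prelude::*;

fn main() {
    let mut out = Vec::new();
    (0..1_000).into_par_iter().map(|i| i * 2).collect_into_vec(&mut out);
    assert_eq!(out.len(), 1_000);
    assert_eq!(out[10], 20);
}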

vendor/rayon/src/iter/collect/test.rs vendored Normal file

@@ -0,0 +1,373 @@
#![cfg(test)]
#![allow(unused_assignments)]
// These tests are primarily targeting "abusive" producers that will
// try to drive the "collect consumer" incorrectly. These should
// result in panics.
use super::collect_with_consumer;
use crate::iter::plumbing::*;
use rayon_core::join;
use std::fmt;
use std::panic;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::Result as ThreadResult;
/// Promises to produce 2 items, but then produces 3. Does not do any
/// splits at all.
#[test]
#[should_panic(expected = "too many values")]
fn produce_too_many_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 2, |consumer| {
let mut folder = consumer.into_folder();
folder = folder.consume(22);
folder = folder.consume(23);
folder = folder.consume(24);
unreachable!("folder does not complete")
});
}
/// Produces fewer items than promised. Does not do any
/// splits at all.
#[test]
#[should_panic(expected = "expected 5 total writes, but got 2")]
fn produce_fewer_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 5, |consumer| {
let mut folder = consumer.into_folder();
folder = folder.consume(22);
folder = folder.consume(23);
folder.complete()
});
}
// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn left_produces_items_with_no_complete() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
right_folder.complete()
});
}
// Complete is not called by the right consumer. Hence, the
// collection vector is not fully initialized.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn right_produces_items_with_no_complete() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
left_folder.complete()
});
}
// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn produces_items_with_no_complete() {
let counter = DropCounter::default();
let mut v = vec![];
let panic_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
collect_with_consumer(&mut v, 2, |consumer| {
let mut folder = consumer.into_folder();
folder = folder.consume(counter.element());
folder = folder.consume(counter.element());
panic!("folder does not complete");
});
}));
assert!(v.is_empty());
assert_is_panic_with_message(&panic_result, "folder does not complete");
counter.assert_drop_count();
}
// The left consumer produces too many items while the right
// consumer produces correct number.
#[test]
#[should_panic(expected = "too many values")]
fn left_produces_too_many_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1).consume(2);
right_folder = right_folder.consume(2).consume(3);
let _ = right_folder.complete();
unreachable!("folder does not complete");
});
}
// The right consumer produces too many items while the left
// consumer produces correct number.
#[test]
#[should_panic(expected = "too many values")]
fn right_produces_too_many_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3).consume(4);
let _ = left_folder.complete();
unreachable!("folder does not complete");
});
}
// The left consumer produces fewer items while the right
// consumer produces correct number.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 1")]
fn left_produces_fewer_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0);
right_folder = right_folder.consume(2).consume(3);
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(left_result, right_result)
});
}
// The left and right consumer produce the correct number but
// only left result is returned
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn only_left_result() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
let left_result = left_folder.complete();
let _ = right_folder.complete();
left_result
});
}
// The left and right consumer produce the correct number but
// only right result is returned
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn only_right_result() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
let _ = left_folder.complete();
right_folder.complete()
});
}
// The left and right consumer produce the correct number but reduce
// in the wrong order.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn reducer_does_not_preserve_order() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(right_result, left_result)
});
}
// The right consumer produces fewer items while the left
// consumer produces correct number.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 3")]
fn right_produces_fewer_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2);
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(left_result, right_result)
});
}
// The left consumer panics and the right stops short, like `panic_fuse()`.
// We should get the left panic without finishing `collect_with_consumer`.
#[test]
#[should_panic(expected = "left consumer panic")]
fn left_panics() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let (left_result, right_result) = join(
|| {
let mut left_folder = left_consumer.into_folder();
left_folder = left_folder.consume(0);
panic!("left consumer panic");
},
|| {
let mut right_folder = right_consumer.into_folder();
right_folder = right_folder.consume(2);
right_folder.complete() // early return
},
);
reducer.reduce(left_result, right_result)
});
unreachable!();
}
// The right consumer panics and the left stops short, like `panic_fuse()`.
// We should get the right panic without finishing `collect_with_consumer`.
#[test]
#[should_panic(expected = "right consumer panic")]
fn right_panics() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let (left_result, right_result) = join(
|| {
let mut left_folder = left_consumer.into_folder();
left_folder = left_folder.consume(0);
left_folder.complete() // early return
},
|| {
let mut right_folder = right_consumer.into_folder();
right_folder = right_folder.consume(2);
panic!("right consumer panic");
},
);
reducer.reduce(left_result, right_result)
});
unreachable!();
}
// The left consumer produces fewer items while the right
// consumer produces the correct number; check that the created elements are dropped
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn left_produces_fewer_items_drops() {
let counter = DropCounter::default();
let mut v = vec![];
let panic_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(counter.element());
right_folder = right_folder
.consume(counter.element())
.consume(counter.element());
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(left_result, right_result)
});
}));
assert!(v.is_empty());
assert_is_panic_with_message(&panic_result, "expected 4 total writes, but got 1");
counter.assert_drop_count();
}
/// This counter can create elements, and then count and verify
/// how many of them have actually been dropped.
#[derive(Default)]
struct DropCounter {
created: AtomicUsize,
dropped: AtomicUsize,
}
struct Element<'a>(&'a AtomicUsize);
impl DropCounter {
fn created(&self) -> usize {
self.created.load(Ordering::SeqCst)
}
fn dropped(&self) -> usize {
self.dropped.load(Ordering::SeqCst)
}
fn element(&self) -> Element<'_> {
self.created.fetch_add(1, Ordering::SeqCst);
Element(&self.dropped)
}
fn assert_drop_count(&self) {
assert_eq!(
self.created(),
self.dropped(),
"Expected {} dropped elements, but found {}",
self.created(),
self.dropped()
);
}
}
impl<'a> Drop for Element<'a> {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
/// Assert that the result from `catch_unwind` is a panic that contains the expected message
fn assert_is_panic_with_message<T>(result: &ThreadResult<T>, expected: &str)
where
T: fmt::Debug,
{
match result {
Ok(value) => {
panic!(
"assertion failure: Expected panic, got successful {:?}",
value
);
}
Err(error) => {
let message_str = error.downcast_ref::<&'static str>().cloned();
let message_string = error.downcast_ref::<String>().map(String::as_str);
if let Some(message) = message_str.or(message_string) {
if !message.contains(expected) {
panic!(
"assertion failure: Expected {:?}, but found panic with {:?}",
expected, message
);
}
// assertion passes
} else {
panic!(
"assertion failure: Expected {:?}, but found panic with unknown value",
expected
);
}
}
}
}

223
vendor/rayon/src/iter/copied.rs vendored Normal file
View File

@@ -0,0 +1,223 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Copied` is an iterator that copies the elements of an underlying iterator.
///
/// This struct is created by the [`copied()`] method on [`ParallelIterator`]
///
/// [`copied()`]: trait.ParallelIterator.html#method.copied
/// [`ParallelIterator`]: trait.ParallelIterator.html
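///
/// # Examples
///
/// A minimal sketch of `copied()` in use:
///
/// ```
/// use rayon::prelude::*;
///
/// let v = [1, 2, 3];
/// let sum: i32 = v.par_iter().copied().sum();
/// assert_eq!(sum, 6);
/// ```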
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Copied<I: ParallelIterator> {
base: I,
}
impl<I> Copied<I>
where
I: ParallelIterator,
{
/// Creates a new `Copied` iterator.
pub(super) fn new(base: I) -> Self {
Copied { base }
}
}
impl<'a, T, I> ParallelIterator for Copied<I>
where
I: ParallelIterator<Item = &'a T>,
T: 'a + Copy + Send + Sync,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = CopiedConsumer::new(consumer);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<'a, T, I> IndexedParallelIterator for Copied<I>
where
I: IndexedParallelIterator<Item = &'a T>,
T: 'a + Copy + Send + Sync,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = CopiedConsumer::new(consumer);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<'a, T, CB> ProducerCallback<&'a T> for Callback<CB>
where
CB: ProducerCallback<T>,
T: 'a + Copy + Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = &'a T>,
{
let producer = CopiedProducer { base };
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct CopiedProducer<P> {
base: P,
}
impl<'a, T, P> Producer for CopiedProducer<P>
where
P: Producer<Item = &'a T>,
T: 'a + Copy,
{
type Item = T;
type IntoIter = iter::Copied<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().copied()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
CopiedProducer { base: left },
CopiedProducer { base: right },
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(CopiedFolder { base: folder }).base
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct CopiedConsumer<C> {
base: C,
}
impl<C> CopiedConsumer<C> {
fn new(base: C) -> Self {
CopiedConsumer { base }
}
}
impl<'a, T, C> Consumer<&'a T> for CopiedConsumer<C>
where
C: Consumer<T>,
T: 'a + Copy,
{
type Folder = CopiedFolder<C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
CopiedConsumer::new(left),
CopiedConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
CopiedFolder {
base: self.base.into_folder(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'a, T, C> UnindexedConsumer<&'a T> for CopiedConsumer<C>
where
C: UnindexedConsumer<T>,
T: 'a + Copy,
{
fn split_off_left(&self) -> Self {
CopiedConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct CopiedFolder<F> {
base: F,
}
impl<'a, T, F> Folder<&'a T> for CopiedFolder<F>
where
F: Folder<T>,
T: 'a + Copy,
{
type Result = F::Result;
fn consume(self, &item: &'a T) -> Self {
CopiedFolder {
base: self.base.consume(item),
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = &'a T>,
{
self.base = self.base.consume_iter(iter.into_iter().copied());
self
}
fn complete(self) -> F::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

104
vendor/rayon/src/iter/empty.rs vendored Normal file
View File

@@ -0,0 +1,104 @@
use crate::iter::plumbing::*;
use crate::iter::*;
use std::fmt;
use std::marker::PhantomData;
/// Creates a parallel iterator that produces nothing.
///
/// This admits no parallelism on its own, but it could be used for code that
/// deals with generic parallel iterators.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::empty;
///
/// let pi = (0..1234).into_par_iter()
/// .chain(empty())
/// .chain(1234..10_000);
///
/// assert_eq!(pi.count(), 10_000);
/// ```
pub fn empty<T: Send>() -> Empty<T> {
Empty {
marker: PhantomData,
}
}
/// Iterator adaptor for [the `empty()` function](fn.empty.html).
pub struct Empty<T: Send> {
marker: PhantomData<T>,
}
impl<T: Send> Clone for Empty<T> {
fn clone(&self) -> Self {
empty()
}
}
impl<T: Send> fmt::Debug for Empty<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Empty")
}
}
impl<T: Send> ParallelIterator for Empty<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(0)
}
}
impl<T: Send> IndexedParallelIterator for Empty<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
consumer.into_folder().complete()
}
fn len(&self) -> usize {
0
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(EmptyProducer(PhantomData))
}
}
/// Private empty producer
struct EmptyProducer<T: Send>(PhantomData<T>);
impl<T: Send> Producer for EmptyProducer<T> {
type Item = T;
type IntoIter = std::iter::Empty<T>;
fn into_iter(self) -> Self::IntoIter {
std::iter::empty()
}
fn split_at(self, index: usize) -> (Self, Self) {
debug_assert_eq!(index, 0);
(self, EmptyProducer(PhantomData))
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder
}
}

133
vendor/rayon/src/iter/enumerate.rs vendored Normal file
View File

@@ -0,0 +1,133 @@
use super::plumbing::*;
use super::*;
use std::iter;
use std::ops::Range;
use std::usize;
/// `Enumerate` is an iterator that returns the current count along with the element.
/// This struct is created by the [`enumerate()`] method on [`IndexedParallelIterator`]
///
/// [`enumerate()`]: trait.IndexedParallelIterator.html#method.enumerate
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
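///
/// # Examples
///
/// A minimal sketch of `enumerate()` in use:
///
/// ```
/// use rayon::prelude::*;
///
/// let chars = ['a', 'b', 'c'];
/// let pairs: Vec<(usize, &char)> = chars.par_iter().enumerate().collect();
/// assert_eq!(pairs, [(0, &'a'), (1, &'b'), (2, &'c')]);
/// ```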
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Enumerate<I: IndexedParallelIterator> {
base: I,
}
impl<I> Enumerate<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Enumerate` iterator.
pub(super) fn new(base: I) -> Self {
Enumerate { base }
}
}
impl<I> ParallelIterator for Enumerate<I>
where
I: IndexedParallelIterator,
{
type Item = (usize, I::Item);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Enumerate<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<I, CB> ProducerCallback<I> for Callback<CB>
where
CB: ProducerCallback<(usize, I)>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = I>,
{
let producer = EnumerateProducer { base, offset: 0 };
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Producer implementation
struct EnumerateProducer<P> {
base: P,
offset: usize,
}
impl<P> Producer for EnumerateProducer<P>
where
P: Producer,
{
type Item = (usize, P::Item);
type IntoIter = iter::Zip<Range<usize>, P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
// Enumerate only works for IndexedParallelIterators. Since those
// have a max length of usize::MAX, their max index is
// usize::MAX - 1, so the range 0..usize::MAX includes all
// possible indices.
//
// However, we should use a precise end to the range; otherwise,
// reversing the iterator may have to walk back a long way before
// `Zip::next_back` can produce anything.
let base = self.base.into_iter();
let end = self.offset + base.len();
(self.offset..end).zip(base)
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
EnumerateProducer {
base: left,
offset: self.offset,
},
EnumerateProducer {
base: right,
offset: self.offset + index,
},
)
}
}

614
vendor/rayon/src/iter/extend.rs vendored Normal file
View File

@@ -0,0 +1,614 @@
use super::noop::NoopConsumer;
use super::plumbing::{Consumer, Folder, Reducer, UnindexedConsumer};
use super::{IntoParallelIterator, ParallelExtend, ParallelIterator};
use std::borrow::Cow;
use std::collections::LinkedList;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BinaryHeap, VecDeque};
use std::hash::{BuildHasher, Hash};
/// Performs a generic `par_extend` by collecting to a `LinkedList<Vec<_>>` in
/// parallel, then extending the collection sequentially.
macro_rules! extend {
($self:ident, $par_iter:ident, $extend:ident) => {
$extend(
$self,
$par_iter.into_par_iter().drive_unindexed(ListVecConsumer),
);
};
}
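//
// A minimal sketch of the behavior this enables, via the `ParallelExtend`
// impls defined below:
//
//     use rayon::prelude::*;
//     let mut v = vec![0];
//     v.par_extend((1..4).into_par_iter());
//     assert_eq!(v, [0, 1, 2, 3]);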
/// Computes the total length of a `LinkedList<Vec<_>>`.
fn len<T>(list: &LinkedList<Vec<T>>) -> usize {
list.iter().map(Vec::len).sum()
}
struct ListVecConsumer;
struct ListVecFolder<T> {
vec: Vec<T>,
}
impl<T: Send> Consumer<T> for ListVecConsumer {
type Folder = ListVecFolder<T>;
type Reducer = ListReducer;
type Result = LinkedList<Vec<T>>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(Self, Self, ListReducer)
}
fn into_folder(self) -> Self::Folder {
ListVecFolder { vec: Vec::new() }
}
fn full(&self) -> bool {
false
}
}
impl<T: Send> UnindexedConsumer<T> for ListVecConsumer {
fn split_off_left(&self) -> Self {
Self
}
fn to_reducer(&self) -> Self::Reducer {
ListReducer
}
}
impl<T> Folder<T> for ListVecFolder<T> {
type Result = LinkedList<Vec<T>>;
fn consume(mut self, item: T) -> Self {
self.vec.push(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.vec.extend(iter);
self
}
fn complete(self) -> Self::Result {
let mut list = LinkedList::new();
if !self.vec.is_empty() {
list.push_back(self.vec);
}
list
}
fn full(&self) -> bool {
false
}
}
fn heap_extend<T, Item>(heap: &mut BinaryHeap<T>, list: LinkedList<Vec<Item>>)
where
BinaryHeap<T>: Extend<Item>,
{
heap.reserve(len(&list));
for vec in list {
heap.extend(vec);
}
}
/// Extends a binary heap with items from a parallel iterator.
impl<T> ParallelExtend<T> for BinaryHeap<T>
where
T: Ord + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend!(self, par_iter, heap_extend);
}
}
/// Extends a binary heap with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for BinaryHeap<T>
where
T: 'a + Copy + Ord + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend!(self, par_iter, heap_extend);
}
}
fn btree_map_extend<K, V, Item>(map: &mut BTreeMap<K, V>, list: LinkedList<Vec<Item>>)
where
BTreeMap<K, V>: Extend<Item>,
{
for vec in list {
map.extend(vec);
}
}
/// Extends a B-tree map with items from a parallel iterator.
impl<K, V> ParallelExtend<(K, V)> for BTreeMap<K, V>
where
K: Ord + Send,
V: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
extend!(self, par_iter, btree_map_extend);
}
}
/// Extends a B-tree map with copied items from a parallel iterator.
impl<'a, K: 'a, V: 'a> ParallelExtend<(&'a K, &'a V)> for BTreeMap<K, V>
where
K: Copy + Ord + Send + Sync,
V: Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend!(self, par_iter, btree_map_extend);
}
}
fn btree_set_extend<T, Item>(set: &mut BTreeSet<T>, list: LinkedList<Vec<Item>>)
where
BTreeSet<T>: Extend<Item>,
{
for vec in list {
set.extend(vec);
}
}
/// Extends a B-tree set with items from a parallel iterator.
impl<T> ParallelExtend<T> for BTreeSet<T>
where
T: Ord + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend!(self, par_iter, btree_set_extend);
}
}
/// Extends a B-tree set with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for BTreeSet<T>
where
T: 'a + Copy + Ord + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend!(self, par_iter, btree_set_extend);
}
}
fn hash_map_extend<K, V, S, Item>(map: &mut HashMap<K, V, S>, list: LinkedList<Vec<Item>>)
where
HashMap<K, V, S>: Extend<Item>,
K: Eq + Hash,
S: BuildHasher,
{
map.reserve(len(&list));
for vec in list {
map.extend(vec);
}
}
/// Extends a hash map with items from a parallel iterator.
impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
// See the map_collect benchmarks in rayon-demo for different strategies.
extend!(self, par_iter, hash_map_extend);
}
}
/// Extends a hash map with copied items from a parallel iterator.
impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S>
where
K: Copy + Eq + Hash + Send + Sync,
V: Copy + Send + Sync,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend!(self, par_iter, hash_map_extend);
}
}
fn hash_set_extend<T, S, Item>(set: &mut HashSet<T, S>, list: LinkedList<Vec<Item>>)
where
HashSet<T, S>: Extend<Item>,
T: Eq + Hash,
S: BuildHasher,
{
set.reserve(len(&list));
for vec in list {
set.extend(vec);
}
}
/// Extends a hash set with items from a parallel iterator.
impl<T, S> ParallelExtend<T> for HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend!(self, par_iter, hash_set_extend);
}
}
/// Extends a hash set with copied items from a parallel iterator.
impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S>
where
T: 'a + Copy + Eq + Hash + Send + Sync,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend!(self, par_iter, hash_set_extend);
}
}
/// Extends a linked list with items from a parallel iterator.
impl<T> ParallelExtend<T> for LinkedList<T>
where
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
let mut list = par_iter.into_par_iter().drive_unindexed(ListConsumer);
self.append(&mut list);
}
}
/// Extends a linked list with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for LinkedList<T>
where
T: 'a + Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
self.par_extend(par_iter.into_par_iter().copied())
}
}
struct ListConsumer;
struct ListFolder<T> {
list: LinkedList<T>,
}
struct ListReducer;
impl<T: Send> Consumer<T> for ListConsumer {
type Folder = ListFolder<T>;
type Reducer = ListReducer;
type Result = LinkedList<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(Self, Self, ListReducer)
}
fn into_folder(self) -> Self::Folder {
ListFolder {
list: LinkedList::new(),
}
}
fn full(&self) -> bool {
false
}
}
impl<T: Send> UnindexedConsumer<T> for ListConsumer {
fn split_off_left(&self) -> Self {
Self
}
fn to_reducer(&self) -> Self::Reducer {
ListReducer
}
}
impl<T> Folder<T> for ListFolder<T> {
type Result = LinkedList<T>;
fn consume(mut self, item: T) -> Self {
self.list.push_back(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.list.extend(iter);
self
}
fn complete(self) -> Self::Result {
self.list
}
fn full(&self) -> bool {
false
}
}
impl<T> Reducer<LinkedList<T>> for ListReducer {
fn reduce(self, mut left: LinkedList<T>, mut right: LinkedList<T>) -> LinkedList<T> {
left.append(&mut right);
left
}
}
fn flat_string_extend(string: &mut String, list: LinkedList<String>) {
string.reserve(list.iter().map(String::len).sum());
string.extend(list);
}
/// Extends a string with characters from a parallel iterator.
impl ParallelExtend<char> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = char>,
{
// This is like `extend`, but `Vec<char>` is less efficient to deal
// with than `String`, so instead collect to `LinkedList<String>`.
let list = par_iter.into_par_iter().drive_unindexed(ListStringConsumer);
flat_string_extend(self, list);
}
}
/// Extends a string with copied characters from a parallel iterator.
impl<'a> ParallelExtend<&'a char> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a char>,
{
self.par_extend(par_iter.into_par_iter().copied())
}
}
struct ListStringConsumer;
struct ListStringFolder {
string: String,
}
impl Consumer<char> for ListStringConsumer {
type Folder = ListStringFolder;
type Reducer = ListReducer;
type Result = LinkedList<String>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(Self, Self, ListReducer)
}
fn into_folder(self) -> Self::Folder {
ListStringFolder {
string: String::new(),
}
}
fn full(&self) -> bool {
false
}
}
impl UnindexedConsumer<char> for ListStringConsumer {
fn split_off_left(&self) -> Self {
Self
}
fn to_reducer(&self) -> Self::Reducer {
ListReducer
}
}
impl Folder<char> for ListStringFolder {
type Result = LinkedList<String>;
fn consume(mut self, item: char) -> Self {
self.string.push(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = char>,
{
self.string.extend(iter);
self
}
fn complete(self) -> Self::Result {
let mut list = LinkedList::new();
if !self.string.is_empty() {
list.push_back(self.string);
}
list
}
fn full(&self) -> bool {
false
}
}
fn string_extend<Item>(string: &mut String, list: LinkedList<Vec<Item>>)
where
String: Extend<Item>,
Item: AsRef<str>,
{
let len = list.iter().flatten().map(Item::as_ref).map(str::len).sum();
string.reserve(len);
for vec in list {
string.extend(vec);
}
}
/// Extends a string with string slices from a parallel iterator.
impl<'a> ParallelExtend<&'a str> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a str>,
{
extend!(self, par_iter, string_extend);
}
}
/// Extends a string with strings from a parallel iterator.
impl ParallelExtend<String> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = String>,
{
extend!(self, par_iter, string_extend);
}
}
/// Extends a string with boxed strings from a parallel iterator.
impl ParallelExtend<Box<str>> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = Box<str>>,
{
extend!(self, par_iter, string_extend);
}
}
/// Extends a string with copy-on-write strings from a parallel iterator.
impl<'a> ParallelExtend<Cow<'a, str>> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = Cow<'a, str>>,
{
extend!(self, par_iter, string_extend);
}
}
fn deque_extend<T, Item>(deque: &mut VecDeque<T>, list: LinkedList<Vec<Item>>)
where
VecDeque<T>: Extend<Item>,
{
deque.reserve(len(&list));
for vec in list {
deque.extend(vec);
}
}
/// Extends a deque with items from a parallel iterator.
impl<T> ParallelExtend<T> for VecDeque<T>
where
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend!(self, par_iter, deque_extend);
}
}
/// Extends a deque with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for VecDeque<T>
where
T: 'a + Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend!(self, par_iter, deque_extend);
}
}
fn vec_append<T>(vec: &mut Vec<T>, list: LinkedList<Vec<T>>) {
vec.reserve(len(&list));
for mut other in list {
vec.append(&mut other);
}
}
/// Extends a vector with items from a parallel iterator.
impl<T> ParallelExtend<T> for Vec<T>
where
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
// See the vec_collect benchmarks in rayon-demo for different strategies.
let par_iter = par_iter.into_par_iter();
match par_iter.opt_len() {
Some(len) => {
// When Rust gets specialization, we can get here for indexed iterators
// without relying on `opt_len`. Until then, `special_extend()` fakes
// an unindexed mode on the promise that `opt_len()` is accurate.
super::collect::special_extend(par_iter, len, self);
}
None => {
// This works like `extend`, but `Vec::append` is more efficient.
let list = par_iter.drive_unindexed(ListVecConsumer);
vec_append(self, list);
}
}
}
}
/// Extends a vector with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for Vec<T>
where
T: 'a + Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
self.par_extend(par_iter.into_par_iter().copied())
}
}
/// Collapses all unit items from a parallel iterator into one.
impl ParallelExtend<()> for () {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = ()>,
{
par_iter.into_par_iter().drive_unindexed(NoopConsumer)
}
}

141
vendor/rayon/src/iter/filter.rs vendored Normal file
View File

@@ -0,0 +1,141 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `Filter` takes a predicate `filter_op` and keeps only the elements that match it.
/// This struct is created by the [`filter()`] method on [`ParallelIterator`]
///
/// [`filter()`]: trait.ParallelIterator.html#method.filter
/// [`ParallelIterator`]: trait.ParallelIterator.html
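///
/// # Examples
///
/// A minimal sketch of `filter()` in use:
///
/// ```
/// use rayon::prelude::*;
///
/// let evens: Vec<i32> = (0..10).into_par_iter()
///     .filter(|x| x % 2 == 0)
///     .collect();
/// assert_eq!(evens, [0, 2, 4, 6, 8]);
/// ```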
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Filter<I: ParallelIterator, P> {
base: I,
filter_op: P,
}
impl<I: ParallelIterator + Debug, P> Debug for Filter<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter").field("base", &self.base).finish()
}
}
impl<I, P> Filter<I, P>
where
I: ParallelIterator,
{
/// Creates a new `Filter` iterator.
pub(super) fn new(base: I, filter_op: P) -> Self {
Filter { base, filter_op }
}
}
impl<I, P> ParallelIterator for Filter<I, P>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = FilterConsumer::new(consumer, &self.filter_op);
self.base.drive_unindexed(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct FilterConsumer<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P> FilterConsumer<'p, C, P> {
fn new(base: C, filter_op: &'p P) -> Self {
FilterConsumer { base, filter_op }
}
}
impl<'p, T, C, P: 'p> Consumer<T> for FilterConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&T) -> bool + Sync,
{
type Folder = FilterFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FilterConsumer::new(left, self.filter_op),
FilterConsumer::new(right, self.filter_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FilterFolder {
base: self.base.into_folder(),
filter_op: self.filter_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, C, P: 'p> UnindexedConsumer<T> for FilterConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
FilterConsumer::new(self.base.split_off_left(), self.filter_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FilterFolder<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P, T> Folder<T> for FilterFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&T) -> bool + 'p,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let filter_op = self.filter_op;
if filter_op(&item) {
let base = self.base.consume(item);
FilterFolder { base, filter_op }
} else {
self
}
}
// This cannot easily specialize `consume_iter` to be better than
// the default, because that requires checking `self.base.full()`
// during a call to `self.base.consume_iter()`. (#632)
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

142
vendor/rayon/src/iter/filter_map.rs vendored Normal file
View File

@@ -0,0 +1,142 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `FilterMap` creates an iterator that uses `filter_op` to both filter and map elements.
/// This struct is created by the [`filter_map()`] method on [`ParallelIterator`].
///
/// [`filter_map()`]: trait.ParallelIterator.html#method.filter_map
/// [`ParallelIterator`]: trait.ParallelIterator.html
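///
/// # Examples
///
/// A minimal sketch of `filter_map()` in use:
///
/// ```
/// use rayon::prelude::*;
///
/// let squares: Vec<i32> = (0..10).into_par_iter()
///     .filter_map(|x| if x % 3 == 0 { Some(x * x) } else { None })
///     .collect();
/// assert_eq!(squares, [0, 9, 36, 81]);
/// ```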
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FilterMap<I: ParallelIterator, P> {
base: I,
filter_op: P,
}
impl<I: ParallelIterator + Debug, P> Debug for FilterMap<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FilterMap")
.field("base", &self.base)
.finish()
}
}
impl<I: ParallelIterator, P> FilterMap<I, P> {
/// Creates a new `FilterMap` iterator.
pub(super) fn new(base: I, filter_op: P) -> Self {
FilterMap { base, filter_op }
}
}
impl<I, P, R> ParallelIterator for FilterMap<I, P>
where
I: ParallelIterator,
P: Fn(I::Item) -> Option<R> + Sync + Send,
R: Send,
{
type Item = R;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FilterMapConsumer::new(consumer, &self.filter_op);
self.base.drive_unindexed(consumer)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct FilterMapConsumer<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P: 'p> FilterMapConsumer<'p, C, P> {
fn new(base: C, filter_op: &'p P) -> Self {
FilterMapConsumer { base, filter_op }
}
}
impl<'p, T, U, C, P> Consumer<T> for FilterMapConsumer<'p, C, P>
where
C: Consumer<U>,
P: Fn(T) -> Option<U> + Sync + 'p,
{
type Folder = FilterMapFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FilterMapConsumer::new(left, self.filter_op),
FilterMapConsumer::new(right, self.filter_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
let base = self.base.into_folder();
FilterMapFolder {
base,
filter_op: self.filter_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, U, C, P> UnindexedConsumer<T> for FilterMapConsumer<'p, C, P>
where
C: UnindexedConsumer<U>,
P: Fn(T) -> Option<U> + Sync + 'p,
{
fn split_off_left(&self) -> Self {
FilterMapConsumer::new(self.base.split_off_left(), self.filter_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FilterMapFolder<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, T, U, C, P> Folder<T> for FilterMapFolder<'p, C, P>
where
C: Folder<U>,
P: Fn(T) -> Option<U> + Sync + 'p,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let filter_op = self.filter_op;
if let Some(mapped_item) = filter_op(item) {
let base = self.base.consume(mapped_item);
FilterMapFolder { base, filter_op }
} else {
self
}
}
// This cannot easily specialize `consume_iter` to be better than
// the default, because that requires checking `self.base.full()`
// during a call to `self.base.consume_iter()`. (#632)
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

120
vendor/rayon/src/iter/find.rs vendored Normal file
View File

@@ -0,0 +1,120 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicBool, Ordering};
pub(super) fn find<I, P>(pi: I, find_op: P) -> Option<I::Item>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync,
{
let found = AtomicBool::new(false);
let consumer = FindConsumer::new(&find_op, &found);
pi.drive_unindexed(consumer)
}
struct FindConsumer<'p, P> {
find_op: &'p P,
found: &'p AtomicBool,
}
impl<'p, P> FindConsumer<'p, P> {
fn new(find_op: &'p P, found: &'p AtomicBool) -> Self {
FindConsumer { find_op, found }
}
}
impl<'p, T, P: 'p> Consumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
type Folder = FindFolder<'p, T, P>;
type Reducer = FindReducer;
type Result = Option<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(self.split_off_left(), self, FindReducer)
}
fn into_folder(self) -> Self::Folder {
FindFolder {
find_op: self.find_op,
found: self.found,
item: None,
}
}
fn full(&self) -> bool {
self.found.load(Ordering::Relaxed)
}
}
impl<'p, T, P: 'p> UnindexedConsumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
FindConsumer::new(self.find_op, self.found)
}
fn to_reducer(&self) -> Self::Reducer {
FindReducer
}
}
struct FindFolder<'p, T, P> {
find_op: &'p P,
found: &'p AtomicBool,
item: Option<T>,
}
impl<'p, T, P> Folder<T> for FindFolder<'p, T, P>
where
P: Fn(&T) -> bool + 'p,
{
type Result = Option<T>;
fn consume(mut self, item: T) -> Self {
if (self.find_op)(&item) {
self.found.store(true, Ordering::Relaxed);
self.item = Some(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
fn not_full<T>(found: &AtomicBool) -> impl Fn(&T) -> bool + '_ {
move |_| !found.load(Ordering::Relaxed)
}
self.item = iter
.into_iter()
// stop iterating if another thread has found something
.take_while(not_full(self.found))
.find(self.find_op);
if self.item.is_some() {
self.found.store(true, Ordering::Relaxed)
}
self
}
fn complete(self) -> Self::Result {
self.item
}
fn full(&self) -> bool {
self.found.load(Ordering::Relaxed)
}
}
struct FindReducer;
impl<T> Reducer<Option<T>> for FindReducer {
fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
left.or(right)
}
}

238
vendor/rayon/src/iter/find_first_last/mod.rs vendored Normal file
View File

@@ -0,0 +1,238 @@
use super::plumbing::*;
use super::*;
use std::cell::Cell;
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(test)]
mod test;
// The key optimization for find_first is that a consumer can stop its search if
// some consumer to its left already found a match (and similarly for consumers
// to the right for find_last). To make this work, all consumers need some
// notion of their position in the data relative to other consumers, including
// unindexed consumers that have no built-in notion of position.
//
// To solve this, we assign each consumer a lower and upper bound for an
// imaginary "range" of data that it consumes. The initial consumer starts with
// the range 0..usize::max_value(). The split divides this range in half so that
// one resulting consumer has the range 0..(usize::max_value() / 2), and the
// other has (usize::max_value() / 2)..usize::max_value(). Every subsequent
// split divides the range in half again until it cannot be split anymore
// (i.e. its length is 1), in which case the split returns two consumers with
// the same range. In that case both consumers will continue to consume all
// their data regardless of whether a better match is found, but the reducer
// will still return the correct answer.
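//
// As a worked example, imagine an 8-bit index space instead of usize: the
// initial consumer covers 0..256, the first split yields 0..128 and
// 128..256, the next splits yield 0..64, 64..128, and so on. After eight
// halvings every range has length 1 and cannot be divided further.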
#[derive(Copy, Clone)]
enum MatchPosition {
Leftmost,
Rightmost,
}
/// Returns true if pos1 is a better match than pos2 according to MatchPosition
#[inline]
fn better_position(pos1: usize, pos2: usize, mp: MatchPosition) -> bool {
match mp {
MatchPosition::Leftmost => pos1 < pos2,
MatchPosition::Rightmost => pos1 > pos2,
}
}
pub(super) fn find_first<I, P>(pi: I, find_op: P) -> Option<I::Item>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync,
{
let best_found = AtomicUsize::new(usize::max_value());
let consumer = FindConsumer::new(&find_op, MatchPosition::Leftmost, &best_found);
pi.drive_unindexed(consumer)
}
pub(super) fn find_last<I, P>(pi: I, find_op: P) -> Option<I::Item>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync,
{
let best_found = AtomicUsize::new(0);
let consumer = FindConsumer::new(&find_op, MatchPosition::Rightmost, &best_found);
pi.drive_unindexed(consumer)
}
struct FindConsumer<'p, P> {
find_op: &'p P,
lower_bound: Cell<usize>,
upper_bound: usize,
match_position: MatchPosition,
best_found: &'p AtomicUsize,
}
impl<'p, P> FindConsumer<'p, P> {
fn new(find_op: &'p P, match_position: MatchPosition, best_found: &'p AtomicUsize) -> Self {
FindConsumer {
find_op,
lower_bound: Cell::new(0),
upper_bound: usize::max_value(),
match_position,
best_found,
}
}
fn current_index(&self) -> usize {
match self.match_position {
MatchPosition::Leftmost => self.lower_bound.get(),
MatchPosition::Rightmost => self.upper_bound,
}
}
}
impl<'p, T, P> Consumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
type Folder = FindFolder<'p, T, P>;
type Reducer = FindReducer;
type Result = Option<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
let dir = self.match_position;
(
self.split_off_left(),
self,
FindReducer {
match_position: dir,
},
)
}
fn into_folder(self) -> Self::Folder {
FindFolder {
find_op: self.find_op,
boundary: self.current_index(),
match_position: self.match_position,
best_found: self.best_found,
item: None,
}
}
fn full(&self) -> bool {
// can stop consuming if the best found index so far is *strictly*
// better than anything this consumer will find
better_position(
self.best_found.load(Ordering::Relaxed),
self.current_index(),
self.match_position,
)
}
}
impl<'p, T, P> UnindexedConsumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
// Upper bound for one consumer will be lower bound for the other. This
// overlap is okay, because only one of the bounds will be used for
// comparing against best_found; the other is kept only to be able to
// divide the range in half.
//
// When the resolution of usize has been exhausted (i.e. when
// upper_bound = lower_bound), both results of this split will have the
// same range. When that happens, we lose the ability to tell one
// consumer to stop working when the other finds a better match, but the
// reducer ensures that the best answer is still returned (see the test
// above).
let old_lower_bound = self.lower_bound.get();
let median = old_lower_bound + ((self.upper_bound - old_lower_bound) / 2);
self.lower_bound.set(median);
FindConsumer {
find_op: self.find_op,
lower_bound: Cell::new(old_lower_bound),
upper_bound: median,
match_position: self.match_position,
best_found: self.best_found,
}
}
fn to_reducer(&self) -> Self::Reducer {
FindReducer {
match_position: self.match_position,
}
}
}
struct FindFolder<'p, T, P> {
find_op: &'p P,
boundary: usize,
match_position: MatchPosition,
best_found: &'p AtomicUsize,
item: Option<T>,
}
impl<'p, P: 'p + Fn(&T) -> bool, T> Folder<T> for FindFolder<'p, T, P> {
type Result = Option<T>;
fn consume(mut self, item: T) -> Self {
let found_best_in_range = match self.match_position {
MatchPosition::Leftmost => self.item.is_some(),
MatchPosition::Rightmost => false,
};
if !found_best_in_range && (self.find_op)(&item) {
// Continuously try to set best_found until we succeed or we
// discover a better match was already found.
let mut current = self.best_found.load(Ordering::Relaxed);
loop {
if better_position(current, self.boundary, self.match_position) {
break;
}
match self.best_found.compare_exchange_weak(
current,
self.boundary,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => {
self.item = Some(item);
break;
}
Err(v) => current = v,
}
}
}
self
}
fn complete(self) -> Self::Result {
self.item
}
fn full(&self) -> bool {
let found_best_in_range = match self.match_position {
MatchPosition::Leftmost => self.item.is_some(),
MatchPosition::Rightmost => false,
};
found_best_in_range
|| better_position(
self.best_found.load(Ordering::Relaxed),
self.boundary,
self.match_position,
)
}
}
struct FindReducer {
match_position: MatchPosition,
}
impl<T> Reducer<Option<T>> for FindReducer {
fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
match self.match_position {
MatchPosition::Leftmost => left.or(right),
MatchPosition::Rightmost => right.or(left),
}
}
}

106
vendor/rayon/src/iter/find_first_last/test.rs vendored Normal file
View File

@@ -0,0 +1,106 @@
use super::*;
use std::sync::atomic::AtomicUsize;
#[test]
fn same_range_first_consumers_return_correct_answer() {
let find_op = |x: &i32| x % 2 == 0;
let first_found = AtomicUsize::new(usize::max_value());
let far_right_consumer = FindConsumer::new(&find_op, MatchPosition::Leftmost, &first_found);
// We save a consumer that will be far to the right of the main consumer (and therefore not
// sharing an index range with that consumer) for fullness testing
let consumer = far_right_consumer.split_off_left();
// split until we have an indivisible range
let bits_in_usize = usize::min_value().count_zeros();
for _ in 0..bits_in_usize {
consumer.split_off_left();
}
let reducer = consumer.to_reducer();
// the left and right folders should now have the same range, having
// exhausted the resolution of usize
let left_folder = consumer.split_off_left().into_folder();
let right_folder = consumer.into_folder();
let left_folder = left_folder.consume(0).consume(1);
assert_eq!(left_folder.boundary, right_folder.boundary);
// expect not full even though a better match has been found because the
// ranges are the same
assert!(!right_folder.full());
assert!(far_right_consumer.full());
let right_folder = right_folder.consume(2).consume(3);
assert_eq!(
reducer.reduce(left_folder.complete(), right_folder.complete()),
Some(0)
);
}
#[test]
fn same_range_last_consumers_return_correct_answer() {
let find_op = |x: &i32| x % 2 == 0;
let last_found = AtomicUsize::new(0);
let consumer = FindConsumer::new(&find_op, MatchPosition::Rightmost, &last_found);
// We save a consumer that will be far to the left of the main consumer (and therefore not
// sharing an index range with that consumer) for fullness testing
let far_left_consumer = consumer.split_off_left();
// split until we have an indivisible range
let bits_in_usize = usize::min_value().count_zeros();
for _ in 0..bits_in_usize {
consumer.split_off_left();
}
let reducer = consumer.to_reducer();
// due to the exact calculation in split_off_left, the very last consumer has a
// range of width 2, so we use the second-to-last consumer instead to get
// the same boundary on both folders
let consumer = consumer.split_off_left();
let left_folder = consumer.split_off_left().into_folder();
let right_folder = consumer.into_folder();
let right_folder = right_folder.consume(2).consume(3);
assert_eq!(left_folder.boundary, right_folder.boundary);
// expect not full even though a better match has been found because the
// ranges are the same
assert!(!left_folder.full());
assert!(far_left_consumer.full());
let left_folder = left_folder.consume(0).consume(1);
assert_eq!(
reducer.reduce(left_folder.complete(), right_folder.complete()),
Some(2)
);
}
// These tests require that a folder be assigned to an iterator with more than
// one element. We can't necessarily determine when that will happen for a given
// input to find_first/find_last, so we test the folder directly here instead.
#[test]
fn find_first_folder_does_not_clobber_first_found() {
let best_found = AtomicUsize::new(usize::max_value());
let f = FindFolder {
find_op: &(|&_: &i32| -> bool { true }),
boundary: 0,
match_position: MatchPosition::Leftmost,
best_found: &best_found,
item: None,
};
let f = f.consume(0_i32).consume(1_i32).consume(2_i32);
assert!(f.full());
assert_eq!(f.complete(), Some(0_i32));
}
#[test]
fn find_last_folder_yields_last_match() {
let best_found = AtomicUsize::new(0);
let f = FindFolder {
find_op: &(|&_: &i32| -> bool { true }),
boundary: 0,
match_position: MatchPosition::Rightmost,
best_found: &best_found,
item: None,
};
let f = f.consume(0_i32).consume(1_i32).consume(2_i32);
assert_eq!(f.complete(), Some(2_i32));
}

154
vendor/rayon/src/iter/flat_map.rs vendored Normal file
View File

@@ -0,0 +1,154 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `FlatMap` maps each element to a parallel iterator, then flattens these iterators together.
/// This struct is created by the [`flat_map()`] method on [`ParallelIterator`]
///
/// [`flat_map()`]: trait.ParallelIterator.html#method.flat_map
/// [`ParallelIterator`]: trait.ParallelIterator.html
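///
/// # Examples
///
/// A minimal sketch of `flat_map()` in use, mapping each element to a
/// small `Vec` that is then flattened:
///
/// ```
/// use rayon::prelude::*;
///
/// let a = [1, 2, 3];
/// let expanded: Vec<i32> = a.par_iter().flat_map(|&x| vec![x, x * 10]).collect();
/// assert_eq!(expanded, [1, 10, 2, 20, 3, 30]);
/// ```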
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FlatMap<I: ParallelIterator, F> {
base: I,
map_op: F,
}
impl<I: ParallelIterator + Debug, F> Debug for FlatMap<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FlatMap").field("base", &self.base).finish()
}
}
impl<I: ParallelIterator, F> FlatMap<I, F> {
/// Creates a new `FlatMap` iterator.
pub(super) fn new(base: I, map_op: F) -> Self {
FlatMap { base, map_op }
}
}
impl<I, F, PI> ParallelIterator for FlatMap<I, F>
where
I: ParallelIterator,
F: Fn(I::Item) -> PI + Sync + Send,
PI: IntoParallelIterator,
{
type Item = PI::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlatMapConsumer::new(consumer, &self.map_op);
self.base.drive_unindexed(consumer)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct FlatMapConsumer<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, C, F> FlatMapConsumer<'f, C, F> {
fn new(base: C, map_op: &'f F) -> Self {
FlatMapConsumer { base, map_op }
}
}
impl<'f, T, U, C, F> Consumer<T> for FlatMapConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoParallelIterator,
{
type Folder = FlatMapFolder<'f, C, F, C::Result>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlatMapConsumer::new(left, self.map_op),
FlatMapConsumer::new(right, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlatMapFolder {
base: self.base,
map_op: self.map_op,
previous: None,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, U, C, F> UnindexedConsumer<T> for FlatMapConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoParallelIterator,
{
fn split_off_left(&self) -> Self {
FlatMapConsumer::new(self.base.split_off_left(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlatMapFolder<'f, C, F, R> {
base: C,
map_op: &'f F,
previous: Option<R>,
}
impl<'f, T, U, C, F> Folder<T> for FlatMapFolder<'f, C, F, C::Result>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoParallelIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let map_op = self.map_op;
let par_iter = map_op(item).into_par_iter();
let consumer = self.base.split_off_left();
let result = par_iter.drive_unindexed(consumer);
let previous = match self.previous {
None => Some(result),
Some(previous) => {
let reducer = self.base.to_reducer();
Some(reducer.reduce(previous, result))
}
};
FlatMapFolder {
base: self.base,
map_op,
previous,
}
}
fn complete(self) -> Self::Result {
match self.previous {
Some(previous) => previous,
None => self.base.into_folder().complete(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}

147
vendor/rayon/src/iter/flat_map_iter.rs vendored Normal file
View File

@@ -0,0 +1,147 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `FlatMapIter` maps each element to a serial iterator, then flattens these iterators together.
/// This struct is created by the [`flat_map_iter()`] method on [`ParallelIterator`]
///
/// [`flat_map_iter()`]: trait.ParallelIterator.html#method.flat_map_iter
/// [`ParallelIterator`]: trait.ParallelIterator.html
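///
/// # Examples
///
/// A minimal sketch of `flat_map_iter()` in use, mapping each element to
/// a serial `Range`:
///
/// ```
/// use rayon::prelude::*;
///
/// let counts: Vec<u32> = [1, 2, 3].par_iter().flat_map_iter(|&n| 0..n).collect();
/// assert_eq!(counts, [0, 0, 1, 0, 1, 2]);
/// ```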
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FlatMapIter<I: ParallelIterator, F> {
base: I,
map_op: F,
}
impl<I: ParallelIterator + Debug, F> Debug for FlatMapIter<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FlatMapIter")
.field("base", &self.base)
.finish()
}
}
impl<I: ParallelIterator, F> FlatMapIter<I, F> {
/// Creates a new `FlatMapIter` iterator.
pub(super) fn new(base: I, map_op: F) -> Self {
FlatMapIter { base, map_op }
}
}
impl<I, F, SI> ParallelIterator for FlatMapIter<I, F>
where
I: ParallelIterator,
F: Fn(I::Item) -> SI + Sync + Send,
SI: IntoIterator,
SI::Item: Send,
{
type Item = SI::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlatMapIterConsumer::new(consumer, &self.map_op);
self.base.drive_unindexed(consumer)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct FlatMapIterConsumer<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, C, F> FlatMapIterConsumer<'f, C, F> {
fn new(base: C, map_op: &'f F) -> Self {
FlatMapIterConsumer { base, map_op }
}
}
impl<'f, T, U, C, F> Consumer<T> for FlatMapIterConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoIterator,
{
type Folder = FlatMapIterFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlatMapIterConsumer::new(left, self.map_op),
FlatMapIterConsumer::new(right, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlatMapIterFolder {
base: self.base.into_folder(),
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, U, C, F> UnindexedConsumer<T> for FlatMapIterConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoIterator,
{
fn split_off_left(&self) -> Self {
FlatMapIterConsumer::new(self.base.split_off_left(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlatMapIterFolder<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, T, U, C, F> Folder<T> for FlatMapIterFolder<'f, C, F>
where
C: Folder<U::Item>,
F: Fn(T) -> U,
U: IntoIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let map_op = self.map_op;
let base = self.base.consume_iter(map_op(item));
FlatMapIterFolder { base, map_op }
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let map_op = self.map_op;
let iter = iter.into_iter().flat_map(map_op);
let base = self.base.consume_iter(iter);
FlatMapIterFolder { base, map_op }
}
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

140
vendor/rayon/src/iter/flatten.rs vendored Normal file
View File

@@ -0,0 +1,140 @@
use super::plumbing::*;
use super::*;
/// `Flatten` turns each element into a parallel iterator, then flattens these iterators
/// together. This struct is created by the [`flatten()`] method on [`ParallelIterator`].
///
/// [`flatten()`]: trait.ParallelIterator.html#method.flatten
/// [`ParallelIterator`]: trait.ParallelIterator.html
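///
/// # Examples
///
/// A minimal sketch of `flatten()` in use:
///
/// ```
/// use rayon::prelude::*;
///
/// let nested = vec![vec![1, 2], vec![3, 4]];
/// let flat: Vec<i32> = nested.into_par_iter().flatten().collect();
/// assert_eq!(flat, [1, 2, 3, 4]);
/// ```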
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Flatten<I: ParallelIterator> {
base: I,
}
impl<I> Flatten<I>
where
I: ParallelIterator,
I::Item: IntoParallelIterator,
{
/// Creates a new `Flatten` iterator.
pub(super) fn new(base: I) -> Self {
Flatten { base }
}
}
impl<I> ParallelIterator for Flatten<I>
where
I: ParallelIterator,
I::Item: IntoParallelIterator,
{
type Item = <I::Item as IntoParallelIterator>::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlattenConsumer::new(consumer);
self.base.drive_unindexed(consumer)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct FlattenConsumer<C> {
base: C,
}
impl<C> FlattenConsumer<C> {
fn new(base: C) -> Self {
FlattenConsumer { base }
}
}
impl<T, C> Consumer<T> for FlattenConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoParallelIterator,
{
type Folder = FlattenFolder<C, C::Result>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlattenConsumer::new(left),
FlattenConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlattenFolder {
base: self.base,
previous: None,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<T, C> UnindexedConsumer<T> for FlattenConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoParallelIterator,
{
fn split_off_left(&self) -> Self {
FlattenConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlattenFolder<C, R> {
base: C,
previous: Option<R>,
}
impl<T, C> Folder<T> for FlattenFolder<C, C::Result>
where
C: UnindexedConsumer<T::Item>,
T: IntoParallelIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let par_iter = item.into_par_iter();
let consumer = self.base.split_off_left();
let result = par_iter.drive_unindexed(consumer);
let previous = match self.previous {
None => Some(result),
Some(previous) => {
let reducer = self.base.to_reducer();
Some(reducer.reduce(previous, result))
}
};
FlattenFolder {
base: self.base,
previous,
}
}
fn complete(self) -> Self::Result {
match self.previous {
Some(previous) => previous,
None => self.base.into_folder().complete(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}

132
vendor/rayon/src/iter/flatten_iter.rs vendored Normal file
View File

@@ -0,0 +1,132 @@
use super::plumbing::*;
use super::*;
/// `FlattenIter` turns each element into a serial iterator, then flattens these iterators
/// together. This struct is created by the [`flatten_iter()`] method on [`ParallelIterator`].
///
/// [`flatten_iter()`]: trait.ParallelIterator.html#method.flatten_iter
/// [`ParallelIterator`]: trait.ParallelIterator.html
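///
/// # Examples
///
/// A minimal sketch of `flatten_iter()` in use, flattening serial
/// `Option` iterators:
///
/// ```
/// use rayon::prelude::*;
///
/// let options = vec![Some(1), None, Some(3)];
/// let present: Vec<i32> = options.into_par_iter().flatten_iter().collect();
/// assert_eq!(present, [1, 3]);
/// ```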
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct FlattenIter<I: ParallelIterator> {
base: I,
}
impl<I> FlattenIter<I>
where
I: ParallelIterator,
I::Item: IntoIterator,
<I::Item as IntoIterator>::Item: Send,
{
/// Creates a new `FlattenIter` iterator.
pub(super) fn new(base: I) -> Self {
FlattenIter { base }
}
}
impl<I> ParallelIterator for FlattenIter<I>
where
I: ParallelIterator,
I::Item: IntoIterator,
<I::Item as IntoIterator>::Item: Send,
{
type Item = <I::Item as IntoIterator>::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlattenIterConsumer::new(consumer);
self.base.drive_unindexed(consumer)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct FlattenIterConsumer<C> {
base: C,
}
impl<C> FlattenIterConsumer<C> {
fn new(base: C) -> Self {
FlattenIterConsumer { base }
}
}
impl<T, C> Consumer<T> for FlattenIterConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoIterator,
{
type Folder = FlattenIterFolder<C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlattenIterConsumer::new(left),
FlattenIterConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlattenIterFolder {
base: self.base.into_folder(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<T, C> UnindexedConsumer<T> for FlattenIterConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoIterator,
{
fn split_off_left(&self) -> Self {
FlattenIterConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlattenIterFolder<C> {
base: C,
}
impl<T, C> Folder<T> for FlattenIterFolder<C>
where
C: Folder<T::Item>,
T: IntoIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let base = self.base.consume_iter(item);
FlattenIterFolder { base }
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let iter = iter.into_iter().flatten();
let base = self.base.consume_iter(iter);
FlattenIterFolder { base }
}
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

302
vendor/rayon/src/iter/fold.rs vendored Normal file
View File

@@ -0,0 +1,302 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
impl<U, I, ID, F> Fold<I, ID, F>
where
I: ParallelIterator,
F: Fn(U, I::Item) -> U + Sync + Send,
ID: Fn() -> U + Sync + Send,
U: Send,
{
pub(super) fn new(base: I, identity: ID, fold_op: F) -> Self {
Fold {
base,
identity,
fold_op,
}
}
}
/// `Fold` is an iterator that applies a function over an iterator, producing a single value.
/// This struct is created by the [`fold()`] method on [`ParallelIterator`]
///
/// [`fold()`]: trait.ParallelIterator.html#method.fold
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Fold<I, ID, F> {
base: I,
identity: ID,
fold_op: F,
}
impl<I: ParallelIterator + Debug, ID, F> Debug for Fold<I, ID, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Fold").field("base", &self.base).finish()
}
}
impl<U, I, ID, F> ParallelIterator for Fold<I, ID, F>
where
I: ParallelIterator,
F: Fn(U, I::Item) -> U + Sync + Send,
ID: Fn() -> U + Sync + Send,
U: Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = FoldConsumer {
base: consumer,
fold_op: &self.fold_op,
identity: &self.identity,
};
self.base.drive_unindexed(consumer1)
}
}
struct FoldConsumer<'c, C, ID, F> {
base: C,
fold_op: &'c F,
identity: &'c ID,
}
impl<'r, U, T, C, ID, F> Consumer<T> for FoldConsumer<'r, C, ID, F>
where
C: Consumer<U>,
F: Fn(U, T) -> U + Sync,
ID: Fn() -> U + Sync,
U: Send,
{
type Folder = FoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FoldConsumer { base: left, ..self },
FoldConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FoldFolder {
base: self.base.into_folder(),
item: (self.identity)(),
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, ID, F> UnindexedConsumer<T> for FoldConsumer<'r, C, ID, F>
where
C: UnindexedConsumer<U>,
F: Fn(U, T) -> U + Sync,
ID: Fn() -> U + Sync,
U: Send,
{
fn split_off_left(&self) -> Self {
FoldConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FoldFolder<'r, C, ID, F> {
base: C,
fold_op: &'r F,
item: ID,
}
impl<'r, C, ID, F, T> Folder<T> for FoldFolder<'r, C, ID, F>
where
C: Folder<ID>,
F: Fn(ID, T) -> ID + Sync,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let item = (self.fold_op)(self.item, item);
FoldFolder {
base: self.base,
fold_op: self.fold_op,
item,
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
fn not_full<C, ID, T>(base: &C) -> impl Fn(&T) -> bool + '_
where
C: Folder<ID>,
{
move |_| !base.full()
}
let base = self.base;
let item = iter
.into_iter()
// stop iterating if another thread has finished
.take_while(not_full(&base))
.fold(self.item, self.fold_op);
FoldFolder {
base,
item,
fold_op: self.fold_op,
}
}
fn complete(self) -> C::Result {
self.base.consume(self.item).complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
// ///////////////////////////////////////////////////////////////////////////
impl<U, I, F> FoldWith<I, U, F>
where
I: ParallelIterator,
F: Fn(U, I::Item) -> U + Sync + Send,
U: Send + Clone,
{
pub(super) fn new(base: I, item: U, fold_op: F) -> Self {
FoldWith {
base,
item,
fold_op,
}
}
}
/// `FoldWith` is an iterator that applies a function over an iterator, producing a single value.
/// This struct is created by the [`fold_with()`] method on [`ParallelIterator`]
///
/// [`fold_with()`]: trait.ParallelIterator.html#method.fold_with
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FoldWith<I, U, F> {
base: I,
item: U,
fold_op: F,
}
impl<I: ParallelIterator + Debug, U: Debug, F> Debug for FoldWith<I, U, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FoldWith")
.field("base", &self.base)
.field("item", &self.item)
.finish()
}
}
impl<U, I, F> ParallelIterator for FoldWith<I, U, F>
where
I: ParallelIterator,
F: Fn(U, I::Item) -> U + Sync + Send,
U: Send + Clone,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = FoldWithConsumer {
base: consumer,
item: self.item,
fold_op: &self.fold_op,
};
self.base.drive_unindexed(consumer1)
}
}
struct FoldWithConsumer<'c, C, U, F> {
base: C,
item: U,
fold_op: &'c F,
}
impl<'r, U, T, C, F> Consumer<T> for FoldWithConsumer<'r, C, U, F>
where
C: Consumer<U>,
F: Fn(U, T) -> U + Sync,
U: Send + Clone,
{
type Folder = FoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FoldWithConsumer {
base: left,
item: self.item.clone(),
..self
},
FoldWithConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FoldFolder {
base: self.base.into_folder(),
item: self.item,
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, F> UnindexedConsumer<T> for FoldWithConsumer<'r, C, U, F>
where
C: UnindexedConsumer<U>,
F: Fn(U, T) -> U + Sync,
U: Send + Clone,
{
fn split_off_left(&self) -> Self {
FoldWithConsumer {
base: self.base.split_off_left(),
item: self.item.clone(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
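
A minimal sketch of the fold()/reduce pattern this consumer enables (not part of the vendored file): fold() yields one partial accumulator per split, so a final serial combination such as sum() is still needed.

use rayon::prelude::*;

fn main() {
    let total: u32 = (1..=100u32)
        .into_par_iter()
        .fold(|| 0u32, |acc, x| acc + x) // one partial sum per split
        .sum();                          // combine the partials
    assert_eq!(total, 5050);
}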

236
vendor/rayon/src/iter/fold_chunks.rs vendored Normal file
View File

@@ -0,0 +1,236 @@
use std::fmt::{self, Debug};
use super::chunks::ChunkProducer;
use super::plumbing::*;
use super::*;
use crate::math::div_round_up;
/// `FoldChunks` is an iterator that groups elements of an underlying iterator and applies a
/// function over them, producing a single value for each group.
///
/// This struct is created by the [`fold_chunks()`] method on [`IndexedParallelIterator`]
///
/// [`fold_chunks()`]: trait.IndexedParallelIterator.html#method.fold_chunks
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FoldChunks<I, ID, F>
where
I: IndexedParallelIterator,
{
base: I,
chunk_size: usize,
fold_op: F,
identity: ID,
}
impl<I: IndexedParallelIterator + Debug, ID, F> Debug for FoldChunks<I, ID, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Fold")
.field("base", &self.base)
.field("chunk_size", &self.chunk_size)
.finish()
}
}
impl<I, ID, U, F> FoldChunks<I, ID, F>
where
I: IndexedParallelIterator,
ID: Fn() -> U + Send + Sync,
F: Fn(U, I::Item) -> U + Send + Sync,
U: Send,
{
/// Creates a new `FoldChunks` iterator
pub(super) fn new(base: I, chunk_size: usize, identity: ID, fold_op: F) -> Self {
FoldChunks {
base,
chunk_size,
identity,
fold_op,
}
}
}
impl<I, ID, U, F> ParallelIterator for FoldChunks<I, ID, F>
where
I: IndexedParallelIterator,
ID: Fn() -> U + Send + Sync,
F: Fn(U, I::Item) -> U + Send + Sync,
U: Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<U>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, ID, U, F> IndexedParallelIterator for FoldChunks<I, ID, F>
where
I: IndexedParallelIterator,
ID: Fn() -> U + Send + Sync,
F: Fn(U, I::Item) -> U + Send + Sync,
U: Send,
{
fn len(&self) -> usize {
div_round_up(self.base.len(), self.chunk_size)
}
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback {
chunk_size: self.chunk_size,
len,
identity: self.identity,
fold_op: self.fold_op,
callback,
});
struct Callback<CB, ID, F> {
chunk_size: usize,
len: usize,
identity: ID,
fold_op: F,
callback: CB,
}
impl<T, CB, ID, U, F> ProducerCallback<T> for Callback<CB, ID, F>
where
CB: ProducerCallback<U>,
ID: Fn() -> U + Send + Sync,
F: Fn(U, T) -> U + Send + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let identity = &self.identity;
let fold_op = &self.fold_op;
let fold_iter = move |iter: P::IntoIter| iter.fold(identity(), fold_op);
let producer = ChunkProducer::new(self.chunk_size, self.len, base, fold_iter);
self.callback.callback(producer)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::ops::Add;
#[test]
fn check_fold_chunks() {
let words = "bishbashbosh!"
.chars()
.collect::<Vec<_>>()
.into_par_iter()
.fold_chunks(4, String::new, |mut s, c| {
s.push(c);
s
})
.collect::<Vec<_>>();
assert_eq!(words, vec!["bish", "bash", "bosh", "!"]);
}
// 'closure' values for tests below
fn id() -> i32 {
0
}
fn sum<T, U>(x: T, y: U) -> T
where
T: Add<U, Output = T>,
{
x + y
}
#[test]
#[should_panic(expected = "chunk_size must not be zero")]
fn check_fold_chunks_zero_size() {
let _: Vec<i32> = vec![1, 2, 3]
.into_par_iter()
.fold_chunks(0, id, sum)
.collect();
}
#[test]
fn check_fold_chunks_even_size() {
assert_eq!(
vec![1 + 2 + 3, 4 + 5 + 6, 7 + 8 + 9],
(1..10)
.into_par_iter()
.fold_chunks(3, id, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_empty() {
let v: Vec<i32> = vec![];
let expected: Vec<i32> = vec![];
assert_eq!(
expected,
v.into_par_iter()
.fold_chunks(2, id, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_len() {
assert_eq!(4, (0..8).into_par_iter().fold_chunks(2, id, sum).len());
assert_eq!(3, (0..9).into_par_iter().fold_chunks(3, id, sum).len());
assert_eq!(3, (0..8).into_par_iter().fold_chunks(3, id, sum).len());
assert_eq!(1, (&[1]).par_iter().fold_chunks(3, id, sum).len());
assert_eq!(0, (0..0).into_par_iter().fold_chunks(3, id, sum).len());
}
#[test]
fn check_fold_chunks_uneven() {
let cases: Vec<(Vec<u32>, usize, Vec<u32>)> = vec![
((0..5).collect(), 3, vec![0 + 1 + 2, 3 + 4]),
(vec![1], 5, vec![1]),
((0..4).collect(), 3, vec![0 + 1 + 2, 3]),
];
for (i, (v, n, expected)) in cases.into_iter().enumerate() {
let mut res: Vec<u32> = vec![];
v.par_iter()
.fold_chunks(n, || 0, sum)
.collect_into_vec(&mut res);
assert_eq!(expected, res, "Case {} failed", i);
res.truncate(0);
v.into_par_iter()
.fold_chunks(n, || 0, sum)
.rev()
.collect_into_vec(&mut res);
assert_eq!(
expected.into_iter().rev().collect::<Vec<u32>>(),
res,
"Case {} reversed failed",
i
);
}
}
}

231
vendor/rayon/src/iter/fold_chunks_with.rs vendored Normal file
View File

@@ -0,0 +1,231 @@
use std::fmt::{self, Debug};
use super::chunks::ChunkProducer;
use super::plumbing::*;
use super::*;
use crate::math::div_round_up;
/// `FoldChunksWith` is an iterator that groups elements of an underlying iterator and applies a
/// function over them, producing a single value for each group.
///
/// This struct is created by the [`fold_chunks_with()`] method on [`IndexedParallelIterator`]
///
/// [`fold_chunks_with()`]: trait.IndexedParallelIterator.html#method.fold_chunks_with
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FoldChunksWith<I, U, F>
where
I: IndexedParallelIterator,
{
base: I,
chunk_size: usize,
item: U,
fold_op: F,
}
impl<I: IndexedParallelIterator + Debug, U: Debug, F> Debug for FoldChunksWith<I, U, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Fold")
.field("base", &self.base)
.field("chunk_size", &self.chunk_size)
.field("item", &self.item)
.finish()
}
}
impl<I, U, F> FoldChunksWith<I, U, F>
where
I: IndexedParallelIterator,
U: Send + Clone,
F: Fn(U, I::Item) -> U + Send + Sync,
{
/// Creates a new `FoldChunksWith` iterator
pub(super) fn new(base: I, chunk_size: usize, item: U, fold_op: F) -> Self {
FoldChunksWith {
base,
chunk_size,
item,
fold_op,
}
}
}
impl<I, U, F> ParallelIterator for FoldChunksWith<I, U, F>
where
I: IndexedParallelIterator,
U: Send + Clone,
F: Fn(U, I::Item) -> U + Send + Sync,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<U>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, U, F> IndexedParallelIterator for FoldChunksWith<I, U, F>
where
I: IndexedParallelIterator,
U: Send + Clone,
F: Fn(U, I::Item) -> U + Send + Sync,
{
fn len(&self) -> usize {
div_round_up(self.base.len(), self.chunk_size)
}
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback {
chunk_size: self.chunk_size,
len,
item: self.item,
fold_op: self.fold_op,
callback,
});
struct Callback<CB, T, F> {
chunk_size: usize,
len: usize,
item: T,
fold_op: F,
callback: CB,
}
impl<T, U, F, CB> ProducerCallback<T> for Callback<CB, U, F>
where
CB: ProducerCallback<U>,
U: Send + Clone,
F: Fn(U, T) -> U + Send + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let item = self.item;
let fold_op = &self.fold_op;
let fold_iter = move |iter: P::IntoIter| iter.fold(item.clone(), fold_op);
let producer = ChunkProducer::new(self.chunk_size, self.len, base, fold_iter);
self.callback.callback(producer)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::ops::Add;
#[test]
fn check_fold_chunks_with() {
let words = "bishbashbosh!"
.chars()
.collect::<Vec<_>>()
.into_par_iter()
.fold_chunks_with(4, String::new(), |mut s, c| {
s.push(c);
s
})
.collect::<Vec<_>>();
assert_eq!(words, vec!["bish", "bash", "bosh", "!"]);
}
// 'closure' value for tests below
fn sum<T, U>(x: T, y: U) -> T
where
T: Add<U, Output = T>,
{
x + y
}
#[test]
#[should_panic(expected = "chunk_size must not be zero")]
fn check_fold_chunks_zero_size() {
let _: Vec<i32> = vec![1, 2, 3]
.into_par_iter()
.fold_chunks_with(0, 0, sum)
.collect();
}
#[test]
fn check_fold_chunks_even_size() {
assert_eq!(
vec![1 + 2 + 3, 4 + 5 + 6, 7 + 8 + 9],
(1..10)
.into_par_iter()
.fold_chunks_with(3, 0, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_with_empty() {
let v: Vec<i32> = vec![];
let expected: Vec<i32> = vec![];
assert_eq!(
expected,
v.into_par_iter()
.fold_chunks_with(2, 0, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_len() {
assert_eq!(4, (0..8).into_par_iter().fold_chunks_with(2, 0, sum).len());
assert_eq!(3, (0..9).into_par_iter().fold_chunks_with(3, 0, sum).len());
assert_eq!(3, (0..8).into_par_iter().fold_chunks_with(3, 0, sum).len());
assert_eq!(1, (&[1]).par_iter().fold_chunks_with(3, 0, sum).len());
assert_eq!(0, (0..0).into_par_iter().fold_chunks_with(3, 0, sum).len());
}
#[test]
fn check_fold_chunks_uneven() {
let cases: Vec<(Vec<u32>, usize, Vec<u32>)> = vec![
((0..5).collect(), 3, vec![0 + 1 + 2, 3 + 4]),
(vec![1], 5, vec![1]),
((0..4).collect(), 3, vec![0 + 1 + 2, 3]),
];
for (i, (v, n, expected)) in cases.into_iter().enumerate() {
let mut res: Vec<u32> = vec![];
v.par_iter()
.fold_chunks_with(n, 0, sum)
.collect_into_vec(&mut res);
assert_eq!(expected, res, "Case {} failed", i);
res.truncate(0);
v.into_par_iter()
.fold_chunks_with(n, 0, sum)
.rev()
.collect_into_vec(&mut res);
assert_eq!(
expected.into_iter().rev().collect::<Vec<u32>>(),
res,
"Case {} reversed failed",
i
);
}
}
}

77
vendor/rayon/src/iter/for_each.rs vendored Normal file
View File

@@ -0,0 +1,77 @@
use super::noop::*;
use super::plumbing::*;
use super::ParallelIterator;
pub(super) fn for_each<I, F, T>(pi: I, op: &F)
where
I: ParallelIterator<Item = T>,
F: Fn(T) + Sync,
T: Send,
{
let consumer = ForEachConsumer { op };
pi.drive_unindexed(consumer)
}
struct ForEachConsumer<'f, F> {
op: &'f F,
}
impl<'f, F, T> Consumer<T> for ForEachConsumer<'f, F>
where
F: Fn(T) + Sync,
{
type Folder = ForEachConsumer<'f, F>;
type Reducer = NoopReducer;
type Result = ();
fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
(self.split_off_left(), self, NoopReducer)
}
fn into_folder(self) -> Self {
self
}
fn full(&self) -> bool {
false
}
}
impl<'f, F, T> Folder<T> for ForEachConsumer<'f, F>
where
F: Fn(T) + Sync,
{
type Result = ();
fn consume(self, item: T) -> Self {
(self.op)(item);
self
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
iter.into_iter().for_each(self.op);
self
}
fn complete(self) {}
fn full(&self) -> bool {
false
}
}
impl<'f, F, T> UnindexedConsumer<T> for ForEachConsumer<'f, F>
where
F: Fn(T) + Sync,
{
fn split_off_left(&self) -> Self {
ForEachConsumer { op: self.op }
}
fn to_reducer(&self) -> NoopReducer {
NoopReducer
}
}
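
A minimal usage sketch of for_each() (not part of the vendored file). The closure runs concurrently, so shared state must be synchronized, e.g. with an atomic:

use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let counter = AtomicUsize::new(0);
    (0..1000).into_par_iter().for_each(|_| {
        counter.fetch_add(1, Ordering::Relaxed);
    });
    assert_eq!(counter.load(Ordering::Relaxed), 1000);
}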

279
vendor/rayon/src/iter/from_par_iter.rs vendored Normal file
View File

@@ -0,0 +1,279 @@
use super::noop::NoopConsumer;
use super::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
use std::borrow::Cow;
use std::collections::LinkedList;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BinaryHeap, VecDeque};
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
/// Creates an empty default collection and extends it.
fn collect_extended<C, I>(par_iter: I) -> C
where
I: IntoParallelIterator,
C: ParallelExtend<I::Item> + Default,
{
let mut collection = C::default();
collection.par_extend(par_iter);
collection
}
/// Collects items from a parallel iterator into a vector.
impl<T> FromParallelIterator<T> for Vec<T>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
collect_extended(par_iter)
}
}
/// Collects items from a parallel iterator into a boxed slice.
impl<T> FromParallelIterator<T> for Box<[T]>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a reference-counted slice.
impl<T> FromParallelIterator<T> for Rc<[T]>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into an atomically-reference-counted slice.
impl<T> FromParallelIterator<T> for Arc<[T]>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a vecdeque.
impl<T> FromParallelIterator<T> for VecDeque<T>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a binaryheap.
/// The heap-ordering is calculated serially after all items are collected.
impl<T> FromParallelIterator<T> for BinaryHeap<T>
where
T: Ord + Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a freshly allocated
/// linked list.
impl<T> FromParallelIterator<T> for LinkedList<T>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
collect_extended(par_iter)
}
}
/// Collects (key, value) pairs from a parallel iterator into a
/// hashmap. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Default + Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = (K, V)>,
{
collect_extended(par_iter)
}
}
/// Collects (key, value) pairs from a parallel iterator into a
/// btreemap. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V> FromParallelIterator<(K, V)> for BTreeMap<K, V>
where
K: Ord + Send,
V: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = (K, V)>,
{
collect_extended(par_iter)
}
}
/// Collects values from a parallel iterator into a hashset.
impl<V, S> FromParallelIterator<V> for HashSet<V, S>
where
V: Eq + Hash + Send,
S: BuildHasher + Default + Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = V>,
{
collect_extended(par_iter)
}
}
/// Collects values from a parallel iterator into a btreeset.
impl<V> FromParallelIterator<V> for BTreeSet<V>
where
V: Send + Ord,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = V>,
{
collect_extended(par_iter)
}
}
/// Collects characters from a parallel iterator into a string.
impl FromParallelIterator<char> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = char>,
{
collect_extended(par_iter)
}
}
/// Collects characters from a parallel iterator into a string.
impl<'a> FromParallelIterator<&'a char> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = &'a char>,
{
collect_extended(par_iter)
}
}
/// Collects string slices from a parallel iterator into a string.
impl<'a> FromParallelIterator<&'a str> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = &'a str>,
{
collect_extended(par_iter)
}
}
/// Collects strings from a parallel iterator into one large string.
impl FromParallelIterator<String> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = String>,
{
collect_extended(par_iter)
}
}
/// Collects boxed strings from a parallel iterator into one large string.
impl FromParallelIterator<Box<str>> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Box<str>>,
{
collect_extended(par_iter)
}
}
/// Collects string slices from a parallel iterator into a string.
impl<'a> FromParallelIterator<Cow<'a, str>> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Cow<'a, str>>,
{
collect_extended(par_iter)
}
}
/// Collects an arbitrary `Cow` collection.
///
/// Note, the standard library only has `FromIterator` for `Cow<'a, str>` and
/// `Cow<'a, [T]>`, because no one thought to add a blanket implementation
/// before it was stabilized.
impl<'a, C: ?Sized, T> FromParallelIterator<T> for Cow<'a, C>
where
C: ToOwned,
C::Owned: FromParallelIterator<T>,
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Cow::Owned(C::Owned::from_par_iter(par_iter))
}
}
/// Collapses all unit items from a parallel iterator into one.
///
/// This is more useful when combined with higher-level abstractions, like
/// collecting to a `Result<(), E>` where you only care about errors:
///
/// ```
/// use std::io::*;
/// use rayon::prelude::*;
///
/// let data = vec![1, 2, 3, 4, 5];
/// let res: Result<()> = data.par_iter()
/// .map(|x| writeln!(stdout(), "{}", x))
/// .collect();
/// assert!(res.is_ok());
/// ```
impl FromParallelIterator<()> for () {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = ()>,
{
par_iter.into_par_iter().drive_unindexed(NoopConsumer)
}
}
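
A minimal sketch of the duplicate-key rule documented above for HashMap (not part of the vendored file): pairs produced later in the parallel iterator win, just as with sequential collect().

use rayon::prelude::*;
use std::collections::HashMap;

fn main() {
    let pairs = vec![("a", 1), ("b", 2), ("a", 3)];
    let map: HashMap<&str, i32> = pairs.into_par_iter().collect();
    assert_eq!(map["a"], 3); // the later ("a", 3) overwrote ("a", 1)
    assert_eq!(map["b"], 2);
}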

257
vendor/rayon/src/iter/inspect.rs vendored Normal file
View File

@@ -0,0 +1,257 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
use std::iter;
/// `Inspect` is an iterator that calls a function with a reference to each
/// element before yielding it.
///
/// This struct is created by the [`inspect()`] method on [`ParallelIterator`]
///
/// [`inspect()`]: trait.ParallelIterator.html#method.inspect
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Inspect<I: ParallelIterator, F> {
base: I,
inspect_op: F,
}
impl<I: ParallelIterator + Debug, F> Debug for Inspect<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Inspect").field("base", &self.base).finish()
}
}
impl<I, F> Inspect<I, F>
where
I: ParallelIterator,
{
/// Creates a new `Inspect` iterator.
pub(super) fn new(base: I, inspect_op: F) -> Self {
Inspect { base, inspect_op }
}
}
impl<I, F> ParallelIterator for Inspect<I, F>
where
I: ParallelIterator,
F: Fn(&I::Item) + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, F> IndexedParallelIterator for Inspect<I, F>
where
I: IndexedParallelIterator,
F: Fn(&I::Item) + Sync + Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
inspect_op: self.inspect_op,
});
struct Callback<CB, F> {
callback: CB,
inspect_op: F,
}
impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
where
CB: ProducerCallback<T>,
F: Fn(&T) + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = InspectProducer {
base,
inspect_op: &self.inspect_op,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct InspectProducer<'f, P, F> {
base: P,
inspect_op: &'f F,
}
impl<'f, P, F> Producer for InspectProducer<'f, P, F>
where
P: Producer,
F: Fn(&P::Item) + Sync,
{
type Item = P::Item;
type IntoIter = iter::Inspect<P::IntoIter, &'f F>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().inspect(self.inspect_op)
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
InspectProducer {
base: left,
inspect_op: self.inspect_op,
},
InspectProducer {
base: right,
inspect_op: self.inspect_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = InspectFolder {
base: folder,
inspect_op: self.inspect_op,
};
self.base.fold_with(folder1).base
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct InspectConsumer<'f, C, F> {
base: C,
inspect_op: &'f F,
}
impl<'f, C, F> InspectConsumer<'f, C, F> {
fn new(base: C, inspect_op: &'f F) -> Self {
InspectConsumer { base, inspect_op }
}
}
impl<'f, T, C, F> Consumer<T> for InspectConsumer<'f, C, F>
where
C: Consumer<T>,
F: Fn(&T) + Sync,
{
type Folder = InspectFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
InspectConsumer::new(left, self.inspect_op),
InspectConsumer::new(right, self.inspect_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
InspectFolder {
base: self.base.into_folder(),
inspect_op: self.inspect_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, C, F> UnindexedConsumer<T> for InspectConsumer<'f, C, F>
where
C: UnindexedConsumer<T>,
F: Fn(&T) + Sync,
{
fn split_off_left(&self) -> Self {
InspectConsumer::new(self.base.split_off_left(), self.inspect_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct InspectFolder<'f, C, F> {
base: C,
inspect_op: &'f F,
}
impl<'f, T, C, F> Folder<T> for InspectFolder<'f, C, F>
where
C: Folder<T>,
F: Fn(&T),
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
(self.inspect_op)(&item);
InspectFolder {
base: self.base.consume(item),
inspect_op: self.inspect_op,
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self
.base
.consume_iter(iter.into_iter().inspect(self.inspect_op));
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
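
A minimal usage sketch of inspect() (not part of the vendored file). The closure only borrows each item, so the stream itself is unchanged:

use rayon::prelude::*;

fn main() {
    let sum: i32 = (1..=4)
        .into_par_iter()
        .inspect(|x| println!("about to sum: {}", x))
        .sum();
    assert_eq!(sum, 10);
}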

336
vendor/rayon/src/iter/interleave.rs vendored Normal file
View File

@@ -0,0 +1,336 @@
use super::plumbing::*;
use super::*;
use std::cmp::{self, Ordering};
use std::iter::Fuse;
/// `Interleave` is an iterator that interleaves elements of iterators
/// `i` and `j` in one continuous iterator. This struct is created by
/// the [`interleave()`] method on [`IndexedParallelIterator`]
///
/// [`interleave()`]: trait.IndexedParallelIterator.html#method.interleave
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Interleave<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
i: I,
j: J,
}
impl<I, J> Interleave<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
/// Creates a new `Interleave` iterator
pub(super) fn new(i: I, j: J) -> Self {
Interleave { i, j }
}
}
impl<I, J> ParallelIterator for Interleave<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<I::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, J> IndexedParallelIterator for Interleave<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.i.len().checked_add(self.j.len()).expect("overflow")
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let (i_len, j_len) = (self.i.len(), self.j.len());
return self.i.with_producer(CallbackI {
callback,
i_len,
j_len,
i_next: false,
j: self.j,
});
struct CallbackI<CB, J> {
callback: CB,
i_len: usize,
j_len: usize,
i_next: bool,
j: J,
}
impl<CB, J> ProducerCallback<J::Item> for CallbackI<CB, J>
where
J: IndexedParallelIterator,
CB: ProducerCallback<J::Item>,
{
type Output = CB::Output;
fn callback<I>(self, i_producer: I) -> Self::Output
where
I: Producer<Item = J::Item>,
{
self.j.with_producer(CallbackJ {
i_producer,
i_len: self.i_len,
j_len: self.j_len,
i_next: self.i_next,
callback: self.callback,
})
}
}
struct CallbackJ<CB, I> {
callback: CB,
i_len: usize,
j_len: usize,
i_next: bool,
i_producer: I,
}
impl<CB, I> ProducerCallback<I::Item> for CallbackJ<CB, I>
where
I: Producer,
CB: ProducerCallback<I::Item>,
{
type Output = CB::Output;
fn callback<J>(self, j_producer: J) -> Self::Output
where
J: Producer<Item = I::Item>,
{
let producer = InterleaveProducer::new(
self.i_producer,
j_producer,
self.i_len,
self.j_len,
self.i_next,
);
self.callback.callback(producer)
}
}
}
}
struct InterleaveProducer<I, J>
where
I: Producer,
J: Producer<Item = I::Item>,
{
i: I,
j: J,
i_len: usize,
j_len: usize,
i_next: bool,
}
impl<I, J> InterleaveProducer<I, J>
where
I: Producer,
J: Producer<Item = I::Item>,
{
fn new(i: I, j: J, i_len: usize, j_len: usize, i_next: bool) -> InterleaveProducer<I, J> {
InterleaveProducer {
i,
j,
i_len,
j_len,
i_next,
}
}
}
impl<I, J> Producer for InterleaveProducer<I, J>
where
I: Producer,
J: Producer<Item = I::Item>,
{
type Item = I::Item;
type IntoIter = InterleaveSeq<I::IntoIter, J::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
InterleaveSeq {
i: self.i.into_iter().fuse(),
j: self.j.into_iter().fuse(),
i_next: self.i_next,
}
}
fn min_len(&self) -> usize {
cmp::max(self.i.min_len(), self.j.min_len())
}
fn max_len(&self) -> usize {
cmp::min(self.i.max_len(), self.j.max_len())
}
/// We know 0 < index <= self.i_len + self.j_len
///
/// Find a, b satisfying:
///
/// (1) 0 < a <= self.i_len
/// (2) 0 < b <= self.j_len
/// (3) a + b == index
///
/// For even splits, set a = b = index/2.
/// For odd splits, set a = (index/2)+1, b = index/2, if `i`
/// should yield the next element, otherwise, if `j` should yield
/// the next element, set a = index/2 and b = (index/2)+1
fn split_at(self, index: usize) -> (Self, Self) {
#[inline]
fn odd_offset(flag: bool) -> usize {
(!flag) as usize
}
let even = index % 2 == 0;
let idx = index >> 1;
// desired split
let (i_idx, j_idx) = (
idx + odd_offset(even || self.i_next),
idx + odd_offset(even || !self.i_next),
);
let (i_split, j_split) = if self.i_len >= i_idx && self.j_len >= j_idx {
(i_idx, j_idx)
} else if self.i_len >= i_idx {
// j too short
(index - self.j_len, self.j_len)
} else {
// i too short
(self.i_len, index - self.i_len)
};
let trailing_i_next = even == self.i_next;
let (i_left, i_right) = self.i.split_at(i_split);
let (j_left, j_right) = self.j.split_at(j_split);
(
InterleaveProducer::new(i_left, j_left, i_split, j_split, self.i_next),
InterleaveProducer::new(
i_right,
j_right,
self.i_len - i_split,
self.j_len - j_split,
trailing_i_next,
),
)
}
}
/// Wrapper for Interleave to implement DoubleEndedIterator and
/// ExactSizeIterator.
///
/// This iterator is fused.
struct InterleaveSeq<I, J> {
i: Fuse<I>,
j: Fuse<J>,
/// Flag to control which iterator should provide the next element. When
/// `false` then `i` produces the next element, otherwise `j` produces the
/// next element.
i_next: bool,
}
/// Iterator implementation for InterleaveSeq. This implementation is
/// taken more or less verbatim from itertools. It is replicated here
/// (instead of calling itertools directly), because we also need to
/// implement `DoubleEndedIterator` and `ExactSizeIterator`.
impl<I, J> Iterator for InterleaveSeq<I, J>
where
I: Iterator,
J: Iterator<Item = I::Item>,
{
type Item = I::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.i_next = !self.i_next;
if self.i_next {
match self.i.next() {
None => self.j.next(),
r => r,
}
} else {
match self.j.next() {
None => self.i.next(),
r => r,
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (ih, jh) = (self.i.size_hint(), self.j.size_hint());
let min = ih.0.saturating_add(jh.0);
let max = match (ih.1, jh.1) {
(Some(x), Some(y)) => x.checked_add(y),
_ => None,
};
(min, max)
}
}
// The implementation for DoubleEndedIterator requires
// ExactSizeIterator to provide `next_back()`. The last element will
// come from the iterator that runs out last (ie has the most elements
// in it). If the iterators have the same number of elements, then the
// last iterator will provide the last element.
impl<I, J> DoubleEndedIterator for InterleaveSeq<I, J>
where
I: DoubleEndedIterator + ExactSizeIterator,
J: DoubleEndedIterator<Item = I::Item> + ExactSizeIterator<Item = I::Item>,
{
#[inline]
fn next_back(&mut self) -> Option<I::Item> {
match self.i.len().cmp(&self.j.len()) {
Ordering::Less => self.j.next_back(),
Ordering::Equal => {
if self.i_next {
self.i.next_back()
} else {
self.j.next_back()
}
}
Ordering::Greater => self.i.next_back(),
}
}
}
impl<I, J> ExactSizeIterator for InterleaveSeq<I, J>
where
I: ExactSizeIterator,
J: ExactSizeIterator<Item = I::Item>,
{
#[inline]
fn len(&self) -> usize {
self.i.len() + self.j.len()
}
}
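
A minimal usage sketch of interleave() (not part of the vendored file). The left iterator yields the first element, and once either side is exhausted the rest of the other is passed through:

use rayon::prelude::*;

fn main() {
    let x = vec![1, 3, 5];
    let y = vec![2, 4];
    let merged: Vec<i32> = x.into_par_iter().interleave(y).collect();
    assert_eq!(merged, vec![1, 2, 3, 4, 5]);
}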

85
vendor/rayon/src/iter/interleave_shortest.rs vendored Normal file
View File

@@ -0,0 +1,85 @@
use super::plumbing::*;
use super::*;
/// `InterleaveShortest` is an iterator that works similarly to
/// `Interleave`, but this version stops returning elements once one
/// of the iterators runs out.
///
/// This struct is created by the [`interleave_shortest()`] method on
/// [`IndexedParallelIterator`].
///
/// [`interleave_shortest()`]: trait.IndexedParallelIterator.html#method.interleave_shortest
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
interleave: Interleave<Take<I>, Take<J>>,
}
impl<I, J> InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
/// Creates a new `InterleaveShortest` iterator
pub(super) fn new(i: I, j: J) -> Self {
InterleaveShortest {
interleave: if i.len() <= j.len() {
// take equal lengths from both iterators
let n = i.len();
i.take(n).interleave(j.take(n))
} else {
// take one extra item from the first iterator
let n = j.len();
i.take(n + 1).interleave(j.take(n))
},
}
}
}
impl<I, J> ParallelIterator for InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<I::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, J> IndexedParallelIterator for InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.interleave.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
self.interleave.with_producer(callback)
}
}
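
A minimal sketch of the "one extra leading element" rule from the constructor above (not part of the vendored file):

use rayon::prelude::*;

fn main() {
    let long = vec![1, 3, 5, 7, 9];
    let short = vec![2, 4];
    // take(n + 1) on the longer side: 1, 2, 3, 4, 5 and then stop.
    let out: Vec<i32> = long.into_par_iter().interleave_shortest(short).collect();
    assert_eq!(out, vec![1, 2, 3, 4, 5]);
}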

410
vendor/rayon/src/iter/intersperse.rs vendored Normal file
View File

@@ -0,0 +1,410 @@
use super::plumbing::*;
use super::*;
use std::cell::Cell;
use std::iter::{self, Fuse};
/// `Intersperse` is an iterator that inserts a particular item between each
/// item of the adapted iterator. This struct is created by the
/// [`intersperse()`] method on [`ParallelIterator`]
///
/// [`intersperse()`]: trait.ParallelIterator.html#method.intersperse
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct Intersperse<I>
where
I: ParallelIterator,
I::Item: Clone,
{
base: I,
item: I::Item,
}
impl<I> Intersperse<I>
where
I: ParallelIterator,
I::Item: Clone,
{
/// Creates a new `Intersperse` iterator
pub(super) fn new(base: I, item: I::Item) -> Self {
Intersperse { base, item }
}
}
impl<I> ParallelIterator for Intersperse<I>
where
I: ParallelIterator,
I::Item: Clone + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<I::Item>,
{
let consumer1 = IntersperseConsumer::new(consumer, self.item);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
match self.base.opt_len()? {
0 => Some(0),
len => len.checked_add(len - 1),
}
}
}
impl<I> IndexedParallelIterator for Intersperse<I>
where
I: IndexedParallelIterator,
I::Item: Clone + Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = IntersperseConsumer::new(consumer, self.item);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
let len = self.base.len();
if len > 0 {
len.checked_add(len - 1).expect("overflow")
} else {
0
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.len();
return self.base.with_producer(Callback {
callback,
item: self.item,
len,
});
struct Callback<CB, T> {
callback: CB,
item: T,
len: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB, T>
where
CB: ProducerCallback<T>,
T: Clone + Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = IntersperseProducer::new(base, self.item, self.len);
self.callback.callback(producer)
}
}
}
}
struct IntersperseProducer<P>
where
P: Producer,
{
base: P,
item: P::Item,
len: usize,
clone_first: bool,
}
impl<P> IntersperseProducer<P>
where
P: Producer,
{
fn new(base: P, item: P::Item, len: usize) -> Self {
IntersperseProducer {
base,
item,
len,
clone_first: false,
}
}
}
impl<P> Producer for IntersperseProducer<P>
where
P: Producer,
P::Item: Clone + Send,
{
type Item = P::Item;
type IntoIter = IntersperseIter<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
IntersperseIter {
base: self.base.into_iter().fuse(),
item: self.item,
clone_first: self.len > 0 && self.clone_first,
// If there's more than one item, then even lengths end the opposite
// of how they started with respect to interspersed clones.
clone_last: self.len > 1 && ((self.len & 1 == 0) ^ self.clone_first),
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
debug_assert!(index <= self.len);
// The left needs half of the items from the base producer, and the
// other half will be our interspersed item. If we're not leading with
// a cloned item, then we need to round up the base number of items,
// otherwise round down.
let base_index = (index + !self.clone_first as usize) / 2;
let (left_base, right_base) = self.base.split_at(base_index);
let left = IntersperseProducer {
base: left_base,
item: self.item.clone(),
len: index,
clone_first: self.clone_first,
};
let right = IntersperseProducer {
base: right_base,
item: self.item,
len: self.len - index,
// If the index is odd, the right side toggles `clone_first`.
clone_first: (index & 1 == 1) ^ self.clone_first,
};
(left, right)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
let folder1 = IntersperseFolder {
base: folder,
item: self.item,
clone_first: self.clone_first,
};
self.base.fold_with(folder1).base
}
}
struct IntersperseIter<I>
where
I: Iterator,
{
base: Fuse<I>,
item: I::Item,
clone_first: bool,
clone_last: bool,
}
impl<I> Iterator for IntersperseIter<I>
where
I: DoubleEndedIterator + ExactSizeIterator,
I::Item: Clone,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.clone_first {
self.clone_first = false;
Some(self.item.clone())
} else if let next @ Some(_) = self.base.next() {
// If there are any items left, we'll need another clone in front.
self.clone_first = self.base.len() != 0;
next
} else if self.clone_last {
self.clone_last = false;
Some(self.item.clone())
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<I> DoubleEndedIterator for IntersperseIter<I>
where
I: DoubleEndedIterator + ExactSizeIterator,
I::Item: Clone,
{
fn next_back(&mut self) -> Option<Self::Item> {
if self.clone_last {
self.clone_last = false;
Some(self.item.clone())
} else if let next_back @ Some(_) = self.base.next_back() {
// If there are any items left, we'll need another clone in back.
self.clone_last = self.base.len() != 0;
next_back
} else if self.clone_first {
self.clone_first = false;
Some(self.item.clone())
} else {
None
}
}
}
impl<I> ExactSizeIterator for IntersperseIter<I>
where
I: DoubleEndedIterator + ExactSizeIterator,
I::Item: Clone,
{
fn len(&self) -> usize {
let len = self.base.len();
len + len.saturating_sub(1) + self.clone_first as usize + self.clone_last as usize
}
}
struct IntersperseConsumer<C, T> {
base: C,
item: T,
clone_first: Cell<bool>,
}
impl<C, T> IntersperseConsumer<C, T>
where
C: Consumer<T>,
{
fn new(base: C, item: T) -> Self {
IntersperseConsumer {
base,
item,
clone_first: false.into(),
}
}
}
impl<C, T> Consumer<T> for IntersperseConsumer<C, T>
where
C: Consumer<T>,
T: Clone + Send,
{
type Folder = IntersperseFolder<C::Folder, T>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(mut self, index: usize) -> (Self, Self, Self::Reducer) {
// We'll feed twice as many items to the base consumer, except if we're
// not currently leading with a cloned item, then it's one less.
let base_index = index + index.saturating_sub(!self.clone_first.get() as usize);
let (left, right, reducer) = self.base.split_at(base_index);
let right = IntersperseConsumer {
base: right,
item: self.item.clone(),
clone_first: true.into(),
};
self.base = left;
(self, right, reducer)
}
fn into_folder(self) -> Self::Folder {
IntersperseFolder {
base: self.base.into_folder(),
item: self.item,
clone_first: self.clone_first.get(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<C, T> UnindexedConsumer<T> for IntersperseConsumer<C, T>
where
C: UnindexedConsumer<T>,
T: Clone + Send,
{
fn split_off_left(&self) -> Self {
let left = IntersperseConsumer {
base: self.base.split_off_left(),
item: self.item.clone(),
clone_first: self.clone_first.clone(),
};
self.clone_first.set(true);
left
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct IntersperseFolder<C, T> {
base: C,
item: T,
clone_first: bool,
}
impl<C, T> Folder<T> for IntersperseFolder<C, T>
where
C: Folder<T>,
T: Clone,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if self.clone_first {
self.base = self.base.consume(self.item.clone());
if self.base.full() {
return self;
}
} else {
self.clone_first = true;
}
self.base = self.base.consume(item);
self
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let mut clone_first = self.clone_first;
let between_item = self.item;
let base = self.base.consume_iter(iter.into_iter().flat_map(|item| {
let first = if clone_first {
Some(between_item.clone())
} else {
clone_first = true;
None
};
first.into_iter().chain(iter::once(item))
}));
IntersperseFolder {
base,
item: between_item,
clone_first,
}
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
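
A minimal usage sketch of intersperse() (not part of the vendored file). For a non-empty input of length n, the output has n + (n - 1) elements, matching the len() arithmetic above:

use rayon::prelude::*;

fn main() {
    let v: Vec<i32> = (0..3).into_par_iter().intersperse(-1).collect();
    assert_eq!(v, vec![0, -1, 1, -1, 2]);
}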

271
vendor/rayon/src/iter/len.rs vendored Normal file
View File

@@ -0,0 +1,271 @@
use super::plumbing::*;
use super::*;
use std::cmp;
/// `MinLen` is an iterator that imposes a minimum length on iterator splits.
/// This struct is created by the [`with_min_len()`] method on [`IndexedParallelIterator`]
///
/// [`with_min_len()`]: trait.IndexedParallelIterator.html#method.with_min_len
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct MinLen<I: IndexedParallelIterator> {
base: I,
min: usize,
}
impl<I> MinLen<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `MinLen` iterator.
pub(super) fn new(base: I, min: usize) -> Self {
MinLen { base, min }
}
}
impl<I> ParallelIterator for MinLen<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for MinLen<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
min: self.min,
});
struct Callback<CB> {
callback: CB,
min: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MinLenProducer {
base,
min: self.min,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
/// `MinLenProducer` implementation
struct MinLenProducer<P> {
base: P,
min: usize,
}
impl<P> Producer for MinLenProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = P::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter()
}
fn min_len(&self) -> usize {
cmp::max(self.min, self.base.min_len())
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MinLenProducer {
base: left,
min: self.min,
},
MinLenProducer {
base: right,
min: self.min,
},
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(folder)
}
}
/// `MaxLen` is an iterator that imposes a maximum length on iterator splits.
/// This struct is created by the [`with_max_len()`] method on [`IndexedParallelIterator`]
///
/// [`with_max_len()`]: trait.IndexedParallelIterator.html#method.with_max_len
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct MaxLen<I: IndexedParallelIterator> {
base: I,
max: usize,
}
impl<I> MaxLen<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `MaxLen` iterator.
pub(super) fn new(base: I, max: usize) -> Self {
MaxLen { base, max }
}
}
impl<I> ParallelIterator for MaxLen<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for MaxLen<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
max: self.max,
});
struct Callback<CB> {
callback: CB,
max: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MaxLenProducer {
base,
max: self.max,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
/// `MaxLenProducer` implementation
struct MaxLenProducer<P> {
base: P,
max: usize,
}
impl<P> Producer for MaxLenProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = P::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
cmp::min(self.max, self.base.max_len())
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MaxLenProducer {
base: left,
max: self.max,
},
MaxLenProducer {
base: right,
max: self.max,
},
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(folder)
}
}
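
A minimal sketch of with_min_len() (not part of the vendored file). The bounds only constrain how finely rayon may split the work; the result is unchanged, only the scheduling granularity differs:

use rayon::prelude::*;

fn main() {
    // Never split below 1000 items, keeping per-task overhead amortized.
    let sum: u64 = (0..1_000_000u32)
        .into_par_iter()
        .with_min_len(1000)
        .map(u64::from)
        .sum();
    assert_eq!(sum, 499_999_500_000);
}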

259
vendor/rayon/src/iter/map.rs vendored Normal file
View File

@@ -0,0 +1,259 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
use std::iter;
/// `Map` is an iterator that transforms the elements of an underlying iterator.
///
/// This struct is created by the [`map()`] method on [`ParallelIterator`]
///
/// [`map()`]: trait.ParallelIterator.html#method.map
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Map<I: ParallelIterator, F> {
base: I,
map_op: F,
}
impl<I: ParallelIterator + Debug, F> Debug for Map<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Map").field("base", &self.base).finish()
}
}
impl<I, F> Map<I, F>
where
I: ParallelIterator,
{
/// Creates a new `Map` iterator.
pub(super) fn new(base: I, map_op: F) -> Self {
Map { base, map_op }
}
}
impl<I, F, R> ParallelIterator for Map<I, F>
where
I: ParallelIterator,
F: Fn(I::Item) -> R + Sync + Send,
R: Send,
{
type Item = F::Output;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = MapConsumer::new(consumer, &self.map_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, F, R> IndexedParallelIterator for Map<I, F>
where
I: IndexedParallelIterator,
F: Fn(I::Item) -> R + Sync + Send,
R: Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = MapConsumer::new(consumer, &self.map_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
map_op: self.map_op,
});
struct Callback<CB, F> {
callback: CB,
map_op: F,
}
impl<T, F, R, CB> ProducerCallback<T> for Callback<CB, F>
where
CB: ProducerCallback<R>,
F: Fn(T) -> R + Sync,
R: Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MapProducer {
base,
map_op: &self.map_op,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct MapProducer<'f, P, F> {
base: P,
map_op: &'f F,
}
impl<'f, P, F, R> Producer for MapProducer<'f, P, F>
where
P: Producer,
F: Fn(P::Item) -> R + Sync,
R: Send,
{
type Item = F::Output;
type IntoIter = iter::Map<P::IntoIter, &'f F>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().map(self.map_op)
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MapProducer {
base: left,
map_op: self.map_op,
},
MapProducer {
base: right,
map_op: self.map_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = MapFolder {
base: folder,
map_op: self.map_op,
};
self.base.fold_with(folder1).base
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct MapConsumer<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, C, F> MapConsumer<'f, C, F> {
fn new(base: C, map_op: &'f F) -> Self {
MapConsumer { base, map_op }
}
}
impl<'f, T, R, C, F> Consumer<T> for MapConsumer<'f, C, F>
where
C: Consumer<F::Output>,
F: Fn(T) -> R + Sync,
R: Send,
{
type Folder = MapFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
MapConsumer::new(left, self.map_op),
MapConsumer::new(right, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
MapFolder {
base: self.base.into_folder(),
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, R, C, F> UnindexedConsumer<T> for MapConsumer<'f, C, F>
where
C: UnindexedConsumer<F::Output>,
F: Fn(T) -> R + Sync,
R: Send,
{
fn split_off_left(&self) -> Self {
MapConsumer::new(self.base.split_off_left(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct MapFolder<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, T, R, C, F> Folder<T> for MapFolder<'f, C, F>
where
C: Folder<F::Output>,
F: Fn(T) -> R,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let mapped_item = (self.map_op)(item);
MapFolder {
base: self.base.consume(mapped_item),
map_op: self.map_op,
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(iter.into_iter().map(self.map_op));
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
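
A minimal usage sketch of map() (not part of the vendored file):

use rayon::prelude::*;

fn main() {
    let squares: Vec<i32> = (1..=5).into_par_iter().map(|x| x * x).collect();
    assert_eq!(squares, vec![1, 4, 9, 16, 25]);
}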

573
vendor/rayon/src/iter/map_with.rs vendored Normal file
View File

@@ -0,0 +1,573 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `MapWith` is an iterator that transforms the elements of an underlying iterator.
///
/// This struct is created by the [`map_with()`] method on [`ParallelIterator`]
///
/// [`map_with()`]: trait.ParallelIterator.html#method.map_with
/// [`ParallelIterator`]: trait.ParallelIterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct MapWith<I: ParallelIterator, T, F> {
base: I,
item: T,
map_op: F,
}
impl<I: ParallelIterator + Debug, T: Debug, F> Debug for MapWith<I, T, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MapWith")
.field("base", &self.base)
.field("item", &self.item)
.finish()
}
}
impl<I, T, F> MapWith<I, T, F>
where
I: ParallelIterator,
{
/// Creates a new `MapWith` iterator.
pub(super) fn new(base: I, item: T, map_op: F) -> Self {
MapWith { base, item, map_op }
}
}
impl<I, T, F, R> ParallelIterator for MapWith<I, T, F>
where
I: ParallelIterator,
T: Send + Clone,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
type Item = R;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, T, F, R> IndexedParallelIterator for MapWith<I, T, F>
where
I: IndexedParallelIterator,
T: Send + Clone,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
item: self.item,
map_op: self.map_op,
});
struct Callback<CB, U, F> {
callback: CB,
item: U,
map_op: F,
}
impl<T, U, F, R, CB> ProducerCallback<T> for Callback<CB, U, F>
where
CB: ProducerCallback<R>,
U: Send + Clone,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MapWithProducer {
base,
item: self.item,
map_op: &self.map_op,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct MapWithProducer<'f, P, U, F> {
base: P,
item: U,
map_op: &'f F,
}
impl<'f, P, U, F, R> Producer for MapWithProducer<'f, P, U, F>
where
P: Producer,
U: Send + Clone,
F: Fn(&mut U, P::Item) -> R + Sync,
R: Send,
{
type Item = R;
type IntoIter = MapWithIter<'f, P::IntoIter, U, F>;
fn into_iter(self) -> Self::IntoIter {
MapWithIter {
base: self.base.into_iter(),
item: self.item,
map_op: self.map_op,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MapWithProducer {
base: left,
item: self.item.clone(),
map_op: self.map_op,
},
MapWithProducer {
base: right,
item: self.item,
map_op: self.map_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = MapWithFolder {
base: folder,
item: self.item,
map_op: self.map_op,
};
self.base.fold_with(folder1).base
}
}
struct MapWithIter<'f, I, U, F> {
base: I,
item: U,
map_op: &'f F,
}
impl<'f, I, U, F, R> Iterator for MapWithIter<'f, I, U, F>
where
I: Iterator,
F: Fn(&mut U, I::Item) -> R + Sync,
R: Send,
{
type Item = R;
fn next(&mut self) -> Option<R> {
let item = self.base.next()?;
Some((self.map_op)(&mut self.item, item))
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.base.size_hint()
}
}
impl<'f, I, U, F, R> DoubleEndedIterator for MapWithIter<'f, I, U, F>
where
I: DoubleEndedIterator,
F: Fn(&mut U, I::Item) -> R + Sync,
R: Send,
{
fn next_back(&mut self) -> Option<R> {
let item = self.base.next_back()?;
Some((self.map_op)(&mut self.item, item))
}
}
impl<'f, I, U, F, R> ExactSizeIterator for MapWithIter<'f, I, U, F>
where
I: ExactSizeIterator,
F: Fn(&mut U, I::Item) -> R + Sync,
R: Send,
{
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct MapWithConsumer<'f, C, U, F> {
base: C,
item: U,
map_op: &'f F,
}
impl<'f, C, U, F> MapWithConsumer<'f, C, U, F> {
fn new(base: C, item: U, map_op: &'f F) -> Self {
MapWithConsumer { base, item, map_op }
}
}
impl<'f, T, U, R, C, F> Consumer<T> for MapWithConsumer<'f, C, U, F>
where
C: Consumer<R>,
U: Send + Clone,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Folder = MapWithFolder<'f, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
MapWithConsumer::new(left, self.item.clone(), self.map_op),
MapWithConsumer::new(right, self.item, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
MapWithFolder {
base: self.base.into_folder(),
item: self.item,
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, U, R, C, F> UnindexedConsumer<T> for MapWithConsumer<'f, C, U, F>
where
C: UnindexedConsumer<R>,
U: Send + Clone,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
fn split_off_left(&self) -> Self {
MapWithConsumer::new(self.base.split_off_left(), self.item.clone(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct MapWithFolder<'f, C, U, F> {
base: C,
item: U,
map_op: &'f F,
}
impl<'f, T, U, R, C, F> Folder<T> for MapWithFolder<'f, C, U, F>
where
C: Folder<R>,
F: Fn(&mut U, T) -> R,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
let mapped_item = (self.map_op)(&mut self.item, item);
self.base = self.base.consume(mapped_item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
fn with<'f, T, U, R>(
item: &'f mut U,
map_op: impl Fn(&mut U, T) -> R + 'f,
) -> impl FnMut(T) -> R + 'f {
move |x| map_op(item, x)
}
{
let mapped_iter = iter.into_iter().map(with(&mut self.item, self.map_op));
self.base = self.base.consume_iter(mapped_iter);
}
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
// ------------------------------------------------------------------------------------------------
/// `MapInit` is an iterator that transforms the elements of an underlying iterator.
///
/// This struct is created by the [`map_init()`] method on [`ParallelIterator`]
///
/// [`map_init()`]: trait.ParallelIterator.html#method.map_init
/// [`ParallelIterator`]: trait.ParallelIterator.html
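///
/// # Examples
///
/// A minimal usage sketch: `init` builds one scratch value per parallel job,
/// and `map_op` reuses it for every item that job handles.
///
/// ```
/// use rayon::prelude::*;
///
/// let rows = vec![vec![1u32, 2], vec![3, 4], vec![5, 6]];
/// let sums: Vec<u32> = rows
///     .into_par_iter()
///     .map_init(|| Vec::new(), |scratch: &mut Vec<u32>, row| {
///         scratch.clear(); // reuse the same buffer across this job's items
///         scratch.extend(row);
///         scratch.iter().sum()
///     })
///     .collect();
///
/// assert_eq!(sums, [3, 7, 11]);
/// ```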
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct MapInit<I: ParallelIterator, INIT, F> {
base: I,
init: INIT,
map_op: F,
}
impl<I: ParallelIterator + Debug, INIT, F> Debug for MapInit<I, INIT, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MapInit").field("base", &self.base).finish()
}
}
impl<I, INIT, F> MapInit<I, INIT, F>
where
I: ParallelIterator,
{
/// Creates a new `MapInit` iterator.
pub(super) fn new(base: I, init: INIT, map_op: F) -> Self {
MapInit { base, init, map_op }
}
}
impl<I, INIT, T, F, R> ParallelIterator for MapInit<I, INIT, F>
where
I: ParallelIterator,
INIT: Fn() -> T + Sync + Send,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
type Item = R;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = MapInitConsumer::new(consumer, &self.init, &self.map_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, INIT, T, F, R> IndexedParallelIterator for MapInit<I, INIT, F>
where
I: IndexedParallelIterator,
INIT: Fn() -> T + Sync + Send,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = MapInitConsumer::new(consumer, &self.init, &self.map_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
init: self.init,
map_op: self.map_op,
});
struct Callback<CB, INIT, F> {
callback: CB,
init: INIT,
map_op: F,
}
impl<T, INIT, U, F, R, CB> ProducerCallback<T> for Callback<CB, INIT, F>
where
CB: ProducerCallback<R>,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MapInitProducer {
base,
init: &self.init,
map_op: &self.map_op,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct MapInitProducer<'f, P, INIT, F> {
base: P,
init: &'f INIT,
map_op: &'f F,
}
impl<'f, P, INIT, U, F, R> Producer for MapInitProducer<'f, P, INIT, F>
where
P: Producer,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, P::Item) -> R + Sync,
R: Send,
{
type Item = R;
type IntoIter = MapWithIter<'f, P::IntoIter, U, F>;
fn into_iter(self) -> Self::IntoIter {
MapWithIter {
base: self.base.into_iter(),
item: (self.init)(),
map_op: self.map_op,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MapInitProducer {
base: left,
init: self.init,
map_op: self.map_op,
},
MapInitProducer {
base: right,
init: self.init,
map_op: self.map_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = MapWithFolder {
base: folder,
item: (self.init)(),
map_op: self.map_op,
};
self.base.fold_with(folder1).base
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct MapInitConsumer<'f, C, INIT, F> {
base: C,
init: &'f INIT,
map_op: &'f F,
}
impl<'f, C, INIT, F> MapInitConsumer<'f, C, INIT, F> {
fn new(base: C, init: &'f INIT, map_op: &'f F) -> Self {
MapInitConsumer { base, init, map_op }
}
}
impl<'f, T, INIT, U, R, C, F> Consumer<T> for MapInitConsumer<'f, C, INIT, F>
where
C: Consumer<R>,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Folder = MapWithFolder<'f, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
MapInitConsumer::new(left, self.init, self.map_op),
MapInitConsumer::new(right, self.init, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
MapWithFolder {
base: self.base.into_folder(),
item: (self.init)(),
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, INIT, U, R, C, F> UnindexedConsumer<T> for MapInitConsumer<'f, C, INIT, F>
where
C: UnindexedConsumer<R>,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
fn split_off_left(&self) -> Self {
MapInitConsumer::new(self.base.split_off_left(), self.init, self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}

3531
vendor/rayon/src/iter/mod.rs vendored Normal file

File diff suppressed because it is too large

338
vendor/rayon/src/iter/multizip.rs vendored Normal file

@@ -0,0 +1,338 @@
use super::plumbing::*;
use super::*;
/// `MultiZip` is an iterator that zips up a tuple of parallel iterators to
/// produce tuples of their items.
///
/// It is created by calling `into_par_iter()` on a tuple of types that
/// implement `IntoParallelIterator`, or `par_iter()`/`par_iter_mut()` with
/// types that are iterable by reference.
///
/// The implementation currently supports tuples up to length 12.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
///
/// // This will iterate `r` by mutable reference, like `par_iter_mut()`, while
/// // ranges are all iterated by value like `into_par_iter()`.
/// // Note that the zipped iterator is only as long as the shortest input.
/// let mut r = vec![0; 3];
/// (&mut r, 1..10, 10..100, 100..1000).into_par_iter()
/// .for_each(|(r, x, y, z)| *r = x * y + z);
///
/// assert_eq!(&r, &[1 * 10 + 100, 2 * 11 + 101, 3 * 12 + 102]);
/// ```
///
/// For a group that should all be iterated by reference, you can use a tuple reference.
///
/// ```
/// use rayon::prelude::*;
///
/// let xs: Vec<_> = (1..10).collect();
/// let ys: Vec<_> = (10..100).collect();
/// let zs: Vec<_> = (100..1000).collect();
///
/// // Reference each input separately with `IntoParallelIterator`:
/// let r1: Vec<_> = (&xs, &ys, &zs).into_par_iter()
/// .map(|(x, y, z)| x * y + z)
/// .collect();
///
/// // Reference them all together with `IntoParallelRefIterator`:
/// let r2: Vec<_> = (xs, ys, zs).par_iter()
/// .map(|(x, y, z)| x * y + z)
/// .collect();
///
/// assert_eq!(r1, r2);
/// ```
///
/// Mutable references to a tuple will work similarly.
///
/// ```
/// use rayon::prelude::*;
///
/// let mut xs: Vec<_> = (1..4).collect();
/// let mut ys: Vec<_> = (-4..-1).collect();
/// let mut zs = vec![0; 3];
///
/// // Mutably reference each input separately with `IntoParallelIterator`:
/// (&mut xs, &mut ys, &mut zs).into_par_iter().for_each(|(x, y, z)| {
/// *z += *x + *y;
/// std::mem::swap(x, y);
/// });
///
/// assert_eq!(xs, (vec![-4, -3, -2]));
/// assert_eq!(ys, (vec![1, 2, 3]));
/// assert_eq!(zs, (vec![-3, -1, 1]));
///
/// // Mutably reference them all together with `IntoParallelRefMutIterator`:
/// let mut tuple = (xs, ys, zs);
/// tuple.par_iter_mut().for_each(|(x, y, z)| {
/// *z += *x + *y;
/// std::mem::swap(x, y);
/// });
///
/// assert_eq!(tuple, (vec![1, 2, 3], vec![-4, -3, -2], vec![-6, -2, 2]));
/// ```
#[derive(Debug, Clone)]
pub struct MultiZip<T> {
tuple: T,
}
// These macros greedily consume 4 or 2 items first to achieve log2 nesting depth.
// For example, 5 => 4,1 => (2,2),1.
//
// The tuples go up to 12, so we might want to greedily consume 8 too, but
// the depth works out the same if we let that expand on the right:
// 9 => 4,5 => (2,2),(4,1) => (2,2),((2,2),1)
// 12 => 4,8 => (2,2),(4,4) => (2,2),((2,2),(2,2))
//
// But if we ever increase to 13, we would want to split 8,5 rather than 4,9.
macro_rules! reduce {
($a:expr, $b:expr, $c:expr, $d:expr, $( $x:expr ),+ => $fn:path) => {
reduce!(reduce!($a, $b, $c, $d => $fn),
reduce!($( $x ),+ => $fn)
=> $fn)
};
($a:expr, $b:expr, $( $x:expr ),+ => $fn:path) => {
reduce!(reduce!($a, $b => $fn),
reduce!($( $x ),+ => $fn)
=> $fn)
};
($a:expr, $b:expr => $fn:path) => { $fn($a, $b) };
($a:expr => $fn:path) => { $a };
}
macro_rules! nest {
($A:tt, $B:tt, $C:tt, $D:tt, $( $X:tt ),+) => {
(nest!($A, $B, $C, $D), nest!($( $X ),+))
};
($A:tt, $B:tt, $( $X:tt ),+) => {
(($A, $B), nest!($( $X ),+))
};
($A:tt, $B:tt) => { ($A, $B) };
($A:tt) => { $A };
}
macro_rules! flatten {
($( $T:ident ),+) => {{
#[allow(non_snake_case)]
fn flatten<$( $T ),+>(nest!($( $T ),+) : nest!($( $T ),+)) -> ($( $T, )+) {
($( $T, )+)
}
flatten
}};
}
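// For example, with a 3-tuple, `reduce!(a, b, c => zip)` expands to
// `zip(zip(a, b), c)`, and `flatten!(A, B, C)` maps the nested
// `((A, B), C)` items that zip produces back to flat `(A, B, C)` tuples.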
macro_rules! multizip_impls {
($(
$Tuple:ident {
$(($idx:tt) -> $T:ident)+
}
)+) => {
$(
impl<$( $T, )+> IntoParallelIterator for ($( $T, )+)
where
$(
$T: IntoParallelIterator,
$T::Iter: IndexedParallelIterator,
)+
{
type Item = ($( $T::Item, )+);
type Iter = MultiZip<($( $T::Iter, )+)>;
fn into_par_iter(self) -> Self::Iter {
MultiZip {
tuple: ( $( self.$idx.into_par_iter(), )+ ),
}
}
}
impl<'a, $( $T, )+> IntoParallelIterator for &'a ($( $T, )+)
where
$(
$T: IntoParallelRefIterator<'a>,
$T::Iter: IndexedParallelIterator,
)+
{
type Item = ($( $T::Item, )+);
type Iter = MultiZip<($( $T::Iter, )+)>;
fn into_par_iter(self) -> Self::Iter {
MultiZip {
tuple: ( $( self.$idx.par_iter(), )+ ),
}
}
}
impl<'a, $( $T, )+> IntoParallelIterator for &'a mut ($( $T, )+)
where
$(
$T: IntoParallelRefMutIterator<'a>,
$T::Iter: IndexedParallelIterator,
)+
{
type Item = ($( $T::Item, )+);
type Iter = MultiZip<($( $T::Iter, )+)>;
fn into_par_iter(self) -> Self::Iter {
MultiZip {
tuple: ( $( self.$idx.par_iter_mut(), )+ ),
}
}
}
impl<$( $T, )+> ParallelIterator for MultiZip<($( $T, )+)>
where
$( $T: IndexedParallelIterator, )+
{
type Item = ($( $T::Item, )+);
fn drive_unindexed<CONSUMER>(self, consumer: CONSUMER) -> CONSUMER::Result
where
CONSUMER: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<$( $T, )+> IndexedParallelIterator for MultiZip<($( $T, )+)>
where
$( $T: IndexedParallelIterator, )+
{
fn drive<CONSUMER>(self, consumer: CONSUMER) -> CONSUMER::Result
where
CONSUMER: Consumer<Self::Item>,
{
reduce!($( self.tuple.$idx ),+ => IndexedParallelIterator::zip)
.map(flatten!($( $T ),+))
.drive(consumer)
}
fn len(&self) -> usize {
reduce!($( self.tuple.$idx.len() ),+ => Ord::min)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
reduce!($( self.tuple.$idx ),+ => IndexedParallelIterator::zip)
.map(flatten!($( $T ),+))
.with_producer(callback)
}
}
)+
}
}
multizip_impls! {
Tuple1 {
(0) -> A
}
Tuple2 {
(0) -> A
(1) -> B
}
Tuple3 {
(0) -> A
(1) -> B
(2) -> C
}
Tuple4 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
}
Tuple5 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
}
Tuple6 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
}
Tuple7 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
}
Tuple8 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
}
Tuple9 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
}
Tuple10 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
(9) -> J
}
Tuple11 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
(9) -> J
(10) -> K
}
Tuple12 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
(9) -> J
(10) -> K
(11) -> L
}
}

59
vendor/rayon/src/iter/noop.rs vendored Normal file

@@ -0,0 +1,59 @@
use super::plumbing::*;
pub(super) struct NoopConsumer;
impl<T> Consumer<T> for NoopConsumer {
type Folder = NoopConsumer;
type Reducer = NoopReducer;
type Result = ();
fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
(NoopConsumer, NoopConsumer, NoopReducer)
}
fn into_folder(self) -> Self {
self
}
fn full(&self) -> bool {
false
}
}
impl<T> Folder<T> for NoopConsumer {
type Result = ();
fn consume(self, _item: T) -> Self {
self
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
iter.into_iter().for_each(drop);
self
}
fn complete(self) {}
fn full(&self) -> bool {
false
}
}
impl<T> UnindexedConsumer<T> for NoopConsumer {
fn split_off_left(&self) -> Self {
NoopConsumer
}
fn to_reducer(&self) -> NoopReducer {
NoopReducer
}
}
pub(super) struct NoopReducer;
impl Reducer<()> for NoopReducer {
fn reduce(self, _left: (), _right: ()) {}
}

68
vendor/rayon/src/iter/once.rs vendored Normal file

@@ -0,0 +1,68 @@
use crate::iter::plumbing::*;
use crate::iter::*;
/// Creates a parallel iterator that produces an element exactly once.
///
/// This admits no parallelism on its own, but it could be chained to existing
/// parallel iterators to extend their contents, or otherwise used for any code
/// that deals with generic parallel iterators.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::once;
///
/// let pi = (0..1234).into_par_iter()
/// .chain(once(-1))
/// .chain(1234..10_000);
///
/// assert_eq!(pi.clone().count(), 10_001);
/// assert_eq!(pi.clone().filter(|&x| x < 0).count(), 1);
/// assert_eq!(pi.position_any(|x| x < 0), Some(1234));
/// ```
pub fn once<T: Send>(item: T) -> Once<T> {
Once { item }
}
/// Iterator adaptor for [the `once()` function](fn.once.html).
#[derive(Clone, Debug)]
pub struct Once<T: Send> {
item: T,
}
impl<T: Send> ParallelIterator for Once<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(1)
}
}
impl<T: Send> IndexedParallelIterator for Once<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
consumer.into_folder().consume(self.item).complete()
}
fn len(&self) -> usize {
1
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
// Let `OptionProducer` handle it.
Some(self.item).into_par_iter().with_producer(callback)
}
}

342
vendor/rayon/src/iter/panic_fuse.rs vendored Normal file

@@ -0,0 +1,342 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
/// `PanicFuse` is an adaptor that wraps an iterator with a fuse in case
/// of panics, to halt all threads as soon as possible.
///
/// This struct is created by the [`panic_fuse()`] method on [`ParallelIterator`]
///
/// [`panic_fuse()`]: trait.ParallelIterator.html#method.panic_fuse
/// [`ParallelIterator`]: trait.ParallelIterator.html
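///
/// # Examples
///
/// A minimal sketch: once any item panics, the fuse trips and the other
/// threads stop pulling new items instead of running to completion.
///
/// ```should_panic
/// use rayon::prelude::*;
///
/// (0..1_000_000)
///     .into_par_iter()
///     .panic_fuse()
///     .for_each(|i| {
///         if i == 42 {
///             panic!("boom!"); // other threads halt soon after this
///         }
///     });
/// ```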
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct PanicFuse<I: ParallelIterator> {
base: I,
}
/// Helper that sets a bool to `true` if dropped while unwinding.
#[derive(Clone)]
struct Fuse<'a>(&'a AtomicBool);
impl<'a> Drop for Fuse<'a> {
#[inline]
fn drop(&mut self) {
if thread::panicking() {
self.0.store(true, Ordering::Relaxed);
}
}
}
impl<'a> Fuse<'a> {
#[inline]
fn panicked(&self) -> bool {
self.0.load(Ordering::Relaxed)
}
}
impl<I> PanicFuse<I>
where
I: ParallelIterator,
{
/// Creates a new `PanicFuse` iterator.
pub(super) fn new(base: I) -> PanicFuse<I> {
PanicFuse { base }
}
}
impl<I> ParallelIterator for PanicFuse<I>
where
I: ParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let panicked = AtomicBool::new(false);
let consumer1 = PanicFuseConsumer {
base: consumer,
fuse: Fuse(&panicked),
};
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I> IndexedParallelIterator for PanicFuse<I>
where
I: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let panicked = AtomicBool::new(false);
let consumer1 = PanicFuseConsumer {
base: consumer,
fuse: Fuse(&panicked),
};
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let panicked = AtomicBool::new(false);
let producer = PanicFuseProducer {
base,
fuse: Fuse(&panicked),
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Producer implementation
struct PanicFuseProducer<'a, P> {
base: P,
fuse: Fuse<'a>,
}
impl<'a, P> Producer for PanicFuseProducer<'a, P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = PanicFuseIter<'a, P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
PanicFuseIter {
base: self.base.into_iter(),
fuse: self.fuse,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
PanicFuseProducer {
base: left,
fuse: self.fuse.clone(),
},
PanicFuseProducer {
base: right,
fuse: self.fuse,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = PanicFuseFolder {
base: folder,
fuse: self.fuse,
};
self.base.fold_with(folder1).base
}
}
struct PanicFuseIter<'a, I> {
base: I,
fuse: Fuse<'a>,
}
impl<'a, I> Iterator for PanicFuseIter<'a, I>
where
I: Iterator,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.fuse.panicked() {
None
} else {
self.base.next()
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.base.size_hint()
}
}
impl<'a, I> DoubleEndedIterator for PanicFuseIter<'a, I>
where
I: DoubleEndedIterator,
{
fn next_back(&mut self) -> Option<Self::Item> {
if self.fuse.panicked() {
None
} else {
self.base.next_back()
}
}
}
impl<'a, I> ExactSizeIterator for PanicFuseIter<'a, I>
where
I: ExactSizeIterator,
{
fn len(&self) -> usize {
self.base.len()
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct PanicFuseConsumer<'a, C> {
base: C,
fuse: Fuse<'a>,
}
impl<'a, T, C> Consumer<T> for PanicFuseConsumer<'a, C>
where
C: Consumer<T>,
{
type Folder = PanicFuseFolder<'a, C::Folder>;
type Reducer = PanicFuseReducer<'a, C::Reducer>;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
PanicFuseConsumer {
base: left,
fuse: self.fuse.clone(),
},
PanicFuseConsumer {
base: right,
fuse: self.fuse.clone(),
},
PanicFuseReducer {
base: reducer,
_fuse: self.fuse,
},
)
}
fn into_folder(self) -> Self::Folder {
PanicFuseFolder {
base: self.base.into_folder(),
fuse: self.fuse,
}
}
fn full(&self) -> bool {
self.fuse.panicked() || self.base.full()
}
}
impl<'a, T, C> UnindexedConsumer<T> for PanicFuseConsumer<'a, C>
where
C: UnindexedConsumer<T>,
{
fn split_off_left(&self) -> Self {
PanicFuseConsumer {
base: self.base.split_off_left(),
fuse: self.fuse.clone(),
}
}
fn to_reducer(&self) -> Self::Reducer {
PanicFuseReducer {
base: self.base.to_reducer(),
_fuse: self.fuse.clone(),
}
}
}
struct PanicFuseFolder<'a, C> {
base: C,
fuse: Fuse<'a>,
}
impl<'a, T, C> Folder<T> for PanicFuseFolder<'a, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
self.base = self.base.consume(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
fn cool<'a, T>(fuse: &'a Fuse<'_>) -> impl Fn(&T) -> bool + 'a {
move |_| !fuse.panicked()
}
self.base = {
let fuse = &self.fuse;
let iter = iter.into_iter().take_while(cool(fuse));
self.base.consume_iter(iter)
};
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.fuse.panicked() || self.base.full()
}
}
struct PanicFuseReducer<'a, C> {
base: C,
_fuse: Fuse<'a>,
}
impl<'a, T, C> Reducer<T> for PanicFuseReducer<'a, C>
where
C: Reducer<T>,
{
fn reduce(self, left: T, right: T) -> T {
self.base.reduce(left, right)
}
}

167
vendor/rayon/src/iter/par_bridge.rs vendored Normal file

@@ -0,0 +1,167 @@
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Mutex;
use crate::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer};
use crate::iter::ParallelIterator;
use crate::{current_num_threads, current_thread_index};
/// Conversion trait to convert an `Iterator` to a `ParallelIterator`.
///
/// This creates a "bridge" from a sequential iterator to a parallel one, by distributing its items
/// across the Rayon thread pool. This has the advantage of being able to parallelize just about
/// anything, but the resulting `ParallelIterator` can be less efficient than if you started with
/// `par_iter` instead. However, it can still be useful for iterators that are difficult to
/// parallelize by other means, like channels or file or network I/O.
///
/// Iterator items are pulled by `next()` one at a time, synchronized from each thread that is
/// ready for work, so this may become a bottleneck if the serial iterator can't keep up with the
/// parallel demand. The items are not buffered by `IterBridge`, so it's fine to use this with
/// large or even unbounded iterators.
///
/// The resulting iterator is not guaranteed to keep the order of the original iterator.
///
/// # Examples
///
/// To use this trait, take an existing `Iterator` and call `par_bridge` on it. After that, you can
/// use any of the `ParallelIterator` methods:
///
/// ```
/// use rayon::iter::ParallelBridge;
/// use rayon::prelude::ParallelIterator;
/// use std::sync::mpsc::channel;
///
/// let rx = {
/// let (tx, rx) = channel();
///
/// tx.send("one!");
/// tx.send("two!");
/// tx.send("three!");
///
/// rx
/// };
///
/// let mut output: Vec<&'static str> = rx.into_iter().par_bridge().collect();
/// output.sort_unstable();
///
/// assert_eq!(&*output, &["one!", "three!", "two!"]);
/// ```
pub trait ParallelBridge: Sized {
/// Creates a bridge from this type to a `ParallelIterator`.
fn par_bridge(self) -> IterBridge<Self>;
}
impl<T: Iterator + Send> ParallelBridge for T
where
T::Item: Send,
{
fn par_bridge(self) -> IterBridge<Self> {
IterBridge { iter: self }
}
}
/// `IterBridge` is a parallel iterator that wraps a sequential iterator.
///
/// This type is created when using the `par_bridge` method on `ParallelBridge`. See the
/// [`ParallelBridge`] documentation for details.
///
/// [`ParallelBridge`]: trait.ParallelBridge.html
#[derive(Debug, Clone)]
pub struct IterBridge<Iter> {
iter: Iter,
}
impl<Iter: Iterator + Send> ParallelIterator for IterBridge<Iter>
where
Iter::Item: Send,
{
type Item = Iter::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let num_threads = current_num_threads();
let threads_started: Vec<_> = (0..num_threads).map(|_| AtomicBool::new(false)).collect();
bridge_unindexed(
&IterParallelProducer {
split_count: AtomicUsize::new(num_threads),
iter: Mutex::new(self.iter.fuse()),
threads_started: &threads_started,
},
consumer,
)
}
}
struct IterParallelProducer<'a, Iter> {
split_count: AtomicUsize,
iter: Mutex<std::iter::Fuse<Iter>>,
threads_started: &'a [AtomicBool],
}
impl<Iter: Iterator + Send> UnindexedProducer for &IterParallelProducer<'_, Iter> {
type Item = Iter::Item;
fn split(self) -> (Self, Option<Self>) {
let mut count = self.split_count.load(Ordering::SeqCst);
loop {
            // Check whether any split budget remains
if let Some(new_count) = count.checked_sub(1) {
match self.split_count.compare_exchange_weak(
count,
new_count,
Ordering::SeqCst,
Ordering::SeqCst,
) {
Ok(_) => return (self, Some(self)),
Err(last_count) => count = last_count,
}
} else {
return (self, None);
}
}
}
fn fold_with<F>(self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// Guard against work-stealing-induced recursion, in case `Iter::next()`
// calls rayon internally, so we don't deadlock our mutex. We might also
// be recursing via `folder` methods, which doesn't present a mutex hazard,
// but it's lower overhead for us to just check this once, rather than
// updating additional shared state on every mutex lock/unlock.
// (If this isn't a rayon thread, then there's no work-stealing anyway...)
if let Some(i) = current_thread_index() {
// Note: If the number of threads in the pool ever grows dynamically, then
// we'll end up sharing flags and may falsely detect recursion -- that's
// still fine for overall correctness, just not optimal for parallelism.
let thread_started = &self.threads_started[i % self.threads_started.len()];
if thread_started.swap(true, Ordering::Relaxed) {
// We can't make progress with a nested mutex, so just return and let
// the outermost loop continue with the rest of the iterator items.
return folder;
}
}
loop {
if let Ok(mut iter) = self.iter.lock() {
if let Some(it) = iter.next() {
drop(iter);
folder = folder.consume(it);
if folder.full() {
return folder;
}
} else {
return folder;
}
} else {
// any panics from other threads will have been caught by the pool,
// and will be re-thrown when joined - just exit
return folder;
}
}
}
}

315
vendor/rayon/src/iter/plumbing/README.md vendored Normal file

@@ -0,0 +1,315 @@
# Parallel Iterators
These are some notes on the design of the parallel iterator traits.
This file does not describe how to **use** parallel iterators.
## The challenge
Parallel iterators are more complicated than sequential iterators.
The reason is that they have to be able to split themselves up and
operate in parallel across the two halves.
The current design for parallel iterators has two distinct modes in
which they can be used; as we will see, not all iterators support both
modes (which is why there are two):
- **Pull mode** (the `Producer` and `UnindexedProducer` traits): in this mode,
the iterator is asked to produce the next item using a call to `next`. This
is basically like a normal iterator, but with a twist: you can split the
iterator in half to produce disjoint items in separate threads.
- in the `Producer` trait, splitting is done with `split_at`, which accepts
an index where the split should be performed. Only indexed iterators can
work in this mode, as they know exactly how much data they will produce,
and how to locate the requested index.
- in the `UnindexedProducer` trait, splitting is done with `split`, which
simply requests that the producer divide itself *approximately* in half.
This is useful when the exact length and/or layout is unknown, as with
`String` characters, or when the length might exceed `usize`, as with
`Range<u64>` on 32-bit platforms.
- In theory, any `Producer` could act unindexed, but we don't currently
use that possibility. When you know the exact length, a `split` can
simply be implemented as `split_at(length/2)`.
- **Push mode** (the `Consumer` and `UnindexedConsumer` traits): in
this mode, the iterator instead is *given* each item in turn, which
is then processed. This is the opposite of a normal iterator. It's
more like a `for_each` call: each time a new item is produced, the
`consume` method is called with that item. (The traits themselves are
a bit more complex, as they support state that can be threaded
through and ultimately reduced.) Like producers, there are two
variants of consumers which differ in how the split is performed:
- in the `Consumer` trait, splitting is done with `split_at`, which
accepts an index where the split should be performed. All
iterators can work in this mode. The resulting halves thus have an
idea about how much data they expect to consume.
- in the `UnindexedConsumer` trait, splitting is done with
`split_off_left`. There is no index: the resulting halves must be
prepared to process any amount of data, and they don't know where that
data falls in the overall stream.
- Not all consumers can operate in this mode. It works for
`for_each` and `reduce`, for example, but it does not work for
`collect_into_vec`, since in that case the position of each item is
important for knowing where it ends up in the target collection.
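Abridged, the four splitting methods described above look like this
(a sketch; the real traits carry more methods and bounds):
```rust
trait Producer: Sized {                         // pull mode, indexed
    fn split_at(self, index: usize) -> (Self, Self);
}
trait UnindexedProducer: Sized {                // pull mode, unindexed
    fn split(self) -> (Self, Option<Self>);
}
trait Consumer<Item>: Sized {                   // push mode, indexed
    type Reducer;
    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer);
}
trait UnindexedConsumer<Item>: Consumer<Item> { // push mode, unindexed
    fn split_off_left(&self) -> Self;
}
```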
## How iterator execution proceeds
We'll walk through this example iterator chain to start. This chain
demonstrates more-or-less the full complexity of what can happen.
```rust
vec1.par_iter()
.zip(vec2.par_iter())
.flat_map(some_function)
.for_each(some_other_function)
```
To handle an iterator chain, we start by creating consumers. This
works from the end. So in this case, the call to `for_each` is the
final step, so it will create a `ForEachConsumer` that, given an item,
just calls `some_other_function` with that item. (`ForEachConsumer` is
a very simple consumer because it doesn't need to thread any state
between items at all.)
Now, the `for_each` call will pass this consumer to the base iterator,
which is the `flat_map`. It will do this by calling the `drive_unindexed`
method on the `ParallelIterator` trait. `drive_unindexed` basically
says "produce items for this iterator and feed them to this consumer";
it only works for unindexed consumers.
(As an aside, it is interesting that only some consumers can work in
unindexed mode, but all producers can *drive* an unindexed consumer.
In contrast, only some producers can drive an *indexed* consumer, but
all consumers can be supplied indexes. Isn't variance neat.)
As it happens, `FlatMap` only works with unindexed consumers anyway.
This is because flat-map basically has no idea how many items it will
produce. If you ask flat-map to produce the 22nd item, it can't do it,
at least not without some intermediate state. It doesn't know whether
processing the first item will create 1 item, 3 items, or 100;
therefore, to produce an arbitrary item, it would basically just have
to start at the beginning and execute sequentially, which is not what
we want. But for unindexed consumers, this doesn't matter, since they
don't need to know how much data they will get.
Therefore, `FlatMap` can wrap the `ForEachConsumer` with a
`FlatMapConsumer` that feeds to it. This `FlatMapConsumer` will be
given one item. It will then invoke `some_function` to get a parallel
iterator out. It will then ask this new parallel iterator to drive the
`ForEachConsumer`. The `drive_unindexed` method on `flat_map` can then
pass the `FlatMapConsumer` up the chain to the previous item, which is
`zip`. At this point, something interesting happens.
## Switching from push to pull mode
If you think about `zip`, it can't really be implemented as a
consumer, at least not without an intermediate thread and some
channels or something (or maybe coroutines). The problem is that it
has to walk two iterators *in lockstep*. Basically, it can't call two
`drive` methods simultaneously, it can only call one at a time. So at
this point, the `zip` iterator needs to switch from *push mode* into
*pull mode*.
You'll note that `Zip` is only usable if its inputs implement
`IndexedParallelIterator`, meaning that they can produce data starting
at random points in the stream. This need to switch to pull mode is
exactly why. If we want to split a zip iterator at position 22, we
need to be able to start zipping items from index 22 right away,
without having to start from index 0.
Anyway, so at this point, the `drive_unindexed` method for `Zip` stops
creating consumers. Instead, it creates a *producer*, a `ZipProducer`,
to be exact, and calls the `bridge` function in the `plumbing`
module. Creating a `ZipProducer` will in turn create producers for
the two iterators being zipped. This is possible because they both
implement `IndexedParallelIterator`.
The `bridge` function will then connect the consumer, which is
handling the `flat_map` and `for_each`, with the producer, which is
handling the `zip` and its predecessors. It will split down until the
chunks seem reasonably small, then pull items from the producer and
feed them to the consumer.
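In code, that hand-off is usually a one-liner; simplified from the real
`Zip` implementation, an indexed iterator's `drive` can just be:
```rust
fn drive<C>(self, consumer: C) -> C::Result
    where C: Consumer<Self::Item>
{
    // `bridge` converts `self` into a producer and runs the
    // split-then-pull loop against `consumer`.
    bridge(self, consumer)
}
```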
## The base case
The other time that `bridge` gets used is when we bottom out in an
indexed producer, such as a slice or range. There is also a
`bridge_unindexed` equivalent for - you guessed it - unindexed producers,
such as string characters.
<a name="producer-callback">
## What on earth is `ProducerCallback`?
We saw that when you call a parallel action method like
`par_iter.reduce()`, that will create a "reducing" consumer and then
invoke `par_iter.drive_unindexed()` (or `par_iter.drive()`) as
appropriate. This may create yet more consumers as we proceed up the
parallel iterator chain. But at some point we're going to get to the
start of the chain, or to a parallel iterator (like `zip()`) that has
to coordinate multiple inputs. At that point, we need to start
converting parallel iterators into producers.
The way we do this is by invoking the method `with_producer()`, defined on
`IndexedParallelIterator`. This is a callback scheme. In an ideal world,
it would work like this:
```rust
base_iter.with_producer(|base_producer| {
// here, `base_producer` is the producer for `base_iter`
});
```
In that case, we could implement a combinator like `map()` by getting
the producer for the base iterator, wrapping it to make our own
`MapProducer`, and then passing that to the callback. Something like
this:
```rust
struct MapProducer<'f, P, F: 'f> {
base: P,
map_op: &'f F,
}
impl<I, F> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator,
F: MapOp<I::Item>,
{
fn with_producer<CB>(self, callback: CB) -> CB::Output {
let map_op = &self.map_op;
self.base_iter.with_producer(|base_producer| {
// Here `producer` is the producer for `self.base_iter`.
// Wrap that to make a `MapProducer`
let map_producer = MapProducer {
base: base_producer,
map_op: map_op
};
// invoke the callback with the wrapped version
callback(map_producer)
});
}
}
```
This example demonstrates some of the power of the callback scheme.
It winds up being a very flexible setup. For one thing, it means we
can take ownership of `par_iter`; we can then in turn give ownership
away of its bits and pieces into the producer (this is very useful if
the iterator owns an `&mut` slice, for example), or create shared
references and put *those* in the producer. In the case of map, for
example, the parallel iterator owns the `map_op`, and we borrow
references to it which we then put into the `MapProducer` (this means
the `MapProducer` can easily split itself and share those references).
The `with_producer` method can also create resources that are needed
during the parallel execution, since the producer does not have to be
returned.
Unfortunately there is a catch. We can't actually use closures the way
I showed you. To see why, think about the type that `map_producer`
would have to have. If we were going to write the `with_producer`
method using a closure, it would have to look something like this:
```rust
pub trait IndexedParallelIterator: ParallelIterator {
type Producer;
fn with_producer<CB, R>(self, callback: CB) -> R
where CB: FnOnce(Self::Producer) -> R;
...
}
```
Note that we had to add this associated type `Producer` so that
we could specify the argument of the callback to be `Self::Producer`.
Now, imagine trying to write that `MapProducer` impl using this style:
```rust
impl<I, F> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator,
F: MapOp<I::Item>,
{
    type Producer = MapProducer<'f, I::Producer, F>;
    //                          ^^ wait, what is this `'f`?
fn with_producer<CB, R>(self, callback: CB) -> R
where CB: FnOnce(Self::Producer) -> R
{
let map_op = &self.map_op;
// ^^^^^^ `'f` is (conceptually) the lifetime of this reference,
// so it will be different for each call to `with_producer`!
}
}
```
This may look familiar to you: it's the same problem that we have
trying to define an `Iterable` trait. Basically, the producer type
needs to include a lifetime (here, `'f`) that refers to the body of
`with_producer` and hence is not in scope at the impl level.
If we had [associated type constructors][1598], we could solve this
problem that way. But there is another solution. We can use a
dedicated callback trait like `ProducerCallback`, instead of `FnOnce`:
[1598]: https://github.com/rust-lang/rfcs/pull/1598
```rust
pub trait ProducerCallback<T> {
type Output;
fn callback<P>(self, producer: P) -> Self::Output
where P: Producer<Item=T>;
}
```
Using this trait, the signature of `with_producer()` looks like this:
```rust
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
```
Notice that this signature **never has to name the producer type** --
there is no associated type `Producer` anymore. This is because the
`callback()` method is generic over **all** producers `P`.
The problem is that now the `||` sugar doesn't work anymore. So we
have to manually create the callback struct, which is a mite tedious.
So our `MapProducer` code looks like this:
```rust
impl<I, F> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator,
F: MapOp<I::Item>,
{
fn with_producer<CB>(self, callback: CB) -> CB::Output
where CB: ProducerCallback<Self::Item>
{
return self.base.with_producer(Callback { callback: callback, map_op: self.map_op });
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// Manual version of the closure sugar: create an instance
// of a struct that implements `ProducerCallback`.
        // The struct declaration. Each field is something that we need to
        // capture from the creating scope.
struct Callback<CB, F> {
callback: CB,
map_op: F,
}
// Implement the `ProducerCallback` trait. This is pure boilerplate.
impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
where F: MapOp<T>,
CB: ProducerCallback<F::Output>
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where P: Producer<Item=T>
{
// The body of the closure is here:
let producer = MapProducer { base: base,
map_op: &self.map_op };
self.callback.callback(producer)
}
}
}
}
```
OK, a bit tedious, but it works!

484
vendor/rayon/src/iter/plumbing/mod.rs vendored Normal file

@@ -0,0 +1,484 @@
//! Traits and functions used to implement parallel iteration. These are
//! low-level details -- users of parallel iterators should not need to
//! interact with them directly. See [the `plumbing` README][r] for a general overview.
//!
//! [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
use crate::join_context;
use super::IndexedParallelIterator;
use std::cmp;
use std::usize;
/// The `ProducerCallback` trait is a kind of generic closure,
/// [analogous to `FnOnce`][FnOnce]. See [the corresponding section in
/// the plumbing README][r] for more details.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md#producer-callback
/// [FnOnce]: https://doc.rust-lang.org/std/ops/trait.FnOnce.html
pub trait ProducerCallback<T> {
/// The type of value returned by this callback. Analogous to
/// [`Output` from the `FnOnce` trait][Output].
///
/// [Output]: https://doc.rust-lang.org/std/ops/trait.FnOnce.html#associatedtype.Output
type Output;
/// Invokes the callback with the given producer as argument. The
/// key point of this trait is that this method is generic over
/// `P`, and hence implementors must be defined for any producer.
fn callback<P>(self, producer: P) -> Self::Output
where
P: Producer<Item = T>;
}
/// A `Producer` is effectively a "splittable `IntoIterator`". That
/// is, a producer is a value which can be converted into an iterator
/// at any time: at that point, it simply produces items on demand,
/// like any iterator. But what makes a `Producer` special is that,
/// *before* we convert to an iterator, we can also **split** it at a
/// particular point using the `split_at` method. This will yield up
/// two producers, one producing the items before that point, and one
/// producing the items after that point (these two producers can then
/// independently be split further, or be converted into iterators).
/// In Rayon, this splitting is used to divide between threads.
/// See [the `plumbing` README][r] for further details.
///
/// Note that each producer will always produce a fixed number of
/// items N. However, this number N is not queryable through the API;
/// the consumer is expected to track it.
///
/// NB. You might expect `Producer` to extend the `IntoIterator`
/// trait. However, [rust-lang/rust#20671][20671] prevents us from
/// declaring the DoubleEndedIterator and ExactSizeIterator
/// constraints on a required IntoIterator trait, so we inline
/// IntoIterator here until that issue is fixed.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
/// [20671]: https://github.com/rust-lang/rust/issues/20671
pub trait Producer: Send + Sized {
/// The type of item that will be produced by this producer once
/// it is converted into an iterator.
type Item;
/// The type of iterator we will become.
type IntoIter: Iterator<Item = Self::Item> + DoubleEndedIterator + ExactSizeIterator;
/// Convert `self` into an iterator; at this point, no more parallel splits
/// are possible.
fn into_iter(self) -> Self::IntoIter;
/// The minimum number of items that we will process
/// sequentially. Defaults to 1, which means that we will split
/// all the way down to a single item. This can be raised higher
/// using the [`with_min_len`] method, which will force us to
/// create sequential tasks at a larger granularity. Note that
    /// Rayon normally attempts to adjust the size of
/// parallel splits to reduce overhead, so this should not be
/// needed.
///
/// [`with_min_len`]: ../trait.IndexedParallelIterator.html#method.with_min_len
fn min_len(&self) -> usize {
1
}
/// The maximum number of items that we will process
/// sequentially. Defaults to MAX, which means that we can choose
/// not to split at all. This can be lowered using the
/// [`with_max_len`] method, which will force us to create more
    /// parallel tasks. Note that Rayon normally attempts to
    /// adjust the size of parallel splits to reduce
/// overhead, so this should not be needed.
///
/// [`with_max_len`]: ../trait.IndexedParallelIterator.html#method.with_max_len
fn max_len(&self) -> usize {
usize::MAX
}
/// Split into two producers; one produces items `0..index`, the
/// other `index..N`. Index must be less than or equal to `N`.
fn split_at(self, index: usize) -> (Self, Self);
/// Iterate the producer, feeding each element to `folder`, and
/// stop when the folder is full (or all elements have been consumed).
///
/// The provided implementation is sufficient for most iterables.
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.into_iter())
}
}
/// A consumer is effectively a [generalized "fold" operation][fold],
/// and in fact each consumer will eventually be converted into a
/// [`Folder`]. What makes a consumer special is that, like a
/// [`Producer`], it can be **split** into multiple consumers using
/// the `split_at` method. When a consumer is split, it produces two
/// consumers, as well as a **reducer**. The two consumers can be fed
/// items independently, and when they are done the reducer is used to
/// combine their two results into one. See [the `plumbing`
/// README][r] for further details.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
/// [fold]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold
/// [`Folder`]: trait.Folder.html
/// [`Producer`]: trait.Producer.html
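///
/// A sketch of the split/reduce protocol (`feed` is illustrative, not a
/// real API):
///
/// ```ignore
/// let (left, right, reducer) = consumer.split_at(mid);
/// let a = feed(left, &items[..mid]);  // each half folds its items,
/// let b = feed(right, &items[mid..]); // possibly on different threads
/// let result = reducer.reduce(a, b);  // combine the two results
/// ```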
pub trait Consumer<Item>: Send + Sized {
/// The type of folder that this consumer can be converted into.
type Folder: Folder<Item, Result = Self::Result>;
/// The type of reducer that is produced if this consumer is split.
type Reducer: Reducer<Self::Result>;
/// The type of result that this consumer will ultimately produce.
type Result: Send;
/// Divide the consumer into two consumers, one processing items
/// `0..index` and one processing items from `index..`. Also
/// produces a reducer that can be used to reduce the results at
/// the end.
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer);
/// Convert the consumer into a folder that can consume items
/// sequentially, eventually producing a final result.
fn into_folder(self) -> Self::Folder;
/// Hint whether this `Consumer` would like to stop processing
/// further items, e.g. if a search has been completed.
fn full(&self) -> bool;
}
/// The `Folder` trait encapsulates [the standard fold
/// operation][fold]. It can be fed many items using the `consume`
/// method. At the end, once all items have been consumed, it can then
/// be converted (using `complete`) into a final value.
///
/// [fold]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold
pub trait Folder<Item>: Sized {
/// The type of result that will ultimately be produced by the folder.
type Result;
/// Consume next item and return new sequential state.
fn consume(self, item: Item) -> Self;
/// Consume items from the iterator until full, and return new sequential state.
///
/// This method is **optional**. The default simply iterates over
/// `iter`, invoking `consume` and checking after each iteration
    /// whether `full` returns true, stopping early if so.
///
/// The main reason to override it is if you can provide a more
/// specialized, efficient implementation.
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = Item>,
{
for item in iter {
self = self.consume(item);
if self.full() {
break;
}
}
self
}
/// Finish consuming items, produce final result.
fn complete(self) -> Self::Result;
/// Hint whether this `Folder` would like to stop processing
/// further items, e.g. if a search has been completed.
fn full(&self) -> bool;
}
/// The reducer is the final step of a `Consumer` -- after a consumer
/// has been split into two parts, and each of those parts has been
/// fully processed, we are left with two results. The reducer is then
/// used to combine those two results into one. See [the `plumbing`
/// README][r] for further details.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
pub trait Reducer<Result> {
/// Reduce two final results into one; this is executed after a
/// split.
fn reduce(self, left: Result, right: Result) -> Result;
}
/// A stateless consumer can be freely copied. These consumers can be
/// used like regular consumers, but they also support a
/// `split_off_left` method that does not take an index to split, but
/// simply splits at some arbitrary point (`for_each`, for example,
/// produces an unindexed consumer).
pub trait UnindexedConsumer<I>: Consumer<I> {
/// Splits off a "left" consumer and returns it. The `self`
/// consumer should then be used to consume the "right" portion of
/// the data. (The ordering matters for methods like find_first --
/// values produced by the returned value are given precedence
/// over values produced by `self`.) Once the left and right
/// halves have been fully consumed, you should reduce the results
/// with the result of `to_reducer`.
fn split_off_left(&self) -> Self;
/// Creates a reducer that can be used to combine the results from
/// a split consumer.
fn to_reducer(&self) -> Self::Reducer;
}
/// A variant on `Producer` which does not know its exact length or
/// cannot represent it in a `usize`. These producers act like
/// ordinary producers except that they cannot be told to split at a
/// particular point. Instead, you just ask them to split 'somewhere'.
///
/// (In principle, `Producer` could extend this trait; however, it
/// does not because to do so would require producers to carry their
/// own length with them.)
pub trait UnindexedProducer: Send + Sized {
/// The type of item returned by this producer.
type Item;
/// Split midway into a new producer if possible, otherwise return `None`.
fn split(self) -> (Self, Option<Self>);
/// Iterate the producer, feeding each element to `folder`, and
/// stop when the folder is full (or all elements have been consumed).
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>;
}
/// A splitter controls the policy for splitting into smaller work items.
///
/// Thief-splitting is an adaptive policy that starts by splitting into
/// enough jobs for every worker thread, and then resets itself whenever a
/// job is actually stolen into a different thread.
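///
/// As a walk-through (illustrative numbers, not extra behavior): with 8
/// worker threads a fresh splitter starts at `splits = 8`; each un-stolen
/// `try_split` halves it (8, 4, 2, 1, 0, then it refuses), while a stolen
/// job resets it back up to the current thread count if that is larger.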
#[derive(Clone, Copy)]
struct Splitter {
/// The `splits` tell us approximately how many remaining times we'd
/// like to split this job. We always just divide it by two though, so
/// the effective number of pieces will be `next_power_of_two()`.
splits: usize,
}
impl Splitter {
#[inline]
fn new() -> Splitter {
Splitter {
splits: crate::current_num_threads(),
}
}
#[inline]
fn try_split(&mut self, stolen: bool) -> bool {
let Splitter { splits } = *self;
if stolen {
// This job was stolen! Reset the number of desired splits to the
// thread count, if that's more than we had remaining anyway.
self.splits = cmp::max(crate::current_num_threads(), self.splits / 2);
true
} else if splits > 0 {
// We have splits remaining, make it so.
self.splits /= 2;
true
} else {
// Not stolen, and no more splits -- we're done!
false
}
}
}
/// The length splitter is built on thief-splitting, but additionally takes
/// into account the remaining length of the iterator.
#[derive(Clone, Copy)]
struct LengthSplitter {
inner: Splitter,
/// The smallest we're willing to divide into. Usually this is just 1,
/// but you can choose a larger working size with `with_min_len()`.
min: usize,
}
impl LengthSplitter {
/// Creates a new splitter based on lengths.
///
/// The `min` is a hard lower bound. We'll never split below that, but
/// of course an iterator might start out smaller already.
///
/// The `max` is an upper bound on the working size, used to determine
/// the minimum number of times we need to split to get under that limit.
/// The adaptive algorithm may very well split even further, but never
/// smaller than the `min`.
#[inline]
fn new(min: usize, max: usize, len: usize) -> LengthSplitter {
let mut splitter = LengthSplitter {
inner: Splitter::new(),
min: cmp::max(min, 1),
};
// Divide the given length by the max working length to get the minimum
// number of splits we need to get under that max. This rounds down,
// but the splitter actually gives `next_power_of_two()` pieces anyway.
// e.g. len 12345 / max 100 = 123 min_splits -> 128 pieces.
let min_splits = len / cmp::max(max, 1);
// Only update the value if it's not splitting enough already.
if min_splits > splitter.inner.splits {
splitter.inner.splits = min_splits;
}
splitter
}
#[inline]
fn try_split(&mut self, len: usize, stolen: bool) -> bool {
// If splitting wouldn't make us too small, try the inner splitter.
len / 2 >= self.min && self.inner.try_split(stolen)
}
}
/// This helper function is used to "connect" a parallel iterator to a
/// consumer. It will convert the `par_iter` into a producer P and
/// then pull items from P and feed them to `consumer`, splitting and
/// creating parallel threads as needed.
///
/// This is useful when you are implementing your own parallel
/// iterators: it is often used as the definition of the
/// [`drive_unindexed`] or [`drive`] methods.
///
/// [`drive_unindexed`]: ../trait.ParallelIterator.html#tymethod.drive_unindexed
/// [`drive`]: ../trait.IndexedParallelIterator.html#tymethod.drive
pub fn bridge<I, C>(par_iter: I, consumer: C) -> C::Result
where
I: IndexedParallelIterator,
C: Consumer<I::Item>,
{
let len = par_iter.len();
return par_iter.with_producer(Callback { len, consumer });
struct Callback<C> {
len: usize,
consumer: C,
}
impl<C, I> ProducerCallback<I> for Callback<C>
where
C: Consumer<I>,
{
type Output = C::Result;
fn callback<P>(self, producer: P) -> C::Result
where
P: Producer<Item = I>,
{
bridge_producer_consumer(self.len, producer, self.consumer)
}
}
}
/// This helper function is used to "connect" a producer and a
/// consumer. You may prefer to call [`bridge`], which wraps this
/// function. This function will draw items from `producer` and feed
/// them to `consumer`, splitting and creating parallel tasks when
/// needed.
///
/// This is useful when you are implementing your own parallel
/// iterators: it is often used as the definition of the
/// [`drive_unindexed`] or [`drive`] methods.
///
/// [`bridge`]: fn.bridge.html
/// [`drive_unindexed`]: ../trait.ParallelIterator.html#tymethod.drive_unindexed
/// [`drive`]: ../trait.IndexedParallelIterator.html#tymethod.drive
pub fn bridge_producer_consumer<P, C>(len: usize, producer: P, consumer: C) -> C::Result
where
P: Producer,
C: Consumer<P::Item>,
{
let splitter = LengthSplitter::new(producer.min_len(), producer.max_len(), len);
return helper(len, false, splitter, producer, consumer);
fn helper<P, C>(
len: usize,
migrated: bool,
mut splitter: LengthSplitter,
producer: P,
consumer: C,
) -> C::Result
where
P: Producer,
C: Consumer<P::Item>,
{
if consumer.full() {
consumer.into_folder().complete()
} else if splitter.try_split(len, migrated) {
let mid = len / 2;
let (left_producer, right_producer) = producer.split_at(mid);
let (left_consumer, right_consumer, reducer) = consumer.split_at(mid);
let (left_result, right_result) = join_context(
|context| {
helper(
mid,
context.migrated(),
splitter,
left_producer,
left_consumer,
)
},
|context| {
helper(
len - mid,
context.migrated(),
splitter,
right_producer,
right_consumer,
)
},
);
reducer.reduce(left_result, right_result)
} else {
producer.fold_with(consumer.into_folder()).complete()
}
}
}
/// A variant of [`bridge_producer_consumer`] where the producer is an unindexed producer.
///
/// [`bridge_producer_consumer`]: fn.bridge_producer_consumer.html
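///
/// # Examples
///
/// A minimal, self-contained sketch; the `Chunk` and `Par` types here are
/// purely illustrative:
///
/// ```
/// use rayon::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer};
/// use rayon::iter::ParallelIterator;
///
/// // An unindexed producer over a half-open range, splitting in the middle.
/// struct Chunk {
///     start: u64,
///     end: u64,
/// }
///
/// impl UnindexedProducer for Chunk {
///     type Item = u64;
///
///     fn split(self) -> (Self, Option<Self>) {
///         if self.end - self.start <= 1 {
///             // Too small to split any further.
///             (self, None)
///         } else {
///             let mid = self.start + (self.end - self.start) / 2;
///             let right = Chunk { start: mid, end: self.end };
///             (Chunk { end: mid, ..self }, Some(right))
///         }
///     }
///
///     fn fold_with<F: Folder<u64>>(self, folder: F) -> F {
///         folder.consume_iter(self.start..self.end)
///     }
/// }
///
/// struct Par(Chunk);
///
/// impl ParallelIterator for Par {
///     type Item = u64;
///
///     fn drive_unindexed<C: UnindexedConsumer<u64>>(self, consumer: C) -> C::Result {
///         bridge_unindexed(self.0, consumer)
///     }
/// }
///
/// let sum: u64 = Par(Chunk { start: 0, end: 100 }).sum();
/// assert_eq!(sum, 4950);
/// ```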
pub fn bridge_unindexed<P, C>(producer: P, consumer: C) -> C::Result
where
P: UnindexedProducer,
C: UnindexedConsumer<P::Item>,
{
let splitter = Splitter::new();
bridge_unindexed_producer_consumer(false, splitter, producer, consumer)
}
fn bridge_unindexed_producer_consumer<P, C>(
migrated: bool,
mut splitter: Splitter,
producer: P,
consumer: C,
) -> C::Result
where
P: UnindexedProducer,
C: UnindexedConsumer<P::Item>,
{
if consumer.full() {
consumer.into_folder().complete()
} else if splitter.try_split(migrated) {
match producer.split() {
(left_producer, Some(right_producer)) => {
let (reducer, left_consumer, right_consumer) =
(consumer.to_reducer(), consumer.split_off_left(), consumer);
let bridge = bridge_unindexed_producer_consumer;
let (left_result, right_result) = join_context(
|context| bridge(context.migrated(), splitter, left_producer, left_consumer),
|context| bridge(context.migrated(), splitter, right_producer, right_consumer),
);
reducer.reduce(left_result, right_result)
}
(producer, None) => producer.fold_with(consumer.into_folder()).complete(),
}
} else {
producer.fold_with(consumer.into_folder()).complete()
}
}

137
vendor/rayon/src/iter/positions.rs vendored Normal file

@@ -0,0 +1,137 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `Positions` takes a predicate `predicate` and yields the index of each
/// element that matches it.
///
/// This struct is created by the [`positions()`] method on [`IndexedParallelIterator`]
///
/// [`positions()`]: trait.IndexedParallelIterator.html#method.positions
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
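///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let a = [1, 2, 3, 3];
/// let positions: Vec<usize> = a.par_iter().positions(|&x| x == 3).collect();
/// assert_eq!(positions, [2, 3]);
/// ```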
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Positions<I: IndexedParallelIterator, P> {
base: I,
predicate: P,
}
impl<I: IndexedParallelIterator + Debug, P> Debug for Positions<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Positions")
.field("base", &self.base)
.finish()
}
}
impl<I, P> Positions<I, P>
where
I: IndexedParallelIterator,
{
/// Create a new `Positions` iterator.
pub(super) fn new(base: I, predicate: P) -> Self {
Positions { base, predicate }
}
}
impl<I, P> ParallelIterator for Positions<I, P>
where
I: IndexedParallelIterator,
P: Fn(I::Item) -> bool + Sync + Send,
{
type Item = usize;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = PositionsConsumer::new(consumer, &self.predicate, 0);
self.base.drive(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct PositionsConsumer<'p, C, P> {
base: C,
predicate: &'p P,
offset: usize,
}
impl<'p, C, P> PositionsConsumer<'p, C, P> {
fn new(base: C, predicate: &'p P, offset: usize) -> Self {
PositionsConsumer {
base,
predicate,
offset,
}
}
}
impl<'p, T, C, P> Consumer<T> for PositionsConsumer<'p, C, P>
where
C: Consumer<usize>,
P: Fn(T) -> bool + Sync,
{
type Folder = PositionsFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
PositionsConsumer::new(left, self.predicate, self.offset),
PositionsConsumer::new(right, self.predicate, self.offset + index),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
PositionsFolder {
base: self.base.into_folder(),
predicate: self.predicate,
offset: self.offset,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
struct PositionsFolder<'p, F, P> {
base: F,
predicate: &'p P,
offset: usize,
}
impl<F, P, T> Folder<T> for PositionsFolder<'_, F, P>
where
F: Folder<usize>,
P: Fn(T) -> bool,
{
type Result = F::Result;
fn consume(mut self, item: T) -> Self {
let index = self.offset;
self.offset += 1;
if (self.predicate)(item) {
self.base = self.base.consume(index);
}
self
}
// This cannot easily specialize `consume_iter` to be better than
// the default, because that requires checking `self.base.full()`
// during a call to `self.base.consume_iter()`. (#632)
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

114
vendor/rayon/src/iter/product.rs vendored Normal file

@@ -0,0 +1,114 @@
use super::plumbing::*;
use super::ParallelIterator;
use std::iter::{self, Product};
use std::marker::PhantomData;
pub(super) fn product<PI, P>(pi: PI) -> P
where
PI: ParallelIterator,
P: Send + Product<PI::Item> + Product,
{
pi.drive_unindexed(ProductConsumer::new())
}
fn mul<T: Product>(left: T, right: T) -> T {
[left, right].into_iter().product()
}
struct ProductConsumer<P: Send> {
_marker: PhantomData<*const P>,
}
unsafe impl<P: Send> Send for ProductConsumer<P> {}
impl<P: Send> ProductConsumer<P> {
fn new() -> ProductConsumer<P> {
ProductConsumer {
_marker: PhantomData,
}
}
}
impl<P, T> Consumer<T> for ProductConsumer<P>
where
P: Send + Product<T> + Product,
{
type Folder = ProductFolder<P>;
type Reducer = Self;
type Result = P;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(
ProductConsumer::new(),
ProductConsumer::new(),
ProductConsumer::new(),
)
}
fn into_folder(self) -> Self::Folder {
ProductFolder {
product: iter::empty::<T>().product(),
}
}
fn full(&self) -> bool {
false
}
}
impl<P, T> UnindexedConsumer<T> for ProductConsumer<P>
where
P: Send + Product<T> + Product,
{
fn split_off_left(&self) -> Self {
ProductConsumer::new()
}
fn to_reducer(&self) -> Self::Reducer {
ProductConsumer::new()
}
}
impl<P> Reducer<P> for ProductConsumer<P>
where
P: Send + Product,
{
fn reduce(self, left: P, right: P) -> P {
mul(left, right)
}
}
struct ProductFolder<P> {
product: P,
}
impl<P, T> Folder<T> for ProductFolder<P>
where
P: Product<T> + Product,
{
type Result = P;
fn consume(self, item: T) -> Self {
ProductFolder {
product: mul(self.product, iter::once(item).product()),
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
ProductFolder {
product: mul(self.product, iter.into_iter().product()),
}
}
fn complete(self) -> P {
self.product
}
fn full(&self) -> bool {
false
}
}

116
vendor/rayon/src/iter/reduce.rs vendored Normal file

@@ -0,0 +1,116 @@
use super::plumbing::*;
use super::ParallelIterator;
pub(super) fn reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T
where
PI: ParallelIterator<Item = T>,
R: Fn(T, T) -> T + Sync,
ID: Fn() -> T + Sync,
T: Send,
{
let consumer = ReduceConsumer {
identity: &identity,
reduce_op: &reduce_op,
};
pi.drive_unindexed(consumer)
}
struct ReduceConsumer<'r, R, ID> {
identity: &'r ID,
reduce_op: &'r R,
}
impl<'r, R, ID> Copy for ReduceConsumer<'r, R, ID> {}
impl<'r, R, ID> Clone for ReduceConsumer<'r, R, ID> {
fn clone(&self) -> Self {
*self
}
}
impl<'r, R, ID, T> Consumer<T> for ReduceConsumer<'r, R, ID>
where
R: Fn(T, T) -> T + Sync,
ID: Fn() -> T + Sync,
T: Send,
{
type Folder = ReduceFolder<'r, R, T>;
type Reducer = Self;
type Result = T;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(self, self, self)
}
fn into_folder(self) -> Self::Folder {
ReduceFolder {
reduce_op: self.reduce_op,
item: (self.identity)(),
}
}
fn full(&self) -> bool {
false
}
}
impl<'r, R, ID, T> UnindexedConsumer<T> for ReduceConsumer<'r, R, ID>
where
R: Fn(T, T) -> T + Sync,
ID: Fn() -> T + Sync,
T: Send,
{
fn split_off_left(&self) -> Self {
*self
}
fn to_reducer(&self) -> Self::Reducer {
*self
}
}
impl<'r, R, ID, T> Reducer<T> for ReduceConsumer<'r, R, ID>
where
R: Fn(T, T) -> T + Sync,
{
fn reduce(self, left: T, right: T) -> T {
(self.reduce_op)(left, right)
}
}
struct ReduceFolder<'r, R, T> {
reduce_op: &'r R,
item: T,
}
impl<'r, R, T> Folder<T> for ReduceFolder<'r, R, T>
where
R: Fn(T, T) -> T,
{
type Result = T;
fn consume(self, item: T) -> Self {
ReduceFolder {
reduce_op: self.reduce_op,
item: (self.reduce_op)(self.item, item),
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
ReduceFolder {
reduce_op: self.reduce_op,
item: iter.into_iter().fold(self.item, self.reduce_op),
}
}
fn complete(self) -> T {
self.item
}
fn full(&self) -> bool {
false
}
}

241
vendor/rayon/src/iter/repeat.rs vendored Normal file

@@ -0,0 +1,241 @@
use super::plumbing::*;
use super::*;
use std::iter;
use std::usize;
/// Iterator adaptor for [the `repeat()` function](fn.repeat.html).
#[derive(Debug, Clone)]
pub struct Repeat<T: Clone + Send> {
element: T,
}
/// Creates a parallel iterator that endlessly repeats `elt` (by
/// cloning it). Note that this iterator has "infinite" length, so
/// typically you would want to use `zip` or `take` or some other
/// means to shorten it, or consider using
/// [the `repeatn()` function](fn.repeatn.html) instead.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::repeat;
/// let x: Vec<(i32, i32)> = repeat(22).zip(0..3).collect();
/// assert_eq!(x, vec![(22, 0), (22, 1), (22, 2)]);
/// ```
pub fn repeat<T: Clone + Send>(elt: T) -> Repeat<T> {
Repeat { element: elt }
}
impl<T> Repeat<T>
where
T: Clone + Send,
{
/// Takes only `n` repeats of the element, similar to the general
/// [`take()`](trait.IndexedParallelIterator.html#method.take).
///
/// The resulting `RepeatN` is an `IndexedParallelIterator`, allowing
/// more functionality than `Repeat` alone.
pub fn take(self, n: usize) -> RepeatN<T> {
repeatn(self.element, n)
}
/// Iterates tuples, repeating the element with items from another
/// iterator, similar to the general
/// [`zip()`](trait.IndexedParallelIterator.html#method.zip).
pub fn zip<Z>(self, zip_op: Z) -> Zip<RepeatN<T>, Z::Iter>
where
Z: IntoParallelIterator,
Z::Iter: IndexedParallelIterator,
{
let z = zip_op.into_par_iter();
let n = z.len();
self.take(n).zip(z)
}
}
impl<T> ParallelIterator for Repeat<T>
where
T: Clone + Send,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = RepeatProducer {
element: self.element,
};
bridge_unindexed(producer, consumer)
}
}
/// Unindexed producer for `Repeat`.
struct RepeatProducer<T: Clone + Send> {
element: T,
}
impl<T: Clone + Send> UnindexedProducer for RepeatProducer<T> {
type Item = T;
fn split(self) -> (Self, Option<Self>) {
(
RepeatProducer {
element: self.element.clone(),
},
Some(RepeatProducer {
element: self.element,
}),
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<T>,
{
folder.consume_iter(iter::repeat(self.element))
}
}
/// Iterator adaptor for [the `repeatn()` function](fn.repeatn.html).
#[derive(Debug, Clone)]
pub struct RepeatN<T: Clone + Send> {
element: T,
count: usize,
}
/// Creates a parallel iterator that produces `n` repeats of `elt`
/// (by cloning it).
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::repeatn;
/// let x: Vec<(i32, i32)> = repeatn(22, 3).zip(0..3).collect();
/// assert_eq!(x, vec![(22, 0), (22, 1), (22, 2)]);
/// ```
pub fn repeatn<T: Clone + Send>(elt: T, n: usize) -> RepeatN<T> {
RepeatN {
element: elt,
count: n,
}
}
impl<T> ParallelIterator for RepeatN<T>
where
T: Clone + Send,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.count)
}
}
impl<T> IndexedParallelIterator for RepeatN<T>
where
T: Clone + Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RepeatNProducer {
element: self.element,
count: self.count,
})
}
fn len(&self) -> usize {
self.count
}
}
/// Producer for `RepeatN`.
struct RepeatNProducer<T: Clone + Send> {
element: T,
count: usize,
}
impl<T: Clone + Send> Producer for RepeatNProducer<T> {
type Item = T;
type IntoIter = Iter<T>;
fn into_iter(self) -> Self::IntoIter {
Iter {
element: self.element,
count: self.count,
}
}
fn split_at(self, index: usize) -> (Self, Self) {
(
RepeatNProducer {
element: self.element.clone(),
count: index,
},
RepeatNProducer {
element: self.element,
count: self.count - index,
},
)
}
}
/// Iterator for `RepeatN`.
///
/// This is conceptually like `std::iter::Take<std::iter::Repeat<T>>`, but
/// we need `DoubleEndedIterator` and unconditional `ExactSizeIterator`.
struct Iter<T: Clone> {
element: T,
count: usize,
}
impl<T: Clone> Iterator for Iter<T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
if self.count > 0 {
self.count -= 1;
Some(self.element.clone())
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.count, Some(self.count))
}
}
impl<T: Clone> DoubleEndedIterator for Iter<T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.next()
}
}
impl<T: Clone> ExactSizeIterator for Iter<T> {
#[inline]
fn len(&self) -> usize {
self.count
}
}

123
vendor/rayon/src/iter/rev.rs vendored Normal file

@@ -0,0 +1,123 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Rev` is an iterator that produces elements in reverse order. This struct
/// is created by the [`rev()`] method on [`IndexedParallelIterator`]
///
/// [`rev()`]: trait.IndexedParallelIterator.html#method.rev
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
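///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..5).into_par_iter().rev().collect();
/// assert_eq!(v, [4, 3, 2, 1, 0]);
/// ```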
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Rev<I: IndexedParallelIterator> {
base: I,
}
impl<I> Rev<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Rev` iterator.
pub(super) fn new(base: I) -> Self {
Rev { base }
}
}
impl<I> ParallelIterator for Rev<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Rev<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback { callback, len });
struct Callback<CB> {
callback: CB,
len: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = RevProducer {
base,
len: self.len,
};
self.callback.callback(producer)
}
}
}
}
struct RevProducer<P> {
base: P,
len: usize,
}
impl<P> Producer for RevProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = iter::Rev<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().rev()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(self.len - index);
(
RevProducer {
base: right,
len: index,
},
RevProducer {
base: left,
len: self.len - index,
},
)
}
}

95
vendor/rayon/src/iter/skip.rs vendored Normal file

@@ -0,0 +1,95 @@
use super::noop::NoopConsumer;
use super::plumbing::*;
use super::*;
use std::cmp::min;
/// `Skip` is an iterator that skips over the first `n` elements.
/// This struct is created by the [`skip()`] method on [`IndexedParallelIterator`]
///
/// [`skip()`]: trait.IndexedParallelIterator.html#method.skip
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
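///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..10).into_par_iter().skip(6).collect();
/// assert_eq!(v, [6, 7, 8, 9]);
/// ```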
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Skip<I> {
base: I,
n: usize,
}
impl<I> Skip<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Skip` iterator.
pub(super) fn new(base: I, n: usize) -> Self {
let n = min(base.len(), n);
Skip { base, n }
}
}
impl<I> ParallelIterator for Skip<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Skip<I>
where
I: IndexedParallelIterator,
{
fn len(&self) -> usize {
self.base.len() - self.n
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
n: self.n,
});
struct Callback<CB> {
callback: CB,
n: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
crate::in_place_scope(|scope| {
let Self { callback, n } = self;
let (before_skip, after_skip) = base.split_at(n);
// Run the skipped part separately for side effects.
// We'll still get any panics propagated back by the scope.
scope.spawn(move |_| bridge_producer_consumer(n, before_skip, NoopConsumer));
callback.callback(after_skip)
})
}
}
}
}

144
vendor/rayon/src/iter/skip_any.rs vendored Normal file

@@ -0,0 +1,144 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering};
/// `SkipAny` is an iterator that skips over `n` elements from anywhere in `I`.
/// This struct is created by the [`skip_any()`] method on [`ParallelIterator`]
///
/// [`skip_any()`]: trait.ParallelIterator.html#method.skip_any
/// [`ParallelIterator`]: trait.ParallelIterator.html
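///
/// # Examples
///
/// A minimal usage sketch; exactly `n` items are skipped, but which ones
/// depends on how the work is split at run time:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..100).into_par_iter().skip_any(95).collect();
/// assert_eq!(v.len(), 5);
/// ```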
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct SkipAny<I: ParallelIterator> {
base: I,
count: usize,
}
impl<I> SkipAny<I>
where
I: ParallelIterator,
{
/// Creates a new `SkipAny` iterator.
pub(super) fn new(base: I, count: usize) -> Self {
SkipAny { base, count }
}
}
impl<I> ParallelIterator for SkipAny<I>
where
I: ParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = SkipAnyConsumer {
base: consumer,
count: &AtomicUsize::new(self.count),
};
self.base.drive_unindexed(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct SkipAnyConsumer<'f, C> {
base: C,
count: &'f AtomicUsize,
}
impl<'f, T, C> Consumer<T> for SkipAnyConsumer<'f, C>
where
C: Consumer<T>,
T: Send,
{
type Folder = SkipAnyFolder<'f, C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
SkipAnyConsumer { base: left, ..self },
SkipAnyConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
SkipAnyFolder {
base: self.base.into_folder(),
count: self.count,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, C> UnindexedConsumer<T> for SkipAnyConsumer<'f, C>
where
C: UnindexedConsumer<T>,
T: Send,
{
fn split_off_left(&self) -> Self {
SkipAnyConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct SkipAnyFolder<'f, C> {
base: C,
count: &'f AtomicUsize,
}
fn checked_decrement(u: &AtomicUsize) -> bool {
u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
.is_ok()
}
impl<'f, T, C> Folder<T> for SkipAnyFolder<'f, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if !checked_decrement(self.count) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.skip_while(move |_| checked_decrement(self.count)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

166
vendor/rayon/src/iter/skip_any_while.rs vendored Normal file

@@ -0,0 +1,166 @@
use super::plumbing::*;
use super::*;
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
/// `SkipAnyWhile` is an iterator that skips over elements from anywhere in `I`
/// until the callback returns `false`.
/// This struct is created by the [`skip_any_while()`] method on [`ParallelIterator`]
///
/// [`skip_any_while()`]: trait.ParallelIterator.html#method.skip_any_while
/// [`ParallelIterator`]: trait.ParallelIterator.html
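///
/// # Examples
///
/// A minimal usage sketch; every element that fails the predicate is kept,
/// while some elements that match it may also survive, depending on how the
/// work is split:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..100).into_par_iter().skip_any_while(|&x| x < 50).collect();
/// assert!(v.len() >= 50);
/// assert_eq!(v.iter().filter(|&&x| x >= 50).count(), 50);
/// ```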
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct SkipAnyWhile<I: ParallelIterator, P> {
base: I,
predicate: P,
}
impl<I: ParallelIterator + fmt::Debug, P> fmt::Debug for SkipAnyWhile<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SkipAnyWhile")
.field("base", &self.base)
.finish()
}
}
impl<I, P> SkipAnyWhile<I, P>
where
I: ParallelIterator,
{
/// Creates a new `SkipAnyWhile` iterator.
pub(super) fn new(base: I, predicate: P) -> Self {
SkipAnyWhile { base, predicate }
}
}
impl<I, P> ParallelIterator for SkipAnyWhile<I, P>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = SkipAnyWhileConsumer {
base: consumer,
predicate: &self.predicate,
skipping: &AtomicBool::new(true),
};
self.base.drive_unindexed(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct SkipAnyWhileConsumer<'p, C, P> {
base: C,
predicate: &'p P,
skipping: &'p AtomicBool,
}
impl<'p, T, C, P> Consumer<T> for SkipAnyWhileConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&T) -> bool + Sync,
{
type Folder = SkipAnyWhileFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
SkipAnyWhileConsumer { base: left, ..self },
SkipAnyWhileConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
SkipAnyWhileFolder {
base: self.base.into_folder(),
predicate: self.predicate,
skipping: self.skipping,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, C, P> UnindexedConsumer<T> for SkipAnyWhileConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
SkipAnyWhileConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct SkipAnyWhileFolder<'p, C, P> {
base: C,
predicate: &'p P,
skipping: &'p AtomicBool,
}
fn skip<T>(item: &T, skipping: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
if !skipping.load(Ordering::Relaxed) {
return false;
}
if predicate(item) {
return true;
}
skipping.store(false, Ordering::Relaxed);
false
}
impl<'p, T, C, P> Folder<T> for SkipAnyWhileFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&T) -> bool + 'p,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if !skip(&item, self.skipping, self.predicate) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.skip_while(move |x| skip(x, self.skipping, self.predicate)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

174
vendor/rayon/src/iter/splitter.rs vendored Normal file

@@ -0,0 +1,174 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// The `split` function takes arbitrary data and a closure that knows how to
/// split it, and turns this into a `ParallelIterator`.
///
/// # Examples
///
/// As a simple example, Rayon can recursively split ranges of indices
///
/// ```
/// use rayon::iter;
/// use rayon::prelude::*;
/// use std::ops::Range;
///
///
/// // We define a range of indices as follows
/// type Range1D = Range<usize>;
///
/// // Splitting it in two can be done like this
/// fn split_range1(r: Range1D) -> (Range1D, Option<Range1D>) {
/// // We are mathematically unable to split the range if there is only
/// // one point inside of it, but we could stop splitting before that.
/// if r.end - r.start <= 1 { return (r, None); }
///
/// // Here, our range is considered large enough to be splittable
/// let midpoint = r.start + (r.end - r.start) / 2;
/// (r.start..midpoint, Some(midpoint..r.end))
/// }
///
/// // By using iter::split, Rayon will split the range until it has enough work
/// // to feed the CPU cores, then give us the resulting sub-ranges
/// iter::split(0..4096, split_range1).for_each(|sub_range| {
/// // As our initial range had a power-of-two size, the final sub-ranges
/// // should have power-of-two sizes too
/// assert!((sub_range.end - sub_range.start).is_power_of_two());
/// });
/// ```
///
/// This recursive splitting can be extended to two or three dimensions,
/// to reproduce a classic "block-wise" parallelization scheme of graphics and
/// numerical simulations:
///
/// ```
/// # use rayon::iter;
/// # use rayon::prelude::*;
/// # use std::ops::Range;
/// # type Range1D = Range<usize>;
/// # fn split_range1(r: Range1D) -> (Range1D, Option<Range1D>) {
/// # if r.end - r.start <= 1 { return (r, None); }
/// # let midpoint = r.start + (r.end - r.start) / 2;
/// # (r.start..midpoint, Some(midpoint..r.end))
/// # }
/// #
/// // A two-dimensional range of indices can be built out of two 1D ones
/// struct Range2D {
/// // Range of horizontal indices
/// pub rx: Range1D,
///
/// // Range of vertical indices
/// pub ry: Range1D,
/// }
///
/// // We want to recursively split them by the largest dimension until we have
/// // enough sub-ranges to feed our mighty multi-core CPU. This function
/// // carries out one such split.
/// fn split_range2(r2: Range2D) -> (Range2D, Option<Range2D>) {
/// // Decide on which axis (horizontal/vertical) the range should be split
/// let width = r2.rx.end - r2.rx.start;
/// let height = r2.ry.end - r2.ry.start;
/// if width >= height {
/// // This is a wide range, split it on the horizontal axis
/// let (split_rx, ry) = (split_range1(r2.rx), r2.ry);
/// let out1 = Range2D {
/// rx: split_rx.0,
/// ry: ry.clone(),
/// };
/// let out2 = split_rx.1.map(|rx| Range2D { rx, ry });
/// (out1, out2)
/// } else {
/// // This is a tall range, split it on the vertical axis
/// let (rx, split_ry) = (r2.rx, split_range1(r2.ry));
/// let out1 = Range2D {
/// rx: rx.clone(),
/// ry: split_ry.0,
/// };
/// let out2 = split_ry.1.map(|ry| Range2D { rx, ry, });
/// (out1, out2)
/// }
/// }
///
/// // Again, rayon can handle the recursive splitting for us
/// let range = Range2D { rx: 0..800, ry: 0..600 };
/// iter::split(range, split_range2).for_each(|sub_range| {
/// // If the sub-ranges were indeed split by the largest dimension, and
/// // if neither dimension was initially more than twice as large as the
/// // other, this property will remain true in the final sub-ranges.
/// let width = sub_range.rx.end - sub_range.rx.start;
/// let height = sub_range.ry.end - sub_range.ry.start;
/// assert!((width / 2 <= height) && (height / 2 <= width));
/// });
/// ```
///
pub fn split<D, S>(data: D, splitter: S) -> Split<D, S>
where
D: Send,
S: Fn(D) -> (D, Option<D>) + Sync,
{
Split { data, splitter }
}
/// `Split` is a parallel iterator using arbitrary data and a splitting function.
/// This struct is created by the [`split()`] function.
///
/// [`split()`]: fn.split.html
#[derive(Clone)]
pub struct Split<D, S> {
data: D,
splitter: S,
}
impl<D: Debug, S> Debug for Split<D, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Split").field("data", &self.data).finish()
}
}
impl<D, S> ParallelIterator for Split<D, S>
where
D: Send,
S: Fn(D) -> (D, Option<D>) + Sync + Send,
{
type Item = D;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = SplitProducer {
data: self.data,
splitter: &self.splitter,
};
bridge_unindexed(producer, consumer)
}
}
struct SplitProducer<'a, D, S> {
data: D,
splitter: &'a S,
}
impl<'a, D, S> UnindexedProducer for SplitProducer<'a, D, S>
where
D: Send,
S: Fn(D) -> (D, Option<D>) + Sync,
{
type Item = D;
fn split(mut self) -> (Self, Option<Self>) {
let splitter = self.splitter;
let (left, right) = splitter(self.data);
self.data = left;
(self, right.map(|data| SplitProducer { data, splitter }))
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume(self.data)
}
}

143
vendor/rayon/src/iter/step_by.rs vendored Normal file

@@ -0,0 +1,143 @@
use std::cmp::min;
use super::plumbing::*;
use super::*;
use crate::math::div_round_up;
use std::iter;
use std::usize;
/// `StepBy` is an iterator that skips `n` elements between each yield, where `n` is the given step.
/// This struct is created by the [`step_by()`] method on [`IndexedParallelIterator`]
///
/// [`step_by()`]: trait.IndexedParallelIterator.html#method.step_by
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
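///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..10).into_par_iter().step_by(3).collect();
/// assert_eq!(v, [0, 3, 6, 9]);
/// ```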
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct StepBy<I: IndexedParallelIterator> {
base: I,
step: usize,
}
impl<I> StepBy<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `StepBy` iterator.
pub(super) fn new(base: I, step: usize) -> Self {
StepBy { base, step }
}
}
impl<I> ParallelIterator for StepBy<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for StepBy<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
div_round_up(self.base.len(), self.step)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback {
callback,
step: self.step,
len,
});
struct Callback<CB> {
callback: CB,
step: usize,
len: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = StepByProducer {
base,
step: self.step,
len: self.len,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Producer implementation
struct StepByProducer<P> {
base: P,
step: usize,
len: usize,
}
impl<P> Producer for StepByProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = iter::StepBy<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().step_by(self.step)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = min(index * self.step, self.len);
let (left, right) = self.base.split_at(elem_index);
(
StepByProducer {
base: left,
step: self.step,
len: elem_index,
},
StepByProducer {
base: right,
step: self.step,
len: self.len - elem_index,
},
)
}
fn min_len(&self) -> usize {
div_round_up(self.base.min_len(), self.step)
}
fn max_len(&self) -> usize {
self.base.max_len() / self.step
}
}

110
vendor/rayon/src/iter/sum.rs vendored Normal file

@@ -0,0 +1,110 @@
use super::plumbing::*;
use super::ParallelIterator;
use std::iter::{self, Sum};
use std::marker::PhantomData;
pub(super) fn sum<PI, S>(pi: PI) -> S
where
PI: ParallelIterator,
S: Send + Sum<PI::Item> + Sum,
{
pi.drive_unindexed(SumConsumer::new())
}
fn add<T: Sum>(left: T, right: T) -> T {
[left, right].into_iter().sum()
}
struct SumConsumer<S: Send> {
_marker: PhantomData<*const S>,
}
unsafe impl<S: Send> Send for SumConsumer<S> {}
impl<S: Send> SumConsumer<S> {
fn new() -> SumConsumer<S> {
SumConsumer {
_marker: PhantomData,
}
}
}
impl<S, T> Consumer<T> for SumConsumer<S>
where
S: Send + Sum<T> + Sum,
{
type Folder = SumFolder<S>;
type Reducer = Self;
type Result = S;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(SumConsumer::new(), SumConsumer::new(), SumConsumer::new())
}
fn into_folder(self) -> Self::Folder {
SumFolder {
sum: iter::empty::<T>().sum(),
}
}
fn full(&self) -> bool {
false
}
}
impl<S, T> UnindexedConsumer<T> for SumConsumer<S>
where
S: Send + Sum<T> + Sum,
{
fn split_off_left(&self) -> Self {
SumConsumer::new()
}
fn to_reducer(&self) -> Self::Reducer {
SumConsumer::new()
}
}
impl<S> Reducer<S> for SumConsumer<S>
where
S: Send + Sum,
{
fn reduce(self, left: S, right: S) -> S {
add(left, right)
}
}
struct SumFolder<S> {
sum: S,
}
impl<S, T> Folder<T> for SumFolder<S>
where
S: Sum<T> + Sum,
{
type Result = S;
fn consume(self, item: T) -> Self {
SumFolder {
sum: add(self.sum, iter::once(item).sum()),
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
SumFolder {
sum: add(self.sum, iter.into_iter().sum()),
}
}
fn complete(self) -> S {
self.sum
}
fn full(&self) -> bool {
false
}
}

86
vendor/rayon/src/iter/take.rs vendored Normal file

@@ -0,0 +1,86 @@
use super::plumbing::*;
use super::*;
use std::cmp::min;
/// `Take` is an iterator that iterates over the first `n` elements.
/// This struct is created by the [`take()`] method on [`IndexedParallelIterator`]
///
/// [`take()`]: trait.IndexedParallelIterator.html#method.take
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
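///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..100).into_par_iter().take(4).collect();
/// assert_eq!(v, [0, 1, 2, 3]);
/// ```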
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Take<I> {
base: I,
n: usize,
}
impl<I> Take<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Take` iterator.
pub(super) fn new(base: I, n: usize) -> Self {
let n = min(base.len(), n);
Take { base, n }
}
}
impl<I> ParallelIterator for Take<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Take<I>
where
I: IndexedParallelIterator,
{
fn len(&self) -> usize {
self.n
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
n: self.n,
});
struct Callback<CB> {
callback: CB,
n: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let (producer, _) = base.split_at(self.n);
self.callback.callback(producer)
}
}
}
}

144
vendor/rayon/src/iter/take_any.rs vendored Normal file

@@ -0,0 +1,144 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering};
/// `TakeAny` is an iterator that iterates over `n` elements from anywhere in `I`.
/// This struct is created by the [`take_any()`] method on [`ParallelIterator`]
///
/// [`take_any()`]: trait.ParallelIterator.html#method.take_any
/// [`ParallelIterator`]: trait.ParallelIterator.html
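///
/// # Examples
///
/// A minimal usage sketch; exactly `n` items are taken, but which ones
/// depends on run-time scheduling:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..100).into_par_iter().take_any(5).collect();
/// assert_eq!(v.len(), 5);
/// assert!(v.iter().all(|&x| (0..100).contains(&x)));
/// ```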
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TakeAny<I: ParallelIterator> {
base: I,
count: usize,
}
impl<I> TakeAny<I>
where
I: ParallelIterator,
{
/// Creates a new `TakeAny` iterator.
pub(super) fn new(base: I, count: usize) -> Self {
TakeAny { base, count }
}
}
impl<I> ParallelIterator for TakeAny<I>
where
I: ParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TakeAnyConsumer {
base: consumer,
count: &AtomicUsize::new(self.count),
};
self.base.drive_unindexed(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct TakeAnyConsumer<'f, C> {
base: C,
count: &'f AtomicUsize,
}
impl<'f, T, C> Consumer<T> for TakeAnyConsumer<'f, C>
where
C: Consumer<T>,
T: Send,
{
type Folder = TakeAnyFolder<'f, C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TakeAnyConsumer { base: left, ..self },
TakeAnyConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TakeAnyFolder {
base: self.base.into_folder(),
count: self.count,
}
}
fn full(&self) -> bool {
self.count.load(Ordering::Relaxed) == 0 || self.base.full()
}
}
impl<'f, T, C> UnindexedConsumer<T> for TakeAnyConsumer<'f, C>
where
C: UnindexedConsumer<T>,
T: Send,
{
fn split_off_left(&self) -> Self {
TakeAnyConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct TakeAnyFolder<'f, C> {
base: C,
count: &'f AtomicUsize,
}
fn checked_decrement(u: &AtomicUsize) -> bool {
u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
.is_ok()
}
impl<'f, T, C> Folder<T> for TakeAnyFolder<'f, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if checked_decrement(self.count) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.take_while(move |_| checked_decrement(self.count)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.count.load(Ordering::Relaxed) == 0 || self.base.full()
}
}

166
vendor/rayon/src/iter/take_any_while.rs vendored Normal file

@@ -0,0 +1,166 @@
use super::plumbing::*;
use super::*;
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
/// `TakeAnyWhile` is an iterator that iterates over elements from anywhere in `I`
/// until the callback returns `false`.
/// This struct is created by the [`take_any_while()`] method on [`ParallelIterator`]
///
/// [`take_any_while()`]: trait.ParallelIterator.html#method.take_any_while
/// [`ParallelIterator`]: trait.ParallelIterator.html
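///
/// # Examples
///
/// A minimal usage sketch; every yielded element satisfies the predicate,
/// but how many are yielded depends on how the work is split:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..100).into_par_iter().take_any_while(|&x| x < 50).collect();
/// assert!(v.len() <= 50);
/// assert!(v.iter().all(|&x| x < 50));
/// ```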
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TakeAnyWhile<I: ParallelIterator, P> {
base: I,
predicate: P,
}
impl<I: ParallelIterator + fmt::Debug, P> fmt::Debug for TakeAnyWhile<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TakeAnyWhile")
.field("base", &self.base)
.finish()
}
}
impl<I, P> TakeAnyWhile<I, P>
where
I: ParallelIterator,
{
/// Creates a new `TakeAnyWhile` iterator.
pub(super) fn new(base: I, predicate: P) -> Self {
TakeAnyWhile { base, predicate }
}
}
impl<I, P> ParallelIterator for TakeAnyWhile<I, P>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TakeAnyWhileConsumer {
base: consumer,
predicate: &self.predicate,
taking: &AtomicBool::new(true),
};
self.base.drive_unindexed(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct TakeAnyWhileConsumer<'p, C, P> {
base: C,
predicate: &'p P,
taking: &'p AtomicBool,
}
impl<'p, T, C, P> Consumer<T> for TakeAnyWhileConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&T) -> bool + Sync,
{
type Folder = TakeAnyWhileFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TakeAnyWhileConsumer { base: left, ..self },
TakeAnyWhileConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TakeAnyWhileFolder {
base: self.base.into_folder(),
predicate: self.predicate,
taking: self.taking,
}
}
fn full(&self) -> bool {
!self.taking.load(Ordering::Relaxed) || self.base.full()
}
}
impl<'p, T, C, P> UnindexedConsumer<T> for TakeAnyWhileConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
TakeAnyWhileConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct TakeAnyWhileFolder<'p, C, P> {
base: C,
predicate: &'p P,
taking: &'p AtomicBool,
}
fn take<T>(item: &T, taking: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
if !taking.load(Ordering::Relaxed) {
return false;
}
if predicate(item) {
return true;
}
taking.store(false, Ordering::Relaxed);
false
}
impl<'p, T, C, P> Folder<T> for TakeAnyWhileFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&T) -> bool + 'p,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if take(&item, self.taking, self.predicate) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.take_while(move |x| take(x, self.taking, self.predicate)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
!self.taking.load(Ordering::Relaxed) || self.base.full()
}
}

2188
vendor/rayon/src/iter/test.rs vendored Normal file

File diff suppressed because it is too large

298
vendor/rayon/src/iter/try_fold.rs vendored Normal file

@@ -0,0 +1,298 @@
use super::plumbing::*;
use super::ParallelIterator;
use super::Try;
use std::fmt::{self, Debug};
use std::marker::PhantomData;
use std::ops::ControlFlow::{self, Break, Continue};
impl<U, I, ID, F> TryFold<I, U, ID, F>
where
I: ParallelIterator,
F: Fn(U::Output, I::Item) -> U + Sync + Send,
ID: Fn() -> U::Output + Sync + Send,
U: Try + Send,
{
pub(super) fn new(base: I, identity: ID, fold_op: F) -> Self {
TryFold {
base,
identity,
fold_op,
marker: PhantomData,
}
}
}
/// `TryFold` is an iterator that applies a function over an iterator producing a single value.
/// This struct is created by the [`try_fold()`] method on [`ParallelIterator`]
///
/// [`try_fold()`]: trait.ParallelIterator.html#method.try_fold
/// [`ParallelIterator`]: trait.ParallelIterator.html
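///
/// # Examples
///
/// A minimal sketch of the usual pairing of `try_fold()` with a subsequent
/// `try_reduce()` over the partial results:
///
/// ```
/// use rayon::prelude::*;
///
/// // Checked per-thread sums, then a checked reduction of the partial sums.
/// let sum = (0u32..22).into_par_iter()
///     .try_fold(|| 0u32, |acc, x| acc.checked_add(x))
///     .try_reduce(|| 0, u32::checked_add);
/// assert_eq!(sum, Some(231));
/// ```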
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TryFold<I, U, ID, F> {
base: I,
identity: ID,
fold_op: F,
marker: PhantomData<U>,
}
impl<U, I: ParallelIterator + Debug, ID, F> Debug for TryFold<I, U, ID, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFold").field("base", &self.base).finish()
}
}
impl<U, I, ID, F> ParallelIterator for TryFold<I, U, ID, F>
where
I: ParallelIterator,
F: Fn(U::Output, I::Item) -> U + Sync + Send,
ID: Fn() -> U::Output + Sync + Send,
U: Try + Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TryFoldConsumer {
base: consumer,
identity: &self.identity,
fold_op: &self.fold_op,
marker: PhantomData,
};
self.base.drive_unindexed(consumer1)
}
}
struct TryFoldConsumer<'c, U, C, ID, F> {
base: C,
identity: &'c ID,
fold_op: &'c F,
marker: PhantomData<U>,
}
impl<'r, U, T, C, ID, F> Consumer<T> for TryFoldConsumer<'r, U, C, ID, F>
where
C: Consumer<U>,
F: Fn(U::Output, T) -> U + Sync,
ID: Fn() -> U::Output + Sync,
U: Try + Send,
{
type Folder = TryFoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TryFoldConsumer { base: left, ..self },
TryFoldConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TryFoldFolder {
base: self.base.into_folder(),
control: Continue((self.identity)()),
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, ID, F> UnindexedConsumer<T> for TryFoldConsumer<'r, U, C, ID, F>
where
C: UnindexedConsumer<U>,
F: Fn(U::Output, T) -> U + Sync,
ID: Fn() -> U::Output + Sync,
U: Try + Send,
{
fn split_off_left(&self) -> Self {
TryFoldConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct TryFoldFolder<'r, C, U: Try, F> {
base: C,
fold_op: &'r F,
control: ControlFlow<U::Residual, U::Output>,
}
impl<'r, C, U, F, T> Folder<T> for TryFoldFolder<'r, C, U, F>
where
C: Folder<U>,
F: Fn(U::Output, T) -> U + Sync,
U: Try,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
let fold_op = self.fold_op;
if let Continue(acc) = self.control {
self.control = fold_op(acc, item).branch();
}
self
}
fn complete(self) -> C::Result {
let item = match self.control {
Continue(c) => U::from_output(c),
Break(r) => U::from_residual(r),
};
self.base.consume(item).complete()
}
fn full(&self) -> bool {
match self.control {
Break(_) => true,
_ => self.base.full(),
}
}
}
// ///////////////////////////////////////////////////////////////////////////
impl<U, I, F> TryFoldWith<I, U, F>
where
I: ParallelIterator,
F: Fn(U::Output, I::Item) -> U + Sync,
U: Try + Send,
U::Output: Clone + Send,
{
pub(super) fn new(base: I, item: U::Output, fold_op: F) -> Self {
TryFoldWith {
base,
item,
fold_op,
}
}
}
/// `TryFoldWith` is an iterator that applies a function over an iterator producing a single value.
/// This struct is created by the [`try_fold_with()`] method on [`ParallelIterator`]
///
/// [`try_fold_with()`]: trait.ParallelIterator.html#method.try_fold_with
/// [`ParallelIterator`]: trait.ParallelIterator.html
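///
/// # Examples
///
/// A minimal sketch; like `try_fold()`, but cloning a starting value instead
/// of calling an identity closure:
///
/// ```
/// use rayon::prelude::*;
///
/// let sum = (0u32..22).into_par_iter()
///     .try_fold_with(0u32, |acc, x| acc.checked_add(x))
///     .try_reduce(|| 0, u32::checked_add);
/// assert_eq!(sum, Some(231));
/// ```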
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TryFoldWith<I, U: Try, F> {
base: I,
item: U::Output,
fold_op: F,
}
impl<I: ParallelIterator + Debug, U: Try, F> Debug for TryFoldWith<I, U, F>
where
U::Output: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFoldWith")
.field("base", &self.base)
.field("item", &self.item)
.finish()
}
}
impl<U, I, F> ParallelIterator for TryFoldWith<I, U, F>
where
I: ParallelIterator,
F: Fn(U::Output, I::Item) -> U + Sync + Send,
U: Try + Send,
U::Output: Clone + Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TryFoldWithConsumer {
base: consumer,
item: self.item,
fold_op: &self.fold_op,
};
self.base.drive_unindexed(consumer1)
}
}
struct TryFoldWithConsumer<'c, C, U: Try, F> {
base: C,
item: U::Output,
fold_op: &'c F,
}
impl<'r, U, T, C, F> Consumer<T> for TryFoldWithConsumer<'r, C, U, F>
where
C: Consumer<U>,
F: Fn(U::Output, T) -> U + Sync,
U: Try + Send,
U::Output: Clone + Send,
{
type Folder = TryFoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TryFoldWithConsumer {
base: left,
item: self.item.clone(),
..self
},
TryFoldWithConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TryFoldFolder {
base: self.base.into_folder(),
control: Continue(self.item),
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, F> UnindexedConsumer<T> for TryFoldWithConsumer<'r, C, U, F>
where
C: UnindexedConsumer<U>,
F: Fn(U::Output, T) -> U + Sync,
U: Try + Send,
U::Output: Clone + Send,
{
fn split_off_left(&self) -> Self {
TryFoldWithConsumer {
base: self.base.split_off_left(),
item: self.item.clone(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}

131
vendor/rayon/src/iter/try_reduce.rs vendored Normal file

@@ -0,0 +1,131 @@
use super::plumbing::*;
use super::ParallelIterator;
use super::Try;
use std::ops::ControlFlow::{self, Break, Continue};
use std::sync::atomic::{AtomicBool, Ordering};
pub(super) fn try_reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T
where
PI: ParallelIterator<Item = T>,
R: Fn(T::Output, T::Output) -> T + Sync,
ID: Fn() -> T::Output + Sync,
T: Try + Send,
{
let full = AtomicBool::new(false);
let consumer = TryReduceConsumer {
identity: &identity,
reduce_op: &reduce_op,
full: &full,
};
pi.drive_unindexed(consumer)
}
struct TryReduceConsumer<'r, R, ID> {
identity: &'r ID,
reduce_op: &'r R,
full: &'r AtomicBool,
}
impl<'r, R, ID> Copy for TryReduceConsumer<'r, R, ID> {}
impl<'r, R, ID> Clone for TryReduceConsumer<'r, R, ID> {
fn clone(&self) -> Self {
*self
}
}
impl<'r, R, ID, T> Consumer<T> for TryReduceConsumer<'r, R, ID>
where
R: Fn(T::Output, T::Output) -> T + Sync,
ID: Fn() -> T::Output + Sync,
T: Try + Send,
{
type Folder = TryReduceFolder<'r, R, T>;
type Reducer = Self;
type Result = T;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(self, self, self)
}
fn into_folder(self) -> Self::Folder {
TryReduceFolder {
reduce_op: self.reduce_op,
control: Continue((self.identity)()),
full: self.full,
}
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed)
}
}
impl<'r, R, ID, T> UnindexedConsumer<T> for TryReduceConsumer<'r, R, ID>
where
R: Fn(T::Output, T::Output) -> T + Sync,
ID: Fn() -> T::Output + Sync,
T: Try + Send,
{
fn split_off_left(&self) -> Self {
*self
}
fn to_reducer(&self) -> Self::Reducer {
*self
}
}
impl<'r, R, ID, T> Reducer<T> for TryReduceConsumer<'r, R, ID>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try,
{
fn reduce(self, left: T, right: T) -> T {
match (left.branch(), right.branch()) {
(Continue(left), Continue(right)) => (self.reduce_op)(left, right),
(Break(r), _) | (_, Break(r)) => T::from_residual(r),
}
}
}
struct TryReduceFolder<'r, R, T: Try> {
reduce_op: &'r R,
control: ControlFlow<T::Residual, T::Output>,
full: &'r AtomicBool,
}
impl<'r, R, T> Folder<T> for TryReduceFolder<'r, R, T>
where
R: Fn(T::Output, T::Output) -> T,
T: Try,
{
type Result = T;
fn consume(mut self, item: T) -> Self {
let reduce_op = self.reduce_op;
self.control = match (self.control, item.branch()) {
(Continue(left), Continue(right)) => reduce_op(left, right).branch(),
(control @ Break(_), _) | (_, control @ Break(_)) => control,
};
if let Break(_) = self.control {
self.full.store(true, Ordering::Relaxed);
}
self
}
fn complete(self) -> T {
match self.control {
Continue(c) => T::from_output(c),
Break(r) => T::from_residual(r),
}
}
fn full(&self) -> bool {
match self.control {
Break(_) => true,
_ => self.full.load(Ordering::Relaxed),
}
}
}

132
vendor/rayon/src/iter/try_reduce_with.rs vendored Normal file

@@ -0,0 +1,132 @@
use super::plumbing::*;
use super::ParallelIterator;
use super::Try;
use std::ops::ControlFlow::{self, Break, Continue};
use std::sync::atomic::{AtomicBool, Ordering};
pub(super) fn try_reduce_with<PI, R, T>(pi: PI, reduce_op: R) -> Option<T>
where
PI: ParallelIterator<Item = T>,
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try + Send,
{
let full = AtomicBool::new(false);
let consumer = TryReduceWithConsumer {
reduce_op: &reduce_op,
full: &full,
};
pi.drive_unindexed(consumer)
}
struct TryReduceWithConsumer<'r, R> {
reduce_op: &'r R,
full: &'r AtomicBool,
}
impl<'r, R> Copy for TryReduceWithConsumer<'r, R> {}
impl<'r, R> Clone for TryReduceWithConsumer<'r, R> {
fn clone(&self) -> Self {
*self
}
}
impl<'r, R, T> Consumer<T> for TryReduceWithConsumer<'r, R>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try + Send,
{
type Folder = TryReduceWithFolder<'r, R, T>;
type Reducer = Self;
type Result = Option<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(self, self, self)
}
fn into_folder(self) -> Self::Folder {
TryReduceWithFolder {
reduce_op: self.reduce_op,
opt_control: None,
full: self.full,
}
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed)
}
}
impl<'r, R, T> UnindexedConsumer<T> for TryReduceWithConsumer<'r, R>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try + Send,
{
fn split_off_left(&self) -> Self {
*self
}
fn to_reducer(&self) -> Self::Reducer {
*self
}
}
impl<'r, R, T> Reducer<Option<T>> for TryReduceWithConsumer<'r, R>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try,
{
fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
let reduce_op = self.reduce_op;
match (left, right) {
(Some(left), Some(right)) => match (left.branch(), right.branch()) {
(Continue(left), Continue(right)) => Some(reduce_op(left, right)),
(Break(r), _) | (_, Break(r)) => Some(T::from_residual(r)),
},
(None, x) | (x, None) => x,
}
}
}
struct TryReduceWithFolder<'r, R, T: Try> {
reduce_op: &'r R,
opt_control: Option<ControlFlow<T::Residual, T::Output>>,
full: &'r AtomicBool,
}
impl<'r, R, T> Folder<T> for TryReduceWithFolder<'r, R, T>
where
R: Fn(T::Output, T::Output) -> T,
T: Try,
{
type Result = Option<T>;
fn consume(mut self, item: T) -> Self {
let reduce_op = self.reduce_op;
let control = match (self.opt_control, item.branch()) {
(Some(Continue(left)), Continue(right)) => reduce_op(left, right).branch(),
(Some(control @ Break(_)), _) | (_, control) => control,
};
if let Break(_) = control {
self.full.store(true, Ordering::Relaxed)
}
self.opt_control = Some(control);
self
}
fn complete(self) -> Option<T> {
match self.opt_control {
Some(Continue(c)) => Some(T::from_output(c)),
Some(Break(r)) => Some(T::from_residual(r)),
None => None,
}
}
fn full(&self) -> bool {
match self.opt_control {
Some(Break(_)) => true,
_ => self.full.load(Ordering::Relaxed),
}
}
}

525
vendor/rayon/src/iter/unzip.rs vendored Normal file

@@ -0,0 +1,525 @@
use super::plumbing::*;
use super::*;
/// This trait abstracts the different ways we can "unzip" one parallel
/// iterator into two distinct consumers, which we can handle almost
/// identically apart from how to process the individual items.
trait UnzipOp<T>: Sync + Send {
/// The type of item expected by the left consumer.
type Left: Send;
/// The type of item expected by the right consumer.
type Right: Send;
/// Consumes one item and feeds it to one or both of the underlying folders.
fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
where
FA: Folder<Self::Left>,
FB: Folder<Self::Right>;
/// Reports whether this op may support indexed consumers.
/// - e.g. true for `unzip`, where the item count is passed through directly.
/// - e.g. false for `partition`, where the distribution of items between the
/// two sides is not known in advance.
fn indexable() -> bool {
false
}
}
/// Runs an unzip-like operation into default `ParallelExtend` collections.
fn execute<I, OP, FromA, FromB>(pi: I, op: OP) -> (FromA, FromB)
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
FromA: Default + Send + ParallelExtend<OP::Left>,
FromB: Default + Send + ParallelExtend<OP::Right>,
{
let mut a = FromA::default();
let mut b = FromB::default();
execute_into(&mut a, &mut b, pi, op);
(a, b)
}
/// Runs an unzip-like operation into `ParallelExtend` collections.
fn execute_into<I, OP, FromA, FromB>(a: &mut FromA, b: &mut FromB, pi: I, op: OP)
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
FromA: Send + ParallelExtend<OP::Left>,
FromB: Send + ParallelExtend<OP::Right>,
{
// We have no idea what the consumers will look like for these
// collections' `par_extend`, but we can intercept them in our own
// `drive_unindexed`. Start with the left side, type `A`:
let iter = UnzipA { base: pi, op, b };
a.par_extend(iter);
}
/// Unzips the items of a parallel iterator into a pair of arbitrary
/// `ParallelExtend` containers.
///
/// This is called by `ParallelIterator::unzip`.
pub(super) fn unzip<I, A, B, FromA, FromB>(pi: I) -> (FromA, FromB)
where
I: ParallelIterator<Item = (A, B)>,
FromA: Default + Send + ParallelExtend<A>,
FromB: Default + Send + ParallelExtend<B>,
A: Send,
B: Send,
{
execute(pi, Unzip)
}
/// Unzips an `IndexedParallelIterator` into two arbitrary `Consumer`s.
///
/// This is called by `super::collect::unzip_into_vecs`.
pub(super) fn unzip_indexed<I, A, B, CA, CB>(pi: I, left: CA, right: CB) -> (CA::Result, CB::Result)
where
I: IndexedParallelIterator<Item = (A, B)>,
CA: Consumer<A>,
CB: Consumer<B>,
A: Send,
B: Send,
{
let consumer = UnzipConsumer {
op: &Unzip,
left,
right,
};
pi.drive(consumer)
}
/// An `UnzipOp` that splits a tuple directly into the two consumers.
struct Unzip;
impl<A: Send, B: Send> UnzipOp<(A, B)> for Unzip {
type Left = A;
type Right = B;
fn consume<FA, FB>(&self, item: (A, B), left: FA, right: FB) -> (FA, FB)
where
FA: Folder<A>,
FB: Folder<B>,
{
(left.consume(item.0), right.consume(item.1))
}
fn indexable() -> bool {
true
}
}
/// Partitions the items of a parallel iterator into a pair of arbitrary
/// `ParallelExtend` containers.
///
/// This is called by `ParallelIterator::partition`.
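///
/// A minimal sketch of the caller-facing behavior:
///
/// ```
/// use rayon::prelude::*;
///
/// let (evens, odds): (Vec<i32>, Vec<i32>) =
///     (0..8).into_par_iter().partition(|x| x % 2 == 0);
/// assert_eq!(evens, vec![0, 2, 4, 6]);
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```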
pub(super) fn partition<I, A, B, P>(pi: I, predicate: P) -> (A, B)
where
I: ParallelIterator,
A: Default + Send + ParallelExtend<I::Item>,
B: Default + Send + ParallelExtend<I::Item>,
P: Fn(&I::Item) -> bool + Sync + Send,
{
execute(pi, Partition { predicate })
}
/// An `UnzipOp` that routes items depending on a predicate function.
struct Partition<P> {
predicate: P,
}
impl<P, T> UnzipOp<T> for Partition<P>
where
P: Fn(&T) -> bool + Sync + Send,
T: Send,
{
type Left = T;
type Right = T;
fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
where
FA: Folder<T>,
FB: Folder<T>,
{
if (self.predicate)(&item) {
(left.consume(item), right)
} else {
(left, right.consume(item))
}
}
}
/// Partitions and maps the items of a parallel iterator into a pair of
/// arbitrary `ParallelExtend` containers.
///
/// This is called by `ParallelIterator::partition_map`.
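///
/// A minimal sketch of the caller-facing behavior, routing each item through
/// `rayon::iter::Either`:
///
/// ```
/// use rayon::iter::Either;
/// use rayon::prelude::*;
///
/// let (evens, odds): (Vec<i32>, Vec<i32>) = (0..8)
///     .into_par_iter()
///     .partition_map(|x| if x % 2 == 0 { Either::Left(x) } else { Either::Right(x) });
/// assert_eq!(evens, vec![0, 2, 4, 6]);
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```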
pub(super) fn partition_map<I, A, B, P, L, R>(pi: I, predicate: P) -> (A, B)
where
I: ParallelIterator,
A: Default + Send + ParallelExtend<L>,
B: Default + Send + ParallelExtend<R>,
P: Fn(I::Item) -> Either<L, R> + Sync + Send,
L: Send,
R: Send,
{
execute(pi, PartitionMap { predicate })
}
/// An `UnzipOp` that routes items depending on how they are mapped to `Either`.
struct PartitionMap<P> {
predicate: P,
}
impl<P, L, R, T> UnzipOp<T> for PartitionMap<P>
where
P: Fn(T) -> Either<L, R> + Sync + Send,
L: Send,
R: Send,
{
type Left = L;
type Right = R;
fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
where
FA: Folder<L>,
FB: Folder<R>,
{
match (self.predicate)(item) {
Either::Left(item) => (left.consume(item), right),
Either::Right(item) => (left, right.consume(item)),
}
}
}
/// A fake iterator to intercept the `Consumer` for type `A`.
struct UnzipA<'b, I, OP, FromB> {
base: I,
op: OP,
b: &'b mut FromB,
}
impl<'b, I, OP, FromB> ParallelIterator for UnzipA<'b, I, OP, FromB>
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
FromB: Send + ParallelExtend<OP::Right>,
{
type Item = OP::Left;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let mut result = None;
{
// Now it's time to find the consumer for type `B`
let iter = UnzipB {
base: self.base,
op: self.op,
left_consumer: consumer,
left_result: &mut result,
};
self.b.par_extend(iter);
}
// NB: If for some reason `b.par_extend` doesn't actually drive the
// iterator, then we won't have a result for the left side to return
// at all. We can't fake an arbitrary consumer's result, so panic.
result.expect("unzip consumers didn't execute!")
}
fn opt_len(&self) -> Option<usize> {
if OP::indexable() {
self.base.opt_len()
} else {
None
}
}
}
/// A fake iterator to intercept the `Consumer` for type `B`.
struct UnzipB<'r, I, OP, CA>
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
CA: UnindexedConsumer<OP::Left>,
CA::Result: 'r,
{
base: I,
op: OP,
left_consumer: CA,
left_result: &'r mut Option<CA::Result>,
}
impl<'r, I, OP, CA> ParallelIterator for UnzipB<'r, I, OP, CA>
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
CA: UnindexedConsumer<OP::Left>,
{
type Item = OP::Right;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
// Now that we have two consumers, we can unzip the real iterator.
let consumer = UnzipConsumer {
op: &self.op,
left: self.left_consumer,
right: consumer,
};
let result = self.base.drive_unindexed(consumer);
*self.left_result = Some(result.0);
result.1
}
fn opt_len(&self) -> Option<usize> {
if OP::indexable() {
self.base.opt_len()
} else {
None
}
}
}
/// `Consumer` that unzips into two other `Consumer`s
struct UnzipConsumer<'a, OP, CA, CB> {
op: &'a OP,
left: CA,
right: CB,
}
impl<'a, T, OP, CA, CB> Consumer<T> for UnzipConsumer<'a, OP, CA, CB>
where
OP: UnzipOp<T>,
CA: Consumer<OP::Left>,
CB: Consumer<OP::Right>,
{
type Folder = UnzipFolder<'a, OP, CA::Folder, CB::Folder>;
type Reducer = UnzipReducer<CA::Reducer, CB::Reducer>;
type Result = (CA::Result, CB::Result);
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left1, left2, left_reducer) = self.left.split_at(index);
let (right1, right2, right_reducer) = self.right.split_at(index);
(
UnzipConsumer {
op: self.op,
left: left1,
right: right1,
},
UnzipConsumer {
op: self.op,
left: left2,
right: right2,
},
UnzipReducer {
left: left_reducer,
right: right_reducer,
},
)
}
fn into_folder(self) -> Self::Folder {
UnzipFolder {
op: self.op,
left: self.left.into_folder(),
right: self.right.into_folder(),
}
}
fn full(&self) -> bool {
// don't stop until everyone is full
self.left.full() && self.right.full()
}
}
impl<'a, T, OP, CA, CB> UnindexedConsumer<T> for UnzipConsumer<'a, OP, CA, CB>
where
OP: UnzipOp<T>,
CA: UnindexedConsumer<OP::Left>,
CB: UnindexedConsumer<OP::Right>,
{
fn split_off_left(&self) -> Self {
UnzipConsumer {
op: self.op,
left: self.left.split_off_left(),
right: self.right.split_off_left(),
}
}
fn to_reducer(&self) -> Self::Reducer {
UnzipReducer {
left: self.left.to_reducer(),
right: self.right.to_reducer(),
}
}
}
/// `Folder` that unzips into two other `Folder`s
struct UnzipFolder<'a, OP, FA, FB> {
op: &'a OP,
left: FA,
right: FB,
}
impl<'a, T, OP, FA, FB> Folder<T> for UnzipFolder<'a, OP, FA, FB>
where
OP: UnzipOp<T>,
FA: Folder<OP::Left>,
FB: Folder<OP::Right>,
{
type Result = (FA::Result, FB::Result);
fn consume(self, item: T) -> Self {
let (left, right) = self.op.consume(item, self.left, self.right);
UnzipFolder {
op: self.op,
left,
right,
}
}
fn complete(self) -> Self::Result {
(self.left.complete(), self.right.complete())
}
fn full(&self) -> bool {
// don't stop until everyone is full
self.left.full() && self.right.full()
}
}
/// `Reducer` that unzips into two other `Reducer`s
struct UnzipReducer<RA, RB> {
left: RA,
right: RB,
}
impl<A, B, RA, RB> Reducer<(A, B)> for UnzipReducer<RA, RB>
where
RA: Reducer<A>,
RB: Reducer<B>,
{
fn reduce(self, left: (A, B), right: (A, B)) -> (A, B) {
(
self.left.reduce(left.0, right.0),
self.right.reduce(left.1, right.1),
)
}
}
impl<A, B, FromA, FromB> ParallelExtend<(A, B)> for (FromA, FromB)
where
A: Send,
B: Send,
FromA: Send + ParallelExtend<A>,
FromB: Send + ParallelExtend<B>,
{
fn par_extend<I>(&mut self, pi: I)
where
I: IntoParallelIterator<Item = (A, B)>,
{
execute_into(&mut self.0, &mut self.1, pi.into_par_iter(), Unzip);
}
}
impl<L, R, A, B> ParallelExtend<Either<L, R>> for (A, B)
where
L: Send,
R: Send,
A: Send + ParallelExtend<L>,
B: Send + ParallelExtend<R>,
{
fn par_extend<I>(&mut self, pi: I)
where
I: IntoParallelIterator<Item = Either<L, R>>,
{
execute_into(&mut self.0, &mut self.1, pi.into_par_iter(), UnEither);
}
}
/// An `UnzipOp` that routes items depending on their `Either` variant.
struct UnEither;
impl<L, R> UnzipOp<Either<L, R>> for UnEither
where
L: Send,
R: Send,
{
type Left = L;
type Right = R;
fn consume<FL, FR>(&self, item: Either<L, R>, left: FL, right: FR) -> (FL, FR)
where
FL: Folder<L>,
FR: Folder<R>,
{
match item {
Either::Left(item) => (left.consume(item), right),
Either::Right(item) => (left, right.consume(item)),
}
}
}
impl<A, B, FromA, FromB> FromParallelIterator<(A, B)> for (FromA, FromB)
where
A: Send,
B: Send,
FromA: Send + FromParallelIterator<A>,
FromB: Send + FromParallelIterator<B>,
{
fn from_par_iter<I>(pi: I) -> Self
where
I: IntoParallelIterator<Item = (A, B)>,
{
let (a, b): (Collector<FromA>, Collector<FromB>) = pi.into_par_iter().unzip();
(a.result.unwrap(), b.result.unwrap())
}
}
impl<L, R, A, B> FromParallelIterator<Either<L, R>> for (A, B)
where
L: Send,
R: Send,
A: Send + FromParallelIterator<L>,
B: Send + FromParallelIterator<R>,
{
fn from_par_iter<I>(pi: I) -> Self
where
I: IntoParallelIterator<Item = Either<L, R>>,
{
fn identity<T>(x: T) -> T {
x
}
let (a, b): (Collector<A>, Collector<B>) = pi.into_par_iter().partition_map(identity);
(a.result.unwrap(), b.result.unwrap())
}
}
/// Shim to implement a one-time `ParallelExtend` using `FromParallelIterator`.
struct Collector<FromT> {
result: Option<FromT>,
}
impl<FromT> Default for Collector<FromT> {
fn default() -> Self {
Collector { result: None }
}
}
impl<T, FromT> ParallelExtend<T> for Collector<FromT>
where
T: Send,
FromT: Send + FromParallelIterator<T>,
{
fn par_extend<I>(&mut self, pi: I)
where
I: IntoParallelIterator<Item = T>,
{
debug_assert!(self.result.is_none());
self.result = Some(pi.into_par_iter().collect());
}
}

327
vendor/rayon/src/iter/update.rs vendored Normal file
View File

@@ -0,0 +1,327 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `Update` is an iterator that mutates the elements of an
/// underlying iterator before they are yielded.
///
/// This struct is created by the [`update()`] method on [`ParallelIterator`]
///
/// [`update()`]: trait.ParallelIterator.html#method.update
/// [`ParallelIterator`]: trait.ParallelIterator.html
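///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = (0..4).into_par_iter().update(|x| *x *= 10).collect();
/// assert_eq!(v, vec![0, 10, 20, 30]);
/// ```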
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Update<I: ParallelIterator, F> {
base: I,
update_op: F,
}
impl<I: ParallelIterator + Debug, F> Debug for Update<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Update").field("base", &self.base).finish()
}
}
impl<I, F> Update<I, F>
where
I: ParallelIterator,
{
/// Creates a new `Update` iterator.
pub(super) fn new(base: I, update_op: F) -> Self {
Update { base, update_op }
}
}
impl<I, F> ParallelIterator for Update<I, F>
where
I: ParallelIterator,
F: Fn(&mut I::Item) + Send + Sync,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = UpdateConsumer::new(consumer, &self.update_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, F> IndexedParallelIterator for Update<I, F>
where
I: IndexedParallelIterator,
F: Fn(&mut I::Item) + Send + Sync,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = UpdateConsumer::new(consumer, &self.update_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
update_op: self.update_op,
});
struct Callback<CB, F> {
callback: CB,
update_op: F,
}
impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
where
CB: ProducerCallback<T>,
F: Fn(&mut T) + Send + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = UpdateProducer {
base,
update_op: &self.update_op,
};
self.callback.callback(producer)
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct UpdateProducer<'f, P, F> {
base: P,
update_op: &'f F,
}
impl<'f, P, F> Producer for UpdateProducer<'f, P, F>
where
P: Producer,
F: Fn(&mut P::Item) + Send + Sync,
{
type Item = P::Item;
type IntoIter = UpdateSeq<P::IntoIter, &'f F>;
fn into_iter(self) -> Self::IntoIter {
UpdateSeq {
base: self.base.into_iter(),
update_op: self.update_op,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
UpdateProducer {
base: left,
update_op: self.update_op,
},
UpdateProducer {
base: right,
update_op: self.update_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = UpdateFolder {
base: folder,
update_op: self.update_op,
};
self.base.fold_with(folder1).base
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct UpdateConsumer<'f, C, F> {
base: C,
update_op: &'f F,
}
impl<'f, C, F> UpdateConsumer<'f, C, F> {
fn new(base: C, update_op: &'f F) -> Self {
UpdateConsumer { base, update_op }
}
}
impl<'f, T, C, F> Consumer<T> for UpdateConsumer<'f, C, F>
where
C: Consumer<T>,
F: Fn(&mut T) + Send + Sync,
{
type Folder = UpdateFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
UpdateConsumer::new(left, self.update_op),
UpdateConsumer::new(right, self.update_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
UpdateFolder {
base: self.base.into_folder(),
update_op: self.update_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, C, F> UnindexedConsumer<T> for UpdateConsumer<'f, C, F>
where
C: UnindexedConsumer<T>,
F: Fn(&mut T) + Send + Sync,
{
fn split_off_left(&self) -> Self {
UpdateConsumer::new(self.base.split_off_left(), self.update_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct UpdateFolder<'f, C, F> {
base: C,
update_op: &'f F,
}
fn apply<T>(update_op: impl Fn(&mut T)) -> impl Fn(T) -> T {
move |mut item| {
update_op(&mut item);
item
}
}
impl<'f, T, C, F> Folder<T> for UpdateFolder<'f, C, F>
where
C: Folder<T>,
F: Fn(&mut T),
{
type Result = C::Result;
fn consume(self, mut item: T) -> Self {
(self.update_op)(&mut item);
UpdateFolder {
base: self.base.consume(item),
update_op: self.update_op,
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let update_op = self.update_op;
self.base = self
.base
.consume_iter(iter.into_iter().map(apply(update_op)));
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
/// Standard Update adaptor, based on `itertools::adaptors::Update`
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
struct UpdateSeq<I, F> {
base: I,
update_op: F,
}
impl<I, F> Iterator for UpdateSeq<I, F>
where
I: Iterator,
F: Fn(&mut I::Item),
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
let mut v = self.base.next()?;
(self.update_op)(&mut v);
Some(v)
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.base.size_hint()
}
fn fold<Acc, G>(self, init: Acc, g: G) -> Acc
where
G: FnMut(Acc, Self::Item) -> Acc,
{
self.base.map(apply(self.update_op)).fold(init, g)
}
// if possible, re-use inner iterator specializations in collect
fn collect<C>(self) -> C
where
C: ::std::iter::FromIterator<Self::Item>,
{
self.base.map(apply(self.update_op)).collect()
}
}
impl<I, F> ExactSizeIterator for UpdateSeq<I, F>
where
I: ExactSizeIterator,
F: Fn(&mut I::Item),
{
}
impl<I, F> DoubleEndedIterator for UpdateSeq<I, F>
where
I: DoubleEndedIterator,
F: Fn(&mut I::Item),
{
fn next_back(&mut self) -> Option<Self::Item> {
let mut v = self.base.next_back()?;
(self.update_op)(&mut v);
Some(v)
}
}

154
vendor/rayon/src/iter/while_some.rs vendored Normal file
View File

@@ -0,0 +1,154 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicBool, Ordering};
/// `WhileSome` is an iterator that yields the `Some` elements of an iterator,
/// halting as soon as any `None` is produced.
///
/// This struct is created by the [`while_some()`] method on [`ParallelIterator`]
///
/// [`while_some()`]: trait.ParallelIterator.html#method.while_some
/// [`ParallelIterator`]: trait.ParallelIterator.html
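///
/// A minimal usage sketch; note that exactly which `Some` values survive the
/// halt is not deterministic, since the stop flag races across worker threads:
///
/// ```
/// use rayon::prelude::*;
///
/// let items = vec![Some(1), Some(2), None, Some(4)];
/// let v: Vec<i32> = items.into_par_iter().while_some().collect();
/// assert!(v.iter().all(|&x| x == 1 || x == 2 || x == 4));
/// ```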
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct WhileSome<I: ParallelIterator> {
base: I,
}
impl<I> WhileSome<I>
where
I: ParallelIterator,
{
/// Creates a new `WhileSome` iterator.
pub(super) fn new(base: I) -> Self {
WhileSome { base }
}
}
impl<I, T> ParallelIterator for WhileSome<I>
where
I: ParallelIterator<Item = Option<T>>,
T: Send,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let full = AtomicBool::new(false);
let consumer1 = WhileSomeConsumer {
base: consumer,
full: &full,
};
self.base.drive_unindexed(consumer1)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Consumer implementation
struct WhileSomeConsumer<'f, C> {
base: C,
full: &'f AtomicBool,
}
impl<'f, T, C> Consumer<Option<T>> for WhileSomeConsumer<'f, C>
where
C: Consumer<T>,
T: Send,
{
type Folder = WhileSomeFolder<'f, C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
WhileSomeConsumer { base: left, ..self },
WhileSomeConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
WhileSomeFolder {
base: self.base.into_folder(),
full: self.full,
}
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed) || self.base.full()
}
}
impl<'f, T, C> UnindexedConsumer<Option<T>> for WhileSomeConsumer<'f, C>
where
C: UnindexedConsumer<T>,
T: Send,
{
fn split_off_left(&self) -> Self {
WhileSomeConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct WhileSomeFolder<'f, C> {
base: C,
full: &'f AtomicBool,
}
impl<'f, T, C> Folder<Option<T>> for WhileSomeFolder<'f, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: Option<T>) -> Self {
match item {
Some(item) => self.base = self.base.consume(item),
None => self.full.store(true, Ordering::Relaxed),
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = Option<T>>,
{
fn some<T>(full: &AtomicBool) -> impl Fn(&Option<T>) -> bool + '_ {
move |x| match *x {
Some(_) => !full.load(Ordering::Relaxed),
None => {
full.store(true, Ordering::Relaxed);
false
}
}
}
self.base = self.base.consume_iter(
iter.into_iter()
.take_while(some(self.full))
.map(Option::unwrap),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed) || self.base.full()
}
}

159
vendor/rayon/src/iter/zip.rs vendored Normal file
View File

@@ -0,0 +1,159 @@
use super::plumbing::*;
use super::*;
use std::cmp;
use std::iter;
/// `Zip` is an iterator that zips up `a` and `b` into a single iterator
/// of pairs. This struct is created by the [`zip()`] method on
/// [`IndexedParallelIterator`]
///
/// [`zip()`]: trait.IndexedParallelIterator.html#method.zip
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
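///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<(i32, char)> = (0..3)
///     .into_par_iter()
///     .zip(vec!['a', 'b', 'c'])
///     .collect();
/// assert_eq!(v, vec![(0, 'a'), (1, 'b'), (2, 'c')]);
/// ```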
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Zip<A: IndexedParallelIterator, B: IndexedParallelIterator> {
a: A,
b: B,
}
impl<A, B> Zip<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
/// Creates a new `Zip` iterator.
pub(super) fn new(a: A, b: B) -> Self {
Zip { a, b }
}
}
impl<A, B> ParallelIterator for Zip<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
type Item = (A::Item, B::Item);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<A, B> IndexedParallelIterator for Zip<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
cmp::min(self.a.len(), self.b.len())
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.a.with_producer(CallbackA {
callback,
b: self.b,
});
struct CallbackA<CB, B> {
callback: CB,
b: B,
}
impl<CB, ITEM, B> ProducerCallback<ITEM> for CallbackA<CB, B>
where
B: IndexedParallelIterator,
CB: ProducerCallback<(ITEM, B::Item)>,
{
type Output = CB::Output;
fn callback<A>(self, a_producer: A) -> Self::Output
where
A: Producer<Item = ITEM>,
{
self.b.with_producer(CallbackB {
a_producer,
callback: self.callback,
})
}
}
struct CallbackB<CB, A> {
a_producer: A,
callback: CB,
}
impl<CB, A, ITEM> ProducerCallback<ITEM> for CallbackB<CB, A>
where
A: Producer,
CB: ProducerCallback<(A::Item, ITEM)>,
{
type Output = CB::Output;
fn callback<B>(self, b_producer: B) -> Self::Output
where
B: Producer<Item = ITEM>,
{
self.callback.callback(ZipProducer {
a: self.a_producer,
b: b_producer,
})
}
}
}
}
/// ////////////////////////////////////////////////////////////////////////
struct ZipProducer<A: Producer, B: Producer> {
a: A,
b: B,
}
impl<A: Producer, B: Producer> Producer for ZipProducer<A, B> {
type Item = (A::Item, B::Item);
type IntoIter = iter::Zip<A::IntoIter, B::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.a.into_iter().zip(self.b.into_iter())
}
fn min_len(&self) -> usize {
cmp::max(self.a.min_len(), self.b.min_len())
}
fn max_len(&self) -> usize {
cmp::min(self.a.max_len(), self.b.max_len())
}
fn split_at(self, index: usize) -> (Self, Self) {
let (a_left, a_right) = self.a.split_at(index);
let (b_left, b_right) = self.b.split_at(index);
(
ZipProducer {
a: a_left,
b: b_left,
},
ZipProducer {
a: a_right,
b: b_right,
},
)
}
}

72
vendor/rayon/src/iter/zip_eq.rs vendored Normal file
View File

@@ -0,0 +1,72 @@
use super::plumbing::*;
use super::*;
/// An [`IndexedParallelIterator`] that iterates over two parallel iterators of equal
/// length simultaneously.
///
/// This struct is created by the [`zip_eq`] method on [`IndexedParallelIterator`];
/// see its documentation for more information.
///
/// [`zip_eq`]: trait.IndexedParallelIterator.html#method.zip_eq
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
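///
/// A minimal usage sketch; unlike `zip`, mismatched lengths panic instead of
/// truncating to the shorter input:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<(i32, i32)> = (0..3).into_par_iter().zip_eq(10..13).collect();
/// assert_eq!(v, vec![(0, 10), (1, 11), (2, 12)]);
/// ```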
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct ZipEq<A: IndexedParallelIterator, B: IndexedParallelIterator> {
zip: Zip<A, B>,
}
impl<A, B> ZipEq<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
/// Creates a new `ZipEq` iterator.
pub(super) fn new(a: A, b: B) -> Self {
ZipEq {
zip: super::Zip::new(a, b),
}
}
}
impl<A, B> ParallelIterator for ZipEq<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
type Item = (A::Item, B::Item);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self.zip, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.zip.len())
}
}
impl<A, B> IndexedParallelIterator for ZipEq<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self.zip, consumer)
}
fn len(&self) -> usize {
self.zip.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
self.zip.with_producer(callback)
}
}

160
vendor/rayon/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,160 @@
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
#![deny(unreachable_pub)]
#![warn(rust_2018_idioms)]
//! Data-parallelism library that makes it easy to convert sequential
//! computations into parallel ones.
//!
//! Rayon is lightweight and convenient for introducing parallelism into existing
//! code. It guarantees data-race-free execution and takes advantage of
//! parallelism when sensible, based on workload at runtime.
//!
//! # How to use Rayon
//!
//! There are two ways to use Rayon:
//!
//! - **High-level parallel constructs** are the simplest way to use Rayon and also
//! typically the most efficient.
//! - [Parallel iterators][iter module] make it easy to convert a sequential iterator to
//! execute in parallel.
//! - The [`ParallelIterator`] trait defines general methods for all parallel iterators.
//! - The [`IndexedParallelIterator`] trait adds methods for iterators that support random
//! access.
//! - The [`par_sort`] method sorts `&mut [T]` slices (or vectors) in parallel.
//! - [`par_extend`] can be used to efficiently grow collections with items produced
//! by a parallel iterator.
//! - **Custom tasks** let you divide your work into parallel tasks yourself.
//! - [`join`] is used to subdivide a task into two pieces (sketched below).
//! - [`scope`] creates a scope within which you can create any number of parallel tasks.
//! - [`ThreadPoolBuilder`] can be used to create your own thread pools or customize
//! the global one.
//!
//! [iter module]: iter/index.html
//! [`join`]: fn.join.html
//! [`scope`]: fn.scope.html
//! [`par_sort`]: slice/trait.ParallelSliceMut.html#method.par_sort
//! [`par_extend`]: iter/trait.ParallelExtend.html#tymethod.par_extend
//! [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
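//!
//! A minimal sketch of `join`:
//!
//! ```
//! let (a, b) = rayon::join(|| 1 + 1, || 2 + 2);
//! assert_eq!((a, b), (2, 4));
//! ```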
//!
//! # Basic usage and the Rayon prelude
//!
//! First, you will need to add `rayon` to your `Cargo.toml`.
//!
//! Next, to use parallel iterators or the other high-level methods,
//! you need to import several traits. Those traits are bundled into
//! the module [`rayon::prelude`]. It is recommended that you import
//! all of these traits at once by adding `use rayon::prelude::*` at
//! the top of each module that uses Rayon methods.
//!
//! These traits give you access to the `par_iter` method which provides
//! parallel implementations of many iterative functions such as [`map`],
//! [`for_each`], [`filter`], [`fold`], and [more].
//!
//! [`rayon::prelude`]: prelude/index.html
//! [`map`]: iter/trait.ParallelIterator.html#method.map
//! [`for_each`]: iter/trait.ParallelIterator.html#method.for_each
//! [`filter`]: iter/trait.ParallelIterator.html#method.filter
//! [`fold`]: iter/trait.ParallelIterator.html#method.fold
//! [more]: iter/trait.ParallelIterator.html#provided-methods
//! [`ParallelIterator`]: iter/trait.ParallelIterator.html
//! [`IndexedParallelIterator`]: iter/trait.IndexedParallelIterator.html
//!
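//! A minimal sketch of that workflow:
//!
//! ```
//! use rayon::prelude::*;
//!
//! let doubled: Vec<i32> = vec![1, 2, 3].par_iter().map(|&x| x * 2).collect();
//! assert_eq!(doubled, vec![2, 4, 6]);
//! ```
//!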
//! # Crate Layout
//!
//! Rayon extends many of the types found in the standard library with
//! parallel iterator implementations. The modules in the `rayon`
//! crate mirror [`std`] itself: so, e.g., the `option` module in
//! Rayon contains parallel iterators for the `Option` type, which is
//! found in [the `option` module of `std`]. Similarly, the
//! `collections` module in Rayon offers parallel iterator types for
//! [the `collections` from `std`]. You will rarely need to access
//! these submodules unless you need to name iterator types
//! explicitly.
//!
//! [the `option` module of `std`]: https://doc.rust-lang.org/std/option/index.html
//! [the `collections` from `std`]: https://doc.rust-lang.org/std/collections/index.html
//! [`std`]: https://doc.rust-lang.org/std/
//!
//! # Targets without threading
//!
//! Rayon has limited support for targets without `std` threading implementations.
//! See the [`rayon_core`] documentation for more information about its global fallback.
//!
//! # Other questions?
//!
//! See [the Rayon FAQ][faq].
//!
//! [faq]: https://github.com/rayon-rs/rayon/blob/master/FAQ.md
#[macro_use]
mod delegate;
#[macro_use]
mod private;
mod split_producer;
pub mod array;
pub mod collections;
pub mod iter;
pub mod option;
pub mod prelude;
pub mod range;
pub mod range_inclusive;
pub mod result;
pub mod slice;
pub mod str;
pub mod string;
pub mod vec;
mod math;
mod par_either;
mod compile_fail;
pub use rayon_core::FnContext;
pub use rayon_core::ThreadBuilder;
pub use rayon_core::ThreadPool;
pub use rayon_core::ThreadPoolBuildError;
pub use rayon_core::ThreadPoolBuilder;
pub use rayon_core::{broadcast, spawn_broadcast, BroadcastContext};
pub use rayon_core::{current_num_threads, current_thread_index, max_num_threads};
pub use rayon_core::{in_place_scope, scope, Scope};
pub use rayon_core::{in_place_scope_fifo, scope_fifo, ScopeFifo};
pub use rayon_core::{join, join_context};
pub use rayon_core::{spawn, spawn_fifo};
pub use rayon_core::{yield_local, yield_now, Yield};
/// We need to transmit raw pointers across threads. It is possible to do this
/// without any unsafe code by converting pointers to usize or to AtomicPtr<T>
/// then back to a raw pointer for use. We prefer this approach because code
/// that uses this type is more explicit.
///
/// Unsafe code is still required to dereference the pointer, so this type is
/// not unsound on its own, although it does partly lift the unconditional
/// !Send and !Sync on raw pointers. As always, dereference with care.
struct SendPtr<T>(*mut T);
// SAFETY: !Send for raw pointers is not for safety, just as a lint
unsafe impl<T: Send> Send for SendPtr<T> {}
// SAFETY: !Sync for raw pointers is not for safety, just as a lint
unsafe impl<T: Send> Sync for SendPtr<T> {}
impl<T> SendPtr<T> {
// Helper to avoid disjoint captures of `send_ptr.0`
fn get(self) -> *mut T {
self.0
}
}
// Implement Clone without the T: Clone bound from the derive
impl<T> Clone for SendPtr<T> {
fn clone(&self) -> Self {
*self
}
}
// Implement Copy without the T: Copy bound from the derive
impl<T> Copy for SendPtr<T> {}

54
vendor/rayon/src/math.rs vendored Normal file
View File

@@ -0,0 +1,54 @@
use std::ops::{Bound, Range, RangeBounds};
/// Divide `n` by `divisor`, and round up to the nearest integer
/// if not evenly divisible.
#[inline]
pub(super) fn div_round_up(n: usize, divisor: usize) -> usize {
debug_assert!(divisor != 0, "Division by zero!");
if n == 0 {
0
} else {
(n - 1) / divisor + 1
}
}
/// Normalize arbitrary `RangeBounds` to a `Range`
pub(super) fn simplify_range(range: impl RangeBounds<usize>, len: usize) -> Range<usize> {
let start = match range.start_bound() {
Bound::Unbounded => 0,
Bound::Included(&i) if i <= len => i,
Bound::Excluded(&i) if i < len => i + 1,
bound => panic!("range start {:?} should be <= length {}", bound, len),
};
let end = match range.end_bound() {
Bound::Unbounded => len,
Bound::Excluded(&i) if i <= len => i,
Bound::Included(&i) if i < len => i + 1,
bound => panic!("range end {:?} should be <= length {}", bound, len),
};
if start > end {
panic!(
"range start {:?} should be <= range end {:?}",
range.start_bound(),
range.end_bound()
);
}
start..end
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn check_div_round_up() {
assert_eq!(0, div_round_up(0, 5));
assert_eq!(1, div_round_up(5, 5));
assert_eq!(1, div_round_up(1, 5));
assert_eq!(2, div_round_up(3, 2));
assert_eq!(
usize::max_value() / 2 + 1,
div_round_up(usize::max_value(), 2)
);
}
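// Illustrative sketch: exercise `simplify_range` on each kind of bound.
#[test]
fn check_simplify_range() {
    assert_eq!(simplify_range(.., 10), 0..10);
    assert_eq!(simplify_range(2..7, 10), 2..7);
    assert_eq!(simplify_range(2..=7, 10), 2..8);
    assert_eq!(simplify_range(..=9, 10), 0..10);
}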
}

203
vendor/rayon/src/option.rs vendored Normal file
View File

@@ -0,0 +1,203 @@
//! Parallel iterator types for [options][std::option]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::option]: https://doc.rust-lang.org/stable/std/option/
use crate::iter::plumbing::*;
use crate::iter::*;
use std::sync::atomic::{AtomicBool, Ordering};
/// A parallel iterator over the value in the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`into_par_iter`] function.
///
/// [`Option`]: https://doc.rust-lang.org/std/option/enum.Option.html
/// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
/// [`into_par_iter`]: ../iter/trait.IntoParallelIterator.html#tymethod.into_par_iter
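///
/// A minimal usage sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<i32> = Some(5).into_par_iter().collect();
/// assert_eq!(v, vec![5]);
/// let w: Vec<i32> = None::<i32>.into_par_iter().collect();
/// assert!(w.is_empty());
/// ```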
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
opt: Option<T>,
}
impl<T: Send> IntoParallelIterator for Option<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { opt: self }
}
}
impl<T: Send> ParallelIterator for IntoIter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for IntoIter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let mut folder = consumer.into_folder();
if let Some(item) = self.opt {
folder = folder.consume(item);
}
folder.complete()
}
fn len(&self) -> usize {
match self.opt {
Some(_) => 1,
None => 0,
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(OptionProducer { opt: self.opt })
}
}
/// A parallel iterator over a reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`par_iter`] function.
///
/// [`Option`]: https://doc.rust-lang.org/std/option/enum.Option.html
/// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
/// [`par_iter`]: ../iter/trait.IntoParallelRefIterator.html#tymethod.par_iter
#[derive(Debug)]
pub struct Iter<'a, T: Sync> {
inner: IntoIter<&'a T>,
}
impl<'a, T: Sync> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync> IntoParallelIterator for &'a Option<T> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
Iter {
inner: self.as_ref().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync + 'a>
}
/// A parallel iterator over a mutable reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`par_iter_mut`] function.
///
/// [`Option`]: https://doc.rust-lang.org/std/option/enum.Option.html
/// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
/// [`par_iter_mut`]: ../iter/trait.IntoParallelRefMutIterator.html#tymethod.par_iter_mut
#[derive(Debug)]
pub struct IterMut<'a, T: Send> {
inner: IntoIter<&'a mut T>,
}
impl<'a, T: Send> IntoParallelIterator for &'a mut Option<T> {
type Item = &'a mut T;
type Iter = IterMut<'a, T>;
fn into_par_iter(self) -> Self::Iter {
IterMut {
inner: self.as_mut().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send + 'a>
}
/// Private producer for an option
struct OptionProducer<T: Send> {
opt: Option<T>,
}
impl<T: Send> Producer for OptionProducer<T> {
type Item = T;
type IntoIter = std::option::IntoIter<T>;
fn into_iter(self) -> Self::IntoIter {
self.opt.into_iter()
}
fn split_at(self, index: usize) -> (Self, Self) {
debug_assert!(index <= 1);
let none = OptionProducer { opt: None };
if index == 0 {
(none, self)
} else {
(self, none)
}
}
}
/// Collect an arbitrary `Option`-wrapped collection.
///
/// If any item is `None`, then all previous items collected are discarded,
/// and it returns only `None`.
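///
/// A minimal sketch of both outcomes:
///
/// ```
/// use rayon::prelude::*;
///
/// let ok: Option<Vec<i32>> = vec![Some(1), Some(2)].into_par_iter().collect();
/// assert_eq!(ok, Some(vec![1, 2]));
///
/// let bad: Option<Vec<i32>> = vec![Some(1), None, Some(3)].into_par_iter().collect();
/// assert_eq!(bad, None);
/// ```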
impl<C, T> FromParallelIterator<Option<T>> for Option<C>
where
C: FromParallelIterator<T>,
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Option<T>>,
{
fn check<T>(found_none: &AtomicBool) -> impl Fn(&Option<T>) + '_ {
move |item| {
if item.is_none() {
found_none.store(true, Ordering::Relaxed);
}
}
}
let found_none = AtomicBool::new(false);
let collection = par_iter
.into_par_iter()
.inspect(check(&found_none))
.while_some()
.collect();
if found_none.load(Ordering::Relaxed) {
None
} else {
Some(collection)
}
}
}

74
vendor/rayon/src/par_either.rs vendored Normal file
View File

@@ -0,0 +1,74 @@
use crate::iter::plumbing::*;
use crate::iter::Either::{Left, Right};
use crate::iter::*;
/// `Either<L, R>` is a parallel iterator if both `L` and `R` are parallel iterators.
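///
/// A minimal sketch: both arms must yield the same item type, and whichever
/// arm is chosen simply drives the consumer.
///
/// ```
/// use rayon::iter::Either;
/// use rayon::prelude::*;
///
/// let use_range = true;
/// let par_iter = if use_range {
///     Either::Left((0..4).into_par_iter())
/// } else {
///     Either::Right(vec![10, 20].into_par_iter())
/// };
/// let v: Vec<i32> = par_iter.collect();
/// assert_eq!(v, vec![0, 1, 2, 3]);
/// ```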
impl<L, R> ParallelIterator for Either<L, R>
where
L: ParallelIterator,
R: ParallelIterator<Item = L::Item>,
{
type Item = L::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
match self {
Left(iter) => iter.drive_unindexed(consumer),
Right(iter) => iter.drive_unindexed(consumer),
}
}
fn opt_len(&self) -> Option<usize> {
self.as_ref().either(L::opt_len, R::opt_len)
}
}
impl<L, R> IndexedParallelIterator for Either<L, R>
where
L: IndexedParallelIterator,
R: IndexedParallelIterator<Item = L::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
match self {
Left(iter) => iter.drive(consumer),
Right(iter) => iter.drive(consumer),
}
}
fn len(&self) -> usize {
self.as_ref().either(L::len, R::len)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
match self {
Left(iter) => iter.with_producer(callback),
Right(iter) => iter.with_producer(callback),
}
}
}
/// `Either<L, R>` can be extended if both `L` and `R` are parallel extendable.
impl<L, R, T> ParallelExtend<T> for Either<L, R>
where
L: ParallelExtend<T>,
R: ParallelExtend<T>,
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
match self.as_mut() {
Left(collection) => collection.par_extend(par_iter),
Right(collection) => collection.par_extend(par_iter),
}
}
}

17
vendor/rayon/src/prelude.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
//! The rayon prelude imports the various `ParallelIterator` traits.
//! The intention is that you can include `use rayon::prelude::*` and
//! have easy access to the various traits and methods you will need.
pub use crate::iter::FromParallelIterator;
pub use crate::iter::IndexedParallelIterator;
pub use crate::iter::IntoParallelIterator;
pub use crate::iter::IntoParallelRefIterator;
pub use crate::iter::IntoParallelRefMutIterator;
pub use crate::iter::ParallelBridge;
pub use crate::iter::ParallelDrainFull;
pub use crate::iter::ParallelDrainRange;
pub use crate::iter::ParallelExtend;
pub use crate::iter::ParallelIterator;
pub use crate::slice::ParallelSlice;
pub use crate::slice::ParallelSliceMut;
pub use crate::str::ParallelString;

26
vendor/rayon/src/private.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
//! The public parts of this private module are used to create traits
//! that cannot be implemented outside of our own crate. This way we
//! can feel free to extend those traits without worrying about it
//! being a breaking change for other implementations.
/// If this type is pub but not publicly reachable, third parties
/// can't name it and can't implement traits using it.
#[allow(missing_debug_implementations)]
pub struct PrivateMarker;
macro_rules! private_decl {
() => {
/// This trait is private; this method exists to make it
/// impossible to implement outside the crate.
#[doc(hidden)]
fn __rayon_private__(&self) -> crate::private::PrivateMarker;
};
}
macro_rules! private_impl {
() => {
fn __rayon_private__(&self) -> crate::private::PrivateMarker {
crate::private::PrivateMarker
}
};
}

462
vendor/rayon/src/range.rs vendored Normal file
View File

@@ -0,0 +1,462 @@
//! Parallel iterator types for [ranges][std::range],
//! the type for values created by `a..b` expressions
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..100u64).into_par_iter()
//! .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..100).sum::<u64>(), r);
//! ```
//!
//! [std::range]: https://doc.rust-lang.org/core/ops/struct.Range.html
use crate::iter::plumbing::*;
use crate::iter::*;
use std::char;
use std::convert::TryFrom;
use std::ops::Range;
use std::usize;
/// Parallel iterator over a range, implemented for all integer types and `char`.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is not implemented for `u64`, `i64`, `u128`, or `i128`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..25usize).into_par_iter()
/// .zip(0..25usize)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum::<usize>();
///
/// let s = (0..25usize).zip(0..25)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
range: Range<T>,
}
/// Implemented for ranges of all primitive integer types and `char`.
impl<T> IntoParallelIterator for Range<T>
where
Iter<T>: ParallelIterator,
{
type Item = <Iter<T> as ParallelIterator>::Item;
type Iter = Iter<T>;
fn into_par_iter(self) -> Self::Iter {
Iter { range: self }
}
}
struct IterProducer<T> {
range: Range<T>,
}
impl<T> IntoIterator for IterProducer<T>
where
Range<T>: Iterator,
{
type Item = <Range<T> as Iterator>::Item;
type IntoIter = Range<T>;
fn into_iter(self) -> Self::IntoIter {
self.range
}
}
/// These traits help drive integer type inference. Without them, an unknown `{integer}` type only
/// has constraints on `Iter<{integer}>`, which will probably give up and use `i32`. By adding
/// these traits on the item type, the compiler can see a more direct constraint to infer like
/// `{integer}: RangeInteger`, which works better. See `test_issue_833` for an example.
///
/// They have to be `pub` since they're seen in the public `impl ParallelIterator` constraints, but
/// we put them in a private module so they're not actually reachable in our public API.
mod private {
use super::*;
/// Implementation details of `ParallelIterator for Iter<Self>`
pub trait RangeInteger: Sized + Send {
private_decl! {}
fn drive_unindexed<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self>;
fn opt_len(iter: &Iter<Self>) -> Option<usize>;
}
/// Implementation details of `IndexedParallelIterator for Iter<Self>`
pub trait IndexedRangeInteger: RangeInteger {
private_decl! {}
fn drive<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: Consumer<Self>;
fn len(iter: &Iter<Self>) -> usize;
fn with_producer<CB>(iter: Iter<Self>, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self>;
}
}
use private::{IndexedRangeInteger, RangeInteger};
impl<T: RangeInteger> ParallelIterator for Iter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<T>,
{
T::drive_unindexed(self, consumer)
}
#[inline]
fn opt_len(&self) -> Option<usize> {
T::opt_len(self)
}
}
impl<T: IndexedRangeInteger> IndexedParallelIterator for Iter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<T>,
{
T::drive(self, consumer)
}
#[inline]
fn len(&self) -> usize {
T::len(self)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<T>,
{
T::with_producer(self, callback)
}
}
macro_rules! indexed_range_impl {
( $t:ty ) => {
impl RangeInteger for $t {
private_impl! {}
fn drive_unindexed<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: UnindexedConsumer<$t>,
{
bridge(iter, consumer)
}
fn opt_len(iter: &Iter<$t>) -> Option<usize> {
Some(iter.range.len())
}
}
impl IndexedRangeInteger for $t {
private_impl! {}
fn drive<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: Consumer<$t>,
{
bridge(iter, consumer)
}
fn len(iter: &Iter<$t>) -> usize {
iter.range.len()
}
fn with_producer<CB>(iter: Iter<$t>, callback: CB) -> CB::Output
where
CB: ProducerCallback<$t>,
{
callback.callback(IterProducer { range: iter.range })
}
}
impl Producer for IterProducer<$t> {
type Item = <Range<$t> as Iterator>::Item;
type IntoIter = Range<$t>;
fn into_iter(self) -> Self::IntoIter {
self.range
}
fn split_at(self, index: usize) -> (Self, Self) {
assert!(index <= self.range.len());
// For signed $t, the length and requested index could be greater than $t::MAX, and
// then `index as $t` could wrap to negative, so wrapping_add is necessary.
let mid = self.range.start.wrapping_add(index as $t);
let left = self.range.start..mid;
let right = mid..self.range.end;
(IterProducer { range: left }, IterProducer { range: right })
}
}
};
}
trait UnindexedRangeLen<L> {
fn len(&self) -> L;
}
macro_rules! unindexed_range_impl {
( $t:ty, $len_t:ty ) => {
impl UnindexedRangeLen<$len_t> for Range<$t> {
fn len(&self) -> $len_t {
let &Range { start, end } = self;
if end > start {
end.wrapping_sub(start) as $len_t
} else {
0
}
}
}
impl RangeInteger for $t {
private_impl! {}
fn drive_unindexed<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: UnindexedConsumer<$t>,
{
#[inline]
fn offset(start: $t) -> impl Fn(usize) -> $t {
move |i| start.wrapping_add(i as $t)
}
if let Some(len) = iter.opt_len() {
// Drive this in indexed mode for better `collect`.
(0..len)
.into_par_iter()
.map(offset(iter.range.start))
.drive(consumer)
} else {
bridge_unindexed(IterProducer { range: iter.range }, consumer)
}
}
fn opt_len(iter: &Iter<$t>) -> Option<usize> {
usize::try_from(iter.range.len()).ok()
}
}
impl UnindexedProducer for IterProducer<$t> {
type Item = $t;
fn split(mut self) -> (Self, Option<Self>) {
let index = self.range.len() / 2;
if index > 0 {
let mid = self.range.start.wrapping_add(index as $t);
let right = mid..self.range.end;
self.range.end = mid;
(self, Some(IterProducer { range: right }))
} else {
(self, None)
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self)
}
}
};
}
// all Range<T> with ExactSizeIterator
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {u32}
indexed_range_impl! {usize}
indexed_range_impl! {i8}
indexed_range_impl! {i16}
indexed_range_impl! {i32}
indexed_range_impl! {isize}
// other Range<T> with just Iterator
unindexed_range_impl! {u64, u64}
unindexed_range_impl! {i64, u64}
unindexed_range_impl! {u128, u128}
unindexed_range_impl! {i128, u128}
// char is special because of the surrogate range hole
macro_rules! convert_char {
( $self:ident . $method:ident ( $( $arg:expr ),* ) ) => {{
let start = $self.range.start as u32;
let end = $self.range.end as u32;
// A range ending exactly at 0xE000 still spans the surrogate hole
// (0xD800..0xE000 are not chars), so it must take the chained branch
// as well, matching the `0xE000 <= end` check in `len()` below.
if start < 0xD800 && 0xE000 <= end {
// chain the before and after surrogate range fragments
(start..0xD800)
.into_par_iter()
.chain(0xE000..end)
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
} else {
// no surrogate range to worry about
(start..end)
.into_par_iter()
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
}
}};
}
impl ParallelIterator for Iter<char> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl IndexedParallelIterator for Iter<char> {
// Split at the surrogate range first if we're allowed to
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn len(&self) -> usize {
// Taken from <char as Step>::steps_between
let start = self.range.start as u32;
let end = self.range.end as u32;
if start < end {
let mut count = end - start;
if start < 0xD800 && 0xE000 <= end {
count -= 0x800
}
count as usize
} else {
0
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
convert_char!(self.with_producer(callback))
}
}
#[test]
fn check_range_split_at_overflow() {
// Note, this split index overflows i8!
let producer = IterProducer { range: -100i8..100 };
let (left, right) = producer.split_at(150);
let r1: i32 = left.range.map(i32::from).sum();
let r2: i32 = right.range.map(i32::from).sum();
assert_eq!(r1 + r2, -100);
}
#[test]
fn test_i128_len_doesnt_overflow() {
use std::{i128, u128};
// Using parse because some versions of Rust don't allow long literals
let octillion: i128 = "1000000000000000000000000000".parse().unwrap();
let producer = IterProducer {
range: 0..octillion,
};
assert_eq!(octillion as u128, producer.range.len());
assert_eq!(octillion as u128, (0..octillion).len());
assert_eq!(2 * octillion as u128, (-octillion..octillion).len());
assert_eq!(u128::MAX, (i128::MIN..i128::MAX).len());
}
#[test]
fn test_u64_opt_len() {
use std::{u64, usize};
assert_eq!(Some(100), (0..100u64).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..usize::MAX as u64).into_par_iter().opt_len()
);
if (usize::MAX as u64) < u64::MAX {
assert_eq!(
None,
(0..(usize::MAX as u64).wrapping_add(1))
.into_par_iter()
.opt_len()
);
assert_eq!(None, (0..u64::MAX).into_par_iter().opt_len());
}
}
#[test]
fn test_u128_opt_len() {
use std::{u128, usize};
assert_eq!(Some(100), (0..100u128).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..usize::MAX as u128).into_par_iter().opt_len()
);
assert_eq!(None, (0..1 + usize::MAX as u128).into_par_iter().opt_len());
assert_eq!(None, (0..u128::MAX).into_par_iter().opt_len());
}
// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
use crate::ThreadPoolBuilder;
use std::i64;
let iter = (-2..i64::MAX).into_par_iter();
assert_eq!(iter.opt_len(), Some(i64::MAX as usize + 2));
// always run with multiple threads to split into, or this will take forever...
let pool = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
pool.install(|| assert_eq!(iter.find_last(|_| true), Some(i64::MAX - 1)));
}
#[test]
fn test_issue_833() {
fn is_even(n: i64) -> bool {
n % 2 == 0
}
// The integer type should be inferred from `is_even`
let v: Vec<_> = (1..100).into_par_iter().filter(|&x| is_even(x)).collect();
assert!(v.into_iter().eq((2..100).step_by(2)));
// Try examples with indexed iterators too
let pos = (0..100).into_par_iter().position_any(|x| x == 50i16);
assert_eq!(pos, Some(50usize));
assert!((0..100)
.into_par_iter()
.zip(0..100)
.all(|(a, b)| i16::eq(&a, &b)));
}

386
vendor/rayon/src/range_inclusive.rs vendored Normal file
View File

@@ -0,0 +1,386 @@
//! Parallel iterator types for [inclusive ranges][std::range],
//! the type for values created by `a..=b` expressions
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..=100u64).into_par_iter()
//! .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..=100).sum::<u64>(), r);
//! ```
//!
//! [std::range]: https://doc.rust-lang.org/core/ops/struct.RangeInclusive.html
use crate::iter::plumbing::*;
use crate::iter::*;
use std::char;
use std::ops::RangeInclusive;
/// Parallel iterator over an inclusive range, implemented for all integer types and `char`.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is only implemented for `u8`, `i8`, `u16`, `i16`, and `char`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..=25u16).into_par_iter()
/// .zip(0..=25u16)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum::<u16>();
///
/// let s = (0..=25u16).zip(0..=25u16)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
range: RangeInclusive<T>,
}
impl<T> Iter<T>
where
RangeInclusive<T>: Eq,
T: Ord + Copy,
{
/// Returns `Some((start, end))` for `start..=end`, or `None` if it is exhausted.
///
/// Note that `RangeInclusive` does not specify the bounds of an exhausted iterator,
/// so this is a way for us to figure out what we've got. Thankfully, all of the
/// integer types we care about can be trivially cloned.
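///
/// E.g. (sketch): a fresh `1..=3` range gives `Some((1, 3))`, while the empty
/// `2..=1` gives `None`.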
fn bounds(&self) -> Option<(T, T)> {
let start = *self.range.start();
let end = *self.range.end();
if start <= end && self.range == (start..=end) {
// If the range is still nonempty, this is obviously true
// If the range is exhausted, either start > end or
// the range does not equal start..=end.
Some((start, end))
} else {
None
}
}
}
/// Implemented for ranges of all primitive integer types and `char`.
impl<T> IntoParallelIterator for RangeInclusive<T>
where
Iter<T>: ParallelIterator,
{
type Item = <Iter<T> as ParallelIterator>::Item;
type Iter = Iter<T>;
fn into_par_iter(self) -> Self::Iter {
Iter { range: self }
}
}
/// These traits help drive integer type inference. Without them, an unknown `{integer}` type only
/// has constraints on `Iter<{integer}>`, which will probably give up and use `i32`. By adding
/// these traits on the item type, the compiler can see a more direct constraint to infer like
/// `{integer}: RangeInteger`, which works better. See `test_issue_833` for an example.
///
/// They have to be `pub` since they're seen in the public `impl ParallelIterator` constraints, but
/// we put them in a private module so they're not actually reachable in our public API.
mod private {
use super::*;
/// Implementation details of `ParallelIterator for Iter<Self>`
pub trait RangeInteger: Sized + Send {
private_decl! {}
fn drive_unindexed<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self>;
fn opt_len(iter: &Iter<Self>) -> Option<usize>;
}
/// Implementation details of `IndexedParallelIterator for Iter<Self>`
pub trait IndexedRangeInteger: RangeInteger {
private_decl! {}
fn drive<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: Consumer<Self>;
fn len(iter: &Iter<Self>) -> usize;
fn with_producer<CB>(iter: Iter<Self>, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self>;
}
}
use private::{IndexedRangeInteger, RangeInteger};
impl<T: RangeInteger> ParallelIterator for Iter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<T>,
{
T::drive_unindexed(self, consumer)
}
#[inline]
fn opt_len(&self) -> Option<usize> {
T::opt_len(self)
}
}
impl<T: IndexedRangeInteger> IndexedParallelIterator for Iter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<T>,
{
T::drive(self, consumer)
}
#[inline]
fn len(&self) -> usize {
T::len(self)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<T>,
{
T::with_producer(self, callback)
}
}
macro_rules! convert {
( $iter:ident . $method:ident ( $( $arg:expr ),* ) ) => {
if let Some((start, end)) = $iter.bounds() {
if let Some(end) = end.checked_add(1) {
(start..end).into_par_iter().$method($( $arg ),*)
} else {
(start..end).into_par_iter().chain(once(end)).$method($( $arg ),*)
}
} else {
empty::<Self>().$method($( $arg ),*)
}
};
}
macro_rules! parallel_range_impl {
( $t:ty ) => {
impl RangeInteger for $t {
private_impl! {}
fn drive_unindexed<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: UnindexedConsumer<$t>,
{
convert!(iter.drive_unindexed(consumer))
}
fn opt_len(iter: &Iter<$t>) -> Option<usize> {
convert!(iter.opt_len())
}
}
};
}
macro_rules! indexed_range_impl {
( $t:ty ) => {
parallel_range_impl! { $t }
impl IndexedRangeInteger for $t {
private_impl! {}
fn drive<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: Consumer<$t>,
{
convert!(iter.drive(consumer))
}
fn len(iter: &Iter<$t>) -> usize {
iter.range.len()
}
fn with_producer<CB>(iter: Iter<$t>, callback: CB) -> CB::Output
where
CB: ProducerCallback<$t>,
{
convert!(iter.with_producer(callback))
}
}
};
}
// all RangeInclusive<T> with ExactSizeIterator
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {i8}
indexed_range_impl! {i16}
// other RangeInclusive<T> with just Iterator
parallel_range_impl! {usize}
parallel_range_impl! {isize}
parallel_range_impl! {u32}
parallel_range_impl! {i32}
parallel_range_impl! {u64}
parallel_range_impl! {i64}
parallel_range_impl! {u128}
parallel_range_impl! {i128}
// char is special
macro_rules! convert_char {
( $self:ident . $method:ident ( $( $arg:expr ),* ) ) => {
if let Some((start, end)) = $self.bounds() {
let start = start as u32;
let end = end as u32;
if start < 0xD800 && 0xE000 <= end {
// chain the before and after surrogate range fragments
(start..0xD800)
.into_par_iter()
.chain(0xE000..end + 1) // cannot use RangeInclusive, so add one to end
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
} else {
// no surrogate range to worry about
(start..end + 1) // cannot use RangeInclusive, so add one to end
.into_par_iter()
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
}
} else {
empty::<char>().$method($( $arg ),*)
}
};
}
impl ParallelIterator for Iter<char> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
// Range<u32> is already broken on 16-bit platforms, so we may as well benefit from the same assumption here
impl IndexedParallelIterator for Iter<char> {
// Split at the surrogate range first if we're allowed to
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn len(&self) -> usize {
if let Some((start, end)) = self.bounds() {
// Taken from <char as Step>::steps_between
let start = start as u32;
let end = end as u32;
let mut count = end - start;
if start < 0xD800 && 0xE000 <= end {
count -= 0x800
}
(count + 1) as usize // add one for inclusive
} else {
0
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
convert_char!(self.with_producer(callback))
}
}
#[test]
#[cfg(target_pointer_width = "64")]
fn test_u32_opt_len() {
use std::u32;
assert_eq!(Some(101), (0..=100u32).into_par_iter().opt_len());
assert_eq!(
Some(u32::MAX as usize),
(0..=u32::MAX - 1).into_par_iter().opt_len()
);
assert_eq!(
Some(u32::MAX as usize + 1),
(0..=u32::MAX).into_par_iter().opt_len()
);
}
#[test]
fn test_u64_opt_len() {
use std::{u64, usize};
assert_eq!(Some(101), (0..=100u64).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..=usize::MAX as u64 - 1).into_par_iter().opt_len()
);
assert_eq!(None, (0..=usize::MAX as u64).into_par_iter().opt_len());
assert_eq!(None, (0..=u64::MAX).into_par_iter().opt_len());
}
#[test]
fn test_u128_opt_len() {
use std::{u128, usize};
assert_eq!(Some(101), (0..=100u128).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..=usize::MAX as u128 - 1).into_par_iter().opt_len()
);
assert_eq!(None, (0..=usize::MAX as u128).into_par_iter().opt_len());
assert_eq!(None, (0..=u128::MAX).into_par_iter().opt_len());
}
// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
use crate::ThreadPoolBuilder;
use std::i64;
let iter = (-2..=i64::MAX).into_par_iter();
assert_eq!(iter.opt_len(), Some(i64::MAX as usize + 3));
// always run with multiple threads to split into, or this will take forever...
let pool = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
pool.install(|| assert_eq!(iter.find_last(|_| true), Some(i64::MAX)));
}
#[test]
fn test_issue_833() {
fn is_even(n: i64) -> bool {
n % 2 == 0
}
// The integer type should be inferred from `is_even`
let v: Vec<_> = (1..=100).into_par_iter().filter(|&x| is_even(x)).collect();
assert!(v.into_iter().eq((2..=100).step_by(2)));
// Try examples with indexed iterators too
let pos = (0..=100).into_par_iter().position_any(|x| x == 50i16);
assert_eq!(pos, Some(50usize));
assert!((0..=100)
.into_par_iter()
.zip(0..=100)
.all(|(a, b)| i16::eq(&a, &b)));
}
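// An editorial sketch, not part of the upstream test suite: the `convert!`
// fallback matters exactly when `end` is the type's maximum value, since
// `end + 1` would overflow. The chained `once(end)` path must still yield
// every value exactly once.
#[test]
fn test_u8_full_range() {
let sum: u32 = (0..=u8::MAX).into_par_iter().map(u32::from).sum();
assert_eq!(sum, (0u32..=255).sum::<u32>());
}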

132
vendor/rayon/src/result.rs vendored Normal file
View File

@@ -0,0 +1,132 @@
//! Parallel iterator types for [results][std::result]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::result]: https://doc.rust-lang.org/stable/std/result/
use crate::iter::plumbing::*;
use crate::iter::*;
use std::sync::Mutex;
use crate::option;
/// Parallel iterator over a result
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
inner: option::IntoIter<T>,
}
impl<T: Send, E> IntoParallelIterator for Result<T, E> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter {
inner: self.ok().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a result
#[derive(Debug)]
pub struct Iter<'a, T: Sync> {
inner: option::IntoIter<&'a T>,
}
impl<'a, T: Sync> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync, E> IntoParallelIterator for &'a Result<T, E> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
Iter {
inner: self.as_ref().ok().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync + 'a>
}
/// Parallel iterator over a mutable reference to a result
#[derive(Debug)]
pub struct IterMut<'a, T: Send> {
inner: option::IntoIter<&'a mut T>,
}
impl<'a, T: Send, E> IntoParallelIterator for &'a mut Result<T, E> {
type Item = &'a mut T;
type Iter = IterMut<'a, T>;
fn into_par_iter(self) -> Self::Iter {
IterMut {
inner: self.as_mut().ok().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send + 'a>
}
/// Collect an arbitrary `Result`-wrapped collection.
///
/// If any item is `Err`, then all previous `Ok` items collected are
/// discarded, and it returns that error. If there are multiple errors, the
/// one returned is not deterministic.
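///
/// # Examples
///
/// A brief sketch (added editorially) of the short-circuiting behavior
/// described above:
///
/// ```
/// use rayon::prelude::*;
///
/// let ok: Result<Vec<i32>, i32> = (0..10).into_par_iter().map(Ok).collect();
/// assert_eq!(ok, Ok((0..10).collect::<Vec<_>>()));
///
/// let err: Result<Vec<i32>, i32> = (0..10)
///     .into_par_iter()
///     .map(|x| if x == 5 { Err(x) } else { Ok(x) })
///     .collect();
/// assert_eq!(err, Err(5));
/// ```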
impl<C, T, E> FromParallelIterator<Result<T, E>> for Result<C, E>
where
C: FromParallelIterator<T>,
T: Send,
E: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Result<T, E>>,
{
fn ok<T, E>(saved: &Mutex<Option<E>>) -> impl Fn(Result<T, E>) -> Option<T> + '_ {
move |item| match item {
Ok(item) => Some(item),
Err(error) => {
// We don't need a blocking `lock()`, as anybody
// else holding the lock will also be writing
// `Some(error)`, and then ours is irrelevant.
if let Ok(mut guard) = saved.try_lock() {
if guard.is_none() {
*guard = Some(error);
}
}
None
}
}
}
let saved_error = Mutex::new(None);
let collection = par_iter
.into_par_iter()
.map(ok(&saved_error))
.while_some()
.collect();
match saved_error.into_inner().unwrap() {
Some(error) => Err(error),
None => Ok(collection),
}
}
}

389
vendor/rayon/src/slice/chunks.rs vendored Normal file
View File

@@ -0,0 +1,389 @@
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::math::div_round_up;
use std::cmp;
/// Parallel iterator over immutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct Chunks<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: Sync> Chunks<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<'data, T: Sync> Clone for Chunks<'data, T> {
fn clone(&self) -> Self {
Chunks { ..*self }
}
}
impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
div_round_up(self.slice.len(), self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::Chunks<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
let (left, right) = self.slice.split_at(elem_index);
(
ChunksProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
/// Parallel iterator over immutable non-overlapping chunks of a slice,
/// each of an exact size (the remainder is excluded)
#[derive(Debug)]
pub struct ChunksExact<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
rem: &'data [T],
}
impl<'data, T: Sync> ChunksExact<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let len = slice.len() - rem_len;
let (slice, rem) = slice.split_at(len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
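///
/// A brief doc sketch (added editorially):
///
/// ```
/// use rayon::prelude::*;
///
/// let v = [1, 2, 3, 4, 5];
/// assert_eq!(v.par_chunks_exact(2).remainder(), &[5]);
/// ```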
pub fn remainder(&self) -> &'data [T] {
self.rem
}
}
impl<'data, T: Sync> Clone for ChunksExact<'data, T> {
fn clone(&self) -> Self {
ChunksExact { ..*self }
}
}
impl<'data, T: Sync + 'data> ParallelIterator for ChunksExact<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Sync + 'data> IndexedParallelIterator for ChunksExact<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksExactProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksExactProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for ChunksExactProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::ChunksExact<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks_exact(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = index * self.chunk_size;
let (left, right) = self.slice.split_at(elem_index);
(
ChunksExactProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksExactProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksMut<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: Send> ChunksMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
div_round_up(self.slice.len(), self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::ChunksMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
let (left, right) = self.slice.split_at_mut(elem_index);
(
ChunksMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice,
/// each of an exact size (the remainder is excluded)
#[derive(Debug)]
pub struct ChunksExactMut<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
rem: &'data mut [T],
}
impl<'data, T: Send> ChunksExactMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let len = slice.len() - rem_len;
let (slice, rem) = slice.split_at_mut(len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Note that this has to consume `self` to return the original lifetime of
/// the data, which prevents this from actually being used as a parallel
/// iterator since that also consumes `self`. This method is provided for
/// parity with `std::slice::ChunksExactMut`, but consider calling `remainder()`
/// or `take_remainder()` as alternatives.
pub fn into_remainder(self) -> &'data mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Consider `take_remainder()` if you need access to the data with its
/// original lifetime, rather than borrowing through `&mut self` here.
pub fn remainder(&mut self) -> &mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements. Subsequent calls will return an empty slice.
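///
/// A brief doc sketch (added editorially):
///
/// ```
/// use rayon::prelude::*;
///
/// let mut v = [1, 2, 3, 4, 5];
/// let mut chunks = v.par_chunks_exact_mut(2);
/// assert_eq!(chunks.take_remainder(), &mut [5]);
/// assert!(chunks.take_remainder().is_empty());
/// ```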
pub fn take_remainder(&mut self) -> &'data mut [T] {
std::mem::take(&mut self.rem)
}
}
impl<'data, T: Send + 'data> ParallelIterator for ChunksExactMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksExactMut<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksExactMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for ChunksExactMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::ChunksExactMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks_exact_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = index * self.chunk_size;
let (left, right) = self.slice.split_at_mut(elem_index);
(
ChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}

755
vendor/rayon/src/slice/mergesort.rs vendored Normal file
View File

@@ -0,0 +1,755 @@
//! Parallel merge sort.
//!
//! This implementation is copied verbatim from `std::slice::sort` and then parallelized.
//! The only difference from the original is that the sequential `mergesort` returns
//! `MergesortResult` and leaves descending arrays intact.
use crate::iter::*;
use crate::slice::ParallelSliceMut;
use crate::SendPtr;
use std::mem;
use std::mem::size_of;
use std::ptr;
use std::slice;
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
*ptr = ptr.offset(1);
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
*ptr = ptr.offset(-1);
*ptr
}
/// When dropped, copies from `src` into `dest` a sequence of length `len`.
struct CopyOnDrop<T> {
src: *const T,
dest: *mut T,
len: usize,
}
impl<T> Drop for CopyOnDrop<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, self.len);
}
}
}
/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
fn insert_head<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
if v.len() >= 2 && is_less(&v[1], &v[0]) {
unsafe {
// There are three ways to implement insertion here:
//
// 1. Swap adjacent elements until the first one gets to its final destination.
// However, this way we copy data around more than is necessary. If elements are big
// structures (costly to copy), this method will be slow.
//
// 2. Iterate until the right place for the first element is found. Then shift the
// elements succeeding it to make room for it and finally place it into the
// remaining hole. This is a good method.
//
// 3. Copy the first element into a temporary variable. Iterate until the right place
// for it is found. As we go along, copy every traversed element into the slot
// preceding it. Finally, copy data from the temporary variable into the remaining
// hole. This method is very good. Benchmarks demonstrated slightly better
// performance than with the 2nd method.
//
// All methods were benchmarked, and the 3rd showed best results. So we chose that one.
let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
// Intermediate state of the insertion process is always tracked by `hole`, which
// serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` in the end.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and
// fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
// initially held exactly once.
let mut hole = InsertionHole {
src: &*tmp,
dest: &mut v[1],
};
ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
for i in 2..v.len() {
if !is_less(&v[i], &*tmp) {
break;
}
ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
hole.dest = &mut v[i];
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
// When dropped, copies from `src` into `dest`.
struct InsertionHole<T> {
src: *const T,
dest: *mut T,
}
impl<T> Drop for InsertionHole<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}
}
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
let v = v.as_mut_ptr();
let v_mid = v.add(mid);
let v_end = v.add(len);
// The merge process first copies the shorter run into `buf`. Then it traces the newly copied
// run and the longer run forwards (or backwards), comparing their next unconsumed elements and
// copying the lesser (or greater) one into `v`.
//
// As soon as the shorter run is fully consumed, the process is done. If the longer run gets
// consumed first, then we must copy whatever is left of the shorter run into the remaining
// hole in `v`.
//
// Intermediate state of the process is always tracked by `hole`, which serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` if the longer run gets consumed first.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and fill the
// hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
// object it initially held exactly once.
let mut hole;
if mid <= len - mid {
// The left run is shorter.
ptr::copy_nonoverlapping(v, buf, mid);
hole = MergeHole {
start: buf,
end: buf.add(mid),
dest: v,
};
// Initially, these pointers point to the beginnings of their arrays.
let left = &mut hole.start;
let mut right = v_mid;
let out = &mut hole.dest;
while *left < hole.end && right < v_end {
// Consume the lesser side.
// If equal, prefer the left run to maintain stability.
let to_copy = if is_less(&*right, &**left) {
get_and_increment(&mut right)
} else {
get_and_increment(left)
};
ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
}
} else {
// The right run is shorter.
ptr::copy_nonoverlapping(v_mid, buf, len - mid);
hole = MergeHole {
start: buf,
end: buf.add(len - mid),
dest: v_mid,
};
// Initially, these pointers point past the ends of their arrays.
let left = &mut hole.dest;
let right = &mut hole.end;
let mut out = v_end;
while v < *left && buf < *right {
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
};
ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
}
}
// Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
// it will now be copied into the hole in `v`.
// When dropped, copies the range `start..end` into `dest..`.
struct MergeHole<T> {
start: *mut T,
end: *mut T,
dest: *mut T,
}
impl<T> Drop for MergeHole<T> {
fn drop(&mut self) {
// `T` is not a zero-sized type, so it's okay to divide by its size.
unsafe {
let len = self.end.offset_from(self.start) as usize;
ptr::copy_nonoverlapping(self.start, self.dest, len);
}
}
}
}
/// The result of merge sort.
#[must_use]
#[derive(Clone, Copy, PartialEq, Eq)]
enum MergesortResult {
/// The slice has already been sorted.
NonDescending,
/// The slice has been descending and therefore it was left intact.
Descending,
/// The slice was sorted.
Sorted,
}
/// A sorted run that starts at index `start` and is of length `len`.
#[derive(Clone, Copy)]
struct Run {
start: usize,
len: usize,
}
/// Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
/// if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
/// algorithm should continue building a new run instead, `None` is returned.
///
/// TimSort is infamous for its buggy implementations, as described here:
/// http://envisage-project.eu/timsort-specification-and-verification/
///
/// The gist of the story is: we must enforce the invariants on the top four runs on the stack.
/// Enforcing them on just top three is not sufficient to ensure that the invariants will still
/// hold for *all* runs in the stack.
///
/// This function correctly checks invariants for the top four runs. Additionally, if the top
/// run starts at index 0, it will always demand a merge operation until the stack is fully
/// collapsed, in order to complete the sort.
#[inline]
fn collapse(runs: &[Run]) -> Option<usize> {
let n = runs.len();
if n >= 2
&& (runs[n - 1].start == 0
|| runs[n - 2].len <= runs[n - 1].len
|| (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
|| (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
{
if n >= 3 && runs[n - 3].len < runs[n - 1].len {
Some(n - 3)
} else {
Some(n - 2)
}
} else {
None
}
}
/// Sorts a slice using merge sort, unless it is already in descending order.
///
/// This function doesn't modify the slice if it is already non-descending or descending.
/// Otherwise, it sorts the slice into non-descending order.
///
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
///
/// # Safety
///
/// The argument `buf` is used as a temporary buffer and must be at least as long as `v`.
unsafe fn mergesort<T, F>(v: &mut [T], buf: *mut T, is_less: &F) -> MergesortResult
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Very short runs are extended using insertion sort to span at least this many elements.
const MIN_RUN: usize = 10;
let len = v.len();
// In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
// strange decision, but consider the fact that merges more often go in the opposite direction
// (forwards). According to benchmarks, merging forwards is slightly faster than merging
// backwards. To conclude, identifying runs by traversing backwards improves performance.
let mut runs = vec![];
let mut end = len;
while end > 0 {
// Find the next natural run, and reverse it if it's strictly descending.
let mut start = end - 1;
if start > 0 {
start -= 1;
if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
start -= 1;
}
// If this descending run covers the whole slice, return immediately.
if start == 0 && end == len {
return MergesortResult::Descending;
} else {
v[start..end].reverse();
}
} else {
while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
start -= 1;
}
// If this non-descending run covers the whole slice, return immediately.
if end - start == len {
return MergesortResult::NonDescending;
}
}
}
// Insert some more elements into the run if it's too short. Insertion sort is faster than
// merge sort on short sequences, so this significantly improves performance.
while start > 0 && end - start < MIN_RUN {
start -= 1;
insert_head(&mut v[start..end], &is_less);
}
// Push this run onto the stack.
runs.push(Run {
start,
len: end - start,
});
end = start;
// Merge some pairs of adjacent runs to satisfy the invariants.
while let Some(r) = collapse(&runs) {
let left = runs[r + 1];
let right = runs[r];
merge(
&mut v[left.start..right.start + right.len],
left.len,
buf,
&is_less,
);
runs[r] = Run {
start: left.start,
len: left.len + right.len,
};
runs.remove(r + 1);
}
}
// Finally, exactly one run must remain in the stack.
debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
// The original order of the slice was neither non-descending nor descending.
MergesortResult::Sorted
}
////////////////////////////////////////////////////////////////////////////
// Everything above this line is copied from `std::slice::sort` (with very minor tweaks).
// Everything below this line is parallelization.
////////////////////////////////////////////////////////////////////////////
/// Splits two sorted slices so that they can be merged in parallel.
///
/// Returns two indices `(a, b)` so that slices `left[..a]` and `right[..b]` come before
/// `left[a..]` and `right[b..]`.
fn split_for_merge<T, F>(left: &[T], right: &[T], is_less: &F) -> (usize, usize)
where
F: Fn(&T, &T) -> bool,
{
let left_len = left.len();
let right_len = right.len();
if left_len >= right_len {
let left_mid = left_len / 2;
// Find the first element in `right` that is greater than or equal to `left[left_mid]`.
let mut a = 0;
let mut b = right_len;
while a < b {
let m = a + (b - a) / 2;
if is_less(&right[m], &left[left_mid]) {
a = m + 1;
} else {
b = m;
}
}
(left_mid, a)
} else {
let right_mid = right_len / 2;
// Find the first element in `left` that is greater than `right[right_mid]`.
let mut a = 0;
let mut b = left_len;
while a < b {
let m = a + (b - a) / 2;
if is_less(&right[right_mid], &left[m]) {
b = m;
} else {
a = m + 1;
}
}
(a, right_mid)
}
}
/// Merges slices `left` and `right` in parallel and stores the result into `dest`.
///
/// # Safety
///
/// The `dest` pointer must have enough space to store the result.
///
/// Even if `is_less` panics at any point during the merge process, this function will fully copy
/// all elements from `left` and `right` into `dest` (not necessarily in sorted order).
unsafe fn par_merge<T, F>(left: &mut [T], right: &mut [T], dest: *mut T, is_less: &F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Slices whose lengths sum up to this value are merged sequentially. This number is slightly
// larger than `CHUNK_LENGTH`, and the reason is that merging is faster than merge sorting, so
// merging needs a bit coarser granularity in order to hide the overhead of Rayon's task
// scheduling.
const MAX_SEQUENTIAL: usize = 5000;
let left_len = left.len();
let right_len = right.len();
// Intermediate state of the merge process, which serves two purposes:
// 1. Protects integrity of `dest` from panics in `is_less`.
// 2. Copies the remaining elements as soon as one of the two sides is exhausted.
//
// Panic safety:
//
// If `is_less` panics at any point during the merge process, `s` will get dropped and copy the
// remaining parts of `left` and `right` into `dest`.
let mut s = State {
left_start: left.as_mut_ptr(),
left_end: left.as_mut_ptr().add(left_len),
right_start: right.as_mut_ptr(),
right_end: right.as_mut_ptr().add(right_len),
dest,
};
if left_len == 0 || right_len == 0 || left_len + right_len < MAX_SEQUENTIAL {
while s.left_start < s.left_end && s.right_start < s.right_end {
// Consume the lesser side.
// If equal, prefer the left run to maintain stability.
let to_copy = if is_less(&*s.right_start, &*s.left_start) {
get_and_increment(&mut s.right_start)
} else {
get_and_increment(&mut s.left_start)
};
ptr::copy_nonoverlapping(to_copy, get_and_increment(&mut s.dest), 1);
}
} else {
// Function `split_for_merge` might panic. If that happens, `s` will get destructed and copy
// the whole `left` and `right` into `dest`.
let (left_mid, right_mid) = split_for_merge(left, right, is_less);
let (left_l, left_r) = left.split_at_mut(left_mid);
let (right_l, right_r) = right.split_at_mut(right_mid);
// Prevent the destructor of `s` from running. Rayon will ensure that both calls to
// `par_merge` happen. If one of the two calls panics, they will ensure that elements still
// get copied into `dest_l` and `dest_r`.
mem::forget(s);
// Wrap pointers in SendPtr so that they can be sent to another thread
// See the documentation of SendPtr for a full explanation
let dest_l = SendPtr(dest);
let dest_r = SendPtr(dest.add(left_l.len() + right_l.len()));
rayon_core::join(
move || par_merge(left_l, right_l, dest_l.get(), is_less),
move || par_merge(left_r, right_r, dest_r.get(), is_less),
);
}
// Finally, `s` gets dropped if we used sequential merge, thus copying the remaining elements
// all at once.
// When dropped, copies arrays `left_start..left_end` and `right_start..right_end` into `dest`,
// in that order.
struct State<T> {
left_start: *mut T,
left_end: *mut T,
right_start: *mut T,
right_end: *mut T,
dest: *mut T,
}
impl<T> Drop for State<T> {
fn drop(&mut self) {
let size = size_of::<T>();
let left_len = (self.left_end as usize - self.left_start as usize) / size;
let right_len = (self.right_end as usize - self.right_start as usize) / size;
// Copy array `left`, followed by `right`.
unsafe {
ptr::copy_nonoverlapping(self.left_start, self.dest, left_len);
self.dest = self.dest.add(left_len);
ptr::copy_nonoverlapping(self.right_start, self.dest, right_len);
}
}
}
}
/// Recursively merges pre-sorted chunks inside `v`.
///
/// Chunks of `v` are stored in `chunks` as intervals (inclusive left and exclusive right bound).
/// Argument `buf` is an auxiliary buffer that will be used during the procedure.
/// If `into_buf` is true, the result will be stored into `buf`, otherwise it will be in `v`.
///
/// # Safety
///
/// The number of chunks must be positive and they must be adjacent: the right bound of each chunk
/// must equal the left bound of the following chunk.
///
/// The buffer must be at least as long as `v`.
unsafe fn recurse<T, F>(
v: *mut T,
buf: *mut T,
chunks: &[(usize, usize)],
into_buf: bool,
is_less: &F,
) where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
let len = chunks.len();
debug_assert!(len > 0);
// Base case of the algorithm.
// If only one chunk is remaining, there's no more work to split and merge.
if len == 1 {
if into_buf {
// Copy the chunk from `v` into `buf`.
let (start, end) = chunks[0];
let src = v.add(start);
let dest = buf.add(start);
ptr::copy_nonoverlapping(src, dest, end - start);
}
return;
}
// Split the chunks into two halves.
let (start, _) = chunks[0];
let (mid, _) = chunks[len / 2];
let (_, end) = chunks[len - 1];
let (left, right) = chunks.split_at(len / 2);
// After recursive calls finish we'll have to merge chunks `(start, mid)` and `(mid, end)` from
// `src` into `dest`. If the current invocation has to store the result into `buf`, we'll
// merge chunks from `v` into `buf`, and vice versa.
//
// Recursive calls flip `into_buf` at each level of recursion. More concretely, `par_merge`
// merges chunks from `buf` into `v` at the first level, from `v` into `buf` at the second
// level etc.
let (src, dest) = if into_buf { (v, buf) } else { (buf, v) };
// Panic safety:
//
// If `is_less` panics at any point during the recursive calls, the destructor of `guard` will
// be executed, thus copying everything from `src` into `dest`. This way we ensure that all
// chunks are in fact copied into `dest`, even if the merge process doesn't finish.
let guard = CopyOnDrop {
src: src.add(start),
dest: dest.add(start),
len: end - start,
};
// Wrap pointers in SendPtr so that they can be sent to another thread
// See the documentation of SendPtr for a full explanation
let v = SendPtr(v);
let buf = SendPtr(buf);
rayon_core::join(
move || recurse(v.get(), buf.get(), left, !into_buf, is_less),
move || recurse(v.get(), buf.get(), right, !into_buf, is_less),
);
// Everything went all right - recursive calls didn't panic.
// Forget the guard in order to prevent its destructor from running.
mem::forget(guard);
// Merge chunks `(start, mid)` and `(mid, end)` from `src` into `dest`.
let src_left = slice::from_raw_parts_mut(src.add(start), mid - start);
let src_right = slice::from_raw_parts_mut(src.add(mid), end - mid);
par_merge(src_left, src_right, dest.add(start), is_less);
}
/// Sorts `v` using merge sort in parallel.
///
/// The algorithm is stable, allocates memory, and `O(n log n)` worst-case.
/// The allocated temporary buffer is of the same length as `v`.
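///
/// This is the implementation behind `ParallelSliceMut::par_sort`; a usage
/// sketch (added editorially):
///
/// ```
/// use rayon::prelude::*;
///
/// let mut v = vec![5, 1, 4, 2, 3];
/// v.par_sort(); // stable parallel merge sort
/// assert_eq!(v, [1, 2, 3, 4, 5]);
/// ```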
pub(super) fn par_mergesort<T, F>(v: &mut [T], is_less: F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Slices of up to this length get sorted using insertion sort in order to avoid the cost of
// buffer allocation.
const MAX_INSERTION: usize = 20;
// The length of initial chunks. This number is as small as possible but so that the overhead
// of Rayon's task scheduling is still negligible.
const CHUNK_LENGTH: usize = 2000;
// Sorting has no meaningful behavior on zero-sized types.
if size_of::<T>() == 0 {
return;
}
let len = v.len();
// Short slices get sorted in-place via insertion sort to avoid allocations.
if len <= MAX_INSERTION {
if len >= 2 {
for i in (0..len - 1).rev() {
insert_head(&mut v[i..], &is_less);
}
}
return;
}
// Allocate a buffer to use as scratch memory. We keep the length 0 so that it can hold
// shallow copies of the contents of `v` without risking the destructors running on those
// copies if `is_less` panics.
let mut buf = Vec::<T>::with_capacity(len);
let buf = buf.as_mut_ptr();
// If the slice is not longer than one chunk would be, do sequential merge sort and return.
if len <= CHUNK_LENGTH {
let res = unsafe { mergesort(v, buf, &is_less) };
if res == MergesortResult::Descending {
v.reverse();
}
return;
}
// Split the slice into chunks and merge sort them in parallel.
// However, descending chunks will not be sorted - they will be simply left intact.
let mut iter = {
// Wrap pointer in SendPtr so that it can be sent to another thread
// See the documentation of SendPtr for a full explanation
let buf = SendPtr(buf);
let is_less = &is_less;
v.par_chunks_mut(CHUNK_LENGTH)
.with_max_len(1)
.enumerate()
.map(move |(i, chunk)| {
let l = CHUNK_LENGTH * i;
let r = l + chunk.len();
unsafe {
let buf = buf.get().add(l);
(l, r, mergesort(chunk, buf, is_less))
}
})
.collect::<Vec<_>>()
.into_iter()
.peekable()
};
// Now attempt to concatenate adjacent chunks that were left intact.
let mut chunks = Vec::with_capacity(iter.len());
while let Some((a, mut b, res)) = iter.next() {
// If this chunk was not modified by the sort procedure...
if res != MergesortResult::Sorted {
while let Some(&(x, y, r)) = iter.peek() {
// If the following chunk is of the same type and can be concatenated...
if r == res && (r == MergesortResult::Descending) == is_less(&v[x], &v[x - 1]) {
// Concatenate them.
b = y;
iter.next();
} else {
break;
}
}
}
// Descending chunks must be reversed.
if res == MergesortResult::Descending {
v[a..b].reverse();
}
chunks.push((a, b));
}
// All chunks are properly sorted.
// Now we just have to merge them together.
unsafe {
recurse(v.as_mut_ptr(), buf, &chunks, false, &is_less);
}
}
#[cfg(test)]
mod tests {
use super::split_for_merge;
use rand::distributions::Uniform;
use rand::{thread_rng, Rng};
#[test]
fn test_split_for_merge() {
fn check(left: &[u32], right: &[u32]) {
let (l, r) = split_for_merge(left, right, &|&a, &b| a < b);
assert!(left[..l]
.iter()
.all(|&x| right[r..].iter().all(|&y| x <= y)));
assert!(right[..r].iter().all(|&x| left[l..].iter().all(|&y| x < y)));
}
check(&[1, 2, 2, 2, 2, 3], &[1, 2, 2, 2, 2, 3]);
check(&[1, 2, 2, 2, 2, 3], &[]);
check(&[], &[1, 2, 2, 2, 2, 3]);
let rng = &mut thread_rng();
for _ in 0..100 {
let limit: u32 = rng.gen_range(1..21);
let left_len: usize = rng.gen_range(0..20);
let right_len: usize = rng.gen_range(0..20);
let mut left = rng
.sample_iter(&Uniform::new(0, limit))
.take(left_len)
.collect::<Vec<_>>();
let mut right = rng
.sample_iter(&Uniform::new(0, limit))
.take(right_len)
.collect::<Vec<_>>();
left.sort();
right.sort();
check(&left, &right);
}
}
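// An editorial sketch, not part of the upstream suite: check `collapse`
// directly. Runs are pushed while traversing `v` backwards, so the run on
// top of the stack (the last element here) is the leftmost one in `v`.
#[test]
fn test_collapse() {
use super::{collapse, Run};
// Invariant 1 holds (lower run longer than upper): keep building runs.
let runs = [Run { start: 30, len: 20 }, Run { start: 20, len: 10 }];
assert_eq!(collapse(&runs), None);
// Invariant 1 violated (`runs[0].len <= runs[1].len`): merge the top two runs.
let runs = [Run { start: 30, len: 10 }, Run { start: 10, len: 20 }];
assert_eq!(collapse(&runs), Some(0));
// A run starting at index 0 always demands a merge, to finish the sort.
let runs = [Run { start: 5, len: 100 }, Run { start: 0, len: 5 }];
assert_eq!(collapse(&runs), Some(0));
}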
}

1041
vendor/rayon/src/slice/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

903
vendor/rayon/src/slice/quicksort.rs vendored Normal file
View File

@@ -0,0 +1,903 @@
//! Parallel quicksort.
//!
//! This implementation is copied verbatim from `std::slice::sort_unstable` and then parallelized.
//! The only difference from the original is that calls to `recurse` are executed in parallel using
//! `rayon_core::join`.
use std::cmp;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr;
/// When dropped, copies from `src` into `dest`.
#[must_use]
struct CopyOnDrop<'a, T> {
src: *const T,
dest: *mut T,
/// `src` is often a local pointer here, so make sure we have appropriate
/// `PhantomData` so that dropck can protect us.
marker: PhantomData<&'a mut T>,
}
impl<'a, T> CopyOnDrop<'a, T> {
/// Constructs from a source pointer and a destination.
/// Assumes `dest` lives longer than `src`, since there is no easy way to
/// copy down lifetime information from another pointer.
unsafe fn new(src: &'a T, dest: *mut T) -> Self {
CopyOnDrop {
src,
dest,
marker: PhantomData,
}
}
}
impl<T> Drop for CopyOnDrop<'_, T> {
fn drop(&mut self) {
// SAFETY: This is a helper class.
// Please refer to its usage for correctness.
// Namely, one must be sure that `src` and `dest` do not overlap, as required by `ptr::copy_nonoverlapping`.
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}
/// Shifts the first element to the right until it encounters a greater or equal element.
fn shift_head<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
// SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
// pointer) and copying memory (`ptr::copy_nonoverlapping`).
//
// a. Indexing:
// 1. We checked that the size of the slice is >= 2.
// 2. All the indexing that we will do is always between {0 <= index < len} at most.
//
// b. Memory copying
// 1. We are obtaining pointers to references which are guaranteed to be valid.
// 2. They cannot overlap because we obtain pointers to different indices of the slice.
// Namely, `i` and `i-1`.
// 3. If the slice is properly aligned, the elements are properly aligned.
// It is the caller's responsibility to make sure the slice is properly aligned.
//
// See comments below for further detail.
unsafe {
// If the first two elements are out-of-order...
if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
// Read the first element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
let v = v.as_mut_ptr();
let mut hole = CopyOnDrop::new(&*tmp, v.add(1));
ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
for i in 2..len {
if !is_less(&*v.add(i), &*tmp) {
break;
}
// Move `i`-th element one place to the left, thus shifting the hole to the right.
ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1);
hole.dest = v.add(i);
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
}
/// Shifts the last element to the left until it encounters a smaller or equal element.
fn shift_tail<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
// SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
// pointer) and copying memory (`ptr::copy_nonoverlapping`).
//
// a. Indexing:
// 1. We checked that the size of the slice is >= 2.
// 2. All the indexing that we will do is always between `0 <= index < len-1` at most.
//
// b. Memory copying
// 1. We are obtaining pointers to references which are guaranteed to be valid.
// 2. They cannot overlap because we obtain pointers to different indices of the slice.
// Namely, `i` and `i+1`.
// 3. If the slice is properly aligned, the elements are properly aligned.
// It is the caller's responsibility to make sure the slice is properly aligned.
//
// See comments below for further detail.
unsafe {
// If the last two elements are out-of-order...
if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
// Read the last element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
let v = v.as_mut_ptr();
let mut hole = CopyOnDrop::new(&*tmp, v.add(len - 2));
ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);
for i in (0..len - 2).rev() {
if !is_less(&*tmp, &*v.add(i)) {
break;
}
// Move `i`-th element one place to the right, thus shifting the hole to the left.
ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1);
hole.dest = v.add(i);
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
}
/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
#[cold]
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &F) -> bool
where
F: Fn(&T, &T) -> bool,
{
// Maximum number of adjacent out-of-order pairs that will get shifted.
const MAX_STEPS: usize = 5;
// If the slice is shorter than this, don't shift any elements.
const SHORTEST_SHIFTING: usize = 50;
let len = v.len();
let mut i = 1;
for _ in 0..MAX_STEPS {
// SAFETY: We already explicitly did the bounds checking with `i < len`.
// All our subsequent indexing is only in the range `0 <= index < len`
unsafe {
// Find the next pair of adjacent out-of-order elements.
while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
i += 1;
}
}
// Are we done?
if i == len {
return true;
}
// Don't shift elements on short arrays; that has a performance cost.
if len < SHORTEST_SHIFTING {
return false;
}
// Swap the found pair of elements. This puts them in correct order.
v.swap(i - 1, i);
// Shift the smaller element to the left.
shift_tail(&mut v[..i], is_less);
// Shift the greater element to the right.
shift_head(&mut v[i..], is_less);
}
// Didn't manage to sort the slice in the limited number of steps.
false
}
/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
fn insertion_sort<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
for i in 1..v.len() {
shift_tail(&mut v[..i + 1], is_less);
}
}
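// An editorial sketch, not part of the upstream suite: plain insertion sort
// over a small slice.
#[test]
fn test_insertion_sort_basic() {
let mut v = vec![2u32, 0, 3, 1, 1];
insertion_sort(&mut v, &|a: &u32, b: &u32| a < b);
assert_eq!(v, [0, 1, 1, 2, 3]);
}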
/// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
#[cold]
fn heapsort<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
// This binary heap respects the invariant `parent >= child`.
let sift_down = |v: &mut [T], mut node| {
loop {
// Children of `node`.
let mut child = 2 * node + 1;
if child >= v.len() {
break;
}
// Choose the greater child.
if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) {
child += 1;
}
// Stop if the invariant holds at `node`.
if !is_less(&v[node], &v[child]) {
break;
}
// Swap `node` with the greater child, move one step down, and continue sifting.
v.swap(node, child);
node = child;
}
};
// Build the heap in linear time.
for i in (0..v.len() / 2).rev() {
sift_down(v, i);
}
// Pop maximal elements from the heap.
for i in (1..v.len()).rev() {
v.swap(0, i);
sift_down(&mut v[..i], 0);
}
}
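// An editorial sketch, not part of the upstream suite: heapsort is the
// fallback that guarantees O(n log n) when quicksort's recursion degenerates.
#[test]
fn test_heapsort_basic() {
let mut v = vec![3, 7, 1, 9, 4, 4, 0];
heapsort(&mut v, &|a: &i32, b: &i32| a < b);
assert_eq!(v, [0, 1, 3, 4, 4, 7, 9]);
}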
/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
// Number of elements in a typical block.
const BLOCK: usize = 128;
// The partitioning algorithm repeats the following steps until completion:
//
// 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
// 2. Trace a block from the right side to identify elements smaller than the pivot.
// 3. Exchange the identified elements between the left and right side.
//
// We keep the following variables for a block of elements:
//
// 1. `block` - Number of elements in the block.
// 2. `start` - Start pointer into the `offsets` array.
// 3. `end` - End pointer into the `offsets` array.
// 4. `offsets` - Indices of out-of-order elements within the block.
// The current block on the left side (from `l` to `l.add(block_l)`).
let mut l = v.as_mut_ptr();
let mut block_l = BLOCK;
let mut start_l = ptr::null_mut();
let mut end_l = ptr::null_mut();
let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
// The current block on the right side (from `r.sub(block_r)` to `r`).
// SAFETY: The documentation for `.add()` specifically mentions that `vec.as_ptr().add(vec.len())` is always safe.
let mut r = unsafe { l.add(v.len()) };
let mut block_r = BLOCK;
let mut start_r = ptr::null_mut();
let mut end_r = ptr::null_mut();
let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];
// FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
// than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
// Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
fn width<T>(l: *mut T, r: *mut T) -> usize {
assert!(mem::size_of::<T>() > 0);
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
// TODO unstable: (r.addr() - l.addr()) / mem::size_of::<T>()
(r as usize - l as usize) / mem::size_of::<T>()
}
loop {
// We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
// some patch-up work in order to partition the remaining elements in between.
let is_done = width(l, r) <= 2 * BLOCK;
if is_done {
// Number of remaining elements (still not compared to the pivot).
let mut rem = width(l, r);
if start_l < end_l || start_r < end_r {
rem -= BLOCK;
}
// Adjust block sizes so that the left and right block don't overlap, but get perfectly
// aligned to cover the whole remaining gap.
if start_l < end_l {
block_r = rem;
} else if start_r < end_r {
block_l = rem;
} else {
// There were the same number of elements to switch on both blocks during the last
// iteration, so there are no remaining elements on either block. Cover the remaining
// items with roughly equally-sized blocks.
block_l = rem / 2;
block_r = rem - block_l;
}
debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
debug_assert!(width(l, r) == block_l + block_r);
}
if start_l == end_l {
// Trace `block_l` elements from the left side.
// TODO unstable: start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
start_l = offsets_l.as_mut_ptr() as *mut u8;
end_l = start_l;
let mut elem = l;
for i in 0..block_l {
// SAFETY: The unsafe operations below involve the use of `offset`.
// According to the conditions required by the function, we satisfy them because:
// 1. `offsets_l` is stack-allocated, and thus considered a separate allocated object.
// 2. The function `is_less` returns a `bool`.
// Casting a `bool` will never overflow `isize`.
// 3. We have guaranteed that `block_l` will be `<= BLOCK`.
// Plus, `end_l` was initially set to the begin pointer of `offsets_l`, which was declared on the stack.
// Thus, we know that even in the worst case (all invocations of `is_less` return false) we will only be at most 1 byte past the end.
// Another unsafe operation here is dereferencing `elem`.
// However, `elem` was initially the begin pointer to the slice, which is always valid.
unsafe {
// Branchless comparison.
*end_l = i as u8;
end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
elem = elem.offset(1);
}
}
}
if start_r == end_r {
// Trace `block_r` elements from the right side.
// TODO unstable: start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
start_r = offsets_r.as_mut_ptr() as *mut u8;
end_r = start_r;
let mut elem = r;
for i in 0..block_r {
// SAFETY: The unsafe operations below involve the use of `offset`.
// According to the conditions required by the function, we satisfy them because:
// 1. `offsets_r` is stack-allocated, and thus considered a separate allocated object.
// 2. The function `is_less` returns a `bool`.
// Casting a `bool` will never overflow `isize`.
// 3. We have guaranteed that `block_r` will be `<= BLOCK`.
// Plus, `end_r` was initially set to the begin pointer of `offsets_r`, which was declared on the stack.
// Thus, we know that even in the worst case (all invocations of `is_less` return true) we will only be at most 1 byte past the end.
// Another unsafe operation here is dereferencing `elem`.
// However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by `1 * sizeof(T)` before accessing it.
// Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
unsafe {
// Branchless comparison.
elem = elem.offset(-1);
*end_r = i as u8;
end_r = end_r.offset(is_less(&*elem, pivot) as isize);
}
}
}
// Number of out-of-order elements to swap between the left and right side.
let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
if count > 0 {
macro_rules! left {
() => {
l.offset(*start_l as isize)
};
}
macro_rules! right {
() => {
r.offset(-(*start_r as isize) - 1)
};
}
// Instead of swapping one pair at the time, it is more efficient to perform a cyclic
// permutation. This is not strictly equivalent to swapping, but produces a similar
// result using fewer memory operations.
// SAFETY: The use of `ptr::read` is valid because there is at least one element in
// both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from.
//
// The uses of `left!` involve calls to `offset` on `l`, which points to the
// beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so
// these `offset` calls are safe as all reads are within the block. The same argument
// applies for the uses of `right!`.
//
// The calls to `start_l.offset` are valid because there are at most `count-1` of them,
// plus the final one at the end of the unsafe block, where `count` is the minimum number
// of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not
// being enough elements. The same reasoning applies to the calls to `start_r.offset`.
//
// The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed
// not to overlap, and are valid because of the reasoning above.
unsafe {
let tmp = ptr::read(left!());
ptr::copy_nonoverlapping(right!(), left!(), 1);
for _ in 1..count {
start_l = start_l.offset(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
start_r = start_r.offset(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}
ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
start_l = start_l.offset(1);
start_r = start_r.offset(1);
}
}
if start_l == end_l {
// All out-of-order elements in the left block were moved. Move to the next block.
// block-width-guarantee
// SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There
// are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is
// safe. Otherwise, the debug assertions in the `is_done` case guarantee that
// `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
// for the smaller number of remaining elements.
l = unsafe { l.add(block_l) };
}
if start_r == end_r {
// All out-of-order elements in the right block were moved. Move to the previous block.
// SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
// or `block_r` has been adjusted for the last handful of elements.
r = unsafe { r.offset(-(block_r as isize)) };
}
if is_done {
break;
}
}
// All that remains now is at most one block (either the left or the right) with out-of-order
// elements that need to be moved. Such remaining elements can be simply shifted to the end
// within their block.
if start_l < end_l {
// The left block remains.
// Move its remaining out-of-order elements to the far right.
debug_assert_eq!(width(l, r), block_l);
while start_l < end_l {
// remaining-elements-safety
// SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
// is safe to point `end_l` to the previous element.
//
// The `ptr::swap` is safe if both its arguments are valid for reads and writes:
// - Per the debug assert above, the distance between `l` and `r` is `block_l`
// elements, so there can be at most `block_l` remaining offsets between `start_l`
// and `end_l`. This means `r` will be moved at most `block_l` steps back, which
// makes the `r.offset` calls valid (at that point `l == r`).
// - `offsets_l` contains valid offsets into `v` collected during the partitioning of
// the last block, so the `l.offset` calls are valid.
unsafe {
end_l = end_l.offset(-1);
ptr::swap(l.offset(*end_l as isize), r.offset(-1));
r = r.offset(-1);
}
}
width(v.as_mut_ptr(), r)
} else if start_r < end_r {
// The right block remains.
// Move its remaining out-of-order elements to the far left.
debug_assert_eq!(width(l, r), block_r);
while start_r < end_r {
// SAFETY: See the reasoning in [remaining-elements-safety].
unsafe {
end_r = end_r.offset(-1);
ptr::swap(l, r.offset(-(*end_r as isize) - 1));
l = l.offset(1);
}
}
width(v.as_mut_ptr(), l)
} else {
// Nothing else to do, we're done.
width(v.as_mut_ptr(), l)
}
}
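// An editorial sketch, not part of the upstream suite: partition a reversed
// sequence around a stack-held pivot value and verify the returned count.
#[test]
fn test_partition_in_blocks_basic() {
let pivot = 250;
let mut v: Vec<i32> = (0..500).rev().collect();
let mid = partition_in_blocks(&mut v, &pivot, &|a: &i32, b: &i32| a < b);
assert_eq!(mid, 250); // exactly 0..250 are smaller than the pivot
assert!(v[..mid].iter().all(|&x| x < pivot));
assert!(v[mid..].iter().all(|&x| x >= pivot));
}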
/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
let (mid, was_partitioned) = {
// Place the pivot at the beginning of slice.
v.swap(0, pivot);
let (pivot, v) = v.split_at_mut(1);
let pivot = &mut pivot[0];
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
// SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = unsafe { CopyOnDrop::new(&*tmp, pivot) };
let pivot = &*tmp;
// Find the first pair of out-of-order elements.
let mut l = 0;
let mut r = v.len();
// SAFETY: The unsafety below involves indexing an array.
// For the first one: we already do the bounds checking here with `l < r`.
// For the second one: we initially have `l == 0` and `r == v.len()`, and we check `l < r` before every indexing operation,
// so `r >= 1` whenever we index; since `r` only ever decreases, `r - 1` is always a valid index.
unsafe {
// Find the first element greater than or equal to the pivot.
while l < r && is_less(v.get_unchecked(l), pivot) {
l += 1;
}
// Find the last element smaller than the pivot.
while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
r -= 1;
}
}
(
l + partition_in_blocks(&mut v[l..r], pivot, is_less),
l >= r,
)
// `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
// variable) back into the slice where it originally was. This step is critical in ensuring
// safety!
};
// Place the pivot between the two partitions.
v.swap(0, mid);
(mid, was_partitioned)
}
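// An editorial sketch, not part of the upstream suite: partition around `v[4]`
// (the value 5) and verify the pivot lands between the two partitions.
#[test]
fn test_partition_basic() {
let mut v = vec![9, 3, 7, 1, 5, 8, 2];
let (mid, _) = partition(&mut v, 4, &|a: &i32, b: &i32| a < b);
assert_eq!(mid, 3); // 3, 1, 2 are smaller than 5
assert_eq!(v[mid], 5);
assert!(v[..mid].iter().all(|&x| x < 5));
assert!(v[mid + 1..].iter().all(|&x| x >= 5));
}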
/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
// Place the pivot at the beginning of slice.
v.swap(0, pivot);
let (pivot, v) = v.split_at_mut(1);
let pivot = &mut pivot[0];
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
// SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = unsafe { CopyOnDrop::new(&*tmp, pivot) };
let pivot = &*tmp;
// Now partition the slice.
let mut l = 0;
let mut r = v.len();
loop {
// SAFETY: The unsafety below involves indexing an array.
// For the first `get_unchecked` call: the loop condition `l < r` bounds-checks `l` here.
// For the second: we start with `l == 0` and `r == v.len()`, and `l < r` is checked before
// every indexing operation, so `r - 1` always stays within bounds.
unsafe {
// Find the first element greater than the pivot.
while l < r && !is_less(pivot, v.get_unchecked(l)) {
l += 1;
}
// Find the last element equal to the pivot.
while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
r -= 1;
}
// Are we done?
if l >= r {
break;
}
// Swap the found pair of out-of-order elements.
r -= 1;
let ptr = v.as_mut_ptr();
ptr::swap(ptr.add(l), ptr.add(r));
l += 1;
}
}
// We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
l + 1
// `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
// back into the slice where it originally was. This step is critical in ensuring safety!
}
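// A minimal sketch (hypothetical test scaffolding, not in the vendored source):
// `partition_equal` assumes no element is smaller than the pivot and returns
// how many elements equal it, grouped at the front.
#[cfg(test)]
mod partition_equal_sketch_tests {
    use super::partition_equal;

    #[test]
    fn counts_and_groups_equal_elements() {
        // Precondition from the doc comment: nothing is smaller than the pivot (2).
        let mut v = [2, 5, 2, 7, 2];
        let count = partition_equal(&mut v, 0, &|a, b| a < b);
        assert_eq!(count, 3);
        assert!(v[..count].iter().all(|&x| x == 2));
        assert!(v[count..].iter().all(|&x| x > 2));
    }
}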
/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
#[cold]
fn break_patterns<T>(v: &mut [T]) {
let len = v.len();
if len >= 8 {
// Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
let mut random = len as u32;
let mut gen_u32 = || {
random ^= random << 13;
random ^= random >> 17;
random ^= random << 5;
random
};
let mut gen_usize = || {
if usize::BITS <= 32 {
gen_u32() as usize
} else {
(((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
}
};
// Take random numbers modulo this number.
// The number fits into `usize` because `len` is not greater than `isize::MAX`.
let modulus = len.next_power_of_two();
// Some pivot candidates will be near this index. Let's randomize them.
let pos = len / 4 * 2;
for i in 0..3 {
// Generate a random number modulo `len`. However, in order to avoid costly operations
// we first take it modulo a power of two, and then decrease by `len` until it fits
// into the range `[0, len - 1]`.
let mut other = gen_usize() & (modulus - 1);
// `other` is guaranteed to be less than `2 * len`.
if other >= len {
other -= len;
}
v.swap(pos - 1 + i, other);
}
}
}
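// A minimal sketch (hypothetical test scaffolding, not in the vendored source):
// `break_patterns` only swaps elements around, so it must leave the multiset of
// elements intact, and it is a no-op for slices shorter than 8 elements.
#[cfg(test)]
mod break_patterns_sketch_tests {
    use super::break_patterns;

    #[test]
    fn is_a_permutation() {
        let mut v: Vec<i32> = (0..64).collect();
        break_patterns(&mut v);
        v.sort();
        assert_eq!(v, (0..64).collect::<Vec<i32>>());
    }

    #[test]
    fn short_slices_are_untouched() {
        let mut v = [3, 1, 2];
        break_patterns(&mut v);
        assert_eq!(v, [3, 1, 2]);
    }
}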
/// Chooses a pivot in `v` and returns its index, along with `true` if the slice is likely
/// already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
// Minimum length to choose the median-of-medians method.
// Shorter slices use the simple median-of-three method.
const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
// Maximum number of swaps that can be performed in this function.
const MAX_SWAPS: usize = 4 * 3;
let len = v.len();
// Three indices near which we are going to choose a pivot.
#[allow(clippy::identity_op)]
let mut a = len / 4 * 1;
let mut b = len / 4 * 2;
let mut c = len / 4 * 3;
// Counts the total number of swaps we are about to perform while sorting indices.
let mut swaps = 0;
if len >= 8 {
// Swaps indices so that `v[a] <= v[b]`.
// SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
// `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
// corresponding calls to `sort3` with valid 3-item neighborhoods around each
// pointer, which in turn means the calls to `sort2` are done with valid
// references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
// call.
let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
ptr::swap(a, b);
swaps += 1;
}
};
// Swaps indices so that `v[a] <= v[b] <= v[c]`.
let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
sort2(a, b);
sort2(b, c);
sort2(a, b);
};
if len >= SHORTEST_MEDIAN_OF_MEDIANS {
// Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
let mut sort_adjacent = |a: &mut usize| {
let tmp = *a;
sort3(&mut (tmp - 1), a, &mut (tmp + 1));
};
// Find medians in the neighborhoods of `a`, `b`, and `c`.
sort_adjacent(&mut a);
sort_adjacent(&mut b);
sort_adjacent(&mut c);
}
// Find the median among `a`, `b`, and `c`.
sort3(&mut a, &mut b, &mut c);
}
if swaps < MAX_SWAPS {
(b, swaps == 0)
} else {
// The maximum number of swaps was performed. Chances are the slice is descending or mostly
// descending, so reversing will probably help sort it faster.
v.reverse();
(len - 1 - b, true)
}
}
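// A minimal sketch (hypothetical test scaffolding, not in the vendored source):
// the returned pivot index is always in bounds, and an already-sorted slice is
// recognized as likely sorted because no index swaps are needed.
#[cfg(test)]
mod choose_pivot_sketch_tests {
    use super::choose_pivot;

    #[test]
    fn sorted_input_is_detected() {
        let mut v: Vec<i32> = (0..100).collect();
        let (pivot, likely_sorted) = choose_pivot(&mut v, &|a, b| a < b);
        assert!(pivot < v.len());
        assert!(likely_sorted);
    }
}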
/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: u32)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Slices of up to this length get sorted using insertion sort.
const MAX_INSERTION: usize = 20;
// If both partitions are up to this length, we continue sequentially. This number is as small
// as possible while keeping the overhead of Rayon's task scheduling negligible.
const MAX_SEQUENTIAL: usize = 2000;
// True if the last partitioning was reasonably balanced.
let mut was_balanced = true;
// True if the last partitioning didn't shuffle elements (the slice was already partitioned).
let mut was_partitioned = true;
loop {
let len = v.len();
// Very short slices get sorted using insertion sort.
if len <= MAX_INSERTION {
insertion_sort(v, is_less);
return;
}
// If too many bad pivot choices were made, simply fall back to heapsort in order to
// guarantee `O(n * log(n))` worst-case.
if limit == 0 {
heapsort(v, is_less);
return;
}
// If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
// some elements around. Hopefully we'll choose a better pivot this time.
if !was_balanced {
break_patterns(v);
limit -= 1;
}
// Choose a pivot and try guessing whether the slice is already sorted.
let (pivot, likely_sorted) = choose_pivot(v, is_less);
// If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
// selection predicts the slice is likely already sorted...
if was_balanced && was_partitioned && likely_sorted {
// Try identifying several out-of-order elements and shifting them to correct
// positions. If the slice ends up being completely sorted, we're done.
if partial_insertion_sort(v, is_less) {
return;
}
}
// If the chosen pivot is equal to the predecessor, then it's the smallest element in the
// slice. Partition the slice into elements equal to and elements greater than the pivot.
// This case is usually hit when the slice contains many duplicate elements.
if let Some(ref p) = pred {
if !is_less(p, &v[pivot]) {
let mid = partition_equal(v, pivot, is_less);
// Continue sorting elements greater than the pivot.
v = &mut v[mid..];
continue;
}
}
// Partition the slice.
let (mid, was_p) = partition(v, pivot, is_less);
was_balanced = cmp::min(mid, len - mid) >= len / 8;
was_partitioned = was_p;
// Split the slice into `left`, `pivot`, and `right`.
let (left, right) = v.split_at_mut(mid);
let (pivot, right) = right.split_at_mut(1);
let pivot = &mut pivot[0];
if cmp::max(left.len(), right.len()) <= MAX_SEQUENTIAL {
// Recurse into the shorter side only in order to minimize the total number of recursive
// calls and consume less stack space. Then just continue with the longer side (this is
// akin to tail recursion).
if left.len() < right.len() {
recurse(left, is_less, pred, limit);
v = right;
pred = Some(pivot);
} else {
recurse(right, is_less, Some(pivot), limit);
v = left;
}
} else {
// Sort the left and right half in parallel.
rayon_core::join(
|| recurse(left, is_less, pred, limit),
|| recurse(right, is_less, Some(pivot), limit),
);
break;
}
}
}
/// Sorts `v` using pattern-defeating quicksort in parallel.
///
/// The algorithm is unstable, in-place, and *O*(*n* \* log(*n*)) worst-case.
pub(super) fn par_quicksort<T, F>(v: &mut [T], is_less: F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
// Sorting has no meaningful behavior on zero-sized types.
if mem::size_of::<T>() == 0 {
return;
}
// Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
let limit = usize::BITS - v.len().leading_zeros();
recurse(v, &is_less, None, limit);
}
#[cfg(test)]
mod tests {
use super::heapsort;
use rand::distributions::Uniform;
use rand::{thread_rng, Rng};
#[test]
fn test_heapsort() {
let rng = &mut thread_rng();
for len in (0..25).chain(500..501) {
for &modulus in &[5, 10, 100] {
let dist = Uniform::new(0, modulus);
for _ in 0..100 {
let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
// Test heapsort using `<` operator.
let mut tmp = v.clone();
heapsort(&mut tmp, &|a, b| a < b);
assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
// Test heapsort using `>` operator.
let mut tmp = v.clone();
heapsort(&mut tmp, &|a, b| a > b);
assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
}
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v: Vec<_> = (0..100).collect();
heapsort(&mut v, &|_, _| thread_rng().gen());
heapsort(&mut v, &|a, b| a < b);
for (i, &entry) in v.iter().enumerate() {
assert_eq!(entry, i);
}
}
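// A minimal sketch (added for illustration, not part of the vendored tests):
// drive the private `par_quicksort` entry point directly and check the result
// is sorted under the `<` comparison.
#[test]
fn test_par_quicksort_sketch() {
    use super::par_quicksort;
    let mut v: Vec<i32> = (0..1000).rev().collect();
    par_quicksort(&mut v, |a, b| a < b);
    assert!(v.windows(2).all(|w| w[0] <= w[1]));
}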
}

vendor/rayon/src/slice/rchunks.rs vendored Normal file
@@ -0,0 +1,386 @@
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::math::div_round_up;
/// Parallel iterator over immutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunks<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: Sync> RChunks<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<'data, T: Sync> Clone for RChunks<'data, T> {
fn clone(&self) -> Self {
RChunks { ..*self }
}
}
impl<'data, T: Sync + 'data> ParallelIterator for RChunks<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Sync + 'data> IndexedParallelIterator for RChunks<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
div_round_up(self.slice.len(), self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for RChunksProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::RChunks<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len().saturating_sub(index * self.chunk_size);
let (left, right) = self.slice.split_at(elem_index);
(
RChunksProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
/// Parallel iterator over immutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunksExact<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
rem: &'data [T],
}
impl<'data, T: Sync> RChunksExact<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let (rem, slice) = slice.split_at(rem_len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
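///
/// A short example (mirroring the crate's own tests; added for illustration):
///
/// ```
/// use rayon::prelude::*;
///
/// // Chunks of 2 are taken from the end, so the leftover
/// // element is at the front of the original slice.
/// let v: &[i32] = &[0, 1, 2, 3, 4];
/// assert_eq!(v.par_rchunks_exact(2).remainder(), &[0]);
/// ```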
pub fn remainder(&self) -> &'data [T] {
self.rem
}
}
impl<'data, T: Sync> Clone for RChunksExact<'data, T> {
fn clone(&self) -> Self {
RChunksExact { ..*self }
}
}
impl<'data, T: Sync + 'data> ParallelIterator for RChunksExact<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Sync + 'data> IndexedParallelIterator for RChunksExact<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksExactProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksExactProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for RChunksExactProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::RChunksExact<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks_exact(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len() - index * self.chunk_size;
let (left, right) = self.slice.split_at(elem_index);
(
RChunksExactProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksExactProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunksMut<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: Send> RChunksMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<'data, T: Send + 'data> ParallelIterator for RChunksMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send + 'data> IndexedParallelIterator for RChunksMut<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
div_round_up(self.slice.len(), self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for RChunksMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::RChunksMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len().saturating_sub(index * self.chunk_size);
let (left, right) = self.slice.split_at_mut(elem_index);
(
RChunksMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunksExactMut<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
rem: &'data mut [T],
}
impl<'data, T: Send> RChunksExactMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let (rem, slice) = slice.split_at_mut(rem_len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Note that this has to consume `self` to return the original lifetime of
/// the data, which prevents this from actually being used as a parallel
/// iterator since that also consumes. This method is provided for parity
/// with `std::slice::RChunksExactMut`, but consider calling `remainder()` or
/// `take_remainder()` as alternatives.
pub fn into_remainder(self) -> &'data mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Consider `take_remainder()` if you need access to the data with its
/// original lifetime, rather than borrowing through `&mut self` here.
pub fn remainder(&mut self) -> &mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements. Subsequent calls will return an empty slice.
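///
/// A short example (mirroring the crate's own tests; added for illustration):
///
/// ```
/// use rayon::prelude::*;
///
/// let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
/// let mut c = v.par_rchunks_exact_mut(2);
/// assert_eq!(c.take_remainder(), &[0]);
/// assert_eq!(c.take_remainder(), &[]);
/// ```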
pub fn take_remainder(&mut self) -> &'data mut [T] {
std::mem::take(&mut self.rem)
}
}
impl<'data, T: Send + 'data> ParallelIterator for RChunksExactMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send + 'data> IndexedParallelIterator for RChunksExactMut<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksExactMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for RChunksExactMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::RChunksExactMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks_exact_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len() - index * self.chunk_size;
let (left, right) = self.slice.split_at_mut(elem_index);
(
RChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}

vendor/rayon/src/slice/test.rs vendored Normal file
@@ -0,0 +1,170 @@
#![cfg(test)]
use crate::prelude::*;
use rand::distributions::Uniform;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use std::cmp::Ordering::{Equal, Greater, Less};
macro_rules! sort {
($f:ident, $name:ident) => {
#[test]
fn $name() {
let rng = &mut thread_rng();
for len in (0..25).chain(500..501) {
for &modulus in &[5, 10, 100] {
let dist = Uniform::new(0, modulus);
for _ in 0..100 {
let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
// Test sort using `<` operator.
let mut tmp = v.clone();
tmp.$f(|a, b| a.cmp(b));
assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
// Test sort using `>` operator.
let mut tmp = v.clone();
tmp.$f(|a, b| b.cmp(a));
assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
}
}
}
// Test sort with many duplicates.
for &len in &[1_000, 10_000, 100_000] {
for &modulus in &[5, 10, 100, 10_000] {
let dist = Uniform::new(0, modulus);
let mut v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
v.$f(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
// Test sort with many pre-sorted runs.
for &len in &[1_000, 10_000, 100_000] {
let len_dist = Uniform::new(0, len);
for &modulus in &[5, 10, 1000, 50_000] {
let dist = Uniform::new(0, modulus);
let mut v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
v.sort();
v.reverse();
for _ in 0..5 {
let a = rng.sample(&len_dist);
let b = rng.sample(&len_dist);
if a < b {
v[a..b].reverse();
} else {
v.swap(a, b);
}
}
v.$f(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v: Vec<_> = (0..100).collect();
v.$f(|_, _| *[Less, Equal, Greater].choose(&mut thread_rng()).unwrap());
v.$f(|a, b| a.cmp(b));
for i in 0..v.len() {
assert_eq!(v[i], i);
}
// Should not panic.
[0i32; 0].$f(|a, b| a.cmp(b));
[(); 10].$f(|a, b| a.cmp(b));
[(); 100].$f(|a, b| a.cmp(b));
let mut v = [0xDEAD_BEEFu64];
v.$f(|a, b| a.cmp(b));
assert!(v == [0xDEAD_BEEF]);
}
};
}
sort!(par_sort_by, test_par_sort);
sort!(par_sort_unstable_by, test_par_sort_unstable);
#[test]
fn test_par_sort_stability() {
for len in (2..25).chain(500..510).chain(50_000..50_010) {
for _ in 0..10 {
let mut counts = [0; 10];
// Create a vector like [(6, 1), (5, 1), (6, 2), ...],
// where the first item of each tuple is random, but
// the second item represents which occurrence of that
// number this element is, i.e. the second elements
// will occur in sorted order.
let mut rng = thread_rng();
let mut v: Vec<_> = (0..len)
.map(|_| {
let n: usize = rng.gen_range(0..10);
counts[n] += 1;
(n, counts[n])
})
.collect();
// Only sort on the first element, so an unstable sort
// may mix up the counts.
v.par_sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// This comparison includes the count (the second item
// of the tuple), so elements with equal first items
// will need to be ordered with increasing
// counts... i.e. exactly asserting that this sort is
// stable.
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}
#[test]
fn test_par_chunks_exact_remainder() {
let v: &[i32] = &[0, 1, 2, 3, 4];
let c = v.par_chunks_exact(2);
assert_eq!(c.remainder(), &[4]);
assert_eq!(c.len(), 2);
}
#[test]
fn test_par_chunks_exact_mut_remainder() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
let mut c = v.par_chunks_exact_mut(2);
assert_eq!(c.remainder(), &[4]);
assert_eq!(c.len(), 2);
assert_eq!(c.into_remainder(), &[4]);
let mut c = v.par_chunks_exact_mut(2);
assert_eq!(c.take_remainder(), &[4]);
assert_eq!(c.take_remainder(), &[]);
assert_eq!(c.len(), 2);
}
#[test]
fn test_par_rchunks_exact_remainder() {
let v: &[i32] = &[0, 1, 2, 3, 4];
let c = v.par_rchunks_exact(2);
assert_eq!(c.remainder(), &[0]);
assert_eq!(c.len(), 2);
}
#[test]
fn test_par_rchunks_exact_mut_remainder() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
let mut c = v.par_rchunks_exact_mut(2);
assert_eq!(c.remainder(), &[0]);
assert_eq!(c.len(), 2);
assert_eq!(c.into_remainder(), &[0]);
let mut c = v.par_rchunks_exact_mut(2);
assert_eq!(c.take_remainder(), &[0]);
assert_eq!(c.take_remainder(), &[]);
assert_eq!(c.len(), 2);
}

vendor/rayon/src/split_producer.rs vendored Normal file
@@ -0,0 +1,132 @@
//! Common splitter for strings and slices
//!
//! This module is private, so these items are effectively `pub(super)`
use crate::iter::plumbing::{Folder, UnindexedProducer};
/// Common producer for splitting on a predicate.
pub(super) struct SplitProducer<'p, P, V> {
data: V,
separator: &'p P,
/// Marks the endpoint beyond which we've already found no separators.
tail: usize,
}
/// Helper trait so `&str`, `&[T]`, and `&mut [T]` can share `SplitProducer`.
pub(super) trait Fissile<P>: Sized {
fn length(&self) -> usize;
fn midpoint(&self, end: usize) -> usize;
fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize>;
fn rfind(&self, separator: &P, end: usize) -> Option<usize>;
fn split_once(self, index: usize) -> (Self, Self);
fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
where
F: Folder<Self>,
Self: Send;
}
impl<'p, P, V> SplitProducer<'p, P, V>
where
V: Fissile<P> + Send,
{
pub(super) fn new(data: V, separator: &'p P) -> Self {
SplitProducer {
tail: data.length(),
data,
separator,
}
}
/// Common `fold_with` implementation, integrating `SplitTerminator`'s
/// need to sometimes skip its final empty item.
pub(super) fn fold_with<F>(self, folder: F, skip_last: bool) -> F
where
F: Folder<V>,
{
let SplitProducer {
data,
separator,
tail,
} = self;
if tail == data.length() {
// No tail section, so just let `fold_splits` handle it.
data.fold_splits(separator, folder, skip_last)
} else if let Some(index) = data.rfind(separator, tail) {
// We found the last separator to complete the tail, so
// end with that slice after `fold_splits` finds the rest.
let (left, right) = data.split_once(index);
let folder = left.fold_splits(separator, folder, false);
if skip_last || folder.full() {
folder
} else {
folder.consume(right)
}
} else {
// We know there are no separators at all. Return our whole data.
if skip_last {
folder
} else {
folder.consume(data)
}
}
}
}
impl<'p, P, V> UnindexedProducer for SplitProducer<'p, P, V>
where
V: Fissile<P> + Send,
P: Sync,
{
type Item = V;
fn split(self) -> (Self, Option<Self>) {
// Look forward for the separator, and failing that look backward.
let mid = self.data.midpoint(self.tail);
let index = match self.data.find(self.separator, mid, self.tail) {
Some(i) => Some(mid + i),
None => self.data.rfind(self.separator, mid),
};
if let Some(index) = index {
let len = self.data.length();
let (left, right) = self.data.split_once(index);
let (left_tail, right_tail) = if index < mid {
// If we scanned backwards to find the separator, everything in
// the right side is exhausted, with no separators left to find.
(index, 0)
} else {
let right_index = len - right.length();
(mid, self.tail - right_index)
};
// Create the left split before the separator.
let left = SplitProducer {
data: left,
tail: left_tail,
..self
};
// Create the right split following the separator.
let right = SplitProducer {
data: right,
tail: right_tail,
..self
};
(left, Some(right))
} else {
// The search is exhausted, no more separators...
(SplitProducer { tail: 0, ..self }, None)
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.fold_with(folder, false)
}
}
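// A minimal sketch (hypothetical test scaffolding, not in the vendored source):
// drive this producer through the public `par_split` API on a string, which is
// documented as similar to `str::split`, including empty items between
// adjacent separators.
#[cfg(test)]
mod tests {
    use crate::prelude::*;

    #[test]
    fn split_matches_sequential_semantics() {
        let parallel: Vec<_> = "a,b,,c".par_split(',').collect();
        let sequential: Vec<_> = "a,b,,c".split(',').collect();
        assert_eq!(parallel, sequential);
    }
}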

vendor/rayon/src/str.rs vendored Normal file
@@ -0,0 +1,848 @@
//! Parallel iterator types for [strings][std::str]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! Note: [`ParallelString::par_split()`] and [`par_split_terminator()`]
//! reference a `Pattern` trait which is not visible outside this crate.
//! This trait is intentionally kept private, for use only by Rayon itself.
//! It is implemented for `char`, `&[char]`, and any function or closure
//! `F: Fn(char) -> bool + Sync + Send`.
//!
//! [`ParallelString::par_split()`]: trait.ParallelString.html#method.par_split
//! [`par_split_terminator()`]: trait.ParallelString.html#method.par_split_terminator
//!
//! [std::str]: https://doc.rust-lang.org/stable/std/str/
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::split_producer::*;
/// Test if a byte is the start of a UTF-8 character.
/// (extracted from `str::is_char_boundary`)
#[inline]
fn is_char_boundary(b: u8) -> bool {
// This is bit magic equivalent to: b < 128 || b >= 192
(b as i8) >= -0x40
}
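// For example: b'A' (0x41) is 65 as `i8`, a boundary; a continuation byte such
// as 0x80 is -128 as `i8`, not a boundary; a leading byte such as 0xC3 is -61
// as `i8`, a boundary again.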
/// Find the index of a character boundary near the midpoint.
#[inline]
fn find_char_midpoint(chars: &str) -> usize {
let mid = chars.len() / 2;
// We want to split near the midpoint, but we need to find an actual
// character boundary. So we look at the raw bytes, first scanning
// forward from the midpoint for a boundary, then trying backward.
let (left, right) = chars.as_bytes().split_at(mid);
match right.iter().copied().position(is_char_boundary) {
Some(i) => mid + i,
None => left
.iter()
.copied()
.rposition(is_char_boundary)
.unwrap_or(0),
}
}
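// For example (illustrative): "αβγ" is six bytes, two per character, so the raw
// midpoint of 3 falls inside 'β'; the forward scan finds the next boundary at
// byte 4, and the string splits as "αβ" / "γ".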
/// Try to split a string near the midpoint.
#[inline]
fn split(chars: &str) -> Option<(&str, &str)> {
let index = find_char_midpoint(chars);
if index > 0 {
Some(chars.split_at(index))
} else {
None
}
}
/// Parallel extensions for strings.
pub trait ParallelString {
/// Returns a plain string slice, which is used to implement the rest of
/// the parallel methods.
fn as_parallel_string(&self) -> &str;
/// Returns a parallel iterator over the characters of a string.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let max = "hello".par_chars().max_by_key(|c| *c as i32);
/// assert_eq!(Some('o'), max);
/// ```
fn par_chars(&self) -> Chars<'_> {
Chars {
chars: self.as_parallel_string(),
}
}
/// Returns a parallel iterator over the characters of a string, with their positions.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let min = "hello".par_char_indices().min_by_key(|&(_i, c)| c as i32);
/// assert_eq!(Some((1, 'e')), min);
/// ```
fn par_char_indices(&self) -> CharIndices<'_> {
CharIndices {
chars: self.as_parallel_string(),
}
}
/// Returns a parallel iterator over the bytes of a string.
///
/// Note that multi-byte sequences (for code points greater than `U+007F`)
/// are produced as separate items, but will not be split across threads.
/// If you would prefer an indexed iterator without that guarantee, consider
/// `string.as_bytes().par_iter().copied()` instead.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let max = "hello".par_bytes().max();
/// assert_eq!(Some(b'o'), max);
/// ```
fn par_bytes(&self) -> Bytes<'_> {
Bytes {
chars: self.as_parallel_string(),
}
}
/// Returns a parallel iterator over a string encoded as UTF-16.
///
/// Note that surrogate pairs (for code points greater than `U+FFFF`) are
/// produced as separate items, but will not be split across threads.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
///
/// let max = "hello".par_encode_utf16().max();
/// assert_eq!(Some(b'o' as u16), max);
///
/// let text = "Zażółć gęślą jaźń";
/// let utf8_len = text.len();
/// let utf16_len = text.par_encode_utf16().count();
/// assert!(utf16_len <= utf8_len);
/// ```
fn par_encode_utf16(&self) -> EncodeUtf16<'_> {
EncodeUtf16 {
chars: self.as_parallel_string(),
}
}
/// Returns a parallel iterator over substrings separated by a
/// given character or predicate, similar to `str::split`.
///
/// Note: the `Pattern` trait is private, for use only by Rayon itself.
/// It is implemented for `char`, `&[char]`, and any function or closure
/// `F: Fn(char) -> bool + Sync + Send`.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let total = "1, 2, buckle, 3, 4, door"
/// .par_split(',')
/// .filter_map(|s| s.trim().parse::<i32>().ok())
/// .sum();
/// assert_eq!(10, total);
/// ```
fn par_split<P: Pattern>(&self, separator: P) -> Split<'_, P> {
Split::new(self.as_parallel_string(), separator)
}
/// Returns a parallel iterator over substrings terminated by a
/// given character or predicate, similar to `str::split_terminator`.
/// It's equivalent to `par_split`, except it doesn't produce an empty
/// substring after a trailing terminator.
///
/// Note: the `Pattern` trait is private, for use only by Rayon itself.
/// It is implemented for `char`, `&[char]`, and any function or closure
/// `F: Fn(char) -> bool + Sync + Send`.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let parts: Vec<_> = "((1 + 3) * 2)"
/// .par_split_terminator(|c| c == '(' || c == ')')
/// .collect();
/// assert_eq!(vec!["", "", "1 + 3", " * 2"], parts);
/// ```
fn par_split_terminator<P: Pattern>(&self, terminator: P) -> SplitTerminator<'_, P> {
SplitTerminator::new(self.as_parallel_string(), terminator)
}
/// Returns a parallel iterator over the lines of a string, where a line is
/// terminated either by a newline (`\n`) or by a carriage return followed by
/// a newline (`\r\n`). The final line ending is optional, and line endings
/// are not included in the output strings.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let lengths: Vec<_> = "hello world\nfizbuzz"
/// .par_lines()
/// .map(|l| l.len())
/// .collect();
/// assert_eq!(vec![11, 7], lengths);
/// ```
fn par_lines(&self) -> Lines<'_> {
Lines(self.as_parallel_string())
}
/// Returns a parallel iterator over the sub-slices of a string that are
/// separated by any amount of whitespace.
///
/// As with `str::split_whitespace`, 'whitespace' is defined according to
/// the terms of the Unicode Derived Core Property `White_Space`.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let longest = "which is the longest word?"
/// .par_split_whitespace()
/// .max_by_key(|word| word.len());
/// assert_eq!(Some("longest"), longest);
/// ```
fn par_split_whitespace(&self) -> SplitWhitespace<'_> {
SplitWhitespace(self.as_parallel_string())
}
/// Returns a parallel iterator over substrings that match a
/// given character or predicate, similar to `str::matches`.
///
/// Note: the `Pattern` trait is private, for use only by Rayon itself.
/// It is implemented for `char`, `&[char]`, and any function or closure
/// `F: Fn(char) -> bool + Sync + Send`.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let total = "1, 2, buckle, 3, 4, door"
/// .par_matches(char::is_numeric)
/// .map(|s| s.parse::<i32>().expect("digit"))
/// .sum();
/// assert_eq!(10, total);
/// ```
fn par_matches<P: Pattern>(&self, pattern: P) -> Matches<'_, P> {
Matches {
chars: self.as_parallel_string(),
pattern,
}
}
/// Returns a parallel iterator over substrings that match a given character
/// or predicate, with their positions, similar to `str::match_indices`.
///
/// Note: the `Pattern` trait is private, for use only by Rayon itself.
/// It is implemented for `char`, `&[char]`, and any function or closure
/// `F: Fn(char) -> bool + Sync + Send`.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// let digits: Vec<_> = "1, 2, buckle, 3, 4, door"
/// .par_match_indices(char::is_numeric)
/// .collect();
/// assert_eq!(digits, vec![(0, "1"), (3, "2"), (14, "3"), (17, "4")]);
/// ```
fn par_match_indices<P: Pattern>(&self, pattern: P) -> MatchIndices<'_, P> {
MatchIndices {
chars: self.as_parallel_string(),
pattern,
}
}
}
impl ParallelString for str {
#[inline]
fn as_parallel_string(&self) -> &str {
self
}
}
// /////////////////////////////////////////////////////////////////////////
/// We hide the `Pattern` trait in a private module, as its API is not meant
/// for general consumption. If we could have privacy on trait items, then it
/// would be nicer to have its basic existence and implementors public while
/// keeping all of the methods private.
mod private {
use crate::iter::plumbing::Folder;
/// Pattern-matching trait for `ParallelString`, somewhat like a mix of
/// `std::str::pattern::{Pattern, Searcher}`.
///
/// Implementing this trait is not permitted outside of `rayon`.
pub trait Pattern: Sized + Sync + Send {
private_decl! {}
fn find_in(&self, haystack: &str) -> Option<usize>;
fn rfind_in(&self, haystack: &str) -> Option<usize>;
fn is_suffix_of(&self, haystack: &str) -> bool;
fn fold_splits<'ch, F>(&self, haystack: &'ch str, folder: F, skip_last: bool) -> F
where
F: Folder<&'ch str>;
fn fold_matches<'ch, F>(&self, haystack: &'ch str, folder: F) -> F
where
F: Folder<&'ch str>;
fn fold_match_indices<'ch, F>(&self, haystack: &'ch str, folder: F, base: usize) -> F
where
F: Folder<(usize, &'ch str)>;
}
}
use self::private::Pattern;
#[inline]
fn offset<T>(base: usize) -> impl Fn((usize, T)) -> (usize, T) {
move |(i, x)| (base + i, x)
}
macro_rules! impl_pattern {
(&$self:ident => $pattern:expr) => {
private_impl! {}
#[inline]
fn find_in(&$self, chars: &str) -> Option<usize> {
chars.find($pattern)
}
#[inline]
fn rfind_in(&$self, chars: &str) -> Option<usize> {
chars.rfind($pattern)
}
#[inline]
fn is_suffix_of(&$self, chars: &str) -> bool {
chars.ends_with($pattern)
}
fn fold_splits<'ch, F>(&$self, chars: &'ch str, folder: F, skip_last: bool) -> F
where
F: Folder<&'ch str>,
{
let mut split = chars.split($pattern);
if skip_last {
split.next_back();
}
folder.consume_iter(split)
}
fn fold_matches<'ch, F>(&$self, chars: &'ch str, folder: F) -> F
where
F: Folder<&'ch str>,
{
folder.consume_iter(chars.matches($pattern))
}
fn fold_match_indices<'ch, F>(&$self, chars: &'ch str, folder: F, base: usize) -> F
where
F: Folder<(usize, &'ch str)>,
{
folder.consume_iter(chars.match_indices($pattern).map(offset(base)))
}
}
}
impl Pattern for char {
impl_pattern!(&self => *self);
}
impl Pattern for &[char] {
impl_pattern!(&self => *self);
}
impl<FN: Sync + Send + Fn(char) -> bool> Pattern for FN {
impl_pattern!(&self => self);
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over the characters of a string
#[derive(Debug, Clone)]
pub struct Chars<'ch> {
chars: &'ch str,
}
struct CharsProducer<'ch> {
chars: &'ch str,
}
impl<'ch> ParallelIterator for Chars<'ch> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge_unindexed(CharsProducer { chars: self.chars }, consumer)
}
}
impl<'ch> UnindexedProducer for CharsProducer<'ch> {
type Item = char;
fn split(self) -> (Self, Option<Self>) {
match split(self.chars) {
Some((left, right)) => (
CharsProducer { chars: left },
Some(CharsProducer { chars: right }),
),
None => (self, None),
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.chars.chars())
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over the characters of a string, with their positions
#[derive(Debug, Clone)]
pub struct CharIndices<'ch> {
chars: &'ch str,
}
struct CharIndicesProducer<'ch> {
index: usize,
chars: &'ch str,
}
impl<'ch> ParallelIterator for CharIndices<'ch> {
type Item = (usize, char);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = CharIndicesProducer {
index: 0,
chars: self.chars,
};
bridge_unindexed(producer, consumer)
}
}
impl<'ch> UnindexedProducer for CharIndicesProducer<'ch> {
type Item = (usize, char);
fn split(self) -> (Self, Option<Self>) {
match split(self.chars) {
Some((left, right)) => (
CharIndicesProducer {
chars: left,
..self
},
Some(CharIndicesProducer {
chars: right,
index: self.index + left.len(),
}),
),
None => (self, None),
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
let base = self.index;
folder.consume_iter(self.chars.char_indices().map(offset(base)))
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over the bytes of a string
#[derive(Debug, Clone)]
pub struct Bytes<'ch> {
chars: &'ch str,
}
struct BytesProducer<'ch> {
chars: &'ch str,
}
impl<'ch> ParallelIterator for Bytes<'ch> {
type Item = u8;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge_unindexed(BytesProducer { chars: self.chars }, consumer)
}
}
impl<'ch> UnindexedProducer for BytesProducer<'ch> {
type Item = u8;
fn split(self) -> (Self, Option<Self>) {
match split(self.chars) {
Some((left, right)) => (
BytesProducer { chars: left },
Some(BytesProducer { chars: right }),
),
None => (self, None),
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.chars.bytes())
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over a string encoded as UTF-16
#[derive(Debug, Clone)]
pub struct EncodeUtf16<'ch> {
chars: &'ch str,
}
struct EncodeUtf16Producer<'ch> {
chars: &'ch str,
}
impl<'ch> ParallelIterator for EncodeUtf16<'ch> {
type Item = u16;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge_unindexed(EncodeUtf16Producer { chars: self.chars }, consumer)
}
}
impl<'ch> UnindexedProducer for EncodeUtf16Producer<'ch> {
type Item = u16;
fn split(self) -> (Self, Option<Self>) {
match split(self.chars) {
Some((left, right)) => (
EncodeUtf16Producer { chars: left },
Some(EncodeUtf16Producer { chars: right }),
),
None => (self, None),
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.chars.encode_utf16())
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over substrings separated by a pattern
#[derive(Debug, Clone)]
pub struct Split<'ch, P: Pattern> {
chars: &'ch str,
separator: P,
}
impl<'ch, P: Pattern> Split<'ch, P> {
fn new(chars: &'ch str, separator: P) -> Self {
Split { chars, separator }
}
}
impl<'ch, P: Pattern> ParallelIterator for Split<'ch, P> {
type Item = &'ch str;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = SplitProducer::new(self.chars, &self.separator);
bridge_unindexed(producer, consumer)
}
}
/// Implement support for `SplitProducer`.
impl<'ch, P: Pattern> Fissile<P> for &'ch str {
fn length(&self) -> usize {
self.len()
}
fn midpoint(&self, end: usize) -> usize {
// First find a suitable UTF-8 boundary.
find_char_midpoint(&self[..end])
}
fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
separator.find_in(&self[start..end])
}
fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
separator.rfind_in(&self[..end])
}
fn split_once(self, index: usize) -> (Self, Self) {
let (left, right) = self.split_at(index);
let mut right_iter = right.chars();
right_iter.next(); // skip the separator
(left, right_iter.as_str())
}
fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
where
F: Folder<Self>,
{
separator.fold_splits(self, folder, skip_last)
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over substrings separated by a terminator pattern
#[derive(Debug, Clone)]
pub struct SplitTerminator<'ch, P: Pattern> {
chars: &'ch str,
terminator: P,
}
struct SplitTerminatorProducer<'ch, 'sep, P: Pattern> {
splitter: SplitProducer<'sep, P, &'ch str>,
skip_last: bool,
}
impl<'ch, P: Pattern> SplitTerminator<'ch, P> {
fn new(chars: &'ch str, terminator: P) -> Self {
SplitTerminator { chars, terminator }
}
}
impl<'ch, 'sep, P: Pattern + 'sep> SplitTerminatorProducer<'ch, 'sep, P> {
fn new(chars: &'ch str, terminator: &'sep P) -> Self {
SplitTerminatorProducer {
splitter: SplitProducer::new(chars, terminator),
skip_last: chars.is_empty() || terminator.is_suffix_of(chars),
}
}
}
impl<'ch, P: Pattern> ParallelIterator for SplitTerminator<'ch, P> {
type Item = &'ch str;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = SplitTerminatorProducer::new(self.chars, &self.terminator);
bridge_unindexed(producer, consumer)
}
}
impl<'ch, 'sep, P: Pattern + 'sep> UnindexedProducer for SplitTerminatorProducer<'ch, 'sep, P> {
type Item = &'ch str;
fn split(mut self) -> (Self, Option<Self>) {
let (left, right) = self.splitter.split();
self.splitter = left;
let right = right.map(|right| {
let skip_last = self.skip_last;
self.skip_last = false;
SplitTerminatorProducer {
splitter: right,
skip_last,
}
});
(self, right)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.splitter.fold_with(folder, self.skip_last)
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over lines in a string
#[derive(Debug, Clone)]
pub struct Lines<'ch>(&'ch str);
#[inline]
fn no_carriage_return(line: &str) -> &str {
line.strip_suffix('\r').unwrap_or(line)
}
impl<'ch> ParallelIterator for Lines<'ch> {
type Item = &'ch str;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0
.par_split_terminator('\n')
.map(no_carriage_return)
.drive_unindexed(consumer)
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over substrings separated by whitespace
#[derive(Debug, Clone)]
pub struct SplitWhitespace<'ch>(&'ch str);
#[inline]
fn not_empty(s: &&str) -> bool {
!s.is_empty()
}
impl<'ch> ParallelIterator for SplitWhitespace<'ch> {
type Item = &'ch str;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0
.par_split(char::is_whitespace)
.filter(not_empty)
.drive_unindexed(consumer)
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over substrings that match a pattern
#[derive(Debug, Clone)]
pub struct Matches<'ch, P: Pattern> {
chars: &'ch str,
pattern: P,
}
struct MatchesProducer<'ch, 'pat, P: Pattern> {
chars: &'ch str,
pattern: &'pat P,
}
impl<'ch, P: Pattern> ParallelIterator for Matches<'ch, P> {
type Item = &'ch str;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = MatchesProducer {
chars: self.chars,
pattern: &self.pattern,
};
bridge_unindexed(producer, consumer)
}
}
impl<'ch, 'pat, P: Pattern> UnindexedProducer for MatchesProducer<'ch, 'pat, P> {
type Item = &'ch str;
fn split(self) -> (Self, Option<Self>) {
match split(self.chars) {
Some((left, right)) => (
MatchesProducer {
chars: left,
..self
},
Some(MatchesProducer {
chars: right,
..self
}),
),
None => (self, None),
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.pattern.fold_matches(self.chars, folder)
}
}
// /////////////////////////////////////////////////////////////////////////
/// Parallel iterator over substrings that match a pattern, with their positions
#[derive(Debug, Clone)]
pub struct MatchIndices<'ch, P: Pattern> {
chars: &'ch str,
pattern: P,
}
struct MatchIndicesProducer<'ch, 'pat, P: Pattern> {
index: usize,
chars: &'ch str,
pattern: &'pat P,
}
impl<'ch, P: Pattern> ParallelIterator for MatchIndices<'ch, P> {
type Item = (usize, &'ch str);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = MatchIndicesProducer {
index: 0,
chars: self.chars,
pattern: &self.pattern,
};
bridge_unindexed(producer, consumer)
}
}
impl<'ch, 'pat, P: Pattern> UnindexedProducer for MatchIndicesProducer<'ch, 'pat, P> {
type Item = (usize, &'ch str);
fn split(self) -> (Self, Option<Self>) {
match split(self.chars) {
Some((left, right)) => (
MatchIndicesProducer {
chars: left,
..self
},
Some(MatchIndicesProducer {
chars: right,
index: self.index + left.len(),
..self
}),
),
None => (self, None),
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.pattern
.fold_match_indices(self.chars, folder, self.index)
}
}

vendor/rayon/src/string.rs vendored Normal file
@@ -0,0 +1,48 @@
//! This module contains the parallel iterator types for owned strings
//! (`String`). You will rarely need to interact with it directly
//! unless you have need to name one of the iterator types.
use crate::iter::plumbing::*;
use crate::math::simplify_range;
use crate::prelude::*;
use std::ops::{Range, RangeBounds};
impl<'a> ParallelDrainRange<usize> for &'a mut String {
type Iter = Drain<'a>;
type Item = char;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
Drain {
range: simplify_range(range, self.len()),
string: self,
}
}
}
/// Draining parallel iterator that moves a range of characters out of a string,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a> {
string: &'a mut String,
range: Range<usize>,
}
impl<'a> ParallelIterator for Drain<'a> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.string[self.range.clone()]
.par_chars()
.drive_unindexed(consumer)
}
}
impl<'a> Drop for Drain<'a> {
fn drop(&mut self) {
// Remove the drained range.
self.string.drain(self.range.clone());
}
}
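// A minimal sketch (hypothetical test scaffolding, not in the vendored source):
// the drained range is produced as `char`s and removed from the string when the
// iterator is dropped.
#[cfg(test)]
mod tests {
    use crate::prelude::*;

    #[test]
    fn drain_removes_range() {
        let mut s = String::from("hello world");
        let drained: Vec<char> = s.par_drain(0..5).collect();
        assert_eq!(drained, "hello".chars().collect::<Vec<char>>());
        assert_eq!(s, " world");
    }
}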

vendor/rayon/src/vec.rs vendored Normal file
@@ -0,0 +1,283 @@
//! Parallel iterator types for [vectors][std::vec] (`Vec<T>`)
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::vec]: https://doc.rust-lang.org/stable/std/vec/
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::math::simplify_range;
use crate::slice::{Iter, IterMut};
use std::iter;
use std::mem;
use std::ops::{Range, RangeBounds};
use std::ptr;
use std::slice;
impl<'data, T: Sync + 'data> IntoParallelIterator for &'data Vec<T> {
type Item = &'data T;
type Iter = Iter<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&[T]>::into_par_iter(self)
}
}
impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut Vec<T> {
type Item = &'data mut T;
type Iter = IterMut<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&mut [T]>::into_par_iter(self)
}
}
/// Parallel iterator that moves out of a vector.
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
vec: Vec<T>,
}
impl<T: Send> IntoParallelIterator for Vec<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { vec: self }
}
}
impl<T: Send> ParallelIterator for IntoIter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for IntoIter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.vec.len()
}
fn with_producer<CB>(mut self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
// Drain every item, and then the vector only needs to free its buffer.
self.vec.par_drain(..).with_producer(callback)
}
}
impl<'data, T: Send> ParallelDrainRange<usize> for &'data mut Vec<T> {
type Iter = Drain<'data, T>;
type Item = T;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
Drain {
orig_len: self.len(),
range: simplify_range(range, self.len()),
vec: self,
}
}
}
/// Draining parallel iterator that moves a range out of a vector, but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'data, T: Send> {
vec: &'data mut Vec<T>,
range: Range<usize>,
orig_len: usize,
}
impl<'data, T: Send> ParallelIterator for Drain<'data, T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send> IndexedParallelIterator for Drain<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.range.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
unsafe {
// Make the vector forget about the drained items, and temporarily the tail too.
self.vec.set_len(self.range.start);
// Create the producer as the exclusive "owner" of the slice.
let producer = DrainProducer::from_vec(self.vec, self.range.len());
// The producer will move or drop each item from the drained range.
callback.callback(producer)
}
}
}
impl<'data, T: Send> Drop for Drain<'data, T> {
fn drop(&mut self) {
let Range { start, end } = self.range;
if self.vec.len() == self.orig_len {
// Nothing was produced (the length is untouched), so just call a normal drain to remove the items.
self.vec.drain(start..end);
} else if start == end {
// Empty range, so just restore the length to its original state
unsafe {
self.vec.set_len(self.orig_len);
}
} else if end < self.orig_len {
// The producer was responsible for consuming the drained items.
// Move the tail items to their new place, then set the length to include them.
unsafe {
let ptr = self.vec.as_mut_ptr().add(start);
let tail_ptr = self.vec.as_ptr().add(end);
let tail_len = self.orig_len - end;
ptr::copy(tail_ptr, ptr, tail_len);
self.vec.set_len(start + tail_len);
}
}
}
}
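// A minimal sketch (hypothetical test scaffolding, not in the vendored source):
// draining a middle range yields those items, shifts the tail down, and keeps
// the vector's allocation.
#[cfg(test)]
mod drain_sketch_tests {
    use crate::prelude::*;

    #[test]
    fn drain_middle_range() {
        let mut v: Vec<i32> = (0..10).collect();
        let cap = v.capacity();
        let drained: Vec<i32> = v.par_drain(2..5).collect();
        assert_eq!(drained, vec![2, 3, 4]);
        assert_eq!(v, vec![0, 1, 5, 6, 7, 8, 9]);
        assert_eq!(v.capacity(), cap);
    }
}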
/// ////////////////////////////////////////////////////////////////////////
pub(crate) struct DrainProducer<'data, T: Send> {
slice: &'data mut [T],
}
impl<T: Send> DrainProducer<'_, T> {
/// Creates a draining producer, which *moves* items from the slice.
///
/// Unsafe because `!Copy` data must not be read after the borrow is released.
pub(crate) unsafe fn new(slice: &mut [T]) -> DrainProducer<'_, T> {
DrainProducer { slice }
}
/// Creates a draining producer, which *moves* items from the tail of the vector.
///
/// Unsafe because we're moving from beyond `vec.len()`, so the caller must ensure
/// that data is initialized and not read after the borrow is released.
unsafe fn from_vec(vec: &mut Vec<T>, len: usize) -> DrainProducer<'_, T> {
let start = vec.len();
assert!(vec.capacity() - start >= len);
// The pointer is derived from `Vec` directly, not through a `Deref`,
// so it has provenance over the whole allocation.
let ptr = vec.as_mut_ptr().add(start);
DrainProducer::new(slice::from_raw_parts_mut(ptr, len))
}
}
impl<'data, T: 'data + Send> Producer for DrainProducer<'data, T> {
type Item = T;
type IntoIter = SliceDrain<'data, T>;
fn into_iter(mut self) -> Self::IntoIter {
// replace the slice so we don't drop it twice
let slice = mem::take(&mut self.slice);
SliceDrain {
iter: slice.iter_mut(),
}
}
fn split_at(mut self, index: usize) -> (Self, Self) {
// replace the slice so we don't drop it twice
let slice = mem::take(&mut self.slice);
let (left, right) = slice.split_at_mut(index);
unsafe { (DrainProducer::new(left), DrainProducer::new(right)) }
}
}
impl<'data, T: 'data + Send> Drop for DrainProducer<'data, T> {
fn drop(&mut self) {
// extract the slice so we can use `Drop for [T]`
let slice_ptr: *mut [T] = mem::take::<&'data mut [T]>(&mut self.slice);
unsafe { ptr::drop_in_place::<[T]>(slice_ptr) };
}
}
/// ////////////////////////////////////////////////////////////////////////
// like std::vec::Drain, without updating a source Vec
pub(crate) struct SliceDrain<'data, T> {
iter: slice::IterMut<'data, T>,
}
impl<'data, T: 'data> Iterator for SliceDrain<'data, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
// Coerce the pointer early, so we don't keep the
// reference that's about to be invalidated.
let ptr: *const T = self.iter.next()?;
Some(unsafe { ptr::read(ptr) })
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
fn count(self) -> usize {
self.iter.len()
}
}
impl<'data, T: 'data> DoubleEndedIterator for SliceDrain<'data, T> {
fn next_back(&mut self) -> Option<Self::Item> {
// Coerce the pointer early, so we don't keep the
// reference that's about to be invalidated.
let ptr: *const T = self.iter.next_back()?;
Some(unsafe { ptr::read(ptr) })
}
}
impl<'data, T: 'data> ExactSizeIterator for SliceDrain<'data, T> {
fn len(&self) -> usize {
self.iter.len()
}
}
impl<'data, T: 'data> iter::FusedIterator for SliceDrain<'data, T> {}
impl<'data, T: 'data> Drop for SliceDrain<'data, T> {
fn drop(&mut self) {
// extract the iterator so we can use `Drop for [T]`
let slice_ptr: *mut [T] = mem::replace(&mut self.iter, [].iter_mut()).into_slice();
unsafe { ptr::drop_in_place::<[T]>(slice_ptr) };
}
}