Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

vendor/rayon/tests/chars.rs vendored Normal file
@@ -0,0 +1,39 @@
use rayon::prelude::*;
use std::char;
#[test]
fn half_open_correctness() {
let low = char::from_u32(0xD800 - 0x7).unwrap();
let high = char::from_u32(0xE000 + 0x7).unwrap();
let range = low..high;
let mut chars: Vec<char> = range.into_par_iter().collect();
chars.sort();
assert_eq!(
chars,
vec![
'\u{D7F9}', '\u{D7FA}', '\u{D7FB}', '\u{D7FC}', '\u{D7FD}', '\u{D7FE}', '\u{D7FF}',
'\u{E000}', '\u{E001}', '\u{E002}', '\u{E003}', '\u{E004}', '\u{E005}', '\u{E006}',
]
);
}
#[test]
fn closed_correctness() {
let low = char::from_u32(0xD800 - 0x7).unwrap();
let high = char::from_u32(0xE000 + 0x7).unwrap();
let range = low..=high;
let mut chars: Vec<char> = range.into_par_iter().collect();
chars.sort();
assert_eq!(
chars,
vec![
'\u{D7F9}', '\u{D7FA}', '\u{D7FB}', '\u{D7FC}', '\u{D7FD}', '\u{D7FE}', '\u{D7FF}',
'\u{E000}', '\u{E001}', '\u{E002}', '\u{E003}', '\u{E004}', '\u{E005}', '\u{E006}',
'\u{E007}',
]
);
}

vendor/rayon/tests/clones.rs vendored Normal file
@@ -0,0 +1,216 @@
use rayon::prelude::*;
fn check<I>(iter: I)
where
I: ParallelIterator + Clone,
I::Item: std::fmt::Debug + PartialEq,
{
let a: Vec<_> = iter.clone().collect();
let b: Vec<_> = iter.collect();
assert_eq!(a, b);
}
fn check_count<I>(iter: I)
where
I: ParallelIterator + Clone,
{
assert_eq!(iter.clone().count(), iter.count());
}
#[test]
fn clone_binary_heap() {
use std::collections::BinaryHeap;
let heap: BinaryHeap<_> = (0..1000).collect();
check(heap.par_iter());
check(heap.into_par_iter());
}
#[test]
fn clone_btree_map() {
use std::collections::BTreeMap;
let map: BTreeMap<_, _> = (0..1000).enumerate().collect();
check(map.par_iter());
}
#[test]
fn clone_btree_set() {
use std::collections::BTreeSet;
let set: BTreeSet<_> = (0..1000).collect();
check(set.par_iter());
}
#[test]
fn clone_hash_map() {
use std::collections::HashMap;
let map: HashMap<_, _> = (0..1000).enumerate().collect();
check(map.par_iter());
}
#[test]
fn clone_hash_set() {
use std::collections::HashSet;
let set: HashSet<_> = (0..1000).collect();
check(set.par_iter());
}
#[test]
fn clone_linked_list() {
use std::collections::LinkedList;
let list: LinkedList<_> = (0..1000).collect();
check(list.par_iter());
check(list.into_par_iter());
}
#[test]
fn clone_vec_deque() {
use std::collections::VecDeque;
let deque: VecDeque<_> = (0..1000).collect();
check(deque.par_iter());
check(deque.into_par_iter());
}
#[test]
fn clone_option() {
let option = Some(0);
check(option.par_iter());
check(option.into_par_iter());
}
#[test]
fn clone_result() {
let result = Ok::<_, ()>(0);
check(result.par_iter());
check(result.into_par_iter());
}
#[test]
fn clone_range() {
check((0..1000).into_par_iter());
}
#[test]
fn clone_range_inclusive() {
check((0..=1000).into_par_iter());
}
#[test]
fn clone_str() {
let s = include_str!("clones.rs");
check(s.par_chars());
check(s.par_lines());
check(s.par_split('\n'));
check(s.par_split_terminator('\n'));
check(s.par_split_whitespace());
}
#[test]
fn clone_vec() {
let v: Vec<_> = (0..1000).collect();
check(v.par_iter());
check(v.par_chunks(42));
check(v.par_chunks_exact(42));
check(v.par_rchunks(42));
check(v.par_rchunks_exact(42));
check(v.par_windows(42));
check(v.par_split(|x| x % 3 == 0));
check(v.into_par_iter());
}
#[test]
fn clone_array() {
let a = [0i32; 100];
check(a.into_par_iter());
}
#[test]
fn clone_adaptors() {
let v: Vec<_> = (0..1000).map(Some).collect();
check(v.par_iter().chain(&v));
check(v.par_iter().cloned());
check(v.par_iter().copied());
check(v.par_iter().enumerate());
check(v.par_iter().filter(|_| true));
check(v.par_iter().filter_map(|x| *x));
check(v.par_iter().flat_map(|x| *x));
check(v.par_iter().flat_map_iter(|x| *x));
check(v.par_iter().flatten());
check(v.par_iter().flatten_iter());
check(v.par_iter().with_max_len(1).fold(|| 0, |x, _| x));
check(v.par_iter().with_max_len(1).fold_with(0, |x, _| x));
check(v.par_iter().with_max_len(1).fold_chunks(1, || 0, |x, _| x));
check(
v.par_iter()
.with_max_len(1)
.fold_chunks_with(1, 0, |x, _| x),
);
check(v.par_iter().with_max_len(1).try_fold(|| 0, |_, &x| x));
check(v.par_iter().with_max_len(1).try_fold_with(0, |_, &x| x));
check(v.par_iter().inspect(|_| ()));
check(v.par_iter().update(|_| ()));
check(v.par_iter().interleave(&v));
check(v.par_iter().interleave_shortest(&v));
check(v.par_iter().intersperse(&None));
check(v.par_iter().chunks(3));
check(v.par_iter().map(|x| x));
check(v.par_iter().map_with(0, |_, x| x));
check(v.par_iter().map_init(|| 0, |_, x| x));
check(v.par_iter().panic_fuse());
check(v.par_iter().positions(|_| true));
check(v.par_iter().rev());
check(v.par_iter().skip(42));
check(v.par_iter().skip_any_while(|_| false));
check(v.par_iter().take(42));
check(v.par_iter().take_any_while(|_| true));
check(v.par_iter().cloned().while_some());
check(v.par_iter().with_max_len(1));
check(v.par_iter().with_min_len(1));
check(v.par_iter().zip(&v));
check(v.par_iter().zip_eq(&v));
check(v.par_iter().step_by(2));
}
#[test]
fn clone_counted_adaptors() {
let v: Vec<_> = (0..1000).collect();
check_count(v.par_iter().skip_any(42));
check_count(v.par_iter().take_any(42));
}
#[test]
fn clone_empty() {
check(rayon::iter::empty::<i32>());
}
#[test]
fn clone_once() {
check(rayon::iter::once(10));
}
#[test]
fn clone_repeat() {
let x: Option<i32> = None;
check(rayon::iter::repeat(x).while_some());
check(rayon::iter::repeatn(x, 1000));
}
#[test]
fn clone_splitter() {
check(rayon::iter::split(0..1000, |x| (x, None)));
}
#[test]
fn clone_multizip() {
let v: &Vec<_> = &(0..1000).collect();
check((v,).into_par_iter());
check((v, v).into_par_iter());
check((v, v, v).into_par_iter());
check((v, v, v, v).into_par_iter());
check((v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
}

vendor/rayon/tests/collect.rs vendored Normal file
@@ -0,0 +1,113 @@
use rayon::prelude::*;
use std::panic;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn collect_drop_on_unwind() {
struct Recorddrop<'a>(i64, &'a Mutex<Vec<i64>>);
impl<'a> Drop for Recorddrop<'a> {
fn drop(&mut self) {
self.1.lock().unwrap().push(self.0);
}
}
let test_collect_panic = |will_panic: bool| {
let test_vec_len = 1024;
let panic_point = 740;
let mut inserts = Mutex::new(Vec::new());
let mut drops = Mutex::new(Vec::new());
let mut a = (0..test_vec_len).collect::<Vec<_>>();
let b = (0..test_vec_len).collect::<Vec<_>>();
let _result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let mut result = Vec::new();
a.par_iter_mut()
.zip(&b)
.map(|(&mut a, &b)| {
if a > panic_point && will_panic {
panic!("unwinding for test");
}
let elt = a + b;
inserts.lock().unwrap().push(elt);
Recorddrop(elt, &drops)
})
.collect_into_vec(&mut result);
// If we reach this point, this must pass
assert_eq!(a.len(), result.len());
}));
let inserts = inserts.get_mut().unwrap();
let drops = drops.get_mut().unwrap();
println!("{:?}", inserts);
println!("{:?}", drops);
assert_eq!(inserts.len(), drops.len(), "Incorrect number of drops");
// sort to normalize order
inserts.sort();
drops.sort();
assert_eq!(inserts, drops, "Incorrect elements were dropped");
};
for &should_panic in &[true, false] {
test_collect_panic(should_panic);
}
}
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn collect_drop_on_unwind_zst() {
static INSERTS: AtomicUsize = AtomicUsize::new(0);
static DROPS: AtomicUsize = AtomicUsize::new(0);
struct RecorddropZst;
impl Drop for RecorddropZst {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let test_collect_panic = |will_panic: bool| {
INSERTS.store(0, Ordering::SeqCst);
DROPS.store(0, Ordering::SeqCst);
let test_vec_len = 1024;
let panic_point = 740;
let a = (0..test_vec_len).collect::<Vec<_>>();
let _result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let mut result = Vec::new();
a.par_iter()
.map(|&a| {
if a > panic_point && will_panic {
panic!("unwinding for test");
}
INSERTS.fetch_add(1, Ordering::SeqCst);
RecorddropZst
})
.collect_into_vec(&mut result);
// If we reach this point, this must pass
assert_eq!(a.len(), result.len());
}));
let inserts = INSERTS.load(Ordering::SeqCst);
let drops = DROPS.load(Ordering::SeqCst);
assert_eq!(inserts, drops, "Incorrect number of drops");
assert!(will_panic || drops == test_vec_len)
};
for &should_panic in &[true, false] {
test_collect_panic(should_panic);
}
}

vendor/rayon/tests/cross-pool.rs vendored Normal file
@@ -0,0 +1,22 @@
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn cross_pool_busy() {
let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let n: i32 = 100;
let sum: i32 = pool1.install(move || {
// Each item will block on pool2, but pool1 can continue processing other work from the
// parallel iterator in the meantime. There's a chance that pool1 will still be awake to
// see the latch set without being tickled, and then it will drop that stack job. The latch
// internals must not assume that the job will still be alive after it's set!
(1..=n)
.into_par_iter()
.map(|i| pool2.install(move || i))
.sum()
});
assert_eq!(sum, n * (n + 1) / 2);
}

vendor/rayon/tests/debug.rs vendored Normal file
@@ -0,0 +1,225 @@
use rayon::prelude::*;
use std::fmt::Debug;
fn check<I>(iter: I)
where
I: ParallelIterator + Debug,
{
println!("{:?}", iter);
}
#[test]
fn debug_binary_heap() {
use std::collections::BinaryHeap;
let mut heap: BinaryHeap<_> = (0..10).collect();
check(heap.par_iter());
check(heap.par_drain());
check(heap.into_par_iter());
}
#[test]
fn debug_btree_map() {
use std::collections::BTreeMap;
let mut map: BTreeMap<_, _> = (0..10).enumerate().collect();
check(map.par_iter());
check(map.par_iter_mut());
check(map.into_par_iter());
}
#[test]
fn debug_btree_set() {
use std::collections::BTreeSet;
let set: BTreeSet<_> = (0..10).collect();
check(set.par_iter());
check(set.into_par_iter());
}
#[test]
fn debug_hash_map() {
use std::collections::HashMap;
let mut map: HashMap<_, _> = (0..10).enumerate().collect();
check(map.par_iter());
check(map.par_iter_mut());
check(map.par_drain());
check(map.into_par_iter());
}
#[test]
fn debug_hash_set() {
use std::collections::HashSet;
let mut set: HashSet<_> = (0..10).collect();
check(set.par_iter());
check(set.par_drain());
check(set.into_par_iter());
}
#[test]
fn debug_linked_list() {
use std::collections::LinkedList;
let mut list: LinkedList<_> = (0..10).collect();
check(list.par_iter());
check(list.par_iter_mut());
check(list.into_par_iter());
}
#[test]
fn debug_vec_deque() {
use std::collections::VecDeque;
let mut deque: VecDeque<_> = (0..10).collect();
check(deque.par_iter());
check(deque.par_iter_mut());
check(deque.par_drain(..));
check(deque.into_par_iter());
}
#[test]
fn debug_option() {
let mut option = Some(0);
check(option.par_iter());
check(option.par_iter_mut());
check(option.into_par_iter());
}
#[test]
fn debug_result() {
let mut result = Ok::<_, ()>(0);
check(result.par_iter());
check(result.par_iter_mut());
check(result.into_par_iter());
}
#[test]
fn debug_range() {
check((0..10).into_par_iter());
}
#[test]
fn debug_range_inclusive() {
check((0..=10).into_par_iter());
}
#[test]
fn debug_str() {
let s = "a b c d\ne f g";
check(s.par_chars());
check(s.par_lines());
check(s.par_split('\n'));
check(s.par_split_terminator('\n'));
check(s.par_split_whitespace());
}
#[test]
fn debug_string() {
let mut s = "a b c d\ne f g".to_string();
s.par_drain(..);
}
#[test]
fn debug_vec() {
let mut v: Vec<_> = (0..10).collect();
check(v.par_iter());
check(v.par_iter_mut());
check(v.par_chunks(42));
check(v.par_chunks_exact(42));
check(v.par_chunks_mut(42));
check(v.par_chunks_exact_mut(42));
check(v.par_rchunks(42));
check(v.par_rchunks_exact(42));
check(v.par_rchunks_mut(42));
check(v.par_rchunks_exact_mut(42));
check(v.par_windows(42));
check(v.par_split(|x| x % 3 == 0));
check(v.par_split_mut(|x| x % 3 == 0));
check(v.par_drain(..));
check(v.into_par_iter());
}
#[test]
fn debug_array() {
let a = [0i32; 10];
check(a.into_par_iter());
}
#[test]
fn debug_adaptors() {
let v: Vec<_> = (0..10).collect();
check(v.par_iter().chain(&v));
check(v.par_iter().cloned());
check(v.par_iter().copied());
check(v.par_iter().enumerate());
check(v.par_iter().filter(|_| true));
check(v.par_iter().filter_map(Some));
check(v.par_iter().flat_map(Some));
check(v.par_iter().flat_map_iter(Some));
check(v.par_iter().map(Some).flatten());
check(v.par_iter().map(Some).flatten_iter());
check(v.par_iter().fold(|| 0, |x, _| x));
check(v.par_iter().fold_with(0, |x, _| x));
check(v.par_iter().fold_chunks(3, || 0, |x, _| x));
check(v.par_iter().fold_chunks_with(3, 0, |x, _| x));
check(v.par_iter().try_fold(|| 0, |x, _| Some(x)));
check(v.par_iter().try_fold_with(0, |x, _| Some(x)));
check(v.par_iter().inspect(|_| ()));
check(v.par_iter().update(|_| ()));
check(v.par_iter().interleave(&v));
check(v.par_iter().interleave_shortest(&v));
check(v.par_iter().intersperse(&-1));
check(v.par_iter().chunks(3));
check(v.par_iter().map(|x| x));
check(v.par_iter().map_with(0, |_, x| x));
check(v.par_iter().map_init(|| 0, |_, x| x));
check(v.par_iter().panic_fuse());
check(v.par_iter().positions(|_| true));
check(v.par_iter().rev());
check(v.par_iter().skip(1));
check(v.par_iter().skip_any(1));
check(v.par_iter().skip_any_while(|_| false));
check(v.par_iter().take(1));
check(v.par_iter().take_any(1));
check(v.par_iter().take_any_while(|_| true));
check(v.par_iter().map(Some).while_some());
check(v.par_iter().with_max_len(1));
check(v.par_iter().with_min_len(1));
check(v.par_iter().zip(&v));
check(v.par_iter().zip_eq(&v));
check(v.par_iter().step_by(2));
}
#[test]
fn debug_empty() {
check(rayon::iter::empty::<i32>());
}
#[test]
fn debug_once() {
check(rayon::iter::once(10));
}
#[test]
fn debug_repeat() {
let x: Option<i32> = None;
check(rayon::iter::repeat(x));
check(rayon::iter::repeatn(x, 10));
}
#[test]
fn debug_splitter() {
check(rayon::iter::split(0..10, |x| (x, None)));
}
#[test]
fn debug_multizip() {
let v: &Vec<_> = &(0..10).collect();
check((v,).into_par_iter());
check((v, v).into_par_iter());
check((v, v, v).into_par_iter());
check((v, v, v, v).into_par_iter());
check((v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
check((v, v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
}

vendor/rayon/tests/drain_vec.rs vendored Normal file
@@ -0,0 +1,41 @@
use rayon::prelude::*;
#[test]
fn drain_vec_yielded() {
let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let yielded = vec_org.par_drain(0..5).collect::<Vec<_>>();
assert_eq!(&yielded, &[0, 1, 2, 3, 4]);
assert_eq!(&vec_org, &[5, 6, 7, 8, 9]);
}
#[test]
fn drain_vec_dropped() {
let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let yielded = vec_org.par_drain(0..5);
drop(yielded);
assert_eq!(&vec_org, &[5, 6, 7, 8, 9]);
}
#[test]
fn drain_vec_empty_range_yielded() {
let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let yielded = vec_org.par_drain(5..5).collect::<Vec<_>>();
assert_eq!(&yielded, &[]);
assert_eq!(&vec_org, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
}
#[test]
fn drain_vec_empty_range_dropped() {
let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let yielded = vec_org.par_drain(5..5);
drop(yielded);
assert_eq!(&vec_org, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
}

vendor/rayon/tests/intersperse.rs vendored Normal file
@@ -0,0 +1,60 @@
use rayon::prelude::*;
#[test]
fn check_intersperse() {
let v: Vec<_> = (0..1000).into_par_iter().intersperse(-1).collect();
assert_eq!(v.len(), 1999);
for (i, x) in v.into_iter().enumerate() {
assert_eq!(x, if i % 2 == 0 { i as i32 / 2 } else { -1 });
}
}
#[test]
fn check_intersperse_again() {
let v: Vec<_> = (0..1000)
.into_par_iter()
.intersperse(-1)
.intersperse(-2)
.collect();
assert_eq!(v.len(), 3997);
for (i, x) in v.into_iter().enumerate() {
let y = match i % 4 {
0 => i as i32 / 4,
2 => -1,
_ => -2,
};
assert_eq!(x, y);
}
}
#[test]
fn check_intersperse_unindexed() {
let v: Vec<_> = (0..1000).map(|i| i.to_string()).collect();
let s = v.join(",");
let s2 = v.join(";");
let par: String = s.par_split(',').intersperse(";").collect();
assert_eq!(par, s2);
}
#[test]
fn check_intersperse_producer() {
(0..1000)
.into_par_iter()
.intersperse(-1)
.zip_eq(0..1999)
.for_each(|(x, i)| {
assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 });
});
}
#[test]
fn check_intersperse_rev() {
(0..1000)
.into_par_iter()
.intersperse(-1)
.zip_eq(0..1999)
.rev()
.for_each(|(x, i)| {
assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 });
});
}

vendor/rayon/tests/issue671-unzip.rs vendored Normal file
@@ -0,0 +1,17 @@
#![type_length_limit = "10000"]
use rayon::prelude::*;
#[test]
fn type_length_limit() {
let input = vec![1, 2, 3, 4, 5];
let (indexes, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = input
.par_iter()
.map(|x| (x * x, x * x * x))
.enumerate()
.unzip();
drop(indexes);
drop(squares);
drop(cubes);
}

vendor/rayon/tests/issue671.rs vendored Normal file
@@ -0,0 +1,16 @@
#![type_length_limit = "500000"]
use rayon::prelude::*;
#[test]
fn type_length_limit() {
let _ = Vec::<Result<(), ()>>::new()
.into_par_iter()
.map(|x| x)
.map(|x| x)
.map(|x| x)
.map(|x| x)
.map(|x| x)
.map(|x| x)
.collect::<Result<(), ()>>();
}

vendor/rayon/tests/iter_panic.rs vendored Normal file
@@ -0,0 +1,53 @@
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
use std::ops::Range;
use std::panic::{self, UnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
const ITER: Range<i32> = 0..0x1_0000;
const PANIC: i32 = 0xC000;
fn check(&i: &i32) {
if i == PANIC {
panic!("boom")
}
}
#[test]
#[should_panic(expected = "boom")]
fn iter_panic() {
ITER.into_par_iter().for_each(|i| check(&i));
}
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn iter_panic_fuse() {
// We only use a single thread in order to make the behavior
// of 'panic_fuse' deterministic
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
pool.install(|| {
fn count(iter: impl ParallelIterator + UnwindSafe) -> usize {
let count = AtomicUsize::new(0);
let result = panic::catch_unwind(|| {
iter.for_each(|_| {
count.fetch_add(1, Ordering::Relaxed);
});
});
assert!(result.is_err());
count.into_inner()
}
// Without `panic_fuse()`, we'll reach every item except the panicking one.
let expected = ITER.len() - 1;
let iter = ITER.into_par_iter().with_max_len(1);
assert_eq!(count(iter.clone().inspect(check)), expected);
// With `panic_fuse()` anywhere in the chain, we'll reach fewer items.
assert!(count(iter.clone().inspect(check).panic_fuse()) < expected);
assert!(count(iter.clone().panic_fuse().inspect(check)) < expected);
// Try in reverse to be sure we hit the producer case.
assert!(count(iter.panic_fuse().inspect(check).rev()) < expected);
});
}

vendor/rayon/tests/named-threads.rs vendored Normal file
@@ -0,0 +1,25 @@
use std::collections::HashSet;
use rayon::prelude::*;
use rayon::*;
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn named_threads() {
ThreadPoolBuilder::new()
.thread_name(|i| format!("hello-name-test-{}", i))
.build_global()
.unwrap();
const N: usize = 10000;
let thread_names = (0..N)
.into_par_iter()
.flat_map(|_| ::std::thread::current().name().map(str::to_owned))
.collect::<HashSet<String>>();
let all_contains_name = thread_names
.iter()
.all(|name| name.starts_with("hello-name-test-"));
assert!(all_contains_name);
}

vendor/rayon/tests/octillion.rs vendored Normal file
@@ -0,0 +1,156 @@
use rayon::prelude::*;
const OCTILLION: u128 = 1_000_000_000_000_000_000_000_000_000;
/// Produce a parallel iterator for 0u128..10²⁷
fn octillion() -> rayon::range::Iter<u128> {
(0..OCTILLION).into_par_iter()
}
/// Produce a parallel iterator for 0u128..=10²⁷
fn octillion_inclusive() -> rayon::range_inclusive::Iter<u128> {
(0..=OCTILLION).into_par_iter()
}
/// Produce a parallel iterator for 0u128..10²⁷ using `flat_map`
fn octillion_flat() -> impl ParallelIterator<Item = u128> {
(0u32..1_000_000_000)
.into_par_iter()
.with_max_len(1_000)
.map(|i| u64::from(i) * 1_000_000_000)
.flat_map(|i| {
(0u32..1_000_000_000)
.into_par_iter()
.with_max_len(1_000)
.map(move |j| i + u64::from(j))
})
.map(|i| u128::from(i) * 1_000_000_000)
.flat_map(|i| {
(0u32..1_000_000_000)
.into_par_iter()
.with_max_len(1_000)
.map(move |j| i + u128::from(j))
})
}
// NOTE: `find_first` and `find_last` currently take too long on 32-bit targets,
// because the `AtomicUsize` match position has much too limited resolution.
#[test]
#[cfg_attr(not(target_pointer_width = "64"), ignore)]
fn find_first_octillion() {
let x = octillion().find_first(|_| true);
assert_eq!(x, Some(0));
}
#[test]
#[cfg_attr(not(target_pointer_width = "64"), ignore)]
fn find_first_octillion_inclusive() {
let x = octillion_inclusive().find_first(|_| true);
assert_eq!(x, Some(0));
}
#[test]
#[cfg_attr(not(target_pointer_width = "64"), ignore)]
fn find_first_octillion_flat() {
let x = octillion_flat().find_first(|_| true);
assert_eq!(x, Some(0));
}
fn two_threads<F: Send + FnOnce() -> R, R: Send>(f: F) -> R {
// FIXME: If we don't use at least two threads, then we end up walking
// through the entire iterator sequentially, without the benefit of any
// short-circuiting. We probably don't want testing to wait that long. ;)
let builder = rayon::ThreadPoolBuilder::new().num_threads(2);
let pool = builder.build().unwrap();
pool.install(f)
}
#[test]
#[cfg_attr(
any(
not(target_pointer_width = "64"),
target_os = "emscripten",
target_family = "wasm"
),
ignore
)]
fn find_last_octillion() {
// It would be nice if `find_last` could prioritize the later splits,
// basically flipping the `join` args, without needing indexed `rev`.
// (or could we have an unindexed `rev`?)
let x = two_threads(|| octillion().find_last(|_| true));
assert_eq!(x, Some(OCTILLION - 1));
}
#[test]
#[cfg_attr(
any(
not(target_pointer_width = "64"),
target_os = "emscripten",
target_family = "wasm"
),
ignore
)]
fn find_last_octillion_inclusive() {
let x = two_threads(|| octillion_inclusive().find_last(|_| true));
assert_eq!(x, Some(OCTILLION));
}
#[test]
#[cfg_attr(
any(
not(target_pointer_width = "64"),
target_os = "emscripten",
target_family = "wasm"
),
ignore
)]
fn find_last_octillion_flat() {
let x = two_threads(|| octillion_flat().find_last(|_| true));
assert_eq!(x, Some(OCTILLION - 1));
}
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn find_any_octillion() {
let x = two_threads(|| octillion().find_any(|x| *x > OCTILLION / 2));
assert!(x.is_some());
}
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn find_any_octillion_flat() {
let x = two_threads(|| octillion_flat().find_any(|x| *x > OCTILLION / 2));
assert!(x.is_some());
}
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn filter_find_any_octillion() {
let x = two_threads(|| {
octillion()
.filter(|x| *x > OCTILLION / 2)
.find_any(|_| true)
});
assert!(x.is_some());
}
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn filter_find_any_octillion_flat() {
let x = two_threads(|| {
octillion_flat()
.filter(|x| *x > OCTILLION / 2)
.find_any(|_| true)
});
assert!(x.is_some());
}
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn fold_find_any_octillion_flat() {
let x = two_threads(|| octillion_flat().fold(|| (), |_, _| ()).find_any(|_| true));
assert!(x.is_some());
}

vendor/rayon/tests/par_bridge_recursion.rs vendored Normal file
@@ -0,0 +1,31 @@
use rayon::prelude::*;
use std::iter::once_with;
const N: usize = 100_000;
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn par_bridge_recursion() {
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(10)
.build()
.unwrap();
let seq: Vec<_> = (0..N).map(|i| (i, i.to_string())).collect();
pool.broadcast(|_| {
let mut par: Vec<_> = (0..N)
.into_par_iter()
.flat_map(|i| {
once_with(move || {
// Using rayon within the serial iterator creates an opportunity for
// work-stealing to make par_bridge's mutex accidentally recursive.
rayon::join(move || i, move || i.to_string())
})
.par_bridge()
})
.collect();
par.par_sort_unstable();
assert_eq!(seq, par);
});
}

vendor/rayon/tests/producer_split_at.rs vendored Normal file
@@ -0,0 +1,394 @@
use rayon::iter::plumbing::*;
use rayon::prelude::*;
/// Stress-test indexes for `Producer::split_at`.
fn check<F, I>(expected: &[I::Item], mut f: F)
where
F: FnMut() -> I,
I: IntoParallelIterator,
I::Iter: IndexedParallelIterator,
I::Item: PartialEq + std::fmt::Debug,
{
map_triples(expected.len() + 1, |i, j, k| {
Split::forward(f(), i, j, k, expected);
Split::reverse(f(), i, j, k, expected);
});
}
fn map_triples<F>(end: usize, mut f: F)
where
F: FnMut(usize, usize, usize),
{
for i in 0..end {
for j in i..end {
for k in j..end {
f(i, j, k);
}
}
}
}
#[derive(Debug)]
struct Split {
i: usize,
j: usize,
k: usize,
reverse: bool,
}
impl Split {
fn forward<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
where
I: IntoParallelIterator,
I::Iter: IndexedParallelIterator,
I::Item: PartialEq + std::fmt::Debug,
{
let result = iter.into_par_iter().with_producer(Split {
i,
j,
k,
reverse: false,
});
assert_eq!(result, expected);
}
fn reverse<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
where
I: IntoParallelIterator,
I::Iter: IndexedParallelIterator,
I::Item: PartialEq + std::fmt::Debug,
{
let result = iter.into_par_iter().with_producer(Split {
i,
j,
k,
reverse: true,
});
assert!(result.iter().eq(expected.iter().rev()));
}
}
impl<T> ProducerCallback<T> for Split {
type Output = Vec<T>;
fn callback<P>(self, producer: P) -> Self::Output
where
P: Producer<Item = T>,
{
println!("{:?}", self);
// Splitting the outer indexes first gets us an arbitrary mid section,
// which we then split further to get full test coverage.
let (left, d) = producer.split_at(self.k);
let (a, mid) = left.split_at(self.i);
let (b, c) = mid.split_at(self.j - self.i);
let a = a.into_iter();
let b = b.into_iter();
let c = c.into_iter();
let d = d.into_iter();
check_len(&a, self.i);
check_len(&b, self.j - self.i);
check_len(&c, self.k - self.j);
let chain = a.chain(b).chain(c).chain(d);
if self.reverse {
chain.rev().collect()
} else {
chain.collect()
}
}
}
fn check_len<I: ExactSizeIterator>(iter: &I, len: usize) {
assert_eq!(iter.size_hint(), (len, Some(len)));
assert_eq!(iter.len(), len);
}
// **** Base Producers ****
#[test]
fn array() {
let a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
check(&a, || a);
}
#[test]
fn empty() {
let v = vec![42];
check(&v[..0], rayon::iter::empty);
}
#[test]
fn once() {
let v = vec![42];
check(&v, || rayon::iter::once(42));
}
#[test]
fn option() {
let v = vec![42];
check(&v, || Some(42));
}
#[test]
fn range() {
let v: Vec<_> = (0..10).collect();
check(&v, || 0..10);
}
#[test]
fn range_inclusive() {
let v: Vec<_> = (0u16..=10).collect();
check(&v, || 0u16..=10);
}
#[test]
fn repeatn() {
let v: Vec<_> = std::iter::repeat(1).take(5).collect();
check(&v, || rayon::iter::repeatn(1, 5));
}
#[test]
fn slice_iter() {
let s: Vec<_> = (0..10).collect();
let v: Vec<_> = s.iter().collect();
check(&v, || &s);
}
#[test]
fn slice_iter_mut() {
let mut s: Vec<_> = (0..10).collect();
let mut v: Vec<_> = s.clone();
let expected: Vec<_> = v.iter_mut().collect();
map_triples(expected.len() + 1, |i, j, k| {
Split::forward(s.par_iter_mut(), i, j, k, &expected);
Split::reverse(s.par_iter_mut(), i, j, k, &expected);
});
}
#[test]
fn slice_chunks() {
let s: Vec<_> = (0..10).collect();
for len in 1..s.len() + 2 {
let v: Vec<_> = s.chunks(len).collect();
check(&v, || s.par_chunks(len));
}
}
#[test]
fn slice_chunks_exact() {
let s: Vec<_> = (0..10).collect();
for len in 1..s.len() + 2 {
let v: Vec<_> = s.chunks_exact(len).collect();
check(&v, || s.par_chunks_exact(len));
}
}
#[test]
fn slice_chunks_mut() {
let mut s: Vec<_> = (0..10).collect();
let mut v: Vec<_> = s.clone();
for len in 1..s.len() + 2 {
let expected: Vec<_> = v.chunks_mut(len).collect();
map_triples(expected.len() + 1, |i, j, k| {
Split::forward(s.par_chunks_mut(len), i, j, k, &expected);
Split::reverse(s.par_chunks_mut(len), i, j, k, &expected);
});
}
}
#[test]
fn slice_chunks_exact_mut() {
let mut s: Vec<_> = (0..10).collect();
let mut v: Vec<_> = s.clone();
for len in 1..s.len() + 2 {
let expected: Vec<_> = v.chunks_exact_mut(len).collect();
map_triples(expected.len() + 1, |i, j, k| {
Split::forward(s.par_chunks_exact_mut(len), i, j, k, &expected);
Split::reverse(s.par_chunks_exact_mut(len), i, j, k, &expected);
});
}
}
#[test]
fn slice_rchunks() {
let s: Vec<_> = (0..10).collect();
for len in 1..s.len() + 2 {
let v: Vec<_> = s.rchunks(len).collect();
check(&v, || s.par_rchunks(len));
}
}
#[test]
fn slice_rchunks_exact() {
let s: Vec<_> = (0..10).collect();
for len in 1..s.len() + 2 {
let v: Vec<_> = s.rchunks_exact(len).collect();
check(&v, || s.par_rchunks_exact(len));
}
}
#[test]
fn slice_rchunks_mut() {
let mut s: Vec<_> = (0..10).collect();
let mut v: Vec<_> = s.clone();
for len in 1..s.len() + 2 {
let expected: Vec<_> = v.rchunks_mut(len).collect();
map_triples(expected.len() + 1, |i, j, k| {
Split::forward(s.par_rchunks_mut(len), i, j, k, &expected);
Split::reverse(s.par_rchunks_mut(len), i, j, k, &expected);
});
}
}
#[test]
fn slice_rchunks_exact_mut() {
let mut s: Vec<_> = (0..10).collect();
let mut v: Vec<_> = s.clone();
for len in 1..s.len() + 2 {
let expected: Vec<_> = v.rchunks_exact_mut(len).collect();
map_triples(expected.len() + 1, |i, j, k| {
Split::forward(s.par_rchunks_exact_mut(len), i, j, k, &expected);
Split::reverse(s.par_rchunks_exact_mut(len), i, j, k, &expected);
});
}
}
#[test]
fn slice_windows() {
let s: Vec<_> = (0..10).collect();
let v: Vec<_> = s.windows(2).collect();
check(&v, || s.par_windows(2));
}
#[test]
fn vec() {
let v: Vec<_> = (0..10).collect();
check(&v, || v.clone());
}
// **** Adaptors ****
#[test]
fn chain() {
let v: Vec<_> = (0..10).collect();
check(&v, || (0..5).into_par_iter().chain(5..10));
}
#[test]
fn cloned() {
let v: Vec<_> = (0..10).collect();
check(&v, || v.par_iter().cloned());
}
#[test]
fn copied() {
let v: Vec<_> = (0..10).collect();
check(&v, || v.par_iter().copied());
}
#[test]
fn enumerate() {
let v: Vec<_> = (0..10).enumerate().collect();
check(&v, || (0..10).into_par_iter().enumerate());
}
#[test]
fn step_by() {
let v: Vec<_> = (0..10).step_by(2).collect();
check(&v, || (0..10).into_par_iter().step_by(2))
}
#[test]
fn step_by_unaligned() {
let v: Vec<_> = (0..10).step_by(3).collect();
check(&v, || (0..10).into_par_iter().step_by(3))
}
#[test]
fn inspect() {
let v: Vec<_> = (0..10).collect();
check(&v, || (0..10).into_par_iter().inspect(|_| ()));
}
#[test]
fn update() {
let v: Vec<_> = (0..10).collect();
check(&v, || (0..10).into_par_iter().update(|_| ()));
}
#[test]
fn interleave() {
let v = [0, 10, 1, 11, 2, 12, 3, 4];
check(&v, || (0..5).into_par_iter().interleave(10..13));
check(&v[..6], || (0..3).into_par_iter().interleave(10..13));
let v = [0, 10, 1, 11, 2, 12, 13, 14];
check(&v, || (0..3).into_par_iter().interleave(10..15));
}
#[test]
fn intersperse() {
let v = [0, -1, 1, -1, 2, -1, 3, -1, 4];
check(&v, || (0..5).into_par_iter().intersperse(-1));
}
#[test]
fn chunks() {
let s: Vec<_> = (0..10).collect();
let v: Vec<_> = s.chunks(2).map(|c| c.to_vec()).collect();
check(&v, || s.par_iter().cloned().chunks(2));
}
#[test]
fn map() {
let v: Vec<_> = (0..10).collect();
check(&v, || v.par_iter().map(Clone::clone));
}
#[test]
fn map_with() {
let v: Vec<_> = (0..10).collect();
check(&v, || v.par_iter().map_with(vec![0], |_, &x| x));
}
#[test]
fn map_init() {
let v: Vec<_> = (0..10).collect();
check(&v, || v.par_iter().map_init(|| vec![0], |_, &x| x));
}
#[test]
fn panic_fuse() {
let v: Vec<_> = (0..10).collect();
check(&v, || (0..10).into_par_iter().panic_fuse());
}
#[test]
fn rev() {
let v: Vec<_> = (0..10).rev().collect();
check(&v, || (0..10).into_par_iter().rev());
}
#[test]
fn with_max_len() {
let v: Vec<_> = (0..10).collect();
check(&v, || (0..10).into_par_iter().with_max_len(1));
}
#[test]
fn with_min_len() {
let v: Vec<_> = (0..10).collect();
check(&v, || (0..10).into_par_iter().with_min_len(1));
}
#[test]
fn zip() {
let v: Vec<_> = (0..10).zip(10..20).collect();
check(&v, || (0..10).into_par_iter().zip(10..20));
check(&v[..5], || (0..5).into_par_iter().zip(10..20));
check(&v[..5], || (0..10).into_par_iter().zip(10..15));
}

vendor/rayon/tests/sort-panic-safe.rs vendored Normal file
@@ -0,0 +1,164 @@
use rand::distributions::Uniform;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use std::cell::Cell;
use std::cmp::{self, Ordering};
use std::panic;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::thread;
const ZERO: AtomicUsize = AtomicUsize::new(0);
const LEN: usize = 20_000;
static VERSIONS: AtomicUsize = ZERO;
static DROP_COUNTS: [AtomicUsize; LEN] = [ZERO; LEN];
#[derive(Clone, Eq)]
struct DropCounter {
x: u32,
id: usize,
version: Cell<usize>,
}
impl PartialEq for DropCounter {
fn eq(&self, other: &Self) -> bool {
self.partial_cmp(other) == Some(Ordering::Equal)
}
}
impl PartialOrd for DropCounter {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.version.set(self.version.get() + 1);
other.version.set(other.version.get() + 1);
VERSIONS.fetch_add(2, Relaxed);
self.x.partial_cmp(&other.x)
}
}
impl Ord for DropCounter {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Drop for DropCounter {
fn drop(&mut self) {
DROP_COUNTS[self.id].fetch_add(1, Relaxed);
VERSIONS.fetch_sub(self.version.get(), Relaxed);
}
}
macro_rules! test {
($input:ident, $func:ident) => {
let len = $input.len();
// Work out the total number of comparisons required to sort
// this array...
let count = AtomicUsize::new(0);
$input.to_owned().$func(|a, b| {
count.fetch_add(1, Relaxed);
a.cmp(b)
});
let mut panic_countdown = count.load(Relaxed);
let step = if len <= 100 {
1
} else {
cmp::max(1, panic_countdown / 10)
};
// ... and then panic after each `step` comparisons.
loop {
// Refresh the counters.
VERSIONS.store(0, Relaxed);
for i in 0..len {
DROP_COUNTS[i].store(0, Relaxed);
}
let v = $input.to_owned();
let _ = thread::spawn(move || {
let mut v = v;
let panic_countdown = AtomicUsize::new(panic_countdown);
v.$func(|a, b| {
if panic_countdown.fetch_sub(1, Relaxed) == 1 {
SILENCE_PANIC.with(|s| s.set(true));
panic!();
}
a.cmp(b)
})
})
.join();
// Check that the number of things dropped is exactly
// what we expect (i.e. the contents of `v`).
for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
let count = c.load(Relaxed);
assert!(
count == 1,
"found drop count == {} for i == {}, len == {}",
count,
i,
len
);
}
// Check that the most recent versions of values were dropped.
assert_eq!(VERSIONS.load(Relaxed), 0);
if panic_countdown < step {
break;
}
panic_countdown -= step;
}
};
}
thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn sort_panic_safe() {
let prev = panic::take_hook();
panic::set_hook(Box::new(move |info| {
if !SILENCE_PANIC.with(Cell::get) {
prev(info);
}
}));
for &len in &[1, 2, 3, 4, 5, 10, 20, 100, 500, 5_000, 20_000] {
let len_dist = Uniform::new(0, len);
for &modulus in &[5, 30, 1_000, 20_000] {
for &has_runs in &[false, true] {
let mut rng = thread_rng();
let mut input = (0..len)
.map(|id| DropCounter {
x: rng.gen_range(0..modulus),
id,
version: Cell::new(0),
})
.collect::<Vec<_>>();
if has_runs {
for c in &mut input {
c.x = c.id as u32;
}
for _ in 0..5 {
let a = rng.sample(&len_dist);
let b = rng.sample(&len_dist);
if a < b {
input[a..b].reverse();
} else {
input.swap(a, b);
}
}
}
test!(input, par_sort_by);
test!(input, par_sort_unstable_by);
}
}
}
}

vendor/rayon/tests/str.rs vendored Normal file
@@ -0,0 +1,134 @@
use rand::distributions::Standard;
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use rayon::prelude::*;
fn seeded_rng() -> XorShiftRng {
let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
(0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
XorShiftRng::from_seed(seed)
}
#[test]
pub fn execute_strings() {
let rng = seeded_rng();
let s: String = rng.sample_iter::<char, _>(&Standard).take(1024).collect();
let par_chars: String = s.par_chars().collect();
assert_eq!(s, par_chars);
let par_even: String = s.par_chars().filter(|&c| (c as u32) & 1 == 0).collect();
let ser_even: String = s.chars().filter(|&c| (c as u32) & 1 == 0).collect();
assert_eq!(par_even, ser_even);
// test `FromParallelIterator<&char> for String`
let vchars: Vec<char> = s.par_chars().collect();
let par_chars: String = vchars.par_iter().collect();
assert_eq!(s, par_chars);
let par_bytes: Vec<u8> = s.par_bytes().collect();
assert_eq!(s.as_bytes(), &*par_bytes);
let par_utf16: Vec<u16> = s.par_encode_utf16().collect();
let ser_utf16: Vec<u16> = s.encode_utf16().collect();
assert_eq!(par_utf16, ser_utf16);
let par_charind: Vec<_> = s.par_char_indices().collect();
let ser_charind: Vec<_> = s.char_indices().collect();
assert_eq!(par_charind, ser_charind);
}
#[test]
pub fn execute_strings_split() {
// char testcases from examples in `str::split` etc.,
// plus a large self-test for good measure.
let tests = vec![
("Mary had a little lamb", ' '),
("", 'X'),
("lionXXtigerXleopard", 'X'),
("||||a||b|c", '|'),
("(///)", '/'),
("010", '0'),
(" a b c", ' '),
("A.B.", '.'),
("A..B..", '.'),
("foo\r\nbar\n\nbaz\n", '\n'),
("foo\nbar\n\r\nbaz", '\n'),
("A few words", ' '),
(" Mary had\ta\u{2009}little \n\t lamb", ' '),
(include_str!("str.rs"), ' '),
];
for &(string, separator) in &tests {
let serial: Vec<_> = string.split(separator).collect();
let parallel: Vec<_> = string.par_split(separator).collect();
assert_eq!(serial, parallel);
let pattern: &[char] = &['\u{0}', separator, '\u{1F980}'];
let serial: Vec<_> = string.split(pattern).collect();
let parallel: Vec<_> = string.par_split(pattern).collect();
assert_eq!(serial, parallel);
let serial_fn: Vec<_> = string.split(|c| c == separator).collect();
let parallel_fn: Vec<_> = string.par_split(|c| c == separator).collect();
assert_eq!(serial_fn, parallel_fn);
}
for &(string, separator) in &tests {
let serial: Vec<_> = string.split_terminator(separator).collect();
let parallel: Vec<_> = string.par_split_terminator(separator).collect();
assert_eq!(serial, parallel);
let pattern: &[char] = &['\u{0}', separator, '\u{1F980}'];
let serial: Vec<_> = string.split_terminator(pattern).collect();
let parallel: Vec<_> = string.par_split_terminator(pattern).collect();
assert_eq!(serial, parallel);
let serial: Vec<_> = string.split_terminator(|c| c == separator).collect();
let parallel: Vec<_> = string.par_split_terminator(|c| c == separator).collect();
assert_eq!(serial, parallel);
}
for &(string, _) in &tests {
let serial: Vec<_> = string.lines().collect();
let parallel: Vec<_> = string.par_lines().collect();
assert_eq!(serial, parallel);
}
for &(string, _) in &tests {
let serial: Vec<_> = string.split_whitespace().collect();
let parallel: Vec<_> = string.par_split_whitespace().collect();
assert_eq!(serial, parallel);
}
// try matching separators too!
for &(string, separator) in &tests {
let serial: Vec<_> = string.matches(separator).collect();
let parallel: Vec<_> = string.par_matches(separator).collect();
assert_eq!(serial, parallel);
let pattern: &[char] = &['\u{0}', separator, '\u{1F980}'];
let serial: Vec<_> = string.matches(pattern).collect();
let parallel: Vec<_> = string.par_matches(pattern).collect();
assert_eq!(serial, parallel);
let serial_fn: Vec<_> = string.matches(|c| c == separator).collect();
let parallel_fn: Vec<_> = string.par_matches(|c| c == separator).collect();
assert_eq!(serial_fn, parallel_fn);
}
for &(string, separator) in &tests {
let serial: Vec<_> = string.match_indices(separator).collect();
let parallel: Vec<_> = string.par_match_indices(separator).collect();
assert_eq!(serial, parallel);
let pattern: &[char] = &['\u{0}', separator, '\u{1F980}'];
let serial: Vec<_> = string.match_indices(pattern).collect();
let parallel: Vec<_> = string.par_match_indices(pattern).collect();
assert_eq!(serial, parallel);
let serial_fn: Vec<_> = string.match_indices(|c| c == separator).collect();
let parallel_fn: Vec<_> = string.par_match_indices(|c| c == separator).collect();
assert_eq!(serial_fn, parallel_fn);
}
}