Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
Date: 2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

vendor/crossbeam-utils/tests/atomic_cell.rs vendored Normal file

@@ -0,0 +1,374 @@
use std::mem;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use crossbeam_utils::atomic::AtomicCell;
#[test]
fn is_lock_free() {
struct UsizeWrap(usize);
struct U8Wrap(bool);
struct I16Wrap(i16);
#[repr(align(8))]
struct U64Align8(u64);
assert!(AtomicCell::<usize>::is_lock_free());
assert!(AtomicCell::<isize>::is_lock_free());
assert!(AtomicCell::<UsizeWrap>::is_lock_free());
assert!(AtomicCell::<()>::is_lock_free());
assert!(AtomicCell::<u8>::is_lock_free());
assert!(AtomicCell::<i8>::is_lock_free());
assert!(AtomicCell::<bool>::is_lock_free());
assert!(AtomicCell::<U8Wrap>::is_lock_free());
assert!(AtomicCell::<u16>::is_lock_free());
assert!(AtomicCell::<i16>::is_lock_free());
assert!(AtomicCell::<I16Wrap>::is_lock_free());
assert!(AtomicCell::<u32>::is_lock_free());
assert!(AtomicCell::<i32>::is_lock_free());
// Sizes of both types must be equal, and the alignment of `u64` must be greater than or equal
// to that of `AtomicU64`. On i686-unknown-linux-gnu, the alignment of `u64` is `4` while the
// alignment of `AtomicU64` is `8`, so `AtomicCell<u64>` is not lock-free there.
assert_eq!(
AtomicCell::<u64>::is_lock_free(),
cfg!(target_has_atomic = "64") && std::mem::align_of::<u64>() == 8
);
assert_eq!(mem::size_of::<U64Align8>(), 8);
assert_eq!(mem::align_of::<U64Align8>(), 8);
assert_eq!(
AtomicCell::<U64Align8>::is_lock_free(),
cfg!(target_has_atomic = "64")
);
// AtomicU128 is unstable
assert!(!AtomicCell::<u128>::is_lock_free());
}
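// A minimal illustrative sketch (assumptions: behavior as documented for crossbeam's
// `AtomicCell`): small `Copy` types map onto native atomics, while a type with no matching
// atomic width still supports the same API, just through an internal fallback lock.
#[test]
fn lock_free_sketch() {
    let small = AtomicCell::new(7u32);
    let large = AtomicCell::new([0u8; 64]);
    assert!(AtomicCell::<u32>::is_lock_free());
    assert!(!AtomicCell::<[u8; 64]>::is_lock_free());
    small.store(8); // lock-free store
    large.store([1u8; 64]); // same API, but takes the fallback path
    assert_eq!(small.load(), 8);
}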
#[test]
fn const_is_lock_free() {
const _U: bool = AtomicCell::<usize>::is_lock_free();
const _I: bool = AtomicCell::<isize>::is_lock_free();
}
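// The `drops_*` tests below share one pattern: `Foo::new` increments CNT and `Drop`
// decrements it, so CNT tracks the number of live instances. The asserts check that
// `store` drops the value it replaces, that `swap` hands the old value back to the
// caller, and that dropping the cell itself drops the final value, returning CNT to zero.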
#[test]
fn drops_unit() {
static CNT: AtomicUsize = AtomicUsize::new(0);
CNT.store(0, SeqCst);
#[derive(Debug, PartialEq, Eq)]
struct Foo();
impl Foo {
fn new() -> Foo {
CNT.fetch_add(1, SeqCst);
Foo()
}
}
impl Drop for Foo {
fn drop(&mut self) {
CNT.fetch_sub(1, SeqCst);
}
}
impl Default for Foo {
fn default() -> Foo {
Foo::new()
}
}
let a = AtomicCell::new(Foo::new());
assert_eq!(a.swap(Foo::new()), Foo::new());
assert_eq!(CNT.load(SeqCst), 1);
a.store(Foo::new());
assert_eq!(CNT.load(SeqCst), 1);
assert_eq!(a.swap(Foo::default()), Foo::new());
assert_eq!(CNT.load(SeqCst), 1);
drop(a);
assert_eq!(CNT.load(SeqCst), 0);
}
#[test]
fn drops_u8() {
static CNT: AtomicUsize = AtomicUsize::new(0);
CNT.store(0, SeqCst);
#[derive(Debug, PartialEq, Eq)]
struct Foo(u8);
impl Foo {
fn new(val: u8) -> Foo {
CNT.fetch_add(1, SeqCst);
Foo(val)
}
}
impl Drop for Foo {
fn drop(&mut self) {
CNT.fetch_sub(1, SeqCst);
}
}
impl Default for Foo {
fn default() -> Foo {
Foo::new(0)
}
}
let a = AtomicCell::new(Foo::new(5));
assert_eq!(a.swap(Foo::new(6)), Foo::new(5));
assert_eq!(a.swap(Foo::new(1)), Foo::new(6));
assert_eq!(CNT.load(SeqCst), 1);
a.store(Foo::new(2));
assert_eq!(CNT.load(SeqCst), 1);
assert_eq!(a.swap(Foo::default()), Foo::new(2));
assert_eq!(CNT.load(SeqCst), 1);
assert_eq!(a.swap(Foo::default()), Foo::new(0));
assert_eq!(CNT.load(SeqCst), 1);
drop(a);
assert_eq!(CNT.load(SeqCst), 0);
}
#[test]
fn drops_usize() {
static CNT: AtomicUsize = AtomicUsize::new(0);
CNT.store(0, SeqCst);
#[derive(Debug, PartialEq, Eq)]
struct Foo(usize);
impl Foo {
fn new(val: usize) -> Foo {
CNT.fetch_add(1, SeqCst);
Foo(val)
}
}
impl Drop for Foo {
fn drop(&mut self) {
CNT.fetch_sub(1, SeqCst);
}
}
impl Default for Foo {
fn default() -> Foo {
Foo::new(0)
}
}
let a = AtomicCell::new(Foo::new(5));
assert_eq!(a.swap(Foo::new(6)), Foo::new(5));
assert_eq!(a.swap(Foo::new(1)), Foo::new(6));
assert_eq!(CNT.load(SeqCst), 1);
a.store(Foo::new(2));
assert_eq!(CNT.load(SeqCst), 1);
assert_eq!(a.swap(Foo::default()), Foo::new(2));
assert_eq!(CNT.load(SeqCst), 1);
assert_eq!(a.swap(Foo::default()), Foo::new(0));
assert_eq!(CNT.load(SeqCst), 1);
drop(a);
assert_eq!(CNT.load(SeqCst), 0);
}
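// The `modular_*` tests below give `Foo` a `PartialEq` that compares modulo 5, so values
// that differ bitwise (e.g. `Foo(5)` and `Foo(10)`) still compare equal. `compare_exchange`
// succeeds whenever this user-visible equality holds, which is why exchanging `Foo(10)`
// for `Foo(15)` works while the cell actually holds `Foo(5)`; the asserts themselves also
// compare through the same modular equality, so `Ok(Foo(100))` matches `Ok(Foo(0))`.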
#[test]
fn modular_u8() {
#[derive(Clone, Copy, Eq, Debug, Default)]
struct Foo(u8);
impl PartialEq for Foo {
fn eq(&self, other: &Foo) -> bool {
self.0 % 5 == other.0 % 5
}
}
let a = AtomicCell::new(Foo(1));
assert_eq!(a.load(), Foo(1));
assert_eq!(a.swap(Foo(2)), Foo(11));
assert_eq!(a.load(), Foo(52));
a.store(Foo(0));
assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100)));
assert_eq!(a.load().0, 5);
assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100)));
assert_eq!(a.load().0, 15);
}
#[test]
fn modular_usize() {
#[derive(Clone, Copy, Eq, Debug, Default)]
struct Foo(usize);
impl PartialEq for Foo {
fn eq(&self, other: &Foo) -> bool {
self.0 % 5 == other.0 % 5
}
}
let a = AtomicCell::new(Foo(1));
assert_eq!(a.load(), Foo(1));
assert_eq!(a.swap(Foo(2)), Foo(11));
assert_eq!(a.load(), Foo(52));
a.store(Foo(0));
assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100)));
assert_eq!(a.load().0, 5);
assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100)));
assert_eq!(a.load().0, 15);
}
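// `Object` is 12 bytes of fields padded to 16, so four bytes of each value are
// uninitialized padding. The test below seeds the stack with 0xfe bytes to make it
// likely that `prev` and `next` carry different garbage in their padding;
// `compare_exchange` must still treat the two values as equal.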
#[test]
fn garbage_padding() {
#[derive(Copy, Clone, Eq, PartialEq)]
struct Object {
a: i64,
b: i32,
}
let cell = AtomicCell::new(Object { a: 0, b: 0 });
let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; // Needed to leave non-zero garbage bytes on the stack
let next = Object { a: 0, b: 0 };
let prev = cell.load();
assert!(cell.compare_exchange(prev, next).is_ok());
println!();
}
#[test]
fn const_atomic_cell_new() {
static CELL: AtomicCell<usize> = AtomicCell::new(0);
CELL.store(1);
assert_eq!(CELL.load(), 1);
}
// https://github.com/crossbeam-rs/crossbeam/pull/767
macro_rules! test_arithmetic {
($test_name:ident, $ty:ident) => {
#[test]
fn $test_name() {
let a: AtomicCell<$ty> = AtomicCell::new(7);
assert_eq!(a.fetch_add(3), 7);
assert_eq!(a.load(), 10);
assert_eq!(a.fetch_sub(3), 10);
assert_eq!(a.load(), 7);
assert_eq!(a.fetch_and(3), 7);
assert_eq!(a.load(), 3);
assert_eq!(a.fetch_or(16), 3);
assert_eq!(a.load(), 19);
assert_eq!(a.fetch_xor(2), 19);
assert_eq!(a.load(), 17);
assert_eq!(a.fetch_max(18), 17);
assert_eq!(a.load(), 18);
assert_eq!(a.fetch_min(17), 18);
assert_eq!(a.load(), 17);
assert_eq!(a.fetch_nand(7), 17);
assert_eq!(a.load(), !(17 & 7));
}
};
}
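// One concrete trace, for `arithmetic_u8`: 7, +3 -> 10, -3 -> 7, &3 -> 3, |16 -> 19,
// ^2 -> 17, max(18) -> 18, min(17) -> 17, and finally nand(7) -> !(17 & 7) = !1, which
// is 254 for u8 (and -2 for i8); the `!(17 & 7)` in the assertion evaluates per type.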
test_arithmetic!(arithmetic_u8, u8);
test_arithmetic!(arithmetic_i8, i8);
test_arithmetic!(arithmetic_u16, u16);
test_arithmetic!(arithmetic_i16, i16);
test_arithmetic!(arithmetic_u32, u32);
test_arithmetic!(arithmetic_i32, i32);
test_arithmetic!(arithmetic_u64, u64);
test_arithmetic!(arithmetic_i64, i64);
test_arithmetic!(arithmetic_u128, u128);
test_arithmetic!(arithmetic_i128, i128);
// https://github.com/crossbeam-rs/crossbeam/issues/748
#[cfg_attr(miri, ignore)] // TODO
#[test]
fn issue_748() {
#[allow(dead_code)]
#[repr(align(8))]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Test {
Field(u32),
FieldLess,
}
assert_eq!(mem::size_of::<Test>(), 8);
assert_eq!(
AtomicCell::<Test>::is_lock_free(),
cfg!(target_has_atomic = "64")
);
let x = AtomicCell::new(Test::FieldLess);
assert_eq!(x.load(), Test::FieldLess);
}
// https://github.com/crossbeam-rs/crossbeam/issues/833
#[test]
fn issue_833() {
use std::num::NonZeroU128;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
#[cfg(miri)]
const N: usize = 10_000;
#[cfg(not(miri))]
const N: usize = 1_000_000;
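// The niche optimization stores `Enum`'s discriminant inside `NonZeroU128`'s zero
// niche, so matching on STATIC reads the cell's bytes. The loop below hammers that
// read while another thread stores values whose high and low halves differ, to catch
// a torn (partially written) state that would misread the discriminant.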
#[allow(dead_code)]
enum Enum {
NeverConstructed,
Cell(AtomicCell<NonZeroU128>),
}
static STATIC: Enum = Enum::Cell(AtomicCell::new(match NonZeroU128::new(1) {
Some(nonzero) => nonzero,
None => unreachable!(),
}));
static FINISHED: AtomicBool = AtomicBool::new(false);
let handle = thread::spawn(|| {
let cell = match &STATIC {
Enum::NeverConstructed => unreachable!(),
Enum::Cell(cell) => cell,
};
let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap();
let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap();
while !FINISHED.load(Ordering::Relaxed) {
cell.store(x);
cell.store(y);
}
});
for _ in 0..N {
if let Enum::NeverConstructed = STATIC {
unreachable!(":(");
}
}
FINISHED.store(true, Ordering::Relaxed);
handle.join().unwrap();
}

vendor/crossbeam-utils/tests/cache_padded.rs vendored Normal file

@@ -0,0 +1,113 @@
use std::cell::Cell;
use std::mem;
use crossbeam_utils::CachePadded;
#[test]
fn default() {
let x: CachePadded<u64> = Default::default();
assert_eq!(*x, 0);
}
#[test]
fn store_u64() {
let x: CachePadded<u64> = CachePadded::new(17);
assert_eq!(*x, 17);
}
#[test]
fn store_pair() {
let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37));
assert_eq!(x.0, 17);
assert_eq!(x.1, 37);
}
#[test]
fn distance() {
let arr = [CachePadded::new(17u8), CachePadded::new(37u8)];
let a = &*arr[0] as *const u8;
let b = &*arr[1] as *const u8;
let align = mem::align_of::<CachePadded<()>>();
assert!(align >= 32);
assert_eq!(unsafe { a.add(align) }, b);
}
#[test]
fn different_sizes() {
CachePadded::new(17u8);
CachePadded::new(17u16);
CachePadded::new(17u32);
CachePadded::new([17u64; 0]);
CachePadded::new([17u64; 1]);
CachePadded::new([17u64; 2]);
CachePadded::new([17u64; 3]);
CachePadded::new([17u64; 4]);
CachePadded::new([17u64; 5]);
CachePadded::new([17u64; 6]);
CachePadded::new([17u64; 7]);
CachePadded::new([17u64; 8]);
}
#[test]
fn large() {
let a = [17u64; 9];
let b = CachePadded::new(a);
assert!(mem::size_of_val(&a) <= mem::size_of_val(&b));
}
#[test]
fn debug() {
assert_eq!(
format!("{:?}", CachePadded::new(17u64)),
"CachePadded { value: 17 }"
);
}
#[test]
fn drops() {
let count = Cell::new(0);
struct Foo<'a>(&'a Cell<usize>);
impl<'a> Drop for Foo<'a> {
fn drop(&mut self) {
self.0.set(self.0.get() + 1);
}
}
let a = CachePadded::new(Foo(&count));
let b = CachePadded::new(Foo(&count));
assert_eq!(count.get(), 0);
drop(a);
assert_eq!(count.get(), 1);
drop(b);
assert_eq!(count.get(), 2);
}
#[allow(clippy::clone_on_copy)] // This is intentional.
#[test]
fn clone() {
let a = CachePadded::new(17);
let b = a.clone();
assert_eq!(*a, *b);
}
#[test]
fn runs_custom_clone() {
let count = Cell::new(0);
struct Foo<'a>(&'a Cell<usize>);
impl<'a> Clone for Foo<'a> {
fn clone(&self) -> Foo<'a> {
self.0.set(self.0.get() + 1);
Foo::<'a>(self.0)
}
}
let a = CachePadded::new(Foo(&count));
let _ = a.clone();
assert_eq!(count.get(), 1);
}
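// A minimal illustrative sketch of the intended use case: padding per-thread counters
// to cache-line size so that concurrent updates do not false-share a cache line.
#[test]
fn no_false_sharing_sketch() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    let counters: Vec<CachePadded<AtomicUsize>> = (0..4)
        .map(|_| CachePadded::new(AtomicUsize::new(0)))
        .collect();
    // Each counter occupies its own cache line; `CachePadded` derefs to the
    // inner value, so the atomics are used as usual.
    counters[0].fetch_add(1, Ordering::Relaxed);
    assert_eq!(counters[0].load(Ordering::Relaxed), 1);
}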

vendor/crossbeam-utils/tests/parker.rs vendored Normal file

@@ -0,0 +1,41 @@
use std::thread::sleep;
use std::time::Duration;
use std::u32;
use crossbeam_utils::sync::Parker;
use crossbeam_utils::thread;
#[test]
fn park_timeout_unpark_before() {
let p = Parker::new();
for _ in 0..10 {
p.unparker().unpark();
p.park_timeout(Duration::from_millis(u32::MAX as u64));
}
}
#[test]
fn park_timeout_unpark_not_called() {
let p = Parker::new();
for _ in 0..10 {
p.park_timeout(Duration::from_millis(10))
}
}
#[test]
fn park_timeout_unpark_called_other_thread() {
for _ in 0..10 {
let p = Parker::new();
let u = p.unparker().clone();
thread::scope(|scope| {
scope.spawn(move |_| {
sleep(Duration::from_millis(50));
u.unpark();
});
p.park_timeout(Duration::from_millis(u32::MAX as u64))
})
.unwrap();
}
}
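// A minimal illustrative sketch of the basic handoff: `unpark` deposits a token and
// `park` consumes it, so the two calls may race without losing the wakeup.
#[test]
fn park_unpark_handoff() {
    let p = Parker::new();
    let u = p.unparker().clone();
    thread::scope(|scope| {
        scope.spawn(move |_| u.unpark());
        p.park(); // blocks until the token from `unpark` is available
    })
    .unwrap();
}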

vendor/crossbeam-utils/tests/sharded_lock.rs vendored Normal file

@@ -0,0 +1,252 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, TryLockError};
use std::thread;
use crossbeam_utils::sync::ShardedLock;
use rand::Rng;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = ShardedLock::new(());
drop(l.read().unwrap());
drop(l.write().unwrap());
drop((l.read().unwrap(), l.read().unwrap()));
drop(l.write().unwrap());
}
#[test]
fn frob() {
const N: u32 = 10;
#[cfg(miri)]
const M: usize = 50;
#[cfg(not(miri))]
const M: usize = 1000;
let r = Arc::new(ShardedLock::new(()));
let (tx, rx) = channel::<()>();
for _ in 0..N {
let tx = tx.clone();
let r = r.clone();
thread::spawn(move || {
let mut rng = rand::thread_rng();
for _ in 0..M {
if rng.gen_bool(1.0 / (N as f64)) {
drop(r.write().unwrap());
} else {
drop(r.read().unwrap());
}
}
drop(tx);
});
}
drop(tx);
let _ = rx.recv();
}
#[test]
fn arc_poison_wr() {
let arc = Arc::new(ShardedLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write().unwrap();
panic!();
})
.join();
assert!(arc.read().is_err());
}
#[test]
fn arc_poison_ww() {
let arc = Arc::new(ShardedLock::new(1));
assert!(!arc.is_poisoned());
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write().unwrap();
panic!();
})
.join();
assert!(arc.write().is_err());
assert!(arc.is_poisoned());
}
#[test]
fn arc_no_poison_rr() {
let arc = Arc::new(ShardedLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read().unwrap();
panic!();
})
.join();
let lock = arc.read().unwrap();
assert_eq!(*lock, 1);
}
#[test]
fn arc_no_poison_sl() {
let arc = Arc::new(ShardedLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read().unwrap();
panic!()
})
.join();
let lock = arc.write().unwrap();
assert_eq!(*lock, 1);
}
#[test]
fn arc() {
let arc = Arc::new(ShardedLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move || {
let mut lock = arc2.write().unwrap();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
// Readers try to catch the writer in the act
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move || {
let lock = arc3.read().unwrap();
assert!(*lock >= 0);
}));
}
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read().unwrap();
assert_eq!(*lock, 10);
}
#[test]
fn arc_access_in_unwind() {
let arc = Arc::new(ShardedLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<ShardedLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write().unwrap();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.read().unwrap();
assert_eq!(*lock, 2);
}
#[test]
fn unsized_type() {
let sl: &ShardedLock<[i32]> = &ShardedLock::new([1, 2, 3]);
{
let b = &mut *sl.write().unwrap();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*sl.read().unwrap(), comp);
}
#[test]
fn try_write() {
let lock = ShardedLock::new(0isize);
let read_guard = lock.read().unwrap();
let write_result = lock.try_write();
match write_result {
Err(TryLockError::WouldBlock) => (),
Ok(_) => panic!("try_write should not succeed while read_guard is in scope"),
Err(_) => panic!("unexpected error"),
}
drop(read_guard);
}
#[test]
fn test_into_inner() {
let m = ShardedLock::new(NonCopy(10));
assert_eq!(m.into_inner().unwrap(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = ShardedLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner().unwrap();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_into_inner_poison() {
let m = Arc::new(ShardedLock::new(NonCopy(10)));
let m2 = m.clone();
let _ = thread::spawn(move || {
let _lock = m2.write().unwrap();
panic!("test panic in inner thread to poison ShardedLock");
})
.join();
assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().into_inner() {
Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
Ok(x) => panic!("into_inner of poisoned ShardedLock is Ok: {:?}", x),
}
}
#[test]
fn test_get_mut() {
let mut m = ShardedLock::new(NonCopy(10));
*m.get_mut().unwrap() = NonCopy(20);
assert_eq!(m.into_inner().unwrap(), NonCopy(20));
}
#[test]
fn test_get_mut_poison() {
let m = Arc::new(ShardedLock::new(NonCopy(10)));
let m2 = m.clone();
let _ = thread::spawn(move || {
let _lock = m2.write().unwrap();
panic!("test panic in inner thread to poison ShardedLock");
})
.join();
assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().get_mut() {
Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
Ok(x) => panic!("get_mut of poisoned ShardedLock is Ok: {:?}", x),
}
}
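// A minimal illustrative sketch of the API shape: `ShardedLock` mirrors
// `std::sync::RwLock` (read/write guards, poisoning), trading slower writes for
// cheaper, more scalable reads.
#[test]
fn rwlock_like_usage() {
    let lock = ShardedLock::new(vec![1, 2, 3]);
    {
        let r1 = lock.read().unwrap();
        let r2 = lock.read().unwrap(); // multiple readers may coexist
        assert_eq!(*r1, *r2);
    }
    lock.write().unwrap().push(4); // a writer gets exclusive access
    assert_eq!(lock.read().unwrap().len(), 4);
}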

vendor/crossbeam-utils/tests/thread.rs vendored Normal file

@@ -0,0 +1,215 @@
use std::any::Any;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::sleep;
use std::time::Duration;
use crossbeam_utils::thread;
const THREADS: usize = 10;
const SMALL_STACK_SIZE: usize = 20;
#[test]
fn join() {
let counter = AtomicUsize::new(0);
thread::scope(|scope| {
let handle = scope.spawn(|_| {
counter.store(1, Ordering::Relaxed);
});
assert!(handle.join().is_ok());
let panic_handle = scope.spawn(|_| {
panic!("\"My honey is running out!\", said Pooh.");
});
assert!(panic_handle.join().is_err());
})
.unwrap();
// There should be sufficient synchronization.
assert_eq!(1, counter.load(Ordering::Relaxed));
}
#[test]
fn counter() {
let counter = AtomicUsize::new(0);
thread::scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
counter.fetch_add(1, Ordering::Relaxed);
});
}
})
.unwrap();
assert_eq!(THREADS, counter.load(Ordering::Relaxed));
}
#[test]
fn counter_builder() {
let counter = AtomicUsize::new(0);
thread::scope(|scope| {
for i in 0..THREADS {
scope
.builder()
.name(format!("child-{}", i))
.stack_size(SMALL_STACK_SIZE)
.spawn(|_| {
counter.fetch_add(1, Ordering::Relaxed);
})
.unwrap();
}
})
.unwrap();
assert_eq!(THREADS, counter.load(Ordering::Relaxed));
}
#[test]
fn counter_panic() {
let counter = AtomicUsize::new(0);
let result = thread::scope(|scope| {
scope.spawn(|_| {
panic!("\"My honey is running out!\", said Pooh.");
});
sleep(Duration::from_millis(100));
for _ in 0..THREADS {
scope.spawn(|_| {
counter.fetch_add(1, Ordering::Relaxed);
});
}
});
assert_eq!(THREADS, counter.load(Ordering::Relaxed));
assert!(result.is_err());
}
#[test]
fn panic_twice() {
let result = thread::scope(|scope| {
scope.spawn(|_| {
sleep(Duration::from_millis(500));
panic!("thread #1");
});
scope.spawn(|_| {
panic!("thread #2");
});
});
let err = result.unwrap_err();
let vec = err
.downcast_ref::<Vec<Box<dyn Any + Send + 'static>>>()
.unwrap();
assert_eq!(2, vec.len());
let first = vec[0].downcast_ref::<&str>().unwrap();
let second = vec[1].downcast_ref::<&str>().unwrap();
assert_eq!("thread #1", *first);
assert_eq!("thread #2", *second)
}
#[test]
fn panic_many() {
let result = thread::scope(|scope| {
scope.spawn(|_| panic!("deliberate panic #1"));
scope.spawn(|_| panic!("deliberate panic #2"));
scope.spawn(|_| panic!("deliberate panic #3"));
});
let err = result.unwrap_err();
let vec = err
.downcast_ref::<Vec<Box<dyn Any + Send + 'static>>>()
.unwrap();
assert_eq!(3, vec.len());
for panic in vec.iter() {
let panic = panic.downcast_ref::<&str>().unwrap();
assert!(
*panic == "deliberate panic #1"
|| *panic == "deliberate panic #2"
|| *panic == "deliberate panic #3"
);
}
}
#[test]
fn nesting() {
let var = "foo".to_string();
struct Wrapper<'a> {
var: &'a String,
}
impl<'a> Wrapper<'a> {
fn recurse(&'a self, scope: &thread::Scope<'a>, depth: usize) {
assert_eq!(self.var, "foo");
if depth > 0 {
scope.spawn(move |scope| {
self.recurse(scope, depth - 1);
});
}
}
}
let wrapper = Wrapper { var: &var };
thread::scope(|scope| {
scope.spawn(|scope| {
scope.spawn(|scope| {
wrapper.recurse(scope, 5);
});
});
})
.unwrap();
}
#[test]
fn join_nested() {
thread::scope(|scope| {
scope.spawn(|scope| {
let handle = scope.spawn(|_| 7);
sleep(Duration::from_millis(200));
handle.join().unwrap();
});
sleep(Duration::from_millis(100));
})
.unwrap();
}
#[test]
fn scope_returns_ok() {
let result = thread::scope(|scope| scope.spawn(|_| 1234).join().unwrap()).unwrap();
assert_eq!(result, 1234);
}
#[cfg(unix)]
#[test]
fn as_pthread_t() {
use std::os::unix::thread::JoinHandleExt;
thread::scope(|scope| {
let handle = scope.spawn(|_scope| {
sleep(Duration::from_millis(100));
42
});
let _pthread_t = handle.as_pthread_t();
handle.join().unwrap();
})
.unwrap();
}
#[cfg(windows)]
#[test]
fn as_raw_handle() {
use std::os::windows::io::AsRawHandle;
thread::scope(|scope| {
let handle = scope.spawn(|_scope| {
sleep(Duration::from_millis(100));
42
});
let _raw_handle = handle.as_raw_handle();
handle.join().unwrap();
})
.unwrap();
}
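// A minimal illustrative sketch of why scoped threads exist: the closures may borrow
// stack data, because `scope` joins every child before returning, so no `'static`
// bound is needed on the spawned closures.
#[test]
fn borrows_stack_data() {
    let data = vec![1, 2, 3];
    thread::scope(|scope| {
        scope.spawn(|_| assert_eq!(data.len(), 3));
        scope.spawn(|_| assert_eq!(data[0], 1));
    })
    .unwrap(); // Err would mean one of the children panicked
}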

vendor/crossbeam-utils/tests/wait_group.rs vendored Normal file

@@ -0,0 +1,67 @@
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use crossbeam_utils::sync::WaitGroup;
const THREADS: usize = 10;
#[test]
fn wait() {
let wg = WaitGroup::new();
let (tx, rx) = mpsc::channel();
for _ in 0..THREADS {
let wg = wg.clone();
let tx = tx.clone();
thread::spawn(move || {
wg.wait();
tx.send(()).unwrap();
});
}
thread::sleep(Duration::from_millis(100));
// At this point, all spawned threads should be blocked, so we shouldn't get anything from the
// channel.
assert!(rx.try_recv().is_err());
wg.wait();
// Now, the wait group is cleared and we should receive messages.
for _ in 0..THREADS {
rx.recv().unwrap();
}
}
#[test]
fn wait_and_drop() {
let wg = WaitGroup::new();
let wg2 = WaitGroup::new();
let (tx, rx) = mpsc::channel();
for _ in 0..THREADS {
let wg = wg.clone();
let wg2 = wg2.clone();
let tx = tx.clone();
thread::spawn(move || {
wg2.wait();
tx.send(()).unwrap();
drop(wg);
});
}
// At this point, no thread has gotten past `wg2.wait()`, so we shouldn't get anything from the
// channel.
assert!(rx.try_recv().is_err());
drop(wg2);
wg.wait();
// Now, the wait group is cleared and we should receive messages.
for _ in 0..THREADS {
rx.try_recv().unwrap();
}
}
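// A minimal illustrative sketch of the protocol the tests above rely on: each clone of
// a `WaitGroup` is one participant, and `wait` blocks until every other clone is dropped.
#[test]
fn all_participants_finish() {
    let wg = WaitGroup::new();
    for _ in 0..THREADS {
        let wg = wg.clone();
        thread::spawn(move || {
            // a participant's work would go here
            drop(wg); // signals completion
        });
    }
    wg.wait(); // returns only after every clone above has been dropped
}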