Initial vendor packages

Signed-off-by: Valentin Popov <valentin@popov.link>
2024-01-08 01:21:28 +04:00
parent 5ecd8cf2cb
commit 1b6a04ca55
7309 changed files with 2160054 additions and 0 deletions

vendor/spin/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"34b35704b01c68214b27cb20f0c4325b3ce6e40ce8fd5aa3d2b884697a27307b","Cargo.lock":"9d6481d9ffe412e6c4d9445d89c0fa3e33a14b61c56aa5c121d9f28656cacfe4","Cargo.toml":"d2a17df2fa85736364744ea8279c6aed7276cabcc63bca2d25667198504e4be7","LICENSE":"6ac8711fb340c62ce0a4ecd463342d3fa0e8e70de697c863a2e1c0c53006003c","README.md":"78f0456ba0f3fa490d2bf38d79c42b293d551a9951da5bef0e1e5043143abf78","benches/mutex.rs":"2fd79239798e88cab0fd982e04f91a44c7c63282f8703d85fbdd81a8a1e71917","examples/debug.rs":"0074651d78f8ed6d6c0274ae832c0d78de6c5c59423936f58e79cb3c98baea2c","script/doc-upload.cfg":"7383ee022197c1dade9a2e6888261def4a80df08ecb72b1b4db47d1e8a6ff8e0","src/barrier.rs":"f8569a601d870a713344747eb355e6d840db510a599709173ebd0ccba5fc4028","src/lazy.rs":"45cf9174ee607e8a9acfa652b95c9b0ac71d98e89e73b9850041c79c5f3a726e","src/lib.rs":"85fa19874da6518e7ccd0d8bd3a0549fba52afd820789797e0221af1ac62196a","src/mutex.rs":"530039838850a0e71f4af42b9a7cffa0f922874e8f00001d36d67876842ed68f","src/mutex/fair.rs":"74cf2a0b8ab8e7a95564d97e8a66fc5d3d2641dd655de56a94ae4a93458f82c3","src/mutex/spin.rs":"a11e42d58e87a195e5e2110b46bda766f5a62a0c0b75a2b163f1ce37745bf337","src/mutex/ticket.rs":"1711bb02de48cf92f950b8f31946d5003225542ab6521333be1f525b76710f26","src/once.rs":"e4289220d505b7a15e8f9fb3f0d032ee57df6a5ee1d50123d4187e1afc6fb98b","src/relax.rs":"919c345bf55c3b2f9a7d9cd4aec2b2b3db68b6dc40ee2e4de4f6f32027abc290","src/rwlock.rs":"ba3b3dd20cdcd97691f042bcdd6d352e52601db7dc0cc7053c01a5873754b196"},"package":"6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"}

vendor/spin/CHANGELOG.md vendored Normal file

@@ -0,0 +1,146 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
# Unreleased
### Added
### Changed
### Fixed
# [0.9.8] - 2023-04-03
### Fixed
- Unsoundness in `Once::try_call_once` caused by an `Err(_)` result
# [0.9.7] - 2023-03-27
### Fixed
- Relaxed accidentally restricted `Send`/`Sync` bounds for `Mutex` guards
# [0.9.6] - 2023-03-13
### Fixed
- Relaxed accidentally restricted `Send`/`Sync` bounds for `RwLock` guards
# [0.9.5] - 2023-02-07
### Added
- `FairMutex`, a new mutex implementation that reduces writer starvation.
- An MSRV policy: Rust 1.38 is currently required
### Changed
- The crate's CI now has full MIRI integration, further improving the confidence you can have in the implementation.
### Fixed
- Ensured that the crate's abstractions comply with stacked borrows rules.
- Unsoundness in the `RwLock` that could be triggered via a reader overflow
- Relaxed various `Send`/`Sync` bound requirements to make the crate more flexible
# [0.9.4] - 2022-07-14
### Fixed
- Fixed unsoundness in `RwLock` on reader overflow
- Relaxed `Send`/`Sync` bounds for `SpinMutex` and `TicketMutex` (doesn't affect `Mutex` itself)
# [0.9.3] - 2022-04-17
### Added
- Implemented `Default` for `Once`
- `Once::try_call_once`
### Fixed
- Fixed bug that caused `Once::call_once` to incorrectly fail
# [0.9.2] - 2021-07-09
### Changed
- Improved `Once` performance by reducing the memory footprint of internal state to one byte
### Fixed
- Improved performance of `Once` by relaxing ordering guarantees and removing redundant checks
# [0.9.1] - 2021-06-21
### Added
- Default type parameter on `Once` for better ergonomics
# [0.9.0] - 2021-03-18
### Changed
- Placed all major API features behind feature flags
### Fixed
- A compilation bug with the `lock_api` feature
# [0.8.0] - 2021-03-15
### Added
- `Once::get_unchecked`
- `RelaxStrategy` trait with type parameter on all locks to support switching between relax strategies
### Changed
- `lock_api1` feature is now named `lock_api`
# [0.7.1] - 2021-01-12
### Fixed
- Prevented `Once` leaking the inner value upon drop
# [0.7.0] - 2020-10-18
### Added
- `Once::initialized`
- `Once::get_mut`
- `Once::try_into_inner`
- `Once::poll`
- `RwLock`, `Mutex` and `Once` now implement `From<T>`
- `Lazy` type for lazy initialization
- `TicketMutex`, an alternative mutex implementation
- `std` feature flag to enable thread yielding instead of spinning
- `Mutex::is_locked`/`SpinMutex::is_locked`/`TicketMutex::is_locked`
- `Barrier`
### Changed
- `Once::wait` now spins even if initialization has not yet started
- `Guard::leak` is now an associated function instead of a method
- Improved the performance of `SpinMutex` by relaxing unnecessarily conservative
ordering requirements
# [0.6.0] - 2020-10-08
### Added
- More dynamic `Send`/`Sync` bounds for lock guards
- `lock_api` compatibility
- `Guard::leak` methods
- `RwLock::reader_count` and `RwLock::writer_count`
- `Display` implementation for guard types
### Changed
- Made `Debug` impls of lock guards just show the inner type like `std`

vendor/spin/Cargo.lock generated vendored Normal file

@@ -0,0 +1,623 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi 0.1.19",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bumpalo"
version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciborium"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369"
[[package]]
name = "ciborium-ll"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "3.2.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
dependencies = [
"bitflags",
"clap_lex",
"indexmap",
"textwrap",
]
[[package]]
name = "clap_lex"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
dependencies = [
"os_str_bytes",
]
[[package]]
name = "criterion"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
dependencies = [
"anes",
"atty",
"cast",
"ciborium",
"clap",
"criterion-plot",
"itertools",
"lazy_static",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset",
"scopeguard",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
dependencies = [
"cfg-if",
]
[[package]]
name = "either"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
[[package]]
name = "half"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
dependencies = [
"libc",
]
[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
[[package]]
name = "js-sys"
version = "0.3.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c"
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memoffset"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
dependencies = [
"autocfg",
]
[[package]]
name = "num-traits"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
dependencies = [
"hermit-abi 0.2.6",
"libc",
]
[[package]]
name = "once_cell"
version = "1.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "os_str_bytes"
version = "6.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267"
[[package]]
name = "plotters"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142"
[[package]]
name = "plotters-svg"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f"
dependencies = [
"plotters-backend",
]
[[package]]
name = "portable-atomic"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f602a0d1e09a48e4f8e8b4d4042e32807c3676da31f2ecabeac9f96226ec6c45"
[[package]]
name = "proc-macro2"
version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rayon"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils",
"num_cpus",
]
[[package]]
name = "regex"
version = "1.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
dependencies = [
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "ryu"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.159"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.159"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.13",
]
[[package]]
name = "serde_json"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "spin"
version = "0.9.8"
dependencies = [
"criterion",
"lock_api",
"portable-atomic",
]
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "textwrap"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "unicode-ident"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
[[package]]
name = "walkdir"
version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn 1.0.109",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
[[package]]
name = "web-sys"
version = "0.3.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

vendor/spin/Cargo.toml vendored Normal file

@@ -0,0 +1,80 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
rust-version = "1.38"
name = "spin"
version = "0.9.8"
authors = [
"Mathijs van de Nes <git@mathijs.vd-nes.nl>",
"John Ericson <git@JohnEricson.me>",
"Joshua Barretto <joshua.s.barretto@gmail.com>",
]
description = "Spin-based synchronization primitives"
readme = "README.md"
keywords = [
"spinlock",
"mutex",
"rwlock",
]
license = "MIT"
repository = "https://github.com/mvdnes/spin-rs.git"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = [
"--cfg",
"docsrs",
]
[[bench]]
name = "mutex"
harness = false
required-features = ["ticket_mutex"]
[dependencies.lock_api_crate]
version = "0.4"
optional = true
package = "lock_api"
[dependencies.portable-atomic]
version = "1"
optional = true
default-features = false
[dev-dependencies.criterion]
version = "0.4"
[features]
barrier = ["mutex"]
default = [
"lock_api",
"mutex",
"spin_mutex",
"rwlock",
"once",
"lazy",
"barrier",
]
fair_mutex = ["mutex"]
lazy = ["once"]
lock_api = ["lock_api_crate"]
mutex = []
once = []
portable_atomic = ["portable-atomic"]
rwlock = []
spin_mutex = ["mutex"]
std = []
ticket_mutex = ["mutex"]
use_ticket_mutex = [
"mutex",
"ticket_mutex",
]

vendor/spin/LICENSE vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Mathijs van de Nes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/spin/README.md vendored Normal file

@@ -0,0 +1,143 @@
# spin-rs
[![Crates.io version](https://img.shields.io/crates/v/spin.svg)](https://crates.io/crates/spin)
[![docs.rs](https://docs.rs/spin/badge.svg)](https://docs.rs/spin/)
[![Build Status](https://travis-ci.org/mvdnes/spin-rs.svg)](https://travis-ci.org/mvdnes/spin-rs)
Spin-based synchronization primitives.
This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock)
versions of the primitives in `std::sync`. Because synchronization is done
through spinning, the primitives are suitable for use in `no_std` environments.
Before deciding to use `spin`, we recommend reading
[this superb blog post](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html)
by [@matklad](https://github.com/matklad/) that discusses the pros and cons of
spinlocks. If you have access to `std`, it's likely that the primitives in
`std::sync` will serve you better except in very specific circumstances.
## Features
- `Mutex`, `RwLock`, `Once`, `Lazy` and `Barrier` equivalents
- Support for `no_std` environments
- [`lock_api`](https://crates.io/crates/lock_api) compatibility
- Upgradeable `RwLock` guards
- Guards can be sent and shared between threads
- Guard leaking
- Ticket locks
- Different strategies for dealing with contention
## Usage
Include the following under the `[dependencies]` section in your `Cargo.toml` file.
```toml
spin = "x.y"
```
## Example
When calling `lock` on a `Mutex` you will get a guard value that provides access
to the data. When this guard is dropped, the mutex will become available again.
```rust
extern crate spin;
use std::{sync::Arc, thread};
fn main() {
let counter = Arc::new(spin::Mutex::new(0));
let thread = thread::spawn({
let counter = counter.clone();
move || {
for _ in 0..100 {
*counter.lock() += 1;
}
}
});
for _ in 0..100 {
*counter.lock() += 1;
}
thread.join().unwrap();
assert_eq!(*counter.lock(), 200);
}
```
## Feature flags
The crate comes with a few feature flags that you may wish to use.
- `mutex` enables the `Mutex` type.
- `spin_mutex` enables the `SpinMutex` type.
- `ticket_mutex` enables the `TicketMutex` type.
- `use_ticket_mutex` switches to a ticket lock for the implementation of `Mutex`. This
is recommended only on targets for which ordinary spinning locks perform very badly
because it will change the implementation used by other crates that depend on `spin`.
- `rwlock` enables the `RwLock` type.
- `once` enables the `Once` type.
- `lazy` enables the `Lazy` type.
- `barrier` enables the `Barrier` type.
- `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
- `std` enables support for thread yielding instead of spinning.
- `portable_atomic` enables usage of the `portable-atomic` crate
to support platforms without native atomic operations (Cortex-M0, etc.).
The `portable_atomic_unsafe_assume_single_core` cfg or `critical-section` feature
of `portable-atomic` crate must also be set by the final binary crate.
When using the cfg, this can be done by adapting the following snippet to the `.cargo/config` file:
```
[target.<target>]
rustflags = [ "--cfg", "portable_atomic_unsafe_assume_single_core" ]
```
Note that this cfg is unsafe by nature, and enabling it for multicore systems is unsound.
When using the `critical-section` feature, you need to provide a critical-section
implementation that is sound for your system by implementing an unsafe trait.
See [the documentation for the `portable-atomic` crate](https://docs.rs/portable-atomic/latest/portable_atomic/#optional-cfg)
for more information.
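As a rough sketch of what this can look like in practice (the version numbers are assumptions; the feature names are taken from this crate's `Cargo.toml` and the `portable-atomic` documentation, so verify them against the versions you use), the dependency declarations for the `critical-section` route might resemble:
```toml
# Hedged example: enable spin's `portable_atomic` feature and portable-atomic's
# critical-section backend. The final binary crate must still supply a sound
# critical-section implementation (for example via a HAL crate or by
# implementing the unsafe trait mentioned above).
[dependencies]
spin = { version = "0.9", features = ["portable_atomic"] }
portable-atomic = { version = "1", features = ["critical-section"] }
```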
## Remarks
It is often desirable to have a lock shared between threads. Wrapping the lock in an
`std::sync::Arc` is one route through which this might be achieved.
Locks provide zero-overhead access to their data when accessed through a mutable
reference by using their `get_mut` methods.
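As a minimal sketch of both remarks (not part of the upstream README), the snippet below shares a `spin::Mutex` between threads through an `Arc`, and then uses `get_mut` where exclusive access makes locking unnecessary:
```rust
use std::{sync::Arc, thread};

fn main() {
    // Share the lock between threads by wrapping it in an Arc.
    let shared = Arc::new(spin::Mutex::new(0u32));
    let handle = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || *shared.lock() += 1)
    };
    handle.join().unwrap();
    assert_eq!(*shared.lock(), 1);

    // With a mutable (exclusive) reference, `get_mut` provides access to the
    // data without performing any locking at all.
    let mut local = spin::Mutex::new(0u32);
    *local.get_mut() += 1;
    assert_eq!(local.into_inner(), 1);
}
```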
The behaviour of these locks is similar to that of their namesakes in `std::sync`; they
differ in the following ways:
- Locks will not be poisoned in case of failure.
- Threads will not yield to the OS scheduler when they encounter a lock that cannot be
accessed. Instead, they will 'spin' in a busy loop until the lock becomes available.
Many of the feature flags listed above are enabled by default. If you're writing a
library, we recommend disabling those that you don't use to avoid increasing compilation
time for your crate's users. You can do this like so:
```
[dependencies]
spin = { version = "x.y", default-features = false, features = [...] }
```
## Minimum Safe Rust Version (MSRV)
This crate is guaranteed to compile on a Minimum Safe Rust Version (MSRV) of 1.38.0 and above.
This version will not be changed without a minor version bump.
## License
`spin` is distributed under the MIT License, (See `LICENSE`).

vendor/spin/benches/mutex.rs vendored Normal file

@@ -0,0 +1,126 @@
#[macro_use]
extern crate criterion;
use criterion::{Criterion, Bencher, black_box};
use std::{
ops::DerefMut,
sync::Arc,
};
trait Mutex<T>: Send + Sync + 'static {
type Guard<'a>: DerefMut<Target = T> where Self: 'a;
fn new(x: T) -> Self;
fn lock(&self) -> Self::Guard<'_>;
}
impl<T: Send + 'static> Mutex<T> for spin::mutex::SpinMutex<T> {
type Guard<'a> = spin::mutex::SpinMutexGuard<'a, T> where Self: 'a;
fn new(x: T) -> Self { spin::mutex::SpinMutex::new(x) }
fn lock(&self) -> Self::Guard<'_> { self.lock() }
}
impl<T: Send + 'static> Mutex<T> for spin::mutex::TicketMutex<T> {
type Guard<'a> = spin::mutex::TicketMutexGuard<'a, T> where Self: 'a;
fn new(x: T) -> Self { spin::mutex::TicketMutex::new(x) }
fn lock(&self) -> Self::Guard<'_> { self.lock() }
}
impl<T: Send + 'static> Mutex<T> for std::sync::Mutex<T> {
type Guard<'a> = std::sync::MutexGuard<'a, T> where Self: 'a;
fn new(x: T) -> Self { std::sync::Mutex::new(x) }
fn lock(&self) -> Self::Guard<'_> { self.lock().unwrap() }
}
fn gen_create<M: Mutex<u32>>(b: &mut Bencher) {
b.iter(|| {
let n = black_box(42);
M::new(n)
});
}
fn gen_lock_unlock<M: Mutex<u32>>(b: &mut Bencher) {
let m = M::new(0);
b.iter(|| {
let mut m = m.lock();
*m = m.wrapping_add(1);
drop(m);
});
}
fn gen_lock_unlock_read_contention<M: Mutex<u32>>(b: &mut Bencher) {
let m = Arc::new(M::new(0));
let thread = std::thread::spawn({
let m = m.clone();
move || {
while Arc::strong_count(&m) > 1 {
for _ in 0..1000 {
black_box(*m.lock());
}
}
}
});
b.iter(|| {
let mut m = m.lock();
*m = m.wrapping_add(1);
drop(m);
});
drop(m);
thread.join().unwrap();
}
fn gen_lock_unlock_write_contention<M: Mutex<u32>>(b: &mut Bencher) {
let m = Arc::new(M::new(0));
let thread = std::thread::spawn({
let m = m.clone();
move || {
while Arc::strong_count(&m) > 1 {
for _ in 0..1000 {
let mut m = m.lock();
*m = m.wrapping_add(1);
drop(m);
}
}
}
});
b.iter(|| {
let mut m = m.lock();
*m = m.wrapping_add(1);
drop(m);
});
drop(m);
thread.join().unwrap();
}
fn create(b: &mut Criterion) {
b.bench_function("create-spin-spinmutex", |b| gen_create::<spin::mutex::SpinMutex<u32>>(b));
b.bench_function("create-spin-ticketmutex", |b| gen_create::<spin::mutex::TicketMutex<u32>>(b));
b.bench_function("create-std", |b| gen_create::<std::sync::Mutex<u32>>(b));
}
fn lock_unlock(b: &mut Criterion) {
b.bench_function("lock_unlock-spin-spinmutex", |b| gen_lock_unlock::<spin::mutex::SpinMutex<u32>>(b));
b.bench_function("lock_unlock-spin-ticketmutex", |b| gen_lock_unlock::<spin::mutex::TicketMutex<u32>>(b));
b.bench_function("lock_unlock-std", |b| gen_lock_unlock::<std::sync::Mutex<u32>>(b));
}
fn lock_unlock_read_contention(b: &mut Criterion) {
b.bench_function("lock_unlock_read_contention-spin-spinmutex", |b| gen_lock_unlock_read_contention::<spin::mutex::SpinMutex<u32>>(b));
b.bench_function("lock_unlock_read_contention-spin-ticketmutex", |b| gen_lock_unlock_read_contention::<spin::mutex::TicketMutex<u32>>(b));
b.bench_function("lock_unlock_read_contention-std", |b| gen_lock_unlock_read_contention::<std::sync::Mutex<u32>>(b));
}
fn lock_unlock_write_contention(b: &mut Criterion) {
b.bench_function("lock_unlock_write_contention-spin-spinmutex", |b| gen_lock_unlock_write_contention::<spin::mutex::SpinMutex<u32>>(b));
b.bench_function("lock_unlock_write_contention-spin-ticketmutex", |b| gen_lock_unlock_write_contention::<spin::mutex::TicketMutex<u32>>(b));
b.bench_function("lock_unlock_write_contention-std", |b| gen_lock_unlock_write_contention::<std::sync::Mutex<u32>>(b));
}
criterion_group!(
mutex,
create,
lock_unlock,
lock_unlock_read_contention,
lock_unlock_write_contention,
);
criterion_main!(mutex);

vendor/spin/examples/debug.rs vendored Normal file

@@ -0,0 +1,21 @@
extern crate spin;
fn main() {
let mutex = spin::Mutex::new(42);
println!("{:?}", mutex);
{
let x = mutex.lock();
println!("{:?}, {:?}", mutex, *x);
}
let rwlock = spin::RwLock::new(42);
println!("{:?}", rwlock);
{
let x = rwlock.read();
println!("{:?}, {:?}", rwlock, *x);
}
{
let x = rwlock.write();
println!("{:?}, {:?}", rwlock, *x);
}
}

vendor/spin/script/doc-upload.cfg vendored Normal file

@@ -0,0 +1,3 @@
PROJECT_NAME=spin-rs
DOCS_REPO=mvdnes/rust-docs.git
DOC_RUST_VERSION=stable

vendor/spin/src/barrier.rs vendored Normal file

@@ -0,0 +1,239 @@
//! Synchronization primitive allowing multiple threads to synchronize the
//! beginning of some computation.
//!
//! Implementation adapted from the 'Barrier' type of the standard library. See:
//! <https://doc.rust-lang.org/std/sync/struct.Barrier.html>
//!
//! Copyright 2014 The Rust Project Developers. See the COPYRIGHT
//! file at the top-level directory of this distribution and at
//! <http://rust-lang.org/COPYRIGHT>.
//!
//! Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
//! <http://www.apache.org/licenses/LICENSE-2.0>> or the MIT license
//! <LICENSE-MIT or <http://opensource.org/licenses/MIT>>, at your
//! option. This file may not be copied, modified, or distributed
//! except according to those terms.
use crate::{mutex::Mutex, RelaxStrategy, Spin};
/// A primitive that synchronizes the execution of multiple threads.
///
/// # Example
///
/// ```
/// use spin;
/// use std::sync::Arc;
/// use std::thread;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(spin::Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(thread::spawn(move|| {
/// println!("before wait");
/// c.wait();
/// println!("after wait");
/// }));
/// }
/// // Wait for other threads to finish.
/// for handle in handles {
/// handle.join().unwrap();
/// }
/// ```
pub struct Barrier<R = Spin> {
lock: Mutex<BarrierState, R>,
num_threads: usize,
}
// The inner state of a double barrier
struct BarrierState {
count: usize,
generation_id: usize,
}
/// A `BarrierWaitResult` is returned by [`wait`] when all threads in the [`Barrier`]
/// have rendezvoused.
///
/// [`wait`]: struct.Barrier.html#method.wait
/// [`Barrier`]: struct.Barrier.html
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// ```
pub struct BarrierWaitResult(bool);
impl<R: RelaxStrategy> Barrier<R> {
/// Blocks the current thread until all threads have rendezvoused here.
///
/// Barriers are re-usable after all threads have rendezvoused once, and can
/// be used continuously.
///
/// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
/// returns `true` from [`is_leader`] when returning from this function, and
/// all other threads will receive a result that will return `false` from
/// [`is_leader`].
///
/// [`BarrierWaitResult`]: struct.BarrierWaitResult.html
/// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader
///
/// # Examples
///
/// ```
/// use spin;
/// use std::sync::Arc;
/// use std::thread;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(spin::Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(thread::spawn(move|| {
/// println!("before wait");
/// c.wait();
/// println!("after wait");
/// }));
/// }
/// // Wait for other threads to finish.
/// for handle in handles {
/// handle.join().unwrap();
/// }
/// ```
pub fn wait(&self) -> BarrierWaitResult {
let mut lock = self.lock.lock();
lock.count += 1;
if lock.count < self.num_threads {
// not the leader
let local_gen = lock.generation_id;
while local_gen == lock.generation_id && lock.count < self.num_threads {
drop(lock);
R::relax();
lock = self.lock.lock();
}
BarrierWaitResult(false)
} else {
// this thread is the leader,
// and is responsible for incrementing the generation
lock.count = 0;
lock.generation_id = lock.generation_id.wrapping_add(1);
BarrierWaitResult(true)
}
}
}
impl<R> Barrier<R> {
/// Creates a new barrier that can block a given number of threads.
///
/// A barrier will block `n`-1 threads which call [`wait`] and then wake up
/// all threads at once when the `n`th thread calls [`wait`]. A Barrier created
/// with n = 0 will behave identically to one created with n = 1.
///
/// [`wait`]: #method.wait
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(10);
/// ```
pub const fn new(n: usize) -> Self {
Self {
lock: Mutex::new(BarrierState {
count: 0,
generation_id: 0,
}),
num_threads: n,
}
}
}
impl BarrierWaitResult {
/// Returns whether this thread from [`wait`] is the "leader thread".
///
/// Only one thread will have `true` returned from their result, all other
/// threads will have `false` returned.
///
/// [`wait`]: struct.Barrier.html#method.wait
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// println!("{:?}", barrier_wait_result.is_leader());
/// ```
pub fn is_leader(&self) -> bool {
self.0
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::mpsc::{channel, TryRecvError};
use std::sync::Arc;
use std::thread;
type Barrier = super::Barrier;
fn use_barrier(n: usize, barrier: Arc<Barrier>) {
let (tx, rx) = channel();
let mut ts = Vec::new();
for _ in 0..n - 1 {
let c = barrier.clone();
let tx = tx.clone();
ts.push(thread::spawn(move || {
tx.send(c.wait().is_leader()).unwrap();
}));
}
// At this point, all spawned threads should be blocked,
// so we shouldn't get anything from the port
assert!(match rx.try_recv() {
Err(TryRecvError::Empty) => true,
_ => false,
});
let mut leader_found = barrier.wait().is_leader();
// Now, the barrier is cleared and we should get data.
for _ in 0..n - 1 {
if rx.recv().unwrap() {
assert!(!leader_found);
leader_found = true;
}
}
assert!(leader_found);
for t in ts {
t.join().unwrap();
}
}
#[test]
fn test_barrier() {
const N: usize = 10;
let barrier = Arc::new(Barrier::new(N));
use_barrier(N, barrier.clone());
// use barrier twice to ensure it is reusable
use_barrier(N, barrier.clone());
}
}

vendor/spin/src/lazy.rs vendored Normal file

@@ -0,0 +1,118 @@
//! Synchronization primitives for lazy evaluation.
//!
//! Implementation adapted from the `SyncLazy` type of the standard library. See:
//! <https://doc.rust-lang.org/std/lazy/struct.SyncLazy.html>
use crate::{once::Once, RelaxStrategy, Spin};
use core::{cell::Cell, fmt, ops::Deref};
/// A value which is initialized on the first access.
///
/// This type is a thread-safe `Lazy`, and can be used in statics.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use spin::Lazy;
///
/// static HASHMAP: Lazy<HashMap<i32, String>> = Lazy::new(|| {
/// println!("initializing");
/// let mut m = HashMap::new();
/// m.insert(13, "Spica".to_string());
/// m.insert(74, "Hoyten".to_string());
/// m
/// });
///
/// fn main() {
/// println!("ready");
/// std::thread::spawn(|| {
/// println!("{:?}", HASHMAP.get(&13));
/// }).join().unwrap();
/// println!("{:?}", HASHMAP.get(&74));
///
/// // Prints:
/// // ready
/// // initializing
/// // Some("Spica")
/// // Some("Hoyten")
/// }
/// ```
pub struct Lazy<T, F = fn() -> T, R = Spin> {
cell: Once<T, R>,
init: Cell<Option<F>>,
}
impl<T: fmt::Debug, F, R> fmt::Debug for Lazy<T, F, R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Lazy")
.field("cell", &self.cell)
.field("init", &"..")
.finish()
}
}
// We never create a `&F` from a `&Lazy<T, F>` so it is fine
// to not impl `Sync` for `F`
// we do create a `&mut Option<F>` in `force`, but this is
// properly synchronized, so it only happens once
// so it also does not contribute to this impl.
unsafe impl<T, F: Send> Sync for Lazy<T, F> where Once<T>: Sync {}
// auto-derived `Send` impl is OK.
impl<T, F, R> Lazy<T, F, R> {
/// Creates a new lazy value with the given initializing
/// function.
pub const fn new(f: F) -> Self {
Self {
cell: Once::new(),
init: Cell::new(Some(f)),
}
}
/// Retrieves a mutable pointer to the inner data.
///
/// This is especially useful when interfacing with low level code or FFI where the caller
/// explicitly knows that it has exclusive access to the inner data. Note that reading from
/// this pointer is UB until initialized or directly written to.
pub fn as_mut_ptr(&self) -> *mut T {
self.cell.as_mut_ptr()
}
}
impl<T, F: FnOnce() -> T, R: RelaxStrategy> Lazy<T, F, R> {
/// Forces the evaluation of this lazy value and
/// returns a reference to result. This is equivalent
/// to the `Deref` impl, but is explicit.
///
/// # Examples
///
/// ```
/// use spin::Lazy;
///
/// let lazy = Lazy::new(|| 92);
///
/// assert_eq!(Lazy::force(&lazy), &92);
/// assert_eq!(&*lazy, &92);
/// ```
pub fn force(this: &Self) -> &T {
this.cell.call_once(|| match this.init.take() {
Some(f) => f(),
None => panic!("Lazy instance has previously been poisoned"),
})
}
}
impl<T, F: FnOnce() -> T, R: RelaxStrategy> Deref for Lazy<T, F, R> {
type Target = T;
fn deref(&self) -> &T {
Self::force(self)
}
}
impl<T: Default, R> Default for Lazy<T, fn() -> T, R> {
/// Creates a new lazy value using `Default` as the initializing function.
fn default() -> Self {
Self::new(T::default)
}
}

vendor/spin/src/lib.rs vendored Normal file

@@ -0,0 +1,221 @@
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
//! This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) versions of the
//! primitives in `std::sync` and `std::lazy`. Because synchronization is done through spinning,
//! the primitives are suitable for use in `no_std` environments.
//!
//! # Features
//!
//! - `Mutex`, `RwLock`, `Once`/`SyncOnceCell`, and `SyncLazy` equivalents
//!
//! - Support for `no_std` environments
//!
//! - [`lock_api`](https://crates.io/crates/lock_api) compatibility
//!
//! - Upgradeable `RwLock` guards
//!
//! - Guards can be sent and shared between threads
//!
//! - Guard leaking
//!
//! - Ticket locks
//!
//! - Different strategies for dealing with contention
//!
//! # Relationship with `std::sync`
//!
//! While `spin` is not a drop-in replacement for `std::sync` (and
//! [should not be considered as such](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html))
//! an effort is made to keep this crate reasonably consistent with `std::sync`.
//!
//! Many of the types defined in this crate have 'additional capabilities' when compared to `std::sync`:
//!
//! - Because spinning does not depend on the thread-driven model of `std::sync`, guards ([`MutexGuard`],
//! [`RwLockReadGuard`], [`RwLockWriteGuard`], etc.) may be sent and shared between threads.
//!
//! - [`RwLockUpgradableGuard`] supports being upgraded into a [`RwLockWriteGuard`].
//!
//! - Guards support [leaking](https://doc.rust-lang.org/nomicon/leaking.html).
//!
//! - [`Once`] owns the value returned by its `call_once` initializer.
//!
//! - [`RwLock`] supports counting readers and writers.
//!
//! Conversely, the types in this crate do not have some of the features `std::sync` has:
//!
//! - Locks do not track [panic poisoning](https://doc.rust-lang.org/nomicon/poisoning.html).
//!
//! ## Feature flags
//!
//! The crate comes with a few feature flags that you may wish to use.
//!
//! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
//!
//! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
//!
//! - `fair_mutex` enables a fairer implementation of `Mutex` that uses eventual fairness to avoid
//! starvation
//!
//! - `std` enables support for thread yielding instead of spinning
#[cfg(any(test, feature = "std"))]
extern crate core;
#[cfg(feature = "portable_atomic")]
extern crate portable_atomic;
#[cfg(not(feature = "portable_atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable_atomic")]
use portable_atomic as atomic;
#[cfg(feature = "barrier")]
#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
pub mod barrier;
#[cfg(feature = "lazy")]
#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
pub mod lazy;
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub mod mutex;
#[cfg(feature = "once")]
#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
pub mod once;
pub mod relax;
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub mod rwlock;
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub use mutex::MutexGuard;
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub use relax::Yield;
pub use relax::{RelaxStrategy, Spin};
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub use rwlock::RwLockReadGuard;
// Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different
// relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible
// but sadly adding a default type parameter is *still* a breaking change in Rust (for understandable reasons).
/// A primitive that synchronizes the execution of multiple threads. See [`barrier::Barrier`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "barrier")]
#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
pub type Barrier = crate::barrier::Barrier;
/// A value which is initialized on the first access. See [`lazy::Lazy`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "lazy")]
#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, F>;
/// A primitive that synchronizes the execution of multiple threads. See [`mutex::Mutex`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub type Mutex<T> = crate::mutex::Mutex<T>;
/// A primitive that provides lazy one-time initialization. See [`once::Once`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "once")]
#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
pub type Once<T = ()> = crate::once::Once<T>;
/// A lock that provides data access to either one writer or many readers. See [`rwlock::RwLock`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLock<T> = crate::rwlock::RwLock<T>;
/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. See
/// [`rwlock::RwLockUpgradableGuard`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockUpgradableGuard<'a, T> = crate::rwlock::RwLockUpgradableGuard<'a, T>;
/// A guard that provides mutable data access. See [`rwlock::RwLockWriteGuard`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockWriteGuard<'a, T> = crate::rwlock::RwLockWriteGuard<'a, T>;
/// Spin synchronisation primitives, but compatible with [`lock_api`](https://crates.io/crates/lock_api).
#[cfg(feature = "lock_api")]
#[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))]
pub mod lock_api {
/// A lock that provides mutually exclusive data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub type Mutex<T> = lock_api_crate::Mutex<crate::Mutex<()>, T>;
/// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub type MutexGuard<'a, T> = lock_api_crate::MutexGuard<'a, crate::Mutex<()>, T>;
/// A lock that provides data access to either one writer or many readers (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLock<T> = lock_api_crate::RwLock<crate::RwLock<()>, T>;
/// A guard that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockReadGuard<'a, T> = lock_api_crate::RwLockReadGuard<'a, crate::RwLock<()>, T>;
/// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockWriteGuard<'a, T> = lock_api_crate::RwLockWriteGuard<'a, crate::RwLock<()>, T>;
/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockUpgradableReadGuard<'a, T> =
lock_api_crate::RwLockUpgradableReadGuard<'a, crate::RwLock<()>, T>;
}
/// In the event of an invalid operation, it's best to abort the current process.
#[cfg(feature = "fair_mutex")]
fn abort() -> ! {
#[cfg(not(feature = "std"))]
{
// Panicking while panicking is defined by Rust to result in an abort.
struct Panic;
impl Drop for Panic {
fn drop(&mut self) {
panic!("aborting due to invalid operation");
}
}
let _panic = Panic;
panic!("aborting due to invalid operation");
}
#[cfg(feature = "std")]
{
std::process::abort();
}
}

vendor/spin/src/mutex.rs vendored Normal file

@@ -0,0 +1,340 @@
//! Locks that have the same behaviour as a mutex.
//!
//! The [`Mutex`] in the root of the crate can be configured using the `ticket_mutex` feature.
//! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`]
//! and [`MutexGuard`], otherwise the [`SpinMutex`] and guard will be re-exported.
//!
//! `ticket_mutex` is disabled by default.
//!
//! [`Mutex`]: ../struct.Mutex.html
//! [`MutexGuard`]: ../struct.MutexGuard.html
//! [`TicketMutex`]: ./struct.TicketMutex.html
//! [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
//! [`SpinMutex`]: ./struct.SpinMutex.html
//! [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
#[cfg(feature = "spin_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
pub mod spin;
#[cfg(feature = "spin_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
pub use self::spin::{SpinMutex, SpinMutexGuard};
#[cfg(feature = "ticket_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
pub mod ticket;
#[cfg(feature = "ticket_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
pub use self::ticket::{TicketMutex, TicketMutexGuard};
#[cfg(feature = "fair_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub mod fair;
#[cfg(feature = "fair_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub use self::fair::{FairMutex, FairMutexGuard, Starvation};
use crate::{RelaxStrategy, Spin};
use core::{
fmt,
ops::{Deref, DerefMut},
};
#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required.");
#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
type InnerMutex<T, R> = self::spin::SpinMutex<T, R>;
#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>;
#[cfg(feature = "use_ticket_mutex")]
type InnerMutex<T, R> = self::ticket::TicketMutex<T, R>;
#[cfg(feature = "use_ticket_mutex")]
type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;
/// A spin-based lock providing mutually exclusive access to data.
///
/// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or
/// `ticket_mutex` feature flag is enabled.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::Mutex::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::Mutex::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// # let t =
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// # t.join().unwrap();
/// # }
/// ```
pub struct Mutex<T: ?Sized, R = Spin> {
inner: InnerMutex<T, R>,
}
unsafe impl<T: ?Sized + Send, R> Sync for Mutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for Mutex<T, R> {}
/// A generic guard that will protect some data access and
/// uses either a ticket lock or a normal spin mutex.
///
/// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`].
///
/// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
/// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
pub struct MutexGuard<'a, T: 'a + ?Sized> {
inner: InnerMutexGuard<'a, T>,
}
impl<T, R> Mutex<T, R> {
/// Creates a new [`Mutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::Mutex;
///
/// static MUTEX: Mutex<()> = Mutex::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(value: T) -> Self {
Self {
inner: InnerMutex::new(value),
}
}
/// Consumes this [`Mutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::Mutex::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
self.inner.into_inner()
}
}
impl<T: ?Sized, R: RelaxStrategy> Mutex<T, R> {
/// Locks the [`Mutex`] and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::Mutex::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> MutexGuard<T> {
MutexGuard {
inner: self.inner.lock(),
}
}
}
impl<T: ?Sized, R> Mutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
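///
/// # Example
///
/// A minimal sketch (illustrative only); remember that the result is a
/// snapshot and may already be stale by the time it is inspected.
///
/// ```
/// let lock = spin::Mutex::new(0);
/// assert!(!lock.is_locked());
///
/// let guard = lock.lock();
/// assert!(lock.is_locked());
///
/// drop(guard);
/// assert!(!lock.is_locked());
/// ```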
#[inline(always)]
pub fn is_locked(&self) -> bool {
self.inner.is_locked()
}
/// Force unlock this [`Mutex`].
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
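///
/// # Example
///
/// A minimal sketch of the FFI-style pattern described above (illustrative
/// only): the guard is forgotten so the lock stays held, then released
/// manually.
///
/// ```
/// let lock = spin::Mutex::new(42);
///
/// // Keep the lock held past the guard's scope.
/// core::mem::forget(lock.lock());
/// assert!(lock.is_locked());
///
/// // Sound here because this thread still logically holds the lock.
/// unsafe { lock.force_unlock(); }
/// assert!(lock.try_lock().is_some());
/// ```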
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.inner.force_unlock()
}
/// Try to lock this [`Mutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::Mutex::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<MutexGuard<T>> {
self.inner
.try_lock()
.map(|guard| MutexGuard { inner: guard })
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
/// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
/// this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::Mutex::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.inner.get_mut()
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for Mutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.inner, f)
}
}
impl<T: ?Sized + Default, R> Default for Mutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for Mutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> MutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`Mutex`].
///
/// ```
/// let mylock = spin::Mutex::new(0);
///
/// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
InnerMutexGuard::leak(this.inner)
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
&*self.inner
}
}
impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
&mut *self.inner
}
}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for Mutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}

735
vendor/spin/src/mutex/fair.rs vendored Normal file
View File

@ -0,0 +1,735 @@
//! A spinning mutex with a fairer unlock algorithm.
//!
//! This mutex is similar to the `SpinMutex` in that it uses spinning to avoid
//! context switches. However, it uses a fairer unlock algorithm that avoids
//! starvation of threads that are waiting for the lock.
use crate::{
atomic::{AtomicUsize, Ordering},
RelaxStrategy, Spin,
};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
};
// The lowest bit of `lock` is used to indicate whether the mutex is locked or not. The rest of the bits are used to
// store the number of starving threads.
const LOCKED: usize = 1;
const STARVED: usize = 2;
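// For example (illustrative decoding only), a raw state of 5 (0b101) means
// "locked, with two starving waiters": `state & LOCKED != 0` and
// `(state & !LOCKED) / STARVED == 2`. The code below never needs to compute
// the count explicitly; it only adds/removes `STARVED` and tests `LOCKED`.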
/// Number chosen by fair roll of the dice, adjust as needed.
const STARVATION_SPINS: usize = 1024;
/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data, but with a fairer
/// algorithm.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::FairMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::FairMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
/// ```
pub struct FairMutex<T: ?Sized, R = Spin> {
phantom: PhantomData<R>,
pub(crate) lock: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct FairMutexGuard<'a, T: ?Sized + 'a> {
lock: &'a AtomicUsize,
data: *mut T,
}
/// A handle that indicates that we have been trying to acquire the lock for a while.
///
/// This handle is used to prevent starvation.
pub struct Starvation<'a, T: ?Sized + 'a, R> {
lock: &'a FairMutex<T, R>,
}
/// Indicates whether a lock was rejected due to the lock being held by another thread or due to starvation.
#[derive(Debug)]
pub enum LockRejectReason {
/// The lock was rejected due to the lock being held by another thread.
Locked,
/// The lock was rejected due to starvation.
Starved,
}
// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for FairMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for FairMutex<T, R> {}
unsafe impl<T: ?Sized + Sync> Sync for FairMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for FairMutexGuard<'_, T> {}
impl<T, R> FairMutex<T, R> {
/// Creates a new [`FairMutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::mutex::FairMutex;
///
/// static MUTEX: FairMutex<()> = FairMutex::<_>::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(data: T) -> Self {
FairMutex {
lock: AtomicUsize::new(0),
data: UnsafeCell::new(data),
phantom: PhantomData,
}
}
/// Consumes this [`FairMutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let FairMutex { data, .. } = self;
data.into_inner()
}
/// Returns a mutable pointer to the underlying data.
///
/// This is mostly meant to be used for applications which require manual unlocking, but where
/// storing both the lock and the pointer to the inner data gets inefficient.
///
/// # Example
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// unsafe {
/// core::mem::forget(lock.lock());
///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
/// lock.force_unlock();
/// }
///
/// assert_eq!(*lock.lock(), 58);
///
/// ```
#[inline(always)]
pub fn as_mut_ptr(&self) -> *mut T {
self.data.get()
}
}
impl<T: ?Sized, R: RelaxStrategy> FairMutex<T, R> {
/// Locks the [`FairMutex`] and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> FairMutexGuard<T> {
// Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
// when called in a loop.
let mut spins = 0;
while self
.lock
.compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
// Wait until the lock looks unlocked before retrying
while self.is_locked() {
R::relax();
// If we've been spinning for a while, switch to a fairer strategy that will prevent
// newer users from stealing our lock from us.
if spins > STARVATION_SPINS {
return self.starve().lock();
}
spins += 1;
}
}
FairMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
}
impl<T: ?Sized, R> FairMutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
#[inline(always)]
pub fn is_locked(&self) -> bool {
self.lock.load(Ordering::Relaxed) & LOCKED != 0
}
/// Force unlock this [`FairMutex`].
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.lock.fetch_and(!LOCKED, Ordering::Release);
}
/// Try to lock this [`FairMutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<FairMutexGuard<T>> {
self.try_lock_starver().ok()
}
/// Tries to lock this [`FairMutex`] and returns a result that indicates whether the lock was
/// rejected due to a starver or not.
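///
/// # Example
///
/// A minimal sketch (illustrative only): an uncontended attempt succeeds,
/// while an attempt made with the guard still held is rejected.
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// let guard = lock.try_lock_starver();
/// assert!(guard.is_ok());
///
/// // The lock is already held, so this attempt is rejected.
/// assert!(lock.try_lock_starver().is_err());
/// ```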
#[inline(always)]
pub fn try_lock_starver(&self) -> Result<FairMutexGuard<T>, LockRejectReason> {
match self
.lock
.compare_exchange(0, LOCKED, Ordering::Acquire, Ordering::Relaxed)
.unwrap_or_else(|x| x)
{
0 => Ok(FairMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}),
LOCKED => Err(LockRejectReason::Locked),
_ => Err(LockRejectReason::Starved),
}
}
/// Indicates that the current user has been waiting for the lock for a while
/// and that the lock should yield to this thread over a newly arriving thread.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// // Lock the mutex to simulate it being used by another user.
/// let guard1 = lock.lock();
///
/// // Try to lock the mutex.
/// let guard2 = lock.try_lock();
/// assert!(guard2.is_none());
///
/// // Wait for a while.
/// wait_for_a_while();
///
/// // We are now starved, indicate as such.
/// let starve = lock.starve();
///
/// // Once the lock is released, another user trying to lock it will
/// // fail.
/// drop(guard1);
/// let guard3 = lock.try_lock();
/// assert!(guard3.is_none());
///
/// // However, we will be able to lock it.
/// let guard4 = starve.try_lock();
/// assert!(guard4.is_ok());
///
/// # fn wait_for_a_while() {}
/// ```
pub fn starve(&self) -> Starvation<'_, T, R> {
// Add a new starver to the state.
if self.lock.fetch_add(STARVED, Ordering::Relaxed) > (core::isize::MAX - 1) as usize {
// In the event of a potential lock overflow, abort.
crate::abort();
}
Starvation { lock: self }
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`FairMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
/// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
/// such, this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::mutex::FairMutex::<_>::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
// We know statically that there are no other references to `self`, so
// there's no need to lock the inner mutex.
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for FairMutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct LockWrapper<'a, T: ?Sized + fmt::Debug>(Option<FairMutexGuard<'a, T>>);
impl<T: ?Sized + fmt::Debug> fmt::Debug for LockWrapper<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(guard) => fmt::Debug::fmt(guard, f),
None => f.write_str("<locked>"),
}
}
}
f.debug_struct("FairMutex")
.field("data", &LockWrapper(self.try_lock()))
.finish()
}
}
impl<T: ?Sized + Default, R> Default for FairMutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for FairMutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> FairMutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`FairMutex`].
///
/// ```
/// let mylock = spin::mutex::FairMutex::<_>::new(0);
///
/// let data: &mut i32 = spin::mutex::FairMutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
// Use ManuallyDrop to avoid stacked-borrow invalidation
let mut this = ManuallyDrop::new(this);
// We know statically that only we are referencing data
unsafe { &mut *this.data }
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for FairMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for FairMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for FairMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
// We know statically that only we are referencing data
unsafe { &*self.data }
}
}
impl<'a, T: ?Sized> DerefMut for FairMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
// We know statically that only we are referencing data
unsafe { &mut *self.data }
}
}
impl<'a, T: ?Sized> Drop for FairMutexGuard<'a, T> {
    /// Dropping this [`FairMutexGuard`] will release the lock it was created from.
fn drop(&mut self) {
self.lock.fetch_and(!LOCKED, Ordering::Release);
}
}
impl<'a, T: ?Sized, R> Starvation<'a, T, R> {
    /// Attempts to lock the mutex if we are the only starving user.
///
/// This allows another user to lock the mutex if they are starving as well.
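///
/// # Example
///
/// A minimal sketch (illustrative only): with the lock free and only one
/// registered starver, the fair attempt succeeds.
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// let starve = lock.starve();
/// assert!(starve.try_lock_fair().is_ok());
/// ```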
pub fn try_lock_fair(self) -> Result<FairMutexGuard<'a, T>, Self> {
// Try to lock the mutex.
if self
.lock
.lock
.compare_exchange(
STARVED,
STARVED | LOCKED,
Ordering::Acquire,
Ordering::Relaxed,
)
.is_ok()
{
// We are the only starving user, lock the mutex.
Ok(FairMutexGuard {
lock: &self.lock.lock,
data: self.lock.data.get(),
})
} else {
// Another user is starving, fail.
Err(self)
}
}
/// Attempts to lock the mutex.
///
    /// If the lock is currently held by another thread, this will return `Err` with the original
    /// `Starvation` handle.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// // Lock the mutex to simulate it being used by another user.
/// let guard1 = lock.lock();
///
/// // Try to lock the mutex.
/// let guard2 = lock.try_lock();
/// assert!(guard2.is_none());
///
/// // Wait for a while.
/// wait_for_a_while();
///
/// // We are now starved, indicate as such.
/// let starve = lock.starve();
///
/// // Once the lock is released, another user trying to lock it will
/// // fail.
/// drop(guard1);
/// let guard3 = lock.try_lock();
/// assert!(guard3.is_none());
///
/// // However, we will be able to lock it.
/// let guard4 = starve.try_lock();
/// assert!(guard4.is_ok());
///
/// # fn wait_for_a_while() {}
/// ```
pub fn try_lock(self) -> Result<FairMutexGuard<'a, T>, Self> {
// Try to lock the mutex.
if self.lock.lock.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
// We have successfully locked the mutex.
// By dropping `self` here, we decrement the starvation count.
Ok(FairMutexGuard {
lock: &self.lock.lock,
data: self.lock.data.get(),
})
} else {
Err(self)
}
}
}
impl<'a, T: ?Sized, R: RelaxStrategy> Starvation<'a, T, R> {
/// Locks the mutex.
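///
/// A minimal sketch (illustrative only): if the lock happens to be free, a
/// starving user acquires it immediately.
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(0);
///
/// let guard = lock.starve().lock();
/// assert_eq!(*guard, 0);
/// ```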
pub fn lock(mut self) -> FairMutexGuard<'a, T> {
// Try to lock the mutex.
loop {
match self.try_lock() {
Ok(lock) => return lock,
Err(starve) => self = starve,
}
// Relax until the lock is released.
while self.lock.is_locked() {
R::relax();
}
}
}
}
impl<'a, T: ?Sized, R> Drop for Starvation<'a, T, R> {
fn drop(&mut self) {
// As there is no longer a user being starved, we decrement the starver count.
self.lock.lock.fetch_sub(STARVED, Ordering::Release);
}
}
impl fmt::Display for LockRejectReason {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
LockRejectReason::Locked => write!(f, "locked"),
LockRejectReason::Starved => write!(f, "starved"),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for LockRejectReason {}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for FairMutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
Self::is_locked(self)
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
type FairMutex<T> = super::FairMutex<T>;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = FairMutex::<_>::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: FairMutex<()> = FairMutex::<_>::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe { CNT }, J * K * 2);
}
#[test]
fn try_lock() {
let mutex = FairMutex::<_>::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = FairMutex::<_>::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = FairMutex::<_>::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(FairMutex::<_>::new(1));
let arc2 = Arc::new(FairMutex::<_>::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(FairMutex::<_>::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<FairMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &FairMutex<[i32]> = &FairMutex::<_>::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutex_force_lock() {
let lock = FairMutex::<_>::new(());
::std::mem::forget(lock.lock());
unsafe {
lock.force_unlock();
}
assert!(lock.try_lock().is_some());
}
}

543
vendor/spin/src/mutex/spin.rs vendored Normal file
View File

@ -0,0 +1,543 @@
//! A naïve spinning mutex.
//!
//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
//! latency is theoretically infinite.
use crate::{
atomic::{AtomicBool, Ordering},
RelaxStrategy, Spin,
};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
};
/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// # let t =
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// # t.join().unwrap();
/// # }
/// ```
pub struct SpinMutex<T: ?Sized, R = Spin> {
phantom: PhantomData<R>,
pub(crate) lock: AtomicBool,
data: UnsafeCell<T>,
}
/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
lock: &'a AtomicBool,
data: *mut T,
}
// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Sync> Sync for SpinMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for SpinMutexGuard<'_, T> {}
impl<T, R> SpinMutex<T, R> {
/// Creates a new [`SpinMutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::mutex::SpinMutex;
///
/// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(data: T) -> Self {
SpinMutex {
lock: AtomicBool::new(false),
data: UnsafeCell::new(data),
phantom: PhantomData,
}
}
/// Consumes this [`SpinMutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let SpinMutex { data, .. } = self;
data.into_inner()
}
/// Returns a mutable pointer to the underlying data.
///
/// This is mostly meant to be used for applications which require manual unlocking, but where
/// storing both the lock and the pointer to the inner data gets inefficient.
///
/// # Example
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(42);
///
/// unsafe {
/// core::mem::forget(lock.lock());
///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
/// lock.force_unlock();
/// }
///
/// assert_eq!(*lock.lock(), 58);
///
/// ```
#[inline(always)]
pub fn as_mut_ptr(&self) -> *mut T {
self.data.get()
}
}
impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
/// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> SpinMutexGuard<T> {
// Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
// when called in a loop.
while self
.lock
.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
// Wait until the lock looks unlocked before retrying
while self.is_locked() {
R::relax();
}
}
SpinMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
}
impl<T: ?Sized, R> SpinMutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
#[inline(always)]
pub fn is_locked(&self) -> bool {
self.lock.load(Ordering::Relaxed)
}
/// Force unlock this [`SpinMutex`].
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.lock.store(false, Ordering::Release);
}
/// Try to lock this [`SpinMutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
// The reason for using a strong compare_exchange is explained here:
// https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
if self
.lock
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
Some(SpinMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
})
} else {
None
}
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
/// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
/// such, this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
// We know statically that there are no other references to `self`, so
// there's no need to lock the inner mutex.
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "Mutex {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Mutex {{ <locked> }}"),
}
}
}
impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for SpinMutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`SpinMutex`].
///
/// ```
/// let mylock = spin::mutex::SpinMutex::<_>::new(0);
///
/// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
// Use ManuallyDrop to avoid stacked-borrow invalidation
let mut this = ManuallyDrop::new(this);
// We know statically that only we are referencing data
unsafe { &mut *this.data }
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
// We know statically that only we are referencing data
unsafe { &*self.data }
}
}
impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
// We know statically that only we are referencing data
unsafe { &mut *self.data }
}
}
impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
    /// Dropping this [`SpinMutexGuard`] will release the lock it was created from.
fn drop(&mut self) {
self.lock.store(false, Ordering::Release);
}
}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
Self::is_locked(self)
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
type SpinMutex<T> = super::SpinMutex<T>;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = SpinMutex::<_>::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: SpinMutex<()> = SpinMutex::<_>::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
let mut ts = Vec::new();
for _ in 0..K {
let tx2 = tx.clone();
ts.push(thread::spawn(move || {
inc();
tx2.send(()).unwrap();
}));
let tx2 = tx.clone();
ts.push(thread::spawn(move || {
inc();
tx2.send(()).unwrap();
}));
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe { CNT }, J * K * 2);
for t in ts {
t.join().unwrap();
}
}
#[test]
fn try_lock() {
let mutex = SpinMutex::<_>::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = SpinMutex::<_>::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(SpinMutex::<_>::new(1));
let arc2 = Arc::new(SpinMutex::<_>::new(arc));
let (tx, rx) = channel();
let t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
t.join().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(SpinMutex::<_>::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<SpinMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutex_force_lock() {
let lock = SpinMutex::<_>::new(());
::std::mem::forget(lock.lock());
unsafe {
lock.force_unlock();
}
assert!(lock.try_lock().is_some());
}
}

537
vendor/spin/src/mutex/ticket.rs vendored Normal file
View File

@ -0,0 +1,537 @@
//! A ticket-based mutex.
//!
//! Waiting threads take a 'ticket' from the lock in the order they arrive and gain access to the lock when their
//! ticket is next in the queue. Best-case latency is slightly worse than a regular spinning mutex, but worst-case
//! latency is infinitely better. Waiting threads simply need to wait for all threads that come before them in the
//! queue to finish.
use crate::{
atomic::{AtomicUsize, Ordering},
RelaxStrategy, Spin,
};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
ops::{Deref, DerefMut},
};
/// A spin-based [ticket lock](https://en.wikipedia.org/wiki/Ticket_lock) providing mutually exclusive access to data.
///
/// A ticket lock is analogous to a queue management system for lock requests. When a thread tries to take a lock, it
/// is assigned a 'ticket'. It then spins until its ticket becomes next in line. When the lock guard is released, the
/// next ticket will be processed.
///
/// Ticket locks significantly reduce the worst-case performance of locking at the cost of slightly higher average-time
/// overhead.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::TicketMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::TicketMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
/// ```
pub struct TicketMutex<T: ?Sized, R = Spin> {
phantom: PhantomData<R>,
next_ticket: AtomicUsize,
next_serving: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard that protects some data.
///
/// When the guard is dropped, the next ticket will be processed.
pub struct TicketMutexGuard<'a, T: ?Sized + 'a> {
next_serving: &'a AtomicUsize,
ticket: usize,
data: &'a mut T,
}
unsafe impl<T: ?Sized + Send, R> Sync for TicketMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for TicketMutex<T, R> {}
impl<T, R> TicketMutex<T, R> {
/// Creates a new [`TicketMutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::mutex::TicketMutex;
///
/// static MUTEX: TicketMutex<()> = TicketMutex::<_>::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(data: T) -> Self {
Self {
phantom: PhantomData,
next_ticket: AtomicUsize::new(0),
next_serving: AtomicUsize::new(0),
data: UnsafeCell::new(data),
}
}
/// Consumes this [`TicketMutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
self.data.into_inner()
}
    /// Returns a mutable pointer to the underlying data.
///
/// This is mostly meant to be used for applications which require manual unlocking, but where
/// storing both the lock and the pointer to the inner data gets inefficient.
///
/// # Example
/// ```
    /// let lock = spin::mutex::TicketMutex::<_>::new(42);
///
/// unsafe {
/// core::mem::forget(lock.lock());
///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
/// lock.force_unlock();
/// }
///
/// assert_eq!(*lock.lock(), 58);
///
/// ```
#[inline(always)]
pub fn as_mut_ptr(&self) -> *mut T {
self.data.get()
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for TicketMutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "Mutex {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Mutex {{ <locked> }}"),
}
}
}
impl<T: ?Sized, R: RelaxStrategy> TicketMutex<T, R> {
/// Locks the [`TicketMutex`] and returns a guard that permits access to the inner data.
///
    /// The returned guard may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> TicketMutexGuard<T> {
let ticket = self.next_ticket.fetch_add(1, Ordering::Relaxed);
while self.next_serving.load(Ordering::Acquire) != ticket {
R::relax();
}
TicketMutexGuard {
next_serving: &self.next_serving,
ticket,
// Safety
// We know that we are the next ticket to be served,
// so there's no other thread accessing the data.
//
// Every other thread has another ticket number so it's
// definitely stuck in the spin loop above.
data: unsafe { &mut *self.data.get() },
}
}
}
impl<T: ?Sized, R> TicketMutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
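///
/// # Example
///
/// A minimal sketch (illustrative only), mirroring the `is_locked` unit test
/// at the bottom of this file.
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(());
/// assert!(!lock.is_locked());
///
/// let guard = lock.lock();
/// assert!(lock.is_locked());
///
/// drop(guard);
/// assert!(!lock.is_locked());
/// ```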
#[inline(always)]
pub fn is_locked(&self) -> bool {
let ticket = self.next_ticket.load(Ordering::Relaxed);
self.next_serving.load(Ordering::Relaxed) != ticket
}
/// Force unlock this [`TicketMutex`], by serving the next ticket.
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.next_serving.fetch_add(1, Ordering::Release);
}
/// Try to lock this [`TicketMutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<TicketMutexGuard<T>> {
let ticket = self
.next_ticket
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
if self.next_serving.load(Ordering::Acquire) == ticket {
Some(ticket + 1)
} else {
None
}
});
ticket.ok().map(|ticket| TicketMutexGuard {
next_serving: &self.next_serving,
ticket,
// Safety
// We have a ticket that is equal to the next_serving ticket, so we know:
// - that no other thread can have the same ticket id as this thread
// - that we are the next one to be served so we have exclusive access to the data
data: unsafe { &mut *self.data.get() },
})
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`TicketMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
/// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
/// such, this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::mutex::TicketMutex::<_>::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
// Safety:
// We know that there are no other references to `self`,
        // so it's safe to return an exclusive reference to the data.
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + Default, R> Default for TicketMutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for TicketMutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> TicketMutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`TicketMutex`].
///
/// ```
/// let mylock = spin::mutex::TicketMutex::<_>::new(0);
///
/// let data: &mut i32 = spin::mutex::TicketMutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
core::mem::forget(this);
unsafe { &mut *data }
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for TicketMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for TicketMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for TicketMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
self.data
}
}
impl<'a, T: ?Sized> DerefMut for TicketMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
impl<'a, T: ?Sized> Drop for TicketMutexGuard<'a, T> {
fn drop(&mut self) {
let new_ticket = self.ticket + 1;
self.next_serving.store(new_ticket, Ordering::Release);
}
}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for TicketMutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
Self::is_locked(self)
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
type TicketMutex<T> = super::TicketMutex<T>;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = TicketMutex::<_>::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: TicketMutex<()> = TicketMutex::<_>::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe { CNT }, J * K * 2);
}
#[test]
fn try_lock() {
let mutex = TicketMutex::<_>::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = TicketMutex::<_>::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = TicketMutex::<_>::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(TicketMutex::<_>::new(1));
let arc2 = Arc::new(TicketMutex::<_>::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(TicketMutex::<_>::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<TicketMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &TicketMutex<[i32]> = &TicketMutex::<_>::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn is_locked() {
let mutex = TicketMutex::<_>::new(());
assert!(!mutex.is_locked());
let lock = mutex.lock();
assert!(mutex.is_locked());
drop(lock);
assert!(!mutex.is_locked());
}
}

789
vendor/spin/src/once.rs vendored Normal file
View File

@ -0,0 +1,789 @@
//! Synchronization primitives for one-time evaluation.
use crate::{
atomic::{AtomicU8, Ordering},
RelaxStrategy, Spin,
};
use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};
/// A primitive that provides lazy one-time initialization.
///
/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
/// `Once`).
///
/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static START: spin::Once = spin::Once::new();
///
/// START.call_once(|| {
/// // run initialization here
/// });
/// ```
pub struct Once<T = (), R = Spin> {
phantom: PhantomData<R>,
status: AtomicStatus,
data: UnsafeCell<MaybeUninit<T>>,
}
impl<T, R> Default for Once<T, R> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.get() {
Some(s) => write!(f, "Once {{ data: ")
.and_then(|()| s.fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Once {{ <uninitialized> }}"),
}
}
}
// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
unsafe impl<T: Send, R> Send for Once<T, R> {}
mod status {
use super::*;
// SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
// a value for which there exists a valid Status. This means that users of this API must only
// be allowed to load and store `Status`es.
#[repr(transparent)]
pub struct AtomicStatus(AtomicU8);
// Four states that a Once can be in, encoded into the lower bits of `status` in
// the Once structure.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Status {
Incomplete = 0x00,
Running = 0x01,
Complete = 0x02,
Panicked = 0x03,
}
impl Status {
// Construct a status from an inner u8 integer.
//
// # Safety
//
// For this to be safe, the inner number must have a valid corresponding enum variant.
unsafe fn new_unchecked(inner: u8) -> Self {
core::mem::transmute(inner)
}
}
impl AtomicStatus {
#[inline(always)]
pub const fn new(status: Status) -> Self {
// SAFETY: We got the value directly from status, so transmuting back is fine.
Self(AtomicU8::new(status as u8))
}
#[inline(always)]
pub fn load(&self, ordering: Ordering) -> Status {
// SAFETY: We know that the inner integer must have been constructed from a Status in
// the first place.
unsafe { Status::new_unchecked(self.0.load(ordering)) }
}
#[inline(always)]
pub fn store(&self, status: Status, ordering: Ordering) {
// SAFETY: While not directly unsafe, this is safe because the value was retrieved from
// a status, thus making transmutation safe.
self.0.store(status as u8, ordering);
}
#[inline(always)]
pub fn compare_exchange(
&self,
old: Status,
new: Status,
success: Ordering,
failure: Ordering,
) -> Result<Status, Status> {
match self
.0
.compare_exchange(old as u8, new as u8, success, failure)
{
// SAFETY: A compare exchange will always return a value that was later stored into
// the atomic u8, but due to the invariant that it must be a valid Status, we know
// that both Ok(_) and Err(_) will be safely transmutable.
Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
Err(err) => Err(unsafe { Status::new_unchecked(err) }),
}
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut Status {
// SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
// it to a &mut Status.
unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
}
}
}
use self::status::{AtomicStatus, Status};
impl<T, R: RelaxStrategy> Once<T, R> {
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
///
/// This method will block the calling thread if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified). The
/// returned pointer will point to the result from the closure that was
/// run.
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// fn get_cached_val() -> usize {
/// *INIT.call_once(expensive_computation)
/// }
///
/// fn expensive_computation() -> usize {
/// // ...
/// # 2
/// }
/// ```
pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
Ok(x) => x,
Err(void) => match void {},
}
}
/// This method is similar to `call_once`, but allows the given closure to
    /// fail, and leaves the `Once` in an uninitialized state if it does.
///
/// This method will block the calling thread if another initialization
/// routine is currently running.
///
/// When this function returns without error, it is guaranteed that some
/// initialization has run and completed (it may not be the closure
/// specified). The returned reference will point to the result from the
/// closure that was run.
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// fn get_cached_val() -> Result<usize, String> {
/// INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
/// }
///
/// fn expensive_fallible_computation() -> Result<usize, String> {
/// // ...
/// # Ok(2)
/// }
/// ```
pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
if let Some(value) = self.get() {
Ok(value)
} else {
self.try_call_once_slow(f)
}
}
#[cold]
fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
loop {
let xchg = self.status.compare_exchange(
Status::Incomplete,
Status::Running,
Ordering::Acquire,
Ordering::Acquire,
);
match xchg {
Ok(_must_be_state_incomplete) => {
// Impl is defined after the match for readability
}
Err(Status::Panicked) => panic!("Once panicked"),
Err(Status::Running) => match self.poll() {
Some(v) => return Ok(v),
None => continue,
},
Err(Status::Complete) => {
return Ok(unsafe {
// SAFETY: The status is Complete
self.force_get()
});
}
Err(Status::Incomplete) => {
// The compare_exchange failed, so this shouldn't ever be reached,
// however if we decide to switch to compare_exchange_weak it will
// be safer to leave this here than hit an unreachable
continue;
}
}
// The compare-exchange succeeded, so we shall initialize it.
// We use a guard (Finish) to catch panics caused by builder
let finish = Finish {
status: &self.status,
};
let val = match f() {
Ok(val) => val,
Err(err) => {
// If an error occurs, clean up everything and leave.
core::mem::forget(finish);
self.status.store(Status::Incomplete, Ordering::Release);
return Err(err);
}
};
unsafe {
// SAFETY:
// `UnsafeCell`/deref: currently the only accessor, mutably
// and immutably by cas exclusion.
// `write`: pointer comes from `MaybeUninit`.
(*self.data.get()).as_mut_ptr().write(val);
};
// If there were to be a panic with unwind enabled, the code would
// short-circuit and never reach the point where it writes the inner data.
// The destructor for Finish will run, and poison the Once to ensure that other
// threads accessing it do not exhibit unwanted behavior, if there were to be
// any inconsistency in data structures caused by the panicking thread.
//
// However, f() is expected in the general case not to panic. In that case, we
// simply forget the guard, bypassing its destructor. We could theoretically
// clear a flag instead, but this eliminates the call to the destructor at
// compile time, and unconditionally poisons during an eventual panic, if
// unwinding is enabled.
core::mem::forget(finish);
// SAFETY: Release is required here, so that all memory accesses done in the
// closure when initializing, become visible to other threads that perform Acquire
// loads.
//
// And, we also know that the changes this thread has done will not magically
// disappear from our cache, so it does not need to be AcqRel.
self.status.store(Status::Complete, Ordering::Release);
// This next line is mainly an optimization.
return unsafe { Ok(self.force_get()) };
}
}
/// Spins until the [`Once`] contains a value.
///
/// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
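///
/// # Examples
///
/// A minimal sketch (illustrative only); in practice the initialization
/// would typically happen on another thread while this one waits.
///
/// ```
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// INIT.call_once(|| 42);
/// assert_eq!(*INIT.wait(), 42);
/// ```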
pub fn wait(&self) -> &T {
loop {
match self.poll() {
Some(x) => break x,
None => R::relax(),
}
}
}
/// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
/// initialized. If initialization has not even begun, `None` will be returned.
///
/// Note that in releases prior to `0.7`, this function was named `wait`.
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
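///
/// # Examples
///
/// A minimal sketch (illustrative only).
///
/// ```
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// // Initialization has not begun, so `poll` returns `None`.
/// assert!(INIT.poll().is_none());
///
/// INIT.call_once(|| 42);
/// assert_eq!(INIT.poll().copied(), Some(42));
/// ```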
pub fn poll(&self) -> Option<&T> {
loop {
// SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make
// sure that all memory accessed done while initializing that value, are visible when
// we return a reference to the inner data after this load.
match self.status.load(Ordering::Acquire) {
Status::Incomplete => return None,
Status::Running => R::relax(), // We spin
Status::Complete => return Some(unsafe { self.force_get() }),
Status::Panicked => panic!("Once previously poisoned by a panicked initializer"),
}
}
}
}
impl<T, R> Once<T, R> {
/// Initialization constant of [`Once`].
#[allow(clippy::declare_interior_mutable_const)]
pub const INIT: Self = Self {
phantom: PhantomData,
status: AtomicStatus::new(Status::Incomplete),
data: UnsafeCell::new(MaybeUninit::uninit()),
};
/// Creates a new [`Once`].
pub const fn new() -> Self {
Self::INIT
}
/// Creates a new initialized [`Once`].
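///
/// A sketch of pre-seeding a value so that readers never need to run an
/// initializer (assumes the crate-root `spin::Once` re-export):
///
/// ```
/// static ANSWER: spin::Once<u32> = spin::Once::initialized(42);
///
/// assert_eq!(ANSWER.get(), Some(&42));
/// assert!(ANSWER.is_completed());
/// ```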
pub const fn initialized(data: T) -> Self {
Self {
phantom: PhantomData,
status: AtomicStatus::new(Status::Complete),
data: UnsafeCell::new(MaybeUninit::new(data)),
}
}
/// Retrieve a pointer to the inner data.
///
/// While this method itself is safe, accessing the pointer before the [`Once`] has been
/// initialized is UB, unless the cell has already been written to through a pointer
/// obtained from this method.
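///
/// A sketch of the intended pattern, reading through the pointer only once the
/// cell is known to be initialized (assumes the crate-root `spin::Once`
/// re-export):
///
/// ```
/// let once: spin::Once<u32> = spin::Once::initialized(7);
/// let ptr = once.as_mut_ptr();
/// // SAFETY: the `Once` was constructed in the initialized state above.
/// assert_eq!(unsafe { *ptr }, 7);
/// ```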
pub fn as_mut_ptr(&self) -> *mut T {
// SAFETY:
// * MaybeUninit<T> always has exactly the same layout as T
self.data.get().cast::<T>()
}
/// Get a reference to the initialized instance. Must only be called once the status is COMPLETE.
unsafe fn force_get(&self) -> &T {
// SAFETY:
// * `UnsafeCell`/inner deref: data never changes again
// * `MaybeUninit`/outer deref: data was initialized
&*(*self.data.get()).as_ptr()
}
/// Get a mutable reference to the initialized instance. Must only be called once the status is COMPLETE.
unsafe fn force_get_mut(&mut self) -> &mut T {
// SAFETY:
// * `UnsafeCell`/inner deref: data never changes again
// * `MaybeUninit`/outer deref: data was initialized
&mut *(*self.data.get()).as_mut_ptr()
}
/// Extract the inner value. Must only be called once the status is COMPLETE.
unsafe fn force_into_inner(self) -> T {
// SAFETY:
// * `UnsafeCell`/inner deref: data never changes again
// * `MaybeUninit`/outer deref: data was initialized
(*self.data.get()).as_ptr().read()
}
/// Returns a reference to the inner value if the [`Once`] has been initialized.
pub fn get(&self) -> Option<&T> {
// SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
// nonatomic stores done when initializing, once we have loaded and checked the status.
match self.status.load(Ordering::Acquire) {
Status::Complete => Some(unsafe { self.force_get() }),
_ => None,
}
}
/// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
///
/// # Safety
///
/// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
/// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
/// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
/// checking initialization is unacceptable and the `Once` has already been initialized.
pub unsafe fn get_unchecked(&self) -> &T {
debug_assert_eq!(
self.status.load(Ordering::SeqCst),
Status::Complete,
"Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
);
self.force_get()
}
/// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
///
/// Because this method requires a mutable reference to the [`Once`], no synchronization
/// overhead is required to access the inner value. In effect, it is zero-cost.
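///
/// A sketch of in-place mutation through exclusive access (assumes the
/// crate-root `spin::Once` re-export):
///
/// ```
/// let mut once: spin::Once<u32> = spin::Once::new();
/// once.call_once(|| 1);
/// if let Some(value) = once.get_mut() {
///     *value += 1;
/// }
/// assert_eq!(once.get(), Some(&2));
/// ```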
pub fn get_mut(&mut self) -> Option<&mut T> {
match *self.status.get_mut() {
Status::Complete => Some(unsafe { self.force_get_mut() }),
_ => None,
}
}
/// Returns a mutable reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
///
/// # Safety
///
/// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
/// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
/// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
/// checking initialization is unacceptable and the `Once` has already been initialized.
pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
debug_assert_eq!(
self.status.load(Ordering::SeqCst),
Status::Complete,
"Attempted to access an unintialized Once. If this was to run without debug checks, this would be undefined behavior. This is a serious bug and you must fix it.",
);
self.force_get_mut()
}
/// Returns the inner value if the [`Once`] has been initialized.
///
/// Because this method requires ownership of the [`Once`], no synchronization overhead
/// is required to access the inner value. In effect, it is zero-cost.
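///
/// A sketch of consuming the cell to recover the value (assumes the crate-root
/// `spin::Once` re-export):
///
/// ```
/// let once: spin::Once<String> = spin::Once::new();
/// once.call_once(|| String::from("hello"));
/// assert_eq!(once.try_into_inner(), Some(String::from("hello")));
/// ```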
pub fn try_into_inner(mut self) -> Option<T> {
match *self.status.get_mut() {
Status::Complete => Some(unsafe { self.force_into_inner() }),
_ => None,
}
}
/// Returns the inner value on the unchecked assumption that the [`Once`] has been initialized.
///
/// # Safety
///
/// This is *extremely* unsafe if the `Once` has not already been initialized because uninitialized
/// memory will be read and returned, immediately triggering undefined behaviour (even if the value goes unused).
/// This can be useful if the `Once` has already been initialized and you want to bypass an
/// `Option` check.
pub unsafe fn into_inner_unchecked(self) -> T {
debug_assert_eq!(
self.status.load(Ordering::SeqCst),
Status::Complete,
"Attempted to access an unintialized Once. If this was to run without debug checks, this would be undefined behavior. This is a serious bug and you must fix it.",
);
self.force_into_inner()
}
/// Checks whether the value has been initialized.
///
/// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
/// therefore it is safe to access the value directly via
/// [`get_unchecked`](Self::get_unchecked) if this returns true.
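///
/// A small sketch tying this check to [`get_unchecked`](Self::get_unchecked)
/// (assumes the crate-root `spin::Once` re-export):
///
/// ```
/// static FLAG: spin::Once<bool> = spin::Once::new();
///
/// assert!(!FLAG.is_completed());
/// FLAG.call_once(|| true);
/// if FLAG.is_completed() {
///     // SAFETY: the Acquire load in `is_completed` observed `Complete`.
///     assert!(unsafe { *FLAG.get_unchecked() });
/// }
/// ```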
pub fn is_completed(&self) -> bool {
// TODO: Add a similar variant for Relaxed?
self.status.load(Ordering::Acquire) == Status::Complete
}
}
impl<T, R> From<T> for Once<T, R> {
fn from(data: T) -> Self {
Self::initialized(data)
}
}
impl<T, R> Drop for Once<T, R> {
fn drop(&mut self) {
// No need to do any atomic access here, we have &mut!
if *self.status.get_mut() == Status::Complete {
unsafe {
//TODO: Use MaybeUninit::assume_init_drop once stabilised
core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
}
}
}
}
struct Finish<'a> {
status: &'a AtomicStatus,
}
impl<'a> Drop for Finish<'a> {
fn drop(&mut self) {
// While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
// This is mainly because panics are not meant to be fast at all, but also because if
// there were to be a compiler bug which reorders accesses within the same thread,
// where it should not, we want to be sure that the panic really is handled, and does
// not cause additional problems. SeqCst will therefore help guard against such
// bugs.
self.status.store(Status::Panicked, Ordering::SeqCst);
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::AtomicU32;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use super::*;
#[test]
fn smoke_once() {
static O: Once = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
#[test]
fn smoke_once_value() {
static O: Once<usize> = Once::new();
let a = O.call_once(|| 1);
assert_eq!(*a, 1);
let b = O.call_once(|| 2);
assert_eq!(*b, 1);
}
#[test]
fn stampede_once() {
static O: Once = Once::new();
static mut RUN: bool = false;
let (tx, rx) = channel();
let mut ts = Vec::new();
for _ in 0..10 {
let tx = tx.clone();
ts.push(thread::spawn(move || {
for _ in 0..4 {
thread::yield_now()
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
tx.send(()).unwrap();
}));
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
for _ in 0..10 {
rx.recv().unwrap();
}
for t in ts {
t.join().unwrap();
}
}
#[test]
fn get() {
static INIT: Once<usize> = Once::new();
assert!(INIT.get().is_none());
INIT.call_once(|| 2);
assert_eq!(INIT.get().map(|r| *r), Some(2));
}
#[test]
fn get_no_wait() {
static INIT: Once<usize> = Once::new();
assert!(INIT.get().is_none());
let t = thread::spawn(move || {
INIT.call_once(|| {
thread::sleep(std::time::Duration::from_secs(3));
42
});
});
assert!(INIT.get().is_none());
t.join().unwrap();
}
#[test]
fn poll() {
static INIT: Once<usize> = Once::new();
assert!(INIT.poll().is_none());
INIT.call_once(|| 3);
assert_eq!(INIT.poll().map(|r| *r), Some(3));
}
#[test]
fn wait() {
static INIT: Once<usize> = Once::new();
let t = std::thread::spawn(|| {
assert_eq!(*INIT.wait(), 3);
assert!(INIT.is_completed());
});
for _ in 0..4 {
thread::yield_now()
}
assert!(INIT.poll().is_none());
INIT.call_once(|| 3);
t.join().unwrap();
}
#[test]
fn panic() {
use std::panic;
static INIT: Once = Once::new();
// poison the once
let t = panic::catch_unwind(|| {
INIT.call_once(|| panic!());
});
assert!(t.is_err());
// poisoning propagates
let t = panic::catch_unwind(|| {
INIT.call_once(|| {});
});
assert!(t.is_err());
}
#[test]
fn init_constant() {
static O: Once = Once::INIT;
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
static mut CALLED: bool = false;
struct DropTest {}
impl Drop for DropTest {
fn drop(&mut self) {
unsafe {
CALLED = true;
}
}
}
#[test]
fn try_call_once_err() {
let once = Once::<_, Spin>::new();
let shared = Arc::new((once, AtomicU32::new(0)));
let (tx, rx) = channel();
let t0 = {
let shared = shared.clone();
thread::spawn(move || {
let (once, called) = &*shared;
once.try_call_once(|| {
called.fetch_add(1, Ordering::AcqRel);
tx.send(()).unwrap();
thread::sleep(std::time::Duration::from_millis(50));
Err(())
})
.ok();
})
};
let t1 = {
let shared = shared.clone();
thread::spawn(move || {
rx.recv().unwrap();
let (once, called) = &*shared;
assert_eq!(
called.load(Ordering::Acquire),
1,
"leader thread did not run first"
);
once.call_once(|| {
called.fetch_add(1, Ordering::AcqRel);
});
})
};
t0.join().unwrap();
t1.join().unwrap();
assert_eq!(shared.1.load(Ordering::Acquire), 2);
}
// This is sort of two test cases, but if we write them as separate test methods
// they can be executed concurrently and then fail some small fraction of the
// time.
#[test]
fn drop_occurs_and_skip_uninit_drop() {
unsafe {
CALLED = false;
}
{
let once = Once::<_>::new();
once.call_once(|| DropTest {});
}
assert!(unsafe { CALLED });
// Now test that we skip drops for the uninitialized case.
unsafe {
CALLED = false;
}
let once = Once::<DropTest>::new();
drop(once);
assert!(unsafe { !CALLED });
}
#[test]
fn call_once_test() {
for _ in 0..20 {
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration;
let share = Arc::new(AtomicUsize::new(0));
let once = Arc::new(Once::<_, Spin>::new());
let mut hs = Vec::new();
for _ in 0..8 {
let h = thread::spawn({
let share = share.clone();
let once = once.clone();
move || {
thread::sleep(Duration::from_millis(10));
once.call_once(|| {
share.fetch_add(1, Ordering::SeqCst);
});
}
});
hs.push(h);
}
for h in hs {
h.join().unwrap();
}
assert_eq!(1, share.load(Ordering::SeqCst));
}
}
}

61
vendor/spin/src/relax.rs vendored Normal file
View File

@ -0,0 +1,61 @@
//! Strategies that determine the behaviour of locks when encountering contention.
/// A trait implemented by spinning relax strategies.
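///
/// A sketch of a user-defined strategy; `HintBurst` is purely illustrative,
/// and the `spin::relax`/`spin::mutex::SpinMutex` paths are assumed to be the
/// crate's public ones:
///
/// ```
/// use spin::relax::RelaxStrategy;
///
/// // A hypothetical strategy issuing a short burst of spin hints per round.
/// struct HintBurst;
///
/// impl RelaxStrategy for HintBurst {
///     fn relax() {
///         for _ in 0..16 {
///             core::hint::spin_loop();
///         }
///     }
/// }
///
/// // Any lock generic over `RelaxStrategy` can then be parameterised with it.
/// let lock = spin::mutex::SpinMutex::<u32, HintBurst>::new(0);
/// *lock.lock() += 1;
/// assert_eq!(*lock.lock(), 1);
/// ```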
pub trait RelaxStrategy {
/// Perform the relaxing operation during a period of contention.
fn relax();
}
/// A strategy that rapidly spins while informing the CPU that it should power down non-essential components via
/// [`core::hint::spin_loop`].
///
/// Note that spinning is a 'dumb' strategy and most schedulers cannot correctly differentiate it from useful work,
/// thereby misallocating even more CPU time to the spinning process. This is known as
/// ['priority inversion'](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html).
///
/// If you see signs that priority inversion is occurring, consider switching to [`Yield`] or, even better, not using a
/// spinlock at all and opting for a proper scheduler-aware lock. Remember also that different targets, operating
/// systems, schedulers, and even the same scheduler with different workloads will exhibit different behaviour. Just
/// because priority inversion isn't occurring in your tests does not mean that it will not occur. Use a scheduler-
/// aware lock if at all possible.
pub struct Spin;
impl RelaxStrategy for Spin {
#[inline(always)]
fn relax() {
// Use the deprecated spin_loop_hint() to ensure that we don't get
// a higher MSRV than we need to.
#[allow(deprecated)]
core::sync::atomic::spin_loop_hint();
}
}
/// A strategy that yields the current time slice to the scheduler in favour of other threads or processes.
///
/// This is generally used as a strategy for minimising power consumption and priority inversion on targets that have a
/// standard library available. Note that such targets have scheduler-integrated concurrency primitives available, and
/// you should generally use these instead, except in rare circumstances.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct Yield;
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl RelaxStrategy for Yield {
#[inline(always)]
fn relax() {
std::thread::yield_now();
}
}
/// A strategy that rapidly spins, without telling the CPU to do any powering down.
///
/// You almost certainly do not want to use this. Use [`Spin`] instead. It exists for completeness and for targets
/// that, for some reason, miscompile or do not support spin hint intrinsics despite attempting to generate code for
/// them (i.e. this is a workaround for possible compiler bugs).
pub struct Loop;
impl RelaxStrategy for Loop {
#[inline(always)]
fn relax() {}
}

1165
vendor/spin/src/rwlock.rs vendored Normal file

File diff suppressed because it is too large