From d9b1e3429450a64b490f68c08bd191417e68f00c Mon Sep 17 00:00:00 2001 From: Stjepan Glavina Date: Mon, 5 Nov 2018 01:12:49 +0100 Subject: [PATCH] Merge remaining subcrates --- .gitignore | 4 +- .travis.yml | 15 +- Cargo.toml | 4 + README.md | 3 - ci/script.sh | 20 +- crossbeam-channel/CHANGELOG.md | 93 + crossbeam-channel/Cargo.toml | 27 + crossbeam-channel/LICENSE-APACHE | 201 ++ crossbeam-channel/LICENSE-MIT | 25 + crossbeam-channel/README.md | 38 + crossbeam-channel/benchmarks/Cargo.toml | 64 + crossbeam-channel/benchmarks/README.md | 43 + crossbeam-channel/benchmarks/atomicring.rs | 141 + .../benchmarks/atomicringqueue.rs | 124 + crossbeam-channel/benchmarks/bus.rs | 60 + crossbeam-channel/benchmarks/chan.rs | 200 ++ .../benchmarks/crossbeam-channel.rs | 186 ++ .../benchmarks/crossbeam-deque.rs | 68 + .../benchmarks/futures-channel.rs | 179 ++ crossbeam-channel/benchmarks/go.go | 200 ++ crossbeam-channel/benchmarks/mpmc.rs | 141 + crossbeam-channel/benchmarks/mpsc.rs | 201 ++ crossbeam-channel/benchmarks/msqueue.rs | 117 + crossbeam-channel/benchmarks/plot.py | 120 + crossbeam-channel/benchmarks/run.sh | 12 + crossbeam-channel/benchmarks/segqueue.rs | 117 + crossbeam-channel/benchmarks/shared.rs | 43 + crossbeam-channel/ci/script.sh | 19 + crossbeam-channel/examples/fibonacci.rs | 27 + crossbeam-channel/examples/matching.rs | 73 + crossbeam-channel/examples/stopwatch.rs | 54 + crossbeam-channel/src/channel.rs | 1381 +++++++++ crossbeam-channel/src/context.rs | 187 ++ crossbeam-channel/src/err.rs | 367 +++ crossbeam-channel/src/flavors/after.rs | 212 ++ crossbeam-channel/src/flavors/array.rs | 638 ++++ crossbeam-channel/src/flavors/list.rs | 594 ++++ crossbeam-channel/src/flavors/mod.rs | 17 + crossbeam-channel/src/flavors/never.rs | 114 + crossbeam-channel/src/flavors/tick.rs | 199 ++ crossbeam-channel/src/flavors/zero.rs | 540 ++++ crossbeam-channel/src/lib.rs | 374 +++ crossbeam-channel/src/select.rs | 813 +++++ crossbeam-channel/src/select_macro.rs | 1163 +++++++ crossbeam-channel/src/utils.rs | 107 + crossbeam-channel/src/waker.rs | 251 ++ crossbeam-channel/tests/after.rs | 331 ++ crossbeam-channel/tests/array.rs | 557 ++++ crossbeam-channel/tests/golang.rs | 1053 +++++++ crossbeam-channel/tests/iter.rs | 110 + crossbeam-channel/tests/list.rs | 438 +++ crossbeam-channel/tests/mpsc.rs | 1893 ++++++++++++ crossbeam-channel/tests/never.rs | 101 + crossbeam-channel/tests/select.rs | 1287 ++++++++ crossbeam-channel/tests/select_macro.rs | 1415 +++++++++ crossbeam-channel/tests/thread_locals.rs | 53 + crossbeam-channel/tests/tick.rs | 291 ++ crossbeam-channel/tests/zero.rs | 457 +++ crossbeam-deque/CHANGELOG.md | 63 + crossbeam-deque/Cargo.toml | 26 + crossbeam-deque/LICENSE-APACHE | 201 ++ crossbeam-deque/LICENSE-MIT | 25 + crossbeam-deque/README.md | 33 + crossbeam-deque/ci/script.sh | 16 + crossbeam-deque/src/lib.rs | 859 ++++++ crossbeam-deque/tests/fifo.rs | 375 +++ crossbeam-deque/tests/lifo.rs | 375 +++ crossbeam-epoch/CHANGELOG.md | 62 + crossbeam-epoch/Cargo.toml | 33 + crossbeam-epoch/LICENSE-APACHE | 201 ++ crossbeam-epoch/LICENSE-MIT | 25 + crossbeam-epoch/README.md | 39 + crossbeam-epoch/benches/defer.rs | 73 + crossbeam-epoch/benches/flush.rs | 51 + crossbeam-epoch/benches/pin.rs | 31 + crossbeam-epoch/ci/script.sh | 31 + crossbeam-epoch/examples/sanitize.rs | 70 + crossbeam-epoch/src/atomic.rs | 1124 +++++++ crossbeam-epoch/src/collector.rs | 428 +++ crossbeam-epoch/src/default.rs | 73 + crossbeam-epoch/src/deferred.rs | 134 + crossbeam-epoch/src/epoch.rs | 106 + 
crossbeam-epoch/src/guard.rs | 547 ++++ crossbeam-epoch/src/internal.rs | 543 ++++ crossbeam-epoch/src/lib.rs | 99 + crossbeam-epoch/src/sync/list.rs | 473 +++ crossbeam-epoch/src/sync/mod.rs | 4 + crossbeam-epoch/src/sync/queue.rs | 428 +++ crossbeam-skiplist/CHANGELOG.md | 3 + crossbeam-skiplist/Cargo.toml | 30 + crossbeam-skiplist/LICENSE-APACHE | 201 ++ crossbeam-skiplist/LICENSE-MIT | 25 + crossbeam-skiplist/README.md | 35 + crossbeam-skiplist/benches/btree.rs | 94 + crossbeam-skiplist/benches/hash.rs | 77 + crossbeam-skiplist/benches/skiplist.rs | 101 + crossbeam-skiplist/benches/skipmap.rs | 94 + crossbeam-skiplist/ci/script.sh | 21 + crossbeam-skiplist/examples/simple.rs | 27 + crossbeam-skiplist/src/base.rs | 2683 +++++++++++++++++ crossbeam-skiplist/src/lib.rs | 45 + crossbeam-skiplist/src/map.rs | 679 +++++ crossbeam-skiplist/src/set.rs | 397 +++ crossbeam-utils/README.md | 3 - crossbeam-utils/ci/script.sh | 14 +- src/bin/bench.rs | 170 -- src/bin/extra_impls/mod.rs | 1 - src/bin/extra_impls/mpsc_queue.rs | 159 - src/bin/stress-msq.rs | 38 - 109 files changed, 28622 insertions(+), 385 deletions(-) create mode 100644 crossbeam-channel/CHANGELOG.md create mode 100644 crossbeam-channel/Cargo.toml create mode 100644 crossbeam-channel/LICENSE-APACHE create mode 100644 crossbeam-channel/LICENSE-MIT create mode 100644 crossbeam-channel/README.md create mode 100644 crossbeam-channel/benchmarks/Cargo.toml create mode 100644 crossbeam-channel/benchmarks/README.md create mode 100644 crossbeam-channel/benchmarks/atomicring.rs create mode 100644 crossbeam-channel/benchmarks/atomicringqueue.rs create mode 100644 crossbeam-channel/benchmarks/bus.rs create mode 100644 crossbeam-channel/benchmarks/chan.rs create mode 100644 crossbeam-channel/benchmarks/crossbeam-channel.rs create mode 100644 crossbeam-channel/benchmarks/crossbeam-deque.rs create mode 100644 crossbeam-channel/benchmarks/futures-channel.rs create mode 100644 crossbeam-channel/benchmarks/go.go create mode 100644 crossbeam-channel/benchmarks/mpmc.rs create mode 100644 crossbeam-channel/benchmarks/mpsc.rs create mode 100644 crossbeam-channel/benchmarks/msqueue.rs create mode 100755 crossbeam-channel/benchmarks/plot.py create mode 100755 crossbeam-channel/benchmarks/run.sh create mode 100644 crossbeam-channel/benchmarks/segqueue.rs create mode 100644 crossbeam-channel/benchmarks/shared.rs create mode 100755 crossbeam-channel/ci/script.sh create mode 100644 crossbeam-channel/examples/fibonacci.rs create mode 100644 crossbeam-channel/examples/matching.rs create mode 100644 crossbeam-channel/examples/stopwatch.rs create mode 100644 crossbeam-channel/src/channel.rs create mode 100644 crossbeam-channel/src/context.rs create mode 100644 crossbeam-channel/src/err.rs create mode 100644 crossbeam-channel/src/flavors/after.rs create mode 100644 crossbeam-channel/src/flavors/array.rs create mode 100644 crossbeam-channel/src/flavors/list.rs create mode 100644 crossbeam-channel/src/flavors/mod.rs create mode 100644 crossbeam-channel/src/flavors/never.rs create mode 100644 crossbeam-channel/src/flavors/tick.rs create mode 100644 crossbeam-channel/src/flavors/zero.rs create mode 100644 crossbeam-channel/src/lib.rs create mode 100644 crossbeam-channel/src/select.rs create mode 100644 crossbeam-channel/src/select_macro.rs create mode 100644 crossbeam-channel/src/utils.rs create mode 100644 crossbeam-channel/src/waker.rs create mode 100644 crossbeam-channel/tests/after.rs create mode 100644 crossbeam-channel/tests/array.rs create mode 100644 
crossbeam-channel/tests/golang.rs create mode 100644 crossbeam-channel/tests/iter.rs create mode 100644 crossbeam-channel/tests/list.rs create mode 100644 crossbeam-channel/tests/mpsc.rs create mode 100644 crossbeam-channel/tests/never.rs create mode 100644 crossbeam-channel/tests/select.rs create mode 100644 crossbeam-channel/tests/select_macro.rs create mode 100644 crossbeam-channel/tests/thread_locals.rs create mode 100644 crossbeam-channel/tests/tick.rs create mode 100644 crossbeam-channel/tests/zero.rs create mode 100644 crossbeam-deque/CHANGELOG.md create mode 100644 crossbeam-deque/Cargo.toml create mode 100644 crossbeam-deque/LICENSE-APACHE create mode 100644 crossbeam-deque/LICENSE-MIT create mode 100644 crossbeam-deque/README.md create mode 100755 crossbeam-deque/ci/script.sh create mode 100644 crossbeam-deque/src/lib.rs create mode 100644 crossbeam-deque/tests/fifo.rs create mode 100644 crossbeam-deque/tests/lifo.rs create mode 100644 crossbeam-epoch/CHANGELOG.md create mode 100644 crossbeam-epoch/Cargo.toml create mode 100644 crossbeam-epoch/LICENSE-APACHE create mode 100644 crossbeam-epoch/LICENSE-MIT create mode 100644 crossbeam-epoch/README.md create mode 100644 crossbeam-epoch/benches/defer.rs create mode 100644 crossbeam-epoch/benches/flush.rs create mode 100644 crossbeam-epoch/benches/pin.rs create mode 100755 crossbeam-epoch/ci/script.sh create mode 100644 crossbeam-epoch/examples/sanitize.rs create mode 100644 crossbeam-epoch/src/atomic.rs create mode 100644 crossbeam-epoch/src/collector.rs create mode 100644 crossbeam-epoch/src/default.rs create mode 100644 crossbeam-epoch/src/deferred.rs create mode 100644 crossbeam-epoch/src/epoch.rs create mode 100644 crossbeam-epoch/src/guard.rs create mode 100644 crossbeam-epoch/src/internal.rs create mode 100644 crossbeam-epoch/src/lib.rs create mode 100644 crossbeam-epoch/src/sync/list.rs create mode 100644 crossbeam-epoch/src/sync/mod.rs create mode 100644 crossbeam-epoch/src/sync/queue.rs create mode 100644 crossbeam-skiplist/CHANGELOG.md create mode 100644 crossbeam-skiplist/Cargo.toml create mode 100644 crossbeam-skiplist/LICENSE-APACHE create mode 100644 crossbeam-skiplist/LICENSE-MIT create mode 100644 crossbeam-skiplist/README.md create mode 100644 crossbeam-skiplist/benches/btree.rs create mode 100644 crossbeam-skiplist/benches/hash.rs create mode 100644 crossbeam-skiplist/benches/skiplist.rs create mode 100644 crossbeam-skiplist/benches/skipmap.rs create mode 100755 crossbeam-skiplist/ci/script.sh create mode 100644 crossbeam-skiplist/examples/simple.rs create mode 100644 crossbeam-skiplist/src/base.rs create mode 100644 crossbeam-skiplist/src/lib.rs create mode 100644 crossbeam-skiplist/src/map.rs create mode 100644 crossbeam-skiplist/src/set.rs delete mode 100644 src/bin/bench.rs delete mode 100644 src/bin/extra_impls/mod.rs delete mode 100644 src/bin/extra_impls/mpsc_queue.rs delete mode 100644 src/bin/stress-msq.rs diff --git a/.gitignore b/.gitignore index ca98cd96e..6a56a4f7d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ -/target/ +/crossbeam-channel/benchmarks/*.txt +/crossbeam-channel/benchmarks/*.png +target/ Cargo.lock diff --git a/.travis.yml b/.travis.yml index 2603f4521..e7c1ddc09 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,16 +1,19 @@ language: rust +addons: + apt: + sources: + - ubuntu-toolchain-r-test + - llvm-toolchain-precise + - llvm-toolchain-precise-3.8 + rust: + - 1.25.0 + - 1.26.0 - stable - beta - nightly - - 1.26.0 - -install: - - cargo install cargo-tree script: - # 
TODO(stjepang): Uncomment the following line once we fix the dependency tree - # - (cargo tree --duplicate | grep ^crossbeam) && exit 1 - ./ci/script.sh - (cd crossbeam-utils && ./ci/script.sh) diff --git a/Cargo.toml b/Cargo.toml index 9a6198399..f0585eda9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,10 @@ rand = "0.5" [workspace] members = [ ".", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-skiplist", "crossbeam-utils", ] diff --git a/README.md b/README.md index 8575a9de0..e1813ad76 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,10 @@ [![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( https://travis-ci.org/crossbeam-rs/crossbeam) - [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( https://github.com/crossbeam-rs/crossbeam) - [![Cargo](https://img.shields.io/crates/v/crossbeam.svg)]( https://crates.io/crates/crossbeam) - [![Documentation](https://docs.rs/crossbeam/badge.svg)]( https://docs.rs/crossbeam) diff --git a/ci/script.sh b/ci/script.sh index 053f3f93e..468267e64 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -1,11 +1,27 @@ #!/bin/bash +check_min_version() { + local rustc="`rustc -V | cut -d' ' -f2 | cut -d- -f1`" + if [[ "$rustc" != "`echo -e "$rustc\n$1" | sort -V | tail -n1`" ]]; then + echo "Unsupported Rust version: $rustc < $1" + exit 0 + fi +} +check_min_version 1.26.0 + set -ex -cargo build +export RUSTFLAGS="-D warnings" + cargo build --no-default-features cargo test -if [[ $TRAVIS_RUST_VERSION == nightly ]]; then +if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then cargo test --features nightly fi + +# TODO(stjepang): Uncomment the following lines once we fix the dependency tree # if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then # cargo install cargo-tree # (cargo tree --duplicate | grep "^crossbeam") && exit 1 # fi diff --git a/crossbeam-channel/CHANGELOG.md b/crossbeam-channel/CHANGELOG.md new file mode 100644 index 000000000..63e222b0b --- /dev/null +++ b/crossbeam-channel/CHANGELOG.md @@ -0,0 +1,93 @@ +# Version 0.3.0 + +- Add a special `never` channel type. +- Dropping all receivers now closes the channel. +- The interfaces of the sending and receiving methods are now very similar to those in v0.1. +- The syntax for `send` in `select!` is now `send(sender, msg) -> res => body`. +- The syntax for `recv` in `select!` is now `recv(receiver) -> res => body`. +- New, more efficient interface for `Select` without callbacks. +- Timeouts can be specified in `select!`. + +# Version 0.2.6 + +- `Select` struct that can add cases dynamically. +- More documentation (in particular, the FAQ section). +- Optimize contended sends/receives in unbounded channels. + +# Version 0.2.5 + +- Use `LocalKey::try_with` instead of `LocalKey::with`. +- Remove helper macros `__crossbeam_channel*`. + +# Version 0.2.4 + +- Make `select!` linearizable with other channel operations. +- Update `crossbeam-utils` to `0.5.0`. +- Update `parking_lot` to `0.6.3`. +- Remove Mac OS X tests. + +# Version 0.2.3 + +- Add Mac OS X tests. +- Lower some memory orderings. +- Eliminate calls to `mem::uninitialized`, which caused bugs with zero-sized types (ZSTs). + +# Version 0.2.2 + +- Add more tests. +- Update `crossbeam-epoch` to 0.5.0 +- Initialize the RNG seed to a random value. +- Replace `libc::abort` with `std::process::abort`. +- Ignore clippy warnings in `select!`. +- Better interaction of `select!` with the NLL borrow checker. + +# Version 0.2.1 + +- Fix compilation errors when using `select!` with `#[deny(unsafe_code)]`.
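For readers skimming the 0.3.0 notes above, here is a minimal, illustrative sketch of the new `select!` syntax (the channels and values are made up; `select!` runs whichever listed operation becomes ready):

```rust
#[macro_use]
extern crate crossbeam_channel;

use std::time::Duration;

fn main() {
    let (s1, r1) = crossbeam_channel::unbounded();
    let (s2, r2) = crossbeam_channel::unbounded();
    s1.send(10).unwrap();

    select! {
        // `msg` is a `Result`: `Err` means the channel is disconnected.
        recv(r1) -> msg => assert_eq!(msg, Ok(10)),
        // New send syntax: `send(sender, msg) -> res => body`.
        send(s2, 20) -> res => res.unwrap(),
        // Timeouts are expressed with `default(duration)`.
        default(Duration::from_millis(100)) => println!("timed out"),
    }

    let _ = r2;
}
```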
+ +# Version 0.2.0 + +- Implement `IntoIterator` for `Receiver`. +- Add a new `select!` macro. +- Add special channels `after` and `tick`. +- Dropping receivers doesn't close the channel anymore. +- Change the signature of `recv`, `send`, and `try_recv`. +- Remove `Sender::is_closed` and `Receiver::is_closed`. +- Remove `Sender::close` and `Receiver::close`. +- Remove `Sender::send_timeout` and `Receiver::recv_timeout`. +- Remove `Sender::try_send`. +- Remove `Select` and `select_loop!`. +- Remove all error types. +- Remove `Iter`, `TryIter`, and `IntoIter`. +- Remove the `nightly` feature. +- Remove ordering operators for `Sender` and `Receiver`. + +# Version 0.1.3 + +- Add `Sender::disconnect` and `Receiver::disconnect`. +- Implement comparison operators for `Sender` and `Receiver`. +- Allow arbitrary patterns in place of `msg` in `recv(r, msg)`. +- Add a few conversion impls between error types. +- Add benchmarks for `atomicring` and `mpmc`. +- Add benchmarks for different message sizes. +- Documentation improvements. +- Update `crossbeam-epoch` to 0.4.0 +- Update `crossbeam-utils` to 0.3.0 +- Update `parking_lot` to 0.5 +- Update `rand` to 0.4 + +# Version 0.1.2 + +- Allow conditional cases in `select_loop!` macro. +- Fix typos in documentation. +- Fix deadlock in selection when all channels are disconnected and a timeout is specified. + +# Version 0.1.1 + +- Implement `Debug` for `Sender`, `Receiver`, `Iter`, `TryIter`, `IntoIter`, and `Select`. +- Implement `Default` for `Select`. + +# Version 0.1.0 + +- First implementation of the channels. +- Add `select_loop!` macro by @TimNN. diff --git a/crossbeam-channel/Cargo.toml b/crossbeam-channel/Cargo.toml new file mode 100644 index 000000000..cec2ce32a --- /dev/null +++ b/crossbeam-channel/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "crossbeam-channel" +# When publishing a new version: +# - Update CHANGELOG.md +# - Update README.md +# - Create "crossbeam-channel-X.Y.Z" git tag +version = "0.3.0" +authors = ["The Crossbeam Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/crossbeam-rs/crossbeam-channel" +homepage = "https://github.com/crossbeam-rs/crossbeam-channel" +documentation = "https://docs.rs/crossbeam-channel" +description = "Multi-producer multi-consumer channels for message passing" +keywords = ["channel", "mpmc", "select", "golang", "message"] +categories = ["algorithms", "concurrency", "data-structures"] + +[dependencies] +crossbeam-epoch = "0.6.0" +crossbeam-utils = "0.5.0" +parking_lot = "0.6.3" +rand = "0.5.3" +smallvec = "0.6.2" + +[dev-dependencies] +crossbeam = "0.3.0" +signal-hook = "0.1.5" diff --git a/crossbeam-channel/LICENSE-APACHE b/crossbeam-channel/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/crossbeam-channel/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/crossbeam-channel/LICENSE-MIT b/crossbeam-channel/LICENSE-MIT new file mode 100644 index 000000000..25597d583 --- /dev/null +++ b/crossbeam-channel/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/crossbeam-channel/README.md b/crossbeam-channel/README.md new file mode 100644 index 000000000..22cf2c406 --- /dev/null +++ b/crossbeam-channel/README.md @@ -0,0 +1,38 @@ +# Multi-producer multi-consumer channels for message passing + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( +https://travis-ci.org/crossbeam-rs/crossbeam) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam-channel) +[![Cargo](https://img.shields.io/crates/v/crossbeam-channel.svg)]( +https://crates.io/crates/crossbeam-channel) +[![Documentation](https://docs.rs/crossbeam-channel/badge.svg)]( +https://docs.rs/crossbeam-channel) + +This library is an alternative to [`std::sync::mpsc`] with more features and better performance. + +[`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-channel = "0.3" +``` + +Next, add this to your crate: + +```rust +#[macro_use] +extern crate crossbeam_channel; +``` + +The minimum required Rust version is 1.26. + +## License + +Licensed under the terms of the MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. diff --git a/crossbeam-channel/benchmarks/Cargo.toml b/crossbeam-channel/benchmarks/Cargo.toml new file mode 100644 index 000000000..1638cc302 --- /dev/null +++ b/crossbeam-channel/benchmarks/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "benchmarks" +version = "0.1.0" + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 +incremental = false +panic = 'abort' + +[dependencies] +atomicring = "0.5.3" +bus = "2.0.0" +chan = "0.1.19" +crossbeam = "0.3.0" +crossbeam-deque = "0.4.1" +crossbeam-channel = { path = ".."
} +futures-preview = "0.2.2" +mpmc = "0.1.5" + +[[bin]] +name = "atomicring" +path = "atomicring.rs" + +[[bin]] +name = "atomicringqueue" +path = "atomicringqueue.rs" + +[[bin]] +name = "bus" +path = "bus.rs" + +[[bin]] +name = "chan" +path = "chan.rs" + +[[bin]] +name = "crossbeam-channel" +path = "crossbeam-channel.rs" + +[[bin]] +name = "crossbeam-deque" +path = "crossbeam-deque.rs" + +[[bin]] +name = "futures-channel" +path = "futures-channel.rs" + +[[bin]] +name = "mpsc" +path = "mpsc.rs" + +[[bin]] +name = "msqueue" +path = "msqueue.rs" + +[[bin]] +name = "segqueue" +path = "segqueue.rs" + +[[bin]] +name = "mpmc" +path = "mpmc.rs" diff --git a/crossbeam-channel/benchmarks/README.md b/crossbeam-channel/benchmarks/README.md new file mode 100644 index 000000000..27fd96fa9 --- /dev/null +++ b/crossbeam-channel/benchmarks/README.md @@ -0,0 +1,43 @@ +# Benchmarks + +### Tests + +* `seq`: A single thread sends `N` messages. Then it receives `N` messages. +* `spsc`: One thread sends `N` messages. Another thread receives `N` messages. +* `mpsc`: `T` threads send `N / T` messages each. One thread receives `N` messages. +* `mpmc`: `T` threads send `N / T` messages each. `T` other threads receive `N / T` messages each. +* `select_rx`: `T` threads send `N / T` messages each into a separate channel. Another thread receives `N` messages by selecting over the `T` channels. +* `select_both`: `T` threads send `N / T` messages each by selecting over `T` channels. `T` other threads receive `N / T` messages each by selecting over the `T` channels. + +Default configuration: + +- `N = 5000000` +- `T = 4` + +### Running + +Runs benchmarks, stores results into `*.txt` files, and generates `plot.png`: + +``` +./run.sh +``` + +Dependencies: + +- Rust (nightly) +- Go +- Bash +- Python 2 +- Matplotlib + +### Results + +Machine: Intel(R) Core(TM) i7-5600U (2 physical cores, 4 logical cores) + +Rust: `rustc 1.30.0-nightly (90d36fb59 2018-09-13)` + +Go: `go version go1.10.3 linux/amd64` + +Commit: `6cde88a` (2018-09-17) + +![Benchmark results](https://i.imgur.com/Kw2dQcy.png) diff --git a/crossbeam-channel/benchmarks/atomicring.rs b/crossbeam-channel/benchmarks/atomicring.rs new file mode 100644 index 000000000..db38b9cef --- /dev/null +++ b/crossbeam-channel/benchmarks/atomicring.rs @@ -0,0 +1,141 @@ +extern crate atomicring; +extern crate crossbeam; + +use atomicring::AtomicRingBuffer; +use shared::message; +use std::thread; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq(cap: usize) { + let q = AtomicRingBuffer::with_capacity(cap); + + for i in 0..MESSAGES { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + + for _ in 0..MESSAGES { + q.try_pop().unwrap(); + } +} + +fn spsc(cap: usize) { + let q = AtomicRingBuffer::with_capacity(cap); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + + for _ in 0..MESSAGES { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpsc(cap: usize) { + let q = AtomicRingBuffer::with_capacity(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + } + + for _ in 0..MESSAGES { + loop { + if q.try_pop().is_none() { + thread::yield_now();
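+ // Nothing to pop yet: the consumer just yielded so the producers can catch up, then it retries.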
+ } else { + break; + } + } + } + }); +} + +fn mpmc(cap: usize) { + let q = AtomicRingBuffer::with_capacity(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + } + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust atomicring", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded_mpmc", mpmc(MESSAGES)); + run!("bounded_mpsc", mpsc(MESSAGES)); + run!("bounded_seq", seq(MESSAGES)); + run!("bounded_spsc", spsc(MESSAGES)); +} diff --git a/crossbeam-channel/benchmarks/atomicringqueue.rs b/crossbeam-channel/benchmarks/atomicringqueue.rs new file mode 100644 index 000000000..ce1ab9dfc --- /dev/null +++ b/crossbeam-channel/benchmarks/atomicringqueue.rs @@ -0,0 +1,124 @@ +extern crate atomicring; +extern crate crossbeam; + +use atomicring::AtomicRingQueue; +use shared::message; +use std::thread; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq(cap: usize) { + let q = AtomicRingQueue::with_capacity(cap); + + for i in 0..MESSAGES { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + + for _ in 0..MESSAGES { + q.pop(); + } +} + +fn spsc(cap: usize) { + let q = AtomicRingQueue::with_capacity(cap); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + + for _ in 0..MESSAGES { + q.pop(); + } + }); +} + +fn mpsc(cap: usize) { + let q = AtomicRingQueue::with_capacity(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + } + + for _ in 0..MESSAGES { + q.pop(); + } + }); +} + +fn mpmc(cap: usize) { + let q = AtomicRingQueue::with_capacity(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + loop { + if q.try_push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + } + + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + q.pop(); + } + }); + } + }); +} + +fn main() { + macro_rules! 
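+ // `run!` is a small timing harness: it executes one benchmark and prints its name, the implementation, and the elapsed seconds.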
run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust atomicringqueue", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded_mpmc", mpmc(MESSAGES)); + run!("bounded_mpsc", mpsc(MESSAGES)); + run!("bounded_seq", seq(MESSAGES)); + run!("bounded_spsc", spsc(MESSAGES)); +} diff --git a/crossbeam-channel/benchmarks/bus.rs b/crossbeam-channel/benchmarks/bus.rs new file mode 100644 index 000000000..813e87637 --- /dev/null +++ b/crossbeam-channel/benchmarks/bus.rs @@ -0,0 +1,60 @@ +extern crate bus; +extern crate crossbeam; + +use bus::Bus; +use shared::message; + +mod shared; + +const MESSAGES: usize = 5_000_000; + +fn seq(cap: usize) { + let mut tx = Bus::new(cap); + let mut rx = tx.add_rx(); + + for i in 0..MESSAGES { + tx.broadcast(message(i)); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } +} + +fn spsc(cap: usize) { + let mut tx = Bus::new(cap); + let mut rx = tx.add_rx(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + tx.broadcast(message(i)); + } + }); + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust bus", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded1_spsc", spsc(1)); + + run!("bounded_seq", seq(MESSAGES)); + run!("bounded_spsc", spsc(MESSAGES)); +} diff --git a/crossbeam-channel/benchmarks/chan.rs b/crossbeam-channel/benchmarks/chan.rs new file mode 100644 index 000000000..7f00df25a --- /dev/null +++ b/crossbeam-channel/benchmarks/chan.rs @@ -0,0 +1,200 @@ +#[macro_use] +extern crate chan; +extern crate crossbeam; + +use shared::message; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn new<T>(cap: Option<usize>) -> (chan::Sender<T>, chan::Receiver<T>) { + match cap { + None => chan::async(), + Some(cap) => chan::sync(cap) + } +} + +fn seq(cap: Option<usize>) { + let (tx, rx) = new(cap); + + for i in 0..MESSAGES { + tx.send(message(i)); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } +} + +fn spsc(cap: Option<usize>) { + let (tx, rx) = new(cap); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + tx.send(message(i)); + } + }); + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn mpsc(cap: Option<usize>) { + let (tx, rx) = new(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)); + } + }); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn mpmc(cap: Option<usize>) { + let (tx, rx) = new(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)); + } + }); + } + + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + rx.recv().unwrap(); + } + }); + } + }); +} + +fn select_rx(cap: Option<usize>) { + assert_eq!(THREADS, 4); + let chans = (0..THREADS).map(|_| new(cap)).collect::<Vec<_>>(); + + crossbeam::scope(|scope| { + for (tx, _) in &chans { + let tx = tx.clone(); + scope.spawn(move || { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)); + } + }); + } + + let rx0 = &chans[0].1; + let rx1 = &chans[1].1; + let rx2 = &chans[2].1; + let rx3 = &chans[3].1; + for _ in 0..MESSAGES { + chan_select!
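+ // `chan_select!` (from the `chan` crate) blocks until one of the four receive operations is ready.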
{ + rx0.recv() -> m => assert!(m.is_some()), + rx1.recv() -> m => assert!(m.is_some()), + rx2.recv() -> m => assert!(m.is_some()), + rx3.recv() -> m => assert!(m.is_some()), + } + } + }); +} + +fn select_both(cap: Option<usize>) { + assert_eq!(THREADS, 4); + let chans = (0..THREADS).map(|_| new(cap)).collect::<Vec<_>>(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + let chans = chans.clone(); + scope.spawn(move || { + let tx0 = &chans[0].0; + let tx1 = &chans[1].0; + let tx2 = &chans[2].0; + let tx3 = &chans[3].0; + + for i in 0..MESSAGES / THREADS { + chan_select! { + tx0.send(message(i)) => {}, + tx1.send(message(i)) => {}, + tx2.send(message(i)) => {}, + tx3.send(message(i)) => {}, + } + } + }); + } + + for _ in 0..THREADS { + let chans = chans.clone(); + scope.spawn(move || { + let rx0 = &chans[0].1; + let rx1 = &chans[1].1; + let rx2 = &chans[2].1; + let rx3 = &chans[3].1; + + for _ in 0..MESSAGES / THREADS { + chan_select! { + rx0.recv() -> m => assert!(m.is_some()), + rx1.recv() -> m => assert!(m.is_some()), + rx2.recv() -> m => assert!(m.is_some()), + rx3.recv() -> m => assert!(m.is_some()), + } + } + }); + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust chan", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded0_mpmc", mpmc(Some(0))); + run!("bounded0_mpsc", mpsc(Some(0))); + run!("bounded0_select_rx", select_rx(Some(0))); + run!("bounded0_spsc", spsc(Some(0))); + + run!("bounded1_mpmc", mpmc(Some(1))); + run!("bounded1_mpsc", mpsc(Some(1))); + run!("bounded1_select_both", select_both(Some(1))); + run!("bounded1_select_rx", select_rx(Some(1))); + run!("bounded1_spsc", spsc(Some(1))); + + run!("bounded_mpmc", mpmc(Some(MESSAGES))); + run!("bounded_mpsc", mpsc(Some(MESSAGES))); + run!("bounded_select_both", select_both(Some(MESSAGES))); + run!("bounded_select_rx", select_rx(Some(MESSAGES))); + run!("bounded_seq", seq(Some(MESSAGES))); + run!("bounded_spsc", spsc(Some(MESSAGES))); + + run!("unbounded_mpmc", mpmc(None)); + run!("unbounded_mpsc", mpsc(None)); + run!("unbounded_select_both", select_both(None)); + run!("unbounded_select_rx", select_rx(None)); + run!("unbounded_seq", seq(None)); + run!("unbounded_spsc", spsc(None)); +} diff --git a/crossbeam-channel/benchmarks/crossbeam-channel.rs b/crossbeam-channel/benchmarks/crossbeam-channel.rs new file mode 100644 index 000000000..350cb3162 --- /dev/null +++ b/crossbeam-channel/benchmarks/crossbeam-channel.rs @@ -0,0 +1,186 @@ +extern crate crossbeam; +extern crate crossbeam_channel; + +use crossbeam_channel::{bounded, unbounded, Receiver, Select, Sender}; +use shared::message; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn new<T>(cap: Option<usize>) -> (Sender<T>, Receiver<T>) { + match cap { + None => unbounded(), + Some(cap) => bounded(cap), + } +} + +fn seq(cap: Option<usize>) { + let (tx, rx) = new(cap); + + for i in 0..MESSAGES { + tx.send(message(i)).unwrap(); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } +} + +fn spsc(cap: Option<usize>) { + let (tx, rx) = new(cap); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + tx.send(message(i)).unwrap(); + } + }); + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn mpsc(cap: Option<usize>) { + let (tx, rx) = new(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS {
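+ // Each producer thread sends its MESSAGES / THREADS share of the total.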
tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn mpmc(cap: Option<usize>) { + let (tx, rx) = new(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + rx.recv().unwrap(); + } + }); + } + }); +} + +fn select_rx(cap: Option<usize>) { + let chans = (0..THREADS).map(|_| new(cap)).collect::<Vec<_>>(); + + crossbeam::scope(|scope| { + for (tx, _) in &chans { + let tx = tx.clone(); + scope.spawn(move || { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..MESSAGES { + let mut sel = Select::new(); + for (_, rx) in &chans { + sel.recv(rx); + } + let case = sel.select(); + let index = case.index(); + case.recv(&chans[index].1).unwrap(); + } + }); +} + +fn select_both(cap: Option<usize>) { + let chans = (0..THREADS).map(|_| new(cap)).collect::<Vec<_>>(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + let mut sel = Select::new(); + for (tx, _) in &chans { + sel.send(tx); + } + let case = sel.select(); + let index = case.index(); + case.send(&chans[index].0, message(i)).unwrap(); + } + }); + } + + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + let mut sel = Select::new(); + for (_, rx) in &chans { + sel.recv(rx); + } + let case = sel.select(); + let index = case.index(); + case.recv(&chans[index].1).unwrap(); + } + }); + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust crossbeam-channel", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded0_mpmc", mpmc(Some(0))); + run!("bounded0_mpsc", mpsc(Some(0))); + run!("bounded0_select_both", select_both(Some(0))); + run!("bounded0_select_rx", select_rx(Some(0))); + run!("bounded0_spsc", spsc(Some(0))); + + run!("bounded1_mpmc", mpmc(Some(1))); + run!("bounded1_mpsc", mpsc(Some(1))); + run!("bounded1_select_both", select_both(Some(1))); + run!("bounded1_select_rx", select_rx(Some(1))); + run!("bounded1_spsc", spsc(Some(1))); + + run!("bounded_mpmc", mpmc(Some(MESSAGES))); + run!("bounded_mpsc", mpsc(Some(MESSAGES))); + run!("bounded_select_both", select_both(Some(MESSAGES))); + run!("bounded_select_rx", select_rx(Some(MESSAGES))); + run!("bounded_seq", seq(Some(MESSAGES))); + run!("bounded_spsc", spsc(Some(MESSAGES))); + + run!("unbounded_mpmc", mpmc(None)); + run!("unbounded_mpsc", mpsc(None)); + run!("unbounded_select_both", select_both(None)); + run!("unbounded_select_rx", select_rx(None)); + run!("unbounded_seq", seq(None)); + run!("unbounded_spsc", spsc(None)); +} diff --git a/crossbeam-channel/benchmarks/crossbeam-deque.rs b/crossbeam-channel/benchmarks/crossbeam-deque.rs new file mode 100644 index 000000000..60c3f91e5 --- /dev/null +++ b/crossbeam-channel/benchmarks/crossbeam-deque.rs @@ -0,0 +1,68 @@ +extern crate crossbeam; +extern crate crossbeam_deque as deque; + +use std::thread; + +use deque::{Deque, Steal}; + +const MESSAGES: usize = 5_000_000; + +fn seq() { + let tx = Deque::new(); + let rx = tx.stealer(); + + for i in 0..MESSAGES { + tx.push(i as i32); + } + + for _ in 0..MESSAGES { + match rx.steal() { + Steal::Data(_) => {} + Steal::Retry => panic!(), + Steal::Empty => panic!(), + } + }
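+ // All items were pushed before stealing began, so `Retry`/`Empty` above would indicate a bug.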
+} + +fn spsc() { + let tx = Deque::new(); + let rx = tx.stealer(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + for i in 0..MESSAGES { + tx.push(i as i32); + } + }); + + scope.spawn(move || { + for _ in 0..MESSAGES { + loop { + match rx.steal() { + Steal::Data(_) => break, + Steal::Retry | Steal::Empty => thread::yield_now(), + } + } + } + }); + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust crossbeam-deque", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("unbounded_seq", seq()); + run!("unbounded_spsc", spsc()); +} diff --git a/crossbeam-channel/benchmarks/futures-channel.rs b/crossbeam-channel/benchmarks/futures-channel.rs new file mode 100644 index 000000000..3aeae5247 --- /dev/null +++ b/crossbeam-channel/benchmarks/futures-channel.rs @@ -0,0 +1,179 @@ +extern crate crossbeam; +extern crate futures; + +use futures::channel::mpsc; +use futures::executor::ThreadPool; +use futures::prelude::*; +use futures::{SinkExt, StreamExt, future, stream}; + +use shared::message; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq_unbounded() { + ThreadPool::new().unwrap().run(future::lazy(|_| { + let (tx, rx) = mpsc::unbounded(); + for i in 0..MESSAGES { + tx.unbounded_send(message(i)).unwrap(); + } + drop(tx); + + rx.for_each(|_| future::ok(())) + })).unwrap(); +} + +fn seq_bounded(cap: usize) { + let (mut tx, rx) = mpsc::channel(cap); + ThreadPool::new().unwrap().run(future::lazy(|_| { + for i in 0..MESSAGES { + tx.try_send(message(i)).unwrap(); + } + drop(tx); + + rx.for_each(|_| future::ok(())) + })).unwrap(); +} + +fn spsc_unbounded() { + ThreadPool::new().unwrap().run(future::lazy(|cx| { + let (tx, rx) = mpsc::unbounded(); + + cx.spawn(future::lazy(move |_| { + tx.send_all(stream::iter_ok((0..MESSAGES).map(|i| message(i)))) + .map_err(|_| panic!()) + .and_then(|_| future::ok(())) + })); + + rx.for_each(|_| future::ok(())) + })).unwrap(); +} + +fn spsc_bounded(cap: usize) { + ThreadPool::new().unwrap().run(future::lazy(|cx| { + let (tx, rx) = mpsc::channel(cap); + + cx.spawn(future::lazy(move |_| { + tx.send_all(stream::iter_ok((0..MESSAGES).map(|i| message(i)))) + .map_err(|_| panic!()) + .and_then(|_| future::ok(())) + })); + + rx.for_each(|_| future::ok(())) + })).unwrap(); +} + +fn mpsc_unbounded() { + ThreadPool::new().unwrap().run(future::lazy(|cx| { + let (tx, rx) = mpsc::unbounded(); + + for _ in 0..THREADS { + let tx = tx.clone(); + cx.spawn(future::lazy(move |_| { + tx.send_all(stream::iter_ok((0..MESSAGES / THREADS).map(|i| message(i)))) + .map_err(|_| panic!()) + .and_then(|_| future::ok(())) + })); + } + drop(tx); + + rx.for_each(|_| future::ok(())) + })).unwrap(); +} + +fn mpsc_bounded(cap: usize) { + ThreadPool::new().unwrap().run(future::lazy(|cx| { + let (tx, rx) = mpsc::channel(cap); + + for _ in 0..THREADS { + let tx = tx.clone(); + cx.spawn(future::lazy(move |_| { + tx.send_all(stream::iter_ok((0..MESSAGES / THREADS).map(|i| message(i)))) + .map_err(|_| panic!()) + .and_then(|_| future::ok(())) + })); + } + drop(tx); + + rx.for_each(|_| future::ok(())) + })).unwrap(); +} + +fn select_rx_unbounded() { + ThreadPool::new().unwrap().run(future::lazy(|cx| { + let chans = (0..THREADS) + .map(|_| mpsc::unbounded()) + .collect::<Vec<_>>(); + + for (tx, _) in &chans { + let tx = tx.clone(); + cx.spawn(future::lazy(move |_| { + for i in
0..MESSAGES / THREADS { + tx.unbounded_send(message(i)).unwrap(); + } + future::ok(()) + })); + } + + stream::select_all(chans.into_iter().map(|(_, rx)| rx)) + .for_each(|_| future::ok(())) + .and_then(|_| future::ok(())) + })).unwrap(); +} + +fn select_rx_bounded(cap: usize) { + ThreadPool::new().unwrap().run(future::lazy(|cx| { + let chans = (0..THREADS) + .map(|_| mpsc::channel(cap)) + .collect::<Vec<_>>(); + + for (tx, _) in &chans { + let tx = tx.clone(); + cx.spawn(future::lazy(move |_| { + tx.send_all(stream::iter_ok((0..MESSAGES / THREADS).map(|i| message(i)))) + .map_err(|_| panic!()) + .and_then(|_| future::ok(())) + })); + } + + stream::select_all(chans.into_iter().map(|(_, rx)| rx)) + .for_each(|_| future::ok(())) + .and_then(|_| future::ok(())) + })).unwrap(); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust futures-channel", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded0_mpsc", mpsc_bounded(0)); + run!("bounded0_select_rx", select_rx_bounded(0)); + run!("bounded0_spsc", spsc_bounded(0)); + + run!("bounded1_mpsc", mpsc_bounded(1)); + run!("bounded1_select_rx", select_rx_bounded(1)); + run!("bounded1_spsc", spsc_bounded(1)); + + run!("bounded_mpsc", mpsc_bounded(MESSAGES)); + run!("bounded_select_rx", select_rx_bounded(MESSAGES)); + run!("bounded_seq", seq_bounded(MESSAGES)); + run!("bounded_spsc", spsc_bounded(MESSAGES)); + + run!("unbounded_mpsc", mpsc_unbounded()); + run!("unbounded_select_rx", select_rx_unbounded()); + run!("unbounded_seq", seq_unbounded()); + run!("unbounded_spsc", spsc_unbounded()); +} diff --git a/crossbeam-channel/benchmarks/go.go b/crossbeam-channel/benchmarks/go.go new file mode 100644 index 000000000..cac68b8cf --- /dev/null +++ b/crossbeam-channel/benchmarks/go.go @@ -0,0 +1,200 @@ +package main + +import "fmt" +import "time" + +const MESSAGES = 5 * 1000 * 1000 +const THREADS = 4 + +type Message = int + +func message(msg int) Message { + return msg +} + +func seq(cap int) { + var c = make(chan Message, cap) + + for i := 0; i < MESSAGES; i++ { + c <- i + } + + for i := 0; i < MESSAGES; i++ { + <-c + } +} + +func spsc(cap int) { + var c = make(chan Message, cap) + var done = make(chan bool) + + go func() { + for i := 0; i < MESSAGES; i++ { + c <- i + } + done <- true + }() + + for i := 0; i < MESSAGES; i++ { + <-c + } + + <-done +} + +func mpsc(cap int) { + var c = make(chan Message, cap) + var done = make(chan bool) + + for t := 0; t < THREADS; t++ { + go func() { + for i := 0; i < MESSAGES / THREADS; i++ { + c <- i + } + done <- true + }() + } + + for i := 0; i < MESSAGES; i++ { + <-c + } + + for t := 0; t < THREADS; t++ { + <-done + } +} + +func mpmc(cap int) { + var c = make(chan Message, cap) + var done = make(chan bool) + + for t := 0; t < THREADS; t++ { + go func() { + for i := 0; i < MESSAGES / THREADS; i++ { + c <- i + } + done <- true + }() + + } + + for t := 0; t < THREADS; t++ { + go func() { + for i := 0; i < MESSAGES / THREADS; i++ { + <-c + } + done <- true + }() + } + + for t := 0; t < THREADS; t++ { + <-done + <-done + } +} + +func select_rx(cap int) { + if THREADS != 4 { + panic("assumed there are 4 threads") + } + + var c0 = make(chan Message, cap) + var c1 = make(chan Message, cap) + var c2 = make(chan Message, cap) + var c3 = make(chan Message, cap) + var done = make(chan bool) + + var producer = func(c chan Message) { + for i := 0; i <
MESSAGES / THREADS; i++ { + c <- i + } + done <- true + } + go producer(c0) + go producer(c1) + go producer(c2) + go producer(c3) + + for i := 0; i < MESSAGES; i++ { + select { + case <-c0: + case <-c1: + case <-c2: + case <-c3: + } + } + + for t := 0; t < THREADS; t++ { + <-done + } +} + +func select_both(cap int) { + if THREADS != 4 { + panic("assumed there are 4 threads") + } + + var c0 = make(chan Message, cap) + var c1 = make(chan Message, cap) + var c2 = make(chan Message, cap) + var c3 = make(chan Message, cap) + var done = make(chan bool) + + var producer = func(c chan Message) { + for i := 0; i < MESSAGES / THREADS; i++ { + c <- i + } + done <- true + } + go producer(c0) + go producer(c1) + go producer(c2) + go producer(c3) + + for t := 0; t < THREADS; t++ { + go func() { + for i := 0; i < MESSAGES / THREADS; i++ { + select { + case <-c0: + case <-c1: + case <-c2: + case <-c3: + } + } + done <- true + }() + } + + for t := 0; t < THREADS; t++ { + <-done + <-done + } +} + +func run(name string, f func(int), cap int) { + var now = time.Now() + f(cap) + var elapsed = time.Now().Sub(now) + fmt.Printf("%-25v %-15v %7.3f sec\n", name, "Go chan", float64(elapsed) / float64(time.Second)) +} + +func main() { + run("bounded0_mpmc", mpmc, 0) + run("bounded0_mpsc", mpsc, 0) + run("bounded0_select_both", select_both, 0) + run("bounded0_select_rx", select_rx, 0) + run("bounded0_spsc", spsc, 0) + + run("bounded1_mpmc", mpmc, 1) + run("bounded1_mpsc", mpsc, 1) + run("bounded1_select_both", select_both, 1) + run("bounded1_select_rx", select_rx, 1) + run("bounded1_spsc", spsc, 1) + + run("bounded_mpmc", mpmc, MESSAGES) + run("bounded_mpsc", mpsc, MESSAGES) + run("bounded_select_both", select_both, MESSAGES) + run("bounded_select_rx", select_rx, MESSAGES) + run("bounded_seq", seq, MESSAGES) + run("bounded_spsc", spsc, MESSAGES) +} diff --git a/crossbeam-channel/benchmarks/mpmc.rs b/crossbeam-channel/benchmarks/mpmc.rs new file mode 100644 index 000000000..e05ea2600 --- /dev/null +++ b/crossbeam-channel/benchmarks/mpmc.rs @@ -0,0 +1,141 @@ +extern crate mpmc; +extern crate crossbeam; + +use shared::message; +use std::thread; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq(cap: usize) { + let q = mpmc::Queue::with_capacity(cap); + + for i in 0..MESSAGES { + loop { + if q.push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + + for _ in 0..MESSAGES { + q.pop().unwrap(); + } +} + +fn spsc(cap: usize) { + let q = mpmc::Queue::with_capacity(cap); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + loop { + if q.push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + + for _ in 0..MESSAGES { + loop { + if q.pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpsc(cap: usize) { + let q = mpmc::Queue::with_capacity(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + loop { + if q.push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + } + + for _ in 0..MESSAGES { + loop { + if q.pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpmc(cap: usize) { + let q = mpmc::Queue::with_capacity(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + loop { + if q.push(message(i)).is_ok() { + break; + } else { + thread::yield_now(); + } + } + } + }); + } + + for _ 
in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + loop { + if q.pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust mpmc", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded_mpmc", mpmc(MESSAGES)); + run!("bounded_mpsc", mpsc(MESSAGES)); + run!("bounded_seq", seq(MESSAGES)); + run!("bounded_spsc", spsc(MESSAGES)); +} diff --git a/crossbeam-channel/benchmarks/mpsc.rs b/crossbeam-channel/benchmarks/mpsc.rs new file mode 100644 index 000000000..2417479d3 --- /dev/null +++ b/crossbeam-channel/benchmarks/mpsc.rs @@ -0,0 +1,201 @@ +#![feature(mpsc_select)] + +extern crate crossbeam; + +use std::sync::mpsc; +use shared::{message, shuffle}; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq_async() { + let (tx, rx) = mpsc::channel(); + + for i in 0..MESSAGES { + tx.send(message(i)).unwrap(); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } +} + +fn seq_sync(cap: usize) { + let (tx, rx) = mpsc::sync_channel(cap); + + for i in 0..MESSAGES { + tx.send(message(i)).unwrap(); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } +} + +fn spsc_async() { + let (tx, rx) = mpsc::channel(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + for i in 0..MESSAGES { + tx.send(message(i)).unwrap(); + } + }); + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn spsc_sync(cap: usize) { + let (tx, rx) = mpsc::sync_channel(cap); + + crossbeam::scope(|scope| { + scope.spawn(move || { + for i in 0..MESSAGES { + tx.send(message(i)).unwrap(); + } + }); + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn mpsc_async() { + let (tx, rx) = mpsc::channel(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + let tx = tx.clone(); + scope.spawn(move || { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn mpsc_sync(cap: usize) { + let (tx, rx) = mpsc::sync_channel(cap); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + let tx = tx.clone(); + scope.spawn(move || { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..MESSAGES { + rx.recv().unwrap(); + } + }); +} + +fn select_rx_async() { + assert_eq!(THREADS, 4); + let mut chans = (0..THREADS).map(|_| mpsc::channel()).collect::>(); + + crossbeam::scope(|scope| { + for &(ref tx, _) in &chans { + let tx = tx.clone(); + scope.spawn(move || { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..MESSAGES { + shuffle(&mut chans); + let rx0 = &chans[0].1; + let rx1 = &chans[1].1; + let rx2 = &chans[2].1; + let rx3 = &chans[3].1; + + select! 
{ + m = rx0.recv() => assert!(m.is_ok()), + m = rx1.recv() => assert!(m.is_ok()), + m = rx2.recv() => assert!(m.is_ok()), + m = rx3.recv() => assert!(m.is_ok()) + }; + } + }); +} + +fn select_rx_sync(cap: usize) { + assert_eq!(THREADS, 4); + let mut chans = (0..THREADS).map(|_| mpsc::sync_channel(cap)).collect::>(); + + crossbeam::scope(|scope| { + for &(ref tx, _) in &chans { + let tx = tx.clone(); + scope.spawn(move || { + for i in 0..MESSAGES / THREADS { + tx.send(message(i)).unwrap(); + } + }); + } + + for _ in 0..MESSAGES { + shuffle(&mut chans); + let rx0 = &chans[0].1; + let rx1 = &chans[1].1; + let rx2 = &chans[2].1; + let rx3 = &chans[3].1; + + select! { + m = rx0.recv() => assert!(m.is_ok()), + m = rx1.recv() => assert!(m.is_ok()), + m = rx2.recv() => assert!(m.is_ok()), + m = rx3.recv() => assert!(m.is_ok()) + } + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust mpsc", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("bounded0_mpsc", mpsc_sync(0)); + run!("bounded0_select_rx", select_rx_sync(0)); + run!("bounded0_spsc", spsc_sync(0)); + + run!("bounded1_mpsc", mpsc_sync(1)); + run!("bounded1_select_rx", select_rx_sync(1)); + run!("bounded1_spsc", spsc_sync(1)); + + run!("bounded_mpsc", mpsc_sync(MESSAGES)); + run!("bounded_select_rx", select_rx_sync(MESSAGES)); + run!("bounded_seq", seq_sync(MESSAGES)); + run!("bounded_spsc", spsc_sync(MESSAGES)); + + run!("unbounded_mpsc", mpsc_async()); + run!("unbounded_select_rx", select_rx_async()); + run!("unbounded_seq", seq_async()); + run!("unbounded_spsc", spsc_async()); +} diff --git a/crossbeam-channel/benchmarks/msqueue.rs b/crossbeam-channel/benchmarks/msqueue.rs new file mode 100644 index 000000000..b6ccb34ad --- /dev/null +++ b/crossbeam-channel/benchmarks/msqueue.rs @@ -0,0 +1,117 @@ +extern crate crossbeam; + +use crossbeam::sync::MsQueue; +use shared::message; +use std::thread; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq() { + let q = MsQueue::new(); + + for i in 0..MESSAGES { + q.push(message(i)); + } + + for _ in 0..MESSAGES { + q.try_pop().unwrap(); + } +} + +fn spsc() { + let q = MsQueue::new(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + q.push(message(i)); + } + }); + + for _ in 0..MESSAGES { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpsc() { + let q = MsQueue::new(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + q.push(message(i)); + } + }); + } + + for _ in 0..MESSAGES { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpmc() { + let q = MsQueue::new(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + q.push(message(i)); + } + }); + } + + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); + } + }); +} + +fn main() { + macro_rules! 
run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust msqueue", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("unbounded_mpmc", mpmc()); + run!("unbounded_mpsc", mpsc()); + run!("unbounded_seq", seq()); + run!("unbounded_spsc", spsc()); +} diff --git a/crossbeam-channel/benchmarks/plot.py b/crossbeam-channel/benchmarks/plot.py new file mode 100755 index 000000000..d45dcf759 --- /dev/null +++ b/crossbeam-channel/benchmarks/plot.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python2 + +import sys +import matplotlib.pyplot as plt +import matplotlib.patches as mpatches + +results = [] +for f in sys.argv[1:]: + with open(f) as f: + for line in f.readlines(): + test, lang, impl, secs, _ = line.split() + results.append((test, lang, impl, float(secs))) + +fig = plt.figure(figsize=(10, 10)) + + +def plot(subplot, title, prefix, runs): + runs.reverse() + + ys = [6 * (i+1) for i in xrange(len(runs))] + ax = fig.add_subplot(subplot) + ax.set_title(title) + ax.set_yticks(ys) + ax.set_yticklabels(runs) + ax.tick_params(which='major', length=0) + ax.set_xlabel('seconds') + + go = [0] * len(runs) + mpsc = [0] * len(runs) + futures_channel = [0] * len(runs) + chan = [0] * len(runs) + crossbeam_channel = [0] * len(runs) + + for (i, run) in enumerate(runs): + for (test, lang, impl, secs) in results: + if test == prefix + '_' + run: + if lang == 'Go' and impl == 'chan': + go[i] = secs + if lang == 'Rust' and impl == 'mpsc': + mpsc[i] = secs + if lang == 'Rust' and impl == 'futures-channel': + futures_channel[i] = secs + if lang == 'Rust' and impl == 'chan': + chan[i] = secs + if lang == 'Rust' and impl == 'crossbeam-channel': + crossbeam_channel[i] = secs + + opts = dict(height=0.7, align='center') + ax.barh([y-2 for y in ys], go, color='skyblue', **opts) + ax.barh([y-1 for y in ys], crossbeam_channel, color='red', **opts) + ax.barh([y+0 for y in ys], chan, color='orange', **opts) + ax.barh([y+1 for y in ys], mpsc, color='black', **opts) + ax.barh([y+2 for y in ys], futures_channel, color='blue', **opts) + + m = int(max(go + mpsc + futures_channel + chan + crossbeam_channel) * 1.3) + if m < 10: + ax.set_xticks(range(m + 1)) + elif m < 50: + ax.set_xticks([x*5 for x in range(m / 5 + 1)]) + elif m < 100: + ax.set_xticks([x*10 for x in range(m / 10 + 1)]) + elif m < 100: + ax.set_xticks([x*20 for x in range(m / 20 + 1)]) + else: + ax.set_xticks([x*100 for x in range(m / 100 + 1)]) + + for (x, y) in zip(go, ys): + if x > 0: + ax.text(x+m/200., y-2-0.3, 'Go', fontsize=9) + for (x, y) in zip(crossbeam_channel, ys): + if x > 0: + ax.text(x+m/200., y-1-0.3, 'crossbeam-channel', fontsize=9) + for (x, y) in zip(chan, ys): + if x > 0: + ax.text(x+m/200., y+0-0.3, 'chan', fontsize=9) + for (x, y) in zip(mpsc, ys): + if x > 0: + ax.text(x+m/200., y+1-0.3, 'mpsc', fontsize=9) + for (x, y) in zip(futures_channel, ys): + if x > 0: + ax.text(x+m/200., y+2-0.3, 'futures-channel', fontsize=9) + +plot( + 221, + "Bounded channel of capacity 0", + 'bounded0', + ['spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'], +) + +plot( + 222, + "Bounded channel of capacity 1", + 'bounded1', + ['spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'], +) + +plot( + 223, + "Bounded channel of capacity N", + 'bounded', + ['seq', 'spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'], +) + +plot( + 224, + "Unbounded channel", + 'unbounded', + ['seq', 'spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'], +) + 
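+# Lay out the four panels and write the finished chart to `plot.png`.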
+plt.subplots_adjust( + top=0.95, + bottom=0.05, + left=0.1, + right=0.95, + wspace=0.3, + hspace=0.2, +) +plt.savefig('plot.png') +# plt.show() diff --git a/crossbeam-channel/benchmarks/run.sh b/crossbeam-channel/benchmarks/run.sh new file mode 100755 index 000000000..0e1072dce --- /dev/null +++ b/crossbeam-channel/benchmarks/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +cd "$(dirname "$0")" +set -ex + +cargo run --release --bin chan | tee chan.txt +cargo run --release --bin crossbeam-channel | tee crossbeam-channel.txt +cargo run --release --bin futures-channel | tee futures-channel.txt +cargo run --release --bin mpsc | tee mpsc.txt +go run go.go | tee go.txt + +./plot.py *.txt diff --git a/crossbeam-channel/benchmarks/segqueue.rs b/crossbeam-channel/benchmarks/segqueue.rs new file mode 100644 index 000000000..7fa811b7a --- /dev/null +++ b/crossbeam-channel/benchmarks/segqueue.rs @@ -0,0 +1,117 @@ +extern crate crossbeam; + +use crossbeam::sync::SegQueue; +use shared::message; +use std::thread; + +mod shared; + +const MESSAGES: usize = 5_000_000; +const THREADS: usize = 4; + +fn seq() { + let q = SegQueue::new(); + + for i in 0..MESSAGES { + q.push(message(i)); + } + + for _ in 0..MESSAGES { + q.try_pop().unwrap(); + } +} + +fn spsc() { + let q = SegQueue::new(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..MESSAGES { + q.push(message(i)); + } + }); + + for _ in 0..MESSAGES { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpsc() { + let q = SegQueue::new(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + q.push(message(i)); + } + }); + } + + for _ in 0..MESSAGES { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); +} + +fn mpmc() { + let q = SegQueue::new(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..MESSAGES / THREADS { + q.push(message(i)); + } + }); + } + + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..MESSAGES / THREADS { + loop { + if q.try_pop().is_none() { + thread::yield_now(); + } else { + break; + } + } + } + }); + } + }); +} + +fn main() { + macro_rules! run { + ($name:expr, $f:expr) => { + let now = ::std::time::Instant::now(); + $f; + let elapsed = now.elapsed(); + println!( + "{:25} {:15} {:7.3} sec", + $name, + "Rust segqueue", + elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 + ); + } + } + + run!("unbounded_mpmc", mpmc()); + run!("unbounded_mpsc", mpsc()); + run!("unbounded_seq", seq()); + run!("unbounded_spsc", spsc()); +} diff --git a/crossbeam-channel/benchmarks/shared.rs b/crossbeam-channel/benchmarks/shared.rs new file mode 100644 index 000000000..cc885926a --- /dev/null +++ b/crossbeam-channel/benchmarks/shared.rs @@ -0,0 +1,43 @@ +#[derive(Debug, Clone, Copy)] +pub struct Message(i32); + +#[inline] +pub fn message(msg: usize) -> Message { + Message(msg as i32) +} + +#[allow(dead_code)] +pub fn shuffle(v: &mut [T]) { + use std::cell::Cell; + use std::num::Wrapping; + + let len = v.len(); + if len <= 1 { + return; + } + + thread_local! { + static RNG: Cell> = Cell::new(Wrapping(1)); + } + + RNG.with(|rng| { + for i in 1..len { + // This is the 32-bit variant of Xorshift. + // https://en.wikipedia.org/wiki/Xorshift + let mut x = rng.get(); + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + rng.set(x); + + let x = x.0; + let n = i + 1; + + // This is a fast alternative to `let j = x % n`. 
+ // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ + let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize; + + v.swap(i, j); + } + }); +} diff --git a/crossbeam-channel/ci/script.sh b/crossbeam-channel/ci/script.sh new file mode 100755 index 000000000..7244ce779 --- /dev/null +++ b/crossbeam-channel/ci/script.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +check_min_version() { + local rustc="`rustc -V | cut -d' ' -f2 | cut -d- -f1`" + if [[ "$rustc" != "`echo -e "$rustc\n$1" | sort -V | tail -n1`" ]]; then + echo "Unsupported Rust version: $1 < $rustc" + exit 0 + fi +} +check_min_version 1.26.0 + +set -ex + +cargo test -- --test-threads=1 + +if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then + cd benchmarks + cargo build --bins +fi diff --git a/crossbeam-channel/examples/fibonacci.rs b/crossbeam-channel/examples/fibonacci.rs new file mode 100644 index 000000000..499887a05 --- /dev/null +++ b/crossbeam-channel/examples/fibonacci.rs @@ -0,0 +1,27 @@ +//! An asynchronous fibonacci sequence generator. + +extern crate crossbeam_channel; + +use std::thread; + +use crossbeam_channel::{bounded, Sender}; + +// Sends the Fibonacci sequence into the channel until it becomes disconnected. +fn fibonacci(sender: Sender) { + let (mut x, mut y) = (0, 1); + while sender.send(x).is_ok() { + let tmp = x; + x = y; + y = tmp + y; + } +} + +fn main() { + let (s, r) = bounded(0); + thread::spawn(|| fibonacci(s)); + + // Print the first 20 Fibonacci numbers. + for num in r.iter().take(20) { + println!("{}", num); + } +} diff --git a/crossbeam-channel/examples/matching.rs b/crossbeam-channel/examples/matching.rs new file mode 100644 index 000000000..18b6f2ff9 --- /dev/null +++ b/crossbeam-channel/examples/matching.rs @@ -0,0 +1,73 @@ +//! Using `select!` to send and receive on the same channel at the same time. +//! +//! # Copyright +//! +//! This example is based on the following program in Go. +//! +//! Author: Stefan Nilsson +//! License: Creative Commons Attribution 3.0 Unported License. +//! Sources: +//! - https://web.archive.org/web/20171209034309/https://www.nada.kth.se/~snilsson/concurrency +//! - http://www.nada.kth.se/~snilsson/concurrency/src/matching.go +//! +//! ```go +//! func main() { +//! people := []string{"Anna", "Bob", "Cody", "Dave", "Eva"} +//! match := make(chan string, 1) // Make room for one unmatched send. +//! wg := new(sync.WaitGroup) +//! for _, name := range people { +//! wg.Add(1) +//! go Seek(name, match, wg) +//! } +//! wg.Wait() +//! select { +//! case name := <-match: +//! fmt.Printf("No one received %s’s message.\n", name) +//! default: +//! // There was no pending send operation. +//! } +//! } +//! +//! // Seek either sends or receives, whichever possible, a name on the match +//! // channel and notifies the wait group when done. +//! func Seek(name string, match chan string, wg *sync.WaitGroup) { +//! select { +//! case peer := <-match: +//! fmt.Printf("%s received a message from %s.\n", name, peer) +//! case match <- name: +//! // Wait for someone to receive my message. +//! } +//! wg.Done() +//! } +//! ``` + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; + +use crossbeam_channel::bounded; + +fn main() { + let people = vec!["Anna", "Bob", "Cody", "Dave", "Eva"]; + let (s, r) = bounded(1); // Make room for one unmatched send. + + // Either send my name into the channel or receive someone else's, whatever happens first. + let seek = |name, s, r| { + select! 
{ + recv(r) -> peer => println!("{} received a message from {}.", name, peer.unwrap()), + send(s, name) -> _ => {}, // Wait for someone to receive my message. + } + }; + + crossbeam::scope(|scope| { + for name in people { + let (s, r) = (s.clone(), r.clone()); + scope.spawn(move || seek(name, s, r)); + } + }); + + // Check if there is a pending send operation. + if let Ok(name) = r.try_recv() { + println!("No one received {}’s message.", name); + } +} diff --git a/crossbeam-channel/examples/stopwatch.rs b/crossbeam-channel/examples/stopwatch.rs new file mode 100644 index 000000000..b7fa88cfc --- /dev/null +++ b/crossbeam-channel/examples/stopwatch.rs @@ -0,0 +1,54 @@ +//! Prints the elapsed time every 1 second and quits on Ctrl+C. + +#[macro_use] +extern crate crossbeam_channel; +extern crate signal_hook; + +use std::io; +use std::time::{Duration, Instant}; +use std::thread; + +use crossbeam_channel::{tick, unbounded, Receiver}; +use signal_hook::SIGINT; +use signal_hook::iterator::Signals; + +// Creates a channel that gets a message every time `SIGINT` is signalled. +fn sigint_notifier() -> io::Result> { + let (s, r) = unbounded(); + let signals = Signals::new(&[SIGINT])?; + + thread::spawn(move || { + for _ in signals.forever() { + if s.send(()).is_err() { + break; + } + } + }); + + Ok(r) +} + +// Prints the elapsed time. +fn show(dur: Duration) { + println!("Elapsed: {}.{:03} sec", dur.as_secs(), dur.subsec_nanos() / 1_000_000); +} + +fn main() { + let start = Instant::now(); + let update = tick(Duration::from_secs(1)); + let ctrl_c = sigint_notifier().unwrap(); + + loop { + select! { + recv(update) -> _ => { + show(start.elapsed()); + } + recv(ctrl_c) -> _ => { + println!(); + println!("Goodbye!"); + show(start.elapsed()); + break; + } + } + } +} diff --git a/crossbeam-channel/src/channel.rs b/crossbeam-channel/src/channel.rs new file mode 100644 index 000000000..1d4c5236b --- /dev/null +++ b/crossbeam-channel/src/channel.rs @@ -0,0 +1,1381 @@ +//! The channel interface. + +use std::fmt; +use std::isize; +use std::iter::FusedIterator; +use std::mem; +use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::process; +use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::time::{Duration, Instant}; + +use context::Context; +use err::{RecvError, RecvTimeoutError, SendError, SendTimeoutError, TryRecvError, TrySendError}; +use flavors; +use select::{Operation, SelectHandle, Token}; + +/// A channel in the form of one of the three different flavors. +pub struct Channel { + /// The number of senders associated with this channel. + senders: AtomicUsize, + + /// The number of receivers associated with this channel. + receivers: AtomicUsize, + + /// This channel's flavor. + flavor: ChannelFlavor, +} + +/// Channel flavors. +enum ChannelFlavor { + /// Bounded channel based on a preallocated array. + Array(flavors::array::Channel), + + /// Unbounded channel implemented as a linked list. + List(flavors::list::Channel), + + /// Zero-capacity channel. + Zero(flavors::zero::Channel), +} + +/// Creates a channel of unbounded capacity. +/// +/// This channel has a growable buffer that can hold any number of messages at a time. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use crossbeam_channel::unbounded; +/// +/// let (s, r) = unbounded(); +/// +/// // Computes the n-th Fibonacci number. +/// fn fib(n: i32) -> i32 { +/// if n <= 1 { +/// n +/// } else { +/// fib(n - 1) + fib(n - 2) +/// } +/// } +/// +/// // Spawn an asynchronous computation. 
+/// thread::spawn(move || s.send(fib(20)).unwrap()); +/// +/// // Print the result of the computation. +/// println!("{}", r.recv().unwrap()); +/// ``` +pub fn unbounded() -> (Sender, Receiver) { + let chan = Arc::new(Channel { + senders: AtomicUsize::new(0), + receivers: AtomicUsize::new(0), + flavor: ChannelFlavor::List(flavors::list::Channel::new()), + }); + + let s = Sender::new(chan.clone()); + let r = Receiver::new(chan); + (s, r) +} + +/// Creates a channel of bounded capacity. +/// +/// This channel has a buffer that can hold at most `cap` messages at a time. +/// +/// A special case is zero-capacity channel, which cannot hold any messages. Instead, send and +/// receive operations must appear at the same time in order to pair up and pass the message over. +/// +/// # Panics +/// +/// Panics if the capacity is greater than `usize::max_value() / 4`. +/// +/// # Examples +/// +/// A channel of capacity 1: +/// +/// ``` +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_channel::bounded; +/// +/// let (s, r) = bounded(1); +/// +/// // This call returns immediately because there is enough space in the channel. +/// s.send(1).unwrap(); +/// +/// thread::spawn(move || { +/// // This call blocks the current thread because the channel is full. +/// // It will be able to complete only after the first message is received. +/// s.send(2).unwrap(); +/// }); +/// +/// thread::sleep(Duration::from_secs(1)); +/// assert_eq!(r.recv(), Ok(1)); +/// assert_eq!(r.recv(), Ok(2)); +/// ``` +/// +/// A zero-capacity channel: +/// +/// ``` +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_channel::bounded; +/// +/// let (s, r) = bounded(0); +/// +/// thread::spawn(move || { +/// // This call blocks the current thread until a receive operation appears +/// // on the other side of the channel. +/// s.send(1).unwrap(); +/// }); +/// +/// thread::sleep(Duration::from_secs(1)); +/// assert_eq!(r.recv(), Ok(1)); +/// ``` +pub fn bounded(cap: usize) -> (Sender, Receiver) { + let chan = Arc::new(Channel { + senders: AtomicUsize::new(0), + receivers: AtomicUsize::new(0), + flavor: { + if cap == 0 { + ChannelFlavor::Zero(flavors::zero::Channel::new()) + } else { + ChannelFlavor::Array(flavors::array::Channel::with_capacity(cap)) + } + }, + }); + + let s = Sender::new(chan.clone()); + let r = Receiver::new(chan); + (s, r) +} + +/// Creates a receiver that delivers a message after a certain duration of time. +/// +/// The channel is bounded with capacity of 1 and never gets disconnected. Exactly one message will +/// be sent into the channel after `duration` elapses. The message is the instant at which it is +/// sent. +/// +/// # Examples +/// +/// Using an `after` channel for timeouts: +/// +/// ``` +/// # #[macro_use] +/// # extern crate crossbeam_channel; +/// # fn main() { +/// use std::time::Duration; +/// use crossbeam_channel::{after, unbounded}; +/// +/// let (s, r) = unbounded::(); +/// let timeout = Duration::from_millis(100); +/// +/// select! { +/// recv(r) -> msg => println!("received {:?}", msg), +/// recv(after(timeout)) -> _ => println!("timed out"), +/// } +/// # } +/// ``` +/// +/// When the message gets sent: +/// +/// ``` +/// use std::thread; +/// use std::time::{Duration, Instant}; +/// use crossbeam_channel::after; +/// +/// // Converts a number of milliseconds into a `Duration`. +/// let ms = |ms| Duration::from_millis(ms); +/// +/// // Returns `true` if `a` and `b` are very close `Instant`s. 
+/// let eq = |a, b| a + ms(50) > b && b + ms(50) > a; +/// +/// let start = Instant::now(); +/// let r = after(ms(100)); +/// +/// thread::sleep(ms(500)); +/// +/// // This message was sent 100 ms from the start and received 500 ms from the start. +/// assert!(eq(r.recv().unwrap(), start + ms(100))); +/// assert!(eq(Instant::now(), start + ms(500))); +/// ``` +pub fn after(duration: Duration) -> Receiver { + Receiver { + flavor: ReceiverFlavor::After(flavors::after::Channel::new(duration)), + } +} + +/// Creates a receiver that never delivers messages. +/// +/// The channel is bounded with capacity of 0 and never gets disconnected. +/// +/// # Examples +/// +/// Using a `never` channel to optionally add a timeout to [`select!`]: +/// +/// ``` +/// # #[macro_use] +/// # extern crate crossbeam_channel; +/// # fn main() { +/// use std::thread; +/// use std::time::{Duration, Instant}; +/// use crossbeam_channel::{after, never, unbounded}; +/// +/// let (s, r) = unbounded(); +/// +/// thread::spawn(move || { +/// thread::sleep(Duration::from_secs(1)); +/// s.send(1).unwrap(); +/// }); +/// +/// // This duration can be a `Some` or a `None`. +/// let duration = Some(Duration::from_millis(100)); +/// +/// // Create a channel that times out after the specified duration. +/// let timeout = duration +/// .map(|d| after(d)) +/// .unwrap_or(never()); +/// +/// select! { +/// recv(r) -> msg => assert_eq!(msg, Ok(1)), +/// recv(timeout) -> _ => println!("timed out"), +/// } +/// # } +/// ``` +/// +/// [`select!`]: macro.select.html +pub fn never() -> Receiver { + Receiver { + flavor: ReceiverFlavor::Never(flavors::never::Channel::new()), + } +} + +/// Creates a receiver that delivers messages periodically. +/// +/// The channel is bounded with capacity of 1 and never gets disconnected. Messages will be +/// sent into the channel in intervals of `duration`. Each message is the instant at which it is +/// sent. +/// +/// # Examples +/// +/// Using a `tick` channel to periodically print elapsed time: +/// +/// ``` +/// use std::time::{Duration, Instant}; +/// use crossbeam_channel::tick; +/// +/// let start = Instant::now(); +/// let ticker = tick(Duration::from_millis(100)); +/// +/// for _ in 0..5 { +/// ticker.recv().unwrap(); +/// println!("elapsed: {:?}", start.elapsed()); +/// } +/// ``` +/// +/// When messages get sent: +/// +/// ``` +/// use std::thread; +/// use std::time::{Duration, Instant}; +/// use crossbeam_channel::tick; +/// +/// // Converts a number of milliseconds into a `Duration`. +/// let ms = |ms| Duration::from_millis(ms); +/// +/// // Returns `true` if `a` and `b` are very close `Instant`s. +/// let eq = |a, b| a + ms(50) > b && b + ms(50) > a; +/// +/// let start = Instant::now(); +/// let r = tick(ms(100)); +/// +/// // This message was sent 100 ms from the start and received 100 ms from the start. +/// assert!(eq(r.recv().unwrap(), start + ms(100))); +/// assert!(eq(Instant::now(), start + ms(100))); +/// +/// thread::sleep(ms(500)); +/// +/// // This message was sent 200 ms from the start and received 600 ms from the start. +/// assert!(eq(r.recv().unwrap(), start + ms(200))); +/// assert!(eq(Instant::now(), start + ms(600))); +/// +/// // This message was sent 700 ms from the start and received 700 ms from the start. 
+/// assert!(eq(r.recv().unwrap(), start + ms(700))); +/// assert!(eq(Instant::now(), start + ms(700))); +/// ``` +pub fn tick(duration: Duration) -> Receiver { + Receiver { + flavor: ReceiverFlavor::Tick(flavors::tick::Channel::new(duration)), + } +} + +/// The sending side of a channel. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use crossbeam_channel::unbounded; +/// +/// let (s1, r) = unbounded(); +/// let s2 = s1.clone(); +/// +/// thread::spawn(move || s1.send(1).unwrap()); +/// thread::spawn(move || s2.send(2).unwrap()); +/// +/// let msg1 = r.recv().unwrap(); +/// let msg2 = r.recv().unwrap(); +/// +/// assert_eq!(msg1 + msg2, 3); +/// ``` +pub struct Sender { + inner: Arc>, +} + +unsafe impl Send for Sender {} +unsafe impl Sync for Sender {} + +impl UnwindSafe for Sender {} +impl RefUnwindSafe for Sender {} + +impl Sender { + /// Creates a sender handle for the channel and increments the sender count. + fn new(chan: Arc>) -> Self { + let old_count = chan.senders.fetch_add(1, Ordering::SeqCst); + + // Cloning senders and calling `mem::forget` on the clones could potentially overflow the + // counter. It's very difficult to recover sensibly from such degenerate scenarios so we + // just abort when the count becomes very large. + if old_count > isize::MAX as usize { + process::abort(); + } + + Sender { inner: chan } + } + + /// Attempts to send a message into the channel without blocking. + /// + /// This method will either send a message into the channel immediately or return an error if + /// the channel is full or disconnected. The returned error contains the original message. + /// + /// If called on a zero-capacity channel, this method will send the message only if there + /// happens to be a receive operation on the other side of the channel at the same time. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{bounded, TrySendError}; + /// + /// let (s, r) = bounded(1); + /// + /// assert_eq!(s.try_send(1), Ok(())); + /// assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); + /// + /// drop(r); + /// assert_eq!(s.try_send(3), Err(TrySendError::Disconnected(3))); + /// ``` + pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.try_send(msg), + ChannelFlavor::List(chan) => chan.try_send(msg), + ChannelFlavor::Zero(chan) => chan.try_send(msg), + } + } + + /// Blocks the current thread until a message is sent or the channel is disconnected. + /// + /// If the channel is full and not disconnected, this call will block until the send operation + /// can proceed. If the channel becomes disconnected, this call will wake up and return an + /// error. The returned error contains the original message. + /// + /// If called on a zero-capacity channel, this method will wait for a receive operation to + /// appear on the other side of the channel. 
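+    ///
+    /// Note that a blocking `send` can wait forever: if the only `Receiver` is held by the
+    /// sending thread itself, no matching receive operation can ever appear and the call
+    /// deadlocks.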
+ /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::{bounded, SendError}; + /// + /// let (s, r) = bounded(1); + /// assert_eq!(s.send(1), Ok(())); + /// + /// thread::spawn(move || { + /// assert_eq!(r.recv(), Ok(1)); + /// thread::sleep(Duration::from_secs(1)); + /// drop(r); + /// }); + /// + /// assert_eq!(s.send(2), Ok(())); + /// assert_eq!(s.send(3), Err(SendError(3))); + /// ``` + pub fn send(&self, msg: T) -> Result<(), SendError> { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.send(msg, None), + ChannelFlavor::List(chan) => chan.send(msg, None), + ChannelFlavor::Zero(chan) => chan.send(msg, None), + }.map_err(|err| { + match err { + SendTimeoutError::Disconnected(msg) => SendError(msg), + SendTimeoutError::Timeout(_) => unreachable!(), + } + }) + } + + /// Waits for a message to be sent into the channel, but only for a limited time. + /// + /// If the channel is full and not disconnected, this call will block until the send operation + /// can proceed or the operation times out. If the channel becomes disconnected, this call will + /// wake up and return an error. The returned error contains the original message. + /// + /// If called on a zero-capacity channel, this method will wait for a receive operation to + /// appear on the other side of the channel. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::{bounded, SendTimeoutError}; + /// + /// let (s, r) = bounded(0); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_secs(1)); + /// assert_eq!(r.recv(), Ok(2)); + /// drop(r); + /// }); + /// + /// assert_eq!( + /// s.send_timeout(1, Duration::from_millis(500)), + /// Err(SendTimeoutError::Timeout(1)), + /// ); + /// assert_eq!( + /// s.send_timeout(2, Duration::from_secs(1)), + /// Ok(()), + /// ); + /// assert_eq!( + /// s.send_timeout(3, Duration::from_millis(500)), + /// Err(SendTimeoutError::Disconnected(3)), + /// ); + /// ``` + pub fn send_timeout(&self, msg: T, timeout: Duration) -> Result<(), SendTimeoutError> { + let deadline = Instant::now() + timeout; + + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.send(msg, Some(deadline)), + ChannelFlavor::List(chan) => chan.send(msg, Some(deadline)), + ChannelFlavor::Zero(chan) => chan.send(msg, Some(deadline)), + } + } + + /// Returns `true` if the channel is empty. + /// + /// Note: Zero-capacity channels are always empty. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// assert!(s.is_empty()); + /// + /// s.send(0).unwrap(); + /// assert!(!s.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.is_empty(), + ChannelFlavor::List(chan) => chan.is_empty(), + ChannelFlavor::Zero(chan) => chan.is_empty(), + } + } + + /// Returns `true` if the channel is full. + /// + /// Note: Zero-capacity channels are always full. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::bounded; + /// + /// let (s, r) = bounded(1); + /// + /// assert!(!s.is_full()); + /// s.send(0).unwrap(); + /// assert!(s.is_full()); + /// ``` + pub fn is_full(&self) -> bool { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.is_full(), + ChannelFlavor::List(chan) => chan.is_full(), + ChannelFlavor::Zero(chan) => chan.is_full(), + } + } + + /// Returns the number of messages in the channel. 
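+    ///
+    /// Since other threads may send and receive messages concurrently, the returned length
+    /// is only a momentary snapshot and may be outdated as soon as it is read.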
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(s.len(), 0); + /// + /// s.send(1).unwrap(); + /// s.send(2).unwrap(); + /// assert_eq!(s.len(), 2); + /// ``` + pub fn len(&self) -> usize { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.len(), + ChannelFlavor::List(chan) => chan.len(), + ChannelFlavor::Zero(chan) => chan.len(), + } + } + + /// If the channel is bounded, returns its capacity. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{bounded, unbounded}; + /// + /// let (s, _) = unbounded::(); + /// assert_eq!(s.capacity(), None); + /// + /// let (s, _) = bounded::(5); + /// assert_eq!(s.capacity(), Some(5)); + /// + /// let (s, _) = bounded::(0); + /// assert_eq!(s.capacity(), Some(0)); + /// ``` + pub fn capacity(&self) -> Option { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.capacity(), + ChannelFlavor::List(chan) => chan.capacity(), + ChannelFlavor::Zero(chan) => chan.capacity(), + } + } +} + +impl Drop for Sender { + fn drop(&mut self) { + if self.inner.senders.fetch_sub(1, Ordering::SeqCst) == 1 { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.disconnect(), + ChannelFlavor::List(chan) => chan.disconnect(), + ChannelFlavor::Zero(chan) => chan.disconnect(), + }; + } + } +} + +impl Clone for Sender { + fn clone(&self) -> Self { + Sender::new(self.inner.clone()) + } +} + +impl fmt::Debug for Sender { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Sender").finish() + } +} + +/// The receiving side of a channel. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_channel::unbounded; +/// +/// let (s, r) = unbounded(); +/// +/// thread::spawn(move || { +/// s.send(1); +/// thread::sleep(Duration::from_secs(1)); +/// s.send(2); +/// }); +/// +/// assert_eq!(r.recv(), Ok(1)); // Received immediately. +/// assert_eq!(r.recv(), Ok(2)); // Received after 1 second. +/// ``` +pub struct Receiver { + flavor: ReceiverFlavor +} + +/// Receiver flavors. +pub enum ReceiverFlavor { + /// A regular channel (array, list, or zero flavor). + Channel(Arc>), + + /// The after flavor. + After(flavors::after::Channel), + + /// The tick flavor. + Tick(flavors::tick::Channel), + + /// The never flavor. + Never(flavors::never::Channel), +} + +unsafe impl Send for Receiver {} +unsafe impl Sync for Receiver {} + +impl UnwindSafe for Receiver {} +impl RefUnwindSafe for Receiver {} + +impl Receiver { + /// Creates a receiver handle for the channel and increments the receiver count. + fn new(chan: Arc>) -> Self { + let old_count = chan.receivers.fetch_add(1, Ordering::SeqCst); + + // Cloning receivers and calling `mem::forget` on the clones could potentially overflow the + // counter. It's very difficult to recover sensibly from such degenerate scenarios so we + // just abort when the count becomes very large. + if old_count > isize::MAX as usize { + process::abort(); + } + + Receiver { + flavor: ReceiverFlavor::Channel(chan), + } + } + + /// Attempts to receive a message from the channel without blocking. + /// + /// This method will either receive a message from the channel immediately or return an error + /// if the channel is empty. + /// + /// If called on a zero-capacity channel, this method will receive a message only if there + /// happens to be a send operation on the other side of the channel at the same time. 
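+    ///
+    /// For instance, a minimal sketch of a polling loop that drains whatever happens to be
+    /// ready without ever blocking:
+    ///
+    /// ```
+    /// use crossbeam_channel::{unbounded, TryRecvError};
+    ///
+    /// let (s, r) = unbounded();
+    /// s.send(1).unwrap();
+    ///
+    /// loop {
+    ///     match r.try_recv() {
+    ///         Ok(msg) => println!("got {}", msg),
+    ///         // Nothing is ready right now; a real loop would go do other work.
+    ///         Err(TryRecvError::Empty) => break,
+    ///         Err(TryRecvError::Disconnected) => break,
+    ///     }
+    /// }
+    /// ```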
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{unbounded, TryRecvError}; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + /// + /// s.send(5).unwrap(); + /// drop(s); + /// + /// assert_eq!(r.try_recv(), Ok(5)); + /// assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); + /// ``` + pub fn try_recv(&self) -> Result { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.try_recv(), + ChannelFlavor::List(chan) => chan.try_recv(), + ChannelFlavor::Zero(chan) => chan.try_recv(), + }, + ReceiverFlavor::After(chan) => { + let msg = chan.try_recv(); + unsafe { + mem::transmute_copy::< + Result, + Result + >(&msg) + } + }, + ReceiverFlavor::Tick(chan) => { + let msg = chan.try_recv(); + unsafe { + mem::transmute_copy::< + Result, + Result + >(&msg) + } + }, + ReceiverFlavor::Never(chan) => chan.try_recv(), + } + } + + /// Blocks the current thread until a message is received or the channel is empty and + /// disconnected. + /// + /// If the channel is empty and not disconnected, this call will block until the receive + /// operation can proceed. If the channel is empty and becomes disconnected, this call will + /// wake up and return an error. + /// + /// If called on a zero-capacity channel, this method will wait for a send operation to appear + /// on the other side of the channel. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_secs(1)); + /// s.send(5).unwrap(); + /// drop(s); + /// }); + /// + /// assert_eq!(r.recv(), Ok(5)); + /// assert_eq!(r.recv(), Err(RecvError)); + /// ``` + pub fn recv(&self) -> Result { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.recv(None), + ChannelFlavor::List(chan) => chan.recv(None), + ChannelFlavor::Zero(chan) => chan.recv(None), + }, + ReceiverFlavor::After(chan) => { + let msg = chan.recv(None); + unsafe { + mem::transmute_copy::< + Result, + Result, + >(&msg) + } + }, + ReceiverFlavor::Tick(chan) => { + let msg = chan.recv(None); + unsafe { + mem::transmute_copy::< + Result, + Result, + >(&msg) + } + }, + ReceiverFlavor::Never(chan) => chan.recv(None), + }.map_err(|_| RecvError) + } + + /// Waits for a message to be received from the channel, but only for a limited time. + /// + /// If the channel is empty and not disconnected, this call will block until the receive + /// operation can proceed or the operation times out. If the channel is empty and becomes + /// disconnected, this call will wake up and return an error. + /// + /// If called on a zero-capacity channel, this method will wait for a send operation to appear + /// on the other side of the channel. 
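+    ///
+    /// Internally, the timeout is converted into a single deadline (`Instant::now() + timeout`)
+    /// when the call begins, so a spurious wakeup does not restart the full wait.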
+ /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::{unbounded, RecvTimeoutError}; + /// + /// let (s, r) = unbounded(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_secs(1)); + /// s.send(5).unwrap(); + /// drop(s); + /// }); + /// + /// assert_eq!( + /// r.recv_timeout(Duration::from_millis(500)), + /// Err(RecvTimeoutError::Timeout), + /// ); + /// assert_eq!( + /// r.recv_timeout(Duration::from_secs(1)), + /// Ok(5), + /// ); + /// assert_eq!( + /// r.recv_timeout(Duration::from_secs(1)), + /// Err(RecvTimeoutError::Disconnected), + /// ); + /// ``` + pub fn recv_timeout(&self, timeout: Duration) -> Result { + let deadline = Instant::now() + timeout; + + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.recv(Some(deadline)), + ChannelFlavor::List(chan) => chan.recv(Some(deadline)), + ChannelFlavor::Zero(chan) => chan.recv(Some(deadline)), + }, + ReceiverFlavor::After(chan) => { + let msg = chan.recv(Some(deadline)); + unsafe { + mem::transmute_copy::< + Result, + Result, + >(&msg) + } + }, + ReceiverFlavor::Tick(chan) => { + let msg = chan.recv(Some(deadline)); + unsafe { + mem::transmute_copy::< + Result, + Result, + >(&msg) + } + }, + ReceiverFlavor::Never(chan) => chan.recv(Some(deadline)), + } + } + + /// Returns `true` if the channel is empty. + /// + /// Note: Zero-capacity channels are always empty. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// + /// assert!(r.is_empty()); + /// s.send(0).unwrap(); + /// assert!(!r.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.is_empty(), + ChannelFlavor::List(chan) => chan.is_empty(), + ChannelFlavor::Zero(chan) => chan.is_empty(), + }, + ReceiverFlavor::After(chan) => chan.is_empty(), + ReceiverFlavor::Tick(chan) => chan.is_empty(), + ReceiverFlavor::Never(chan) => chan.is_empty(), + } + } + + /// Returns `true` if the channel is full. + /// + /// Note: Zero-capacity channels are always full. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::bounded; + /// + /// let (s, r) = bounded(1); + /// + /// assert!(!r.is_full()); + /// s.send(0).unwrap(); + /// assert!(r.is_full()); + /// ``` + pub fn is_full(&self) -> bool { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.is_full(), + ChannelFlavor::List(chan) => chan.is_full(), + ChannelFlavor::Zero(chan) => chan.is_full(), + }, + ReceiverFlavor::After(chan) => chan.is_full(), + ReceiverFlavor::Tick(chan) => chan.is_full(), + ReceiverFlavor::Never(chan) => chan.is_full(), + } + } + + /// Returns the number of messages in the channel. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(r.len(), 0); + /// + /// s.send(1).unwrap(); + /// s.send(2).unwrap(); + /// assert_eq!(r.len(), 2); + /// ``` + pub fn len(&self) -> usize { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.len(), + ChannelFlavor::List(chan) => chan.len(), + ChannelFlavor::Zero(chan) => chan.len(), + }, + ReceiverFlavor::After(chan) => chan.len(), + ReceiverFlavor::Tick(chan) => chan.len(), + ReceiverFlavor::Never(chan) => chan.len(), + } + } + + /// If the channel is bounded, returns its capacity. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{bounded, unbounded}; + /// + /// let (_, r) = unbounded::(); + /// assert_eq!(r.capacity(), None); + /// + /// let (_, r) = bounded::(5); + /// assert_eq!(r.capacity(), Some(5)); + /// + /// let (_, r) = bounded::(0); + /// assert_eq!(r.capacity(), Some(0)); + /// ``` + pub fn capacity(&self) -> Option { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.capacity(), + ChannelFlavor::List(chan) => chan.capacity(), + ChannelFlavor::Zero(chan) => chan.capacity(), + }, + ReceiverFlavor::After(chan) => chan.capacity(), + ReceiverFlavor::Tick(chan) => chan.capacity(), + ReceiverFlavor::Never(chan) => chan.capacity(), + } + } + + /// A blocking iterator over messages in the channel. + /// + /// Each call to [`next`] blocks waiting for the next message and then returns it. However, if + /// the channel becomes empty and disconnected, it returns [`None`] without blocking. + /// + /// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next + /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// + /// thread::spawn(move || { + /// s.send(1).unwrap(); + /// s.send(2).unwrap(); + /// s.send(3).unwrap(); + /// drop(s); // Disconnect the channel. + /// }); + /// + /// // Collect all messages from the channel. + /// // Note that the call to `collect` blocks until the sender is dropped. + /// let v: Vec<_> = r.iter().collect(); + /// + /// assert_eq!(v, [1, 2, 3]); + /// ``` + pub fn iter(&self) -> Iter { + Iter { receiver: self } + } + + /// A non-blocking iterator over messages in the channel. + /// + /// Each call to [`next`] returns a message if there is one ready to be received. The iterator + /// never blocks waiting for the next message. + /// + /// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded::(); + /// + /// thread::spawn(move || { + /// s.send(1).unwrap(); + /// thread::sleep(Duration::from_secs(1)); + /// s.send(2).unwrap(); + /// thread::sleep(Duration::from_secs(2)); + /// s.send(3).unwrap(); + /// }); + /// + /// thread::sleep(Duration::from_secs(2)); + /// + /// // Collect all messages from the channel without blocking. + /// // The third message hasn't been sent yet so we'll collect only the first two. 
+ /// let v: Vec<_> = r.try_iter().collect(); + /// + /// assert_eq!(v, [1, 2]); + /// ``` + pub fn try_iter(&self) -> TryIter { + TryIter { receiver: self } + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + if let ReceiverFlavor::Channel(chan) = &self.flavor { + if chan.receivers.fetch_sub(1, Ordering::SeqCst) == 1 { + match &chan.flavor { + ChannelFlavor::Array(chan) => chan.disconnect(), + ChannelFlavor::List(chan) => chan.disconnect(), + ChannelFlavor::Zero(chan) => chan.disconnect(), + } + } + } + } +} + +impl Clone for Receiver { + fn clone(&self) -> Self { + match &self.flavor { + ReceiverFlavor::Channel(arc) => Receiver::new(arc.clone()), + ReceiverFlavor::After(chan) => Receiver { + flavor: ReceiverFlavor::After(chan.clone()), + }, + ReceiverFlavor::Tick(chan) => Receiver { + flavor: ReceiverFlavor::Tick(chan.clone()), + }, + ReceiverFlavor::Never(chan) => Receiver { + flavor: ReceiverFlavor::Never(chan.clone()), + }, + } + } +} + +impl fmt::Debug for Receiver { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Receiver").finish() + } +} + +impl<'a, T> IntoIterator for &'a Receiver { + type Item = T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for Receiver { + type Item = T; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { receiver: self } + } +} + +/// A blocking iterator over messages in a channel. +/// +/// Each call to [`next`] blocks waiting for the next message and then returns it. However, if the +/// channel becomes empty and disconnected, it returns [`None`] without blocking. +/// +/// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next +/// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use crossbeam_channel::unbounded; +/// +/// let (s, r) = unbounded(); +/// +/// thread::spawn(move || { +/// s.send(1).unwrap(); +/// s.send(2).unwrap(); +/// s.send(3).unwrap(); +/// drop(s); // Disconnect the channel. +/// }); +/// +/// // Collect all messages from the channel. +/// // Note that the call to `collect` blocks until the sender is dropped. +/// let v: Vec<_> = r.iter().collect(); +/// +/// assert_eq!(v, [1, 2, 3]); +/// ``` +pub struct Iter<'a, T: 'a> { + receiver: &'a Receiver, +} + +impl<'a, T> FusedIterator for Iter<'a, T> {} + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.receiver.recv().ok() + } +} + +impl<'a, T> fmt::Debug for Iter<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Iter").finish() + } +} + +/// A non-blocking iterator over messages in a channel. +/// +/// Each call to [`next`] returns a message if there is one ready to be received. The iterator +/// never blocks waiting for the next message. +/// +/// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_channel::unbounded; +/// +/// let (s, r) = unbounded::(); +/// +/// thread::spawn(move || { +/// s.send(1).unwrap(); +/// thread::sleep(Duration::from_secs(1)); +/// s.send(2).unwrap(); +/// thread::sleep(Duration::from_secs(2)); +/// s.send(3).unwrap(); +/// }); +/// +/// thread::sleep(Duration::from_secs(2)); +/// +/// // Collect all messages from the channel without blocking. 
+/// // The third message hasn't been sent yet so we'll collect only the first two. +/// let v: Vec<_> = r.try_iter().collect(); +/// +/// assert_eq!(v, [1, 2]); +/// ``` +pub struct TryIter<'a, T: 'a> { + receiver: &'a Receiver, +} + +impl<'a, T> Iterator for TryIter<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.receiver.try_recv().ok() + } +} + +impl<'a, T> fmt::Debug for TryIter<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("TryIter").finish() + } +} + +/// A blocking iterator over messages in a channel. +/// +/// Each call to [`next`] blocks waiting for the next message and then returns it. However, if the +/// channel becomes empty and disconnected, it returns [`None`] without blocking. +/// +/// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next +/// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use crossbeam_channel::unbounded; +/// +/// let (s, r) = unbounded(); +/// +/// thread::spawn(move || { +/// s.send(1).unwrap(); +/// s.send(2).unwrap(); +/// s.send(3).unwrap(); +/// drop(s); // Disconnect the channel. +/// }); +/// +/// // Collect all messages from the channel. +/// // Note that the call to `collect` blocks until the sender is dropped. +/// let v: Vec<_> = r.into_iter().collect(); +/// +/// assert_eq!(v, [1, 2, 3]); +/// ``` +pub struct IntoIter { + receiver: Receiver, +} + +impl FusedIterator for IntoIter {} + +impl Iterator for IntoIter { + type Item = T; + + fn next(&mut self) -> Option { + self.receiver.recv().ok() + } +} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("IntoIter").finish() + } +} + +impl SelectHandle for Sender { + fn try(&self, token: &mut Token) -> bool { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.sender().try(token), + ChannelFlavor::List(chan) => chan.sender().try(token), + ChannelFlavor::Zero(chan) => chan.sender().try(token), + } + } + + fn retry(&self, token: &mut Token) -> bool { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.sender().retry(token), + ChannelFlavor::List(chan) => chan.sender().retry(token), + ChannelFlavor::Zero(chan) => chan.sender().retry(token), + } + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, token: &mut Token, oper: Operation, cx: &Context) -> bool { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.sender().register(token, oper, cx), + ChannelFlavor::List(chan) => chan.sender().register(token, oper, cx), + ChannelFlavor::Zero(chan) => chan.sender().register(token, oper, cx), + } + } + + fn unregister(&self, oper: Operation) { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.sender().unregister(oper), + ChannelFlavor::List(chan) => chan.sender().unregister(oper), + ChannelFlavor::Zero(chan) => chan.sender().unregister(oper), + } + } + + fn accept(&self, token: &mut Token, cx: &Context) -> bool { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.sender().accept(token, cx), + ChannelFlavor::List(chan) => chan.sender().accept(token, cx), + ChannelFlavor::Zero(chan) => chan.sender().accept(token, cx), + } + } + + fn state(&self) -> usize { + match &self.inner.flavor { + ChannelFlavor::Array(chan) => chan.sender().state(), + ChannelFlavor::List(chan) => chan.sender().state(), + ChannelFlavor::Zero(chan) => chan.sender().state(), + } + } +} + +impl SelectHandle for Receiver { + 
fn try(&self, token: &mut Token) -> bool { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.receiver().try(token), + ChannelFlavor::List(chan) => chan.receiver().try(token), + ChannelFlavor::Zero(chan) => chan.receiver().try(token), + }, + ReceiverFlavor::After(chan) => chan.try(token), + ReceiverFlavor::Tick(chan) => chan.try(token), + ReceiverFlavor::Never(chan) => chan.try(token), + } + } + + fn retry(&self, token: &mut Token) -> bool { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.receiver().retry(token), + ChannelFlavor::List(chan) => chan.receiver().retry(token), + ChannelFlavor::Zero(chan) => chan.receiver().retry(token), + }, + ReceiverFlavor::After(chan) => chan.retry(token), + ReceiverFlavor::Tick(chan) => chan.retry(token), + ReceiverFlavor::Never(chan) => chan.retry(token), + } + } + + fn deadline(&self) -> Option { + match &self.flavor { + ReceiverFlavor::Channel(_) => None, + ReceiverFlavor::After(chan) => chan.deadline(), + ReceiverFlavor::Tick(chan) => chan.deadline(), + ReceiverFlavor::Never(chan) => chan.deadline(), + } + } + + fn register(&self, token: &mut Token, oper: Operation, cx: &Context) -> bool { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.receiver().register(token, oper, cx), + ChannelFlavor::List(chan) => chan.receiver().register(token, oper, cx), + ChannelFlavor::Zero(chan) => chan.receiver().register(token, oper, cx), + }, + ReceiverFlavor::After(chan) => chan.register(token, oper, cx), + ReceiverFlavor::Tick(chan) => chan.register(token, oper, cx), + ReceiverFlavor::Never(chan) => chan.register(token, oper, cx), + } + } + + fn unregister(&self, oper: Operation) { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.receiver().unregister(oper), + ChannelFlavor::List(chan) => chan.receiver().unregister(oper), + ChannelFlavor::Zero(chan) => chan.receiver().unregister(oper), + }, + ReceiverFlavor::After(chan) => chan.unregister(oper), + ReceiverFlavor::Tick(chan) => chan.unregister(oper), + ReceiverFlavor::Never(chan) => chan.unregister(oper), + } + } + + fn accept(&self, token: &mut Token, cx: &Context) -> bool { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.receiver().accept(token, cx), + ChannelFlavor::List(chan) => chan.receiver().accept(token, cx), + ChannelFlavor::Zero(chan) => chan.receiver().accept(token, cx), + }, + ReceiverFlavor::After(chan) => chan.accept(token, cx), + ReceiverFlavor::Tick(chan) => chan.accept(token, cx), + ReceiverFlavor::Never(chan) => chan.accept(token, cx), + } + } + + fn state(&self) -> usize { + match &self.flavor { + ReceiverFlavor::Channel(arc) => match &arc.flavor { + ChannelFlavor::Array(chan) => chan.receiver().state(), + ChannelFlavor::List(chan) => chan.receiver().state(), + ChannelFlavor::Zero(chan) => chan.receiver().state(), + }, + ReceiverFlavor::After(chan) => chan.state(), + ReceiverFlavor::Tick(chan) => chan.state(), + ReceiverFlavor::Never(chan) => chan.state(), + } + } +} + +/// Writes a message into the channel. 
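+///
+/// This is a low-level function used by the `select!` machinery; the `token` is assumed to
+/// come from a successful select operation on this same channel.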
+pub unsafe fn write<T>(s: &Sender<T>, token: &mut Token, msg: T) -> Result<(), T> {
+    match &s.inner.flavor {
+        ChannelFlavor::Array(chan) => chan.write(token, msg),
+        ChannelFlavor::List(chan) => chan.write(token, msg),
+        ChannelFlavor::Zero(chan) => chan.write(token, msg),
+    }
+}
+
+/// Reads a message from the channel.
+pub unsafe fn read<T>(r: &Receiver<T>, token: &mut Token) -> Result<T, ()> {
+    match &r.flavor {
+        ReceiverFlavor::Channel(arc) => match &arc.flavor {
+            ChannelFlavor::Array(chan) => chan.read(token),
+            ChannelFlavor::List(chan) => chan.read(token),
+            ChannelFlavor::Zero(chan) => chan.read(token),
+        },
+        ReceiverFlavor::After(chan) => {
+            mem::transmute_copy::<Result<Instant, ()>, Result<T, ()>>(&chan.read(token))
+        }
+        ReceiverFlavor::Tick(chan) => {
+            mem::transmute_copy::<Result<Instant, ()>, Result<T, ()>>(&chan.read(token))
+        }
+        ReceiverFlavor::Never(chan) => chan.read(token),
+    }
+}
diff --git a/crossbeam-channel/src/context.rs b/crossbeam-channel/src/context.rs
new file mode 100644
index 000000000..77a846782
--- /dev/null
+++ b/crossbeam-channel/src/context.rs
@@ -0,0 +1,187 @@
+//! Thread-local context used in select.
+
+use std::cell::Cell;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::thread::{self, Thread, ThreadId};
+use std::time::Instant;
+
+use select::Selected;
+use utils::Backoff;
+
+/// Thread-local context used in select.
+#[derive(Clone)]
+pub struct Context {
+    inner: Arc<Inner>,
+}
+
+/// Inner representation of `Context`.
+struct Inner {
+    /// Selected operation.
+    select: AtomicUsize,
+
+    /// A slot into which another thread may store a pointer to its `Packet`.
+    packet: AtomicUsize,
+
+    /// Thread handle.
+    thread: Thread,
+
+    /// Thread id.
+    thread_id: ThreadId,
+}
+
+impl Context {
+    /// Creates a new context for the duration of the closure.
+    #[inline]
+    pub fn with<F, R>(f: F) -> R
+    where
+        F: FnOnce(&Context) -> R,
+    {
+        thread_local! {
+            /// Cached thread-local context.
+            static CONTEXT: Cell<Option<Context>> = Cell::new(Some(Context::new()));
+        }
+
+        let mut f = Some(f);
+        let mut f = move |cx: &Context| -> R {
+            let f = f.take().unwrap();
+            f(cx)
+        };
+
+        CONTEXT.try_with(|cell| {
+            match cell.take() {
+                None => f(&Context::new()),
+                Some(cx) => {
+                    cx.reset();
+                    let res = f(&cx);
+                    cell.set(Some(cx));
+                    res
+                }
+            }
+        }).unwrap_or_else(|_| {
+            f(&Context::new())
+        })
+    }
+
+    /// Creates a new `Context`.
+    #[cold]
+    fn new() -> Context {
+        Context {
+            inner: Arc::new(Inner {
+                select: AtomicUsize::new(Selected::Waiting.into()),
+                packet: AtomicUsize::new(0),
+                thread: thread::current(),
+                thread_id: thread::current().id(),
+            }),
+        }
+    }
+
+    /// Resets `select` and `packet`.
+    #[inline]
+    fn reset(&self) {
+        self.inner.select.store(Selected::Waiting.into(), Ordering::Release);
+        self.inner.packet.store(0, Ordering::Release);
+    }
+
+    /// Attempts to select an operation.
+    ///
+    /// On failure, the previously selected operation is returned.
+    #[inline]
+    pub fn try_select(&self, select: Selected) -> Result<(), Selected> {
+        self.inner
+            .select
+            .compare_exchange(
+                Selected::Waiting.into(),
+                select.into(),
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            )
+            .map(|_| ())
+            .map_err(|e| e.into())
+    }
+
+    /// Returns the selected operation.
+    #[inline]
+    pub fn selected(&self) -> Selected {
+        Selected::from(self.inner.select.load(Ordering::Acquire))
+    }
+
+    /// Stores a packet.
+    ///
+    /// This method must be called after `try_select` succeeds and there is a packet to provide.
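+    ///
+    /// A `packet` value of zero is used as the "no packet" sentinel, which is why zero values
+    /// are never stored here.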
+    /// Stores a packet.
+    ///
+    /// This method must be called after `try_select` succeeds and there is a packet to provide.
+    #[inline]
+    pub fn store_packet(&self, packet: usize) {
+        if packet != 0 {
+            self.inner.packet.store(packet, Ordering::Release);
+        }
+    }
+
+    /// Waits until a packet is provided and returns it.
+    #[inline]
+    pub fn wait_packet(&self) -> usize {
+        let mut backoff = Backoff::new();
+        loop {
+            let packet = self.inner.packet.load(Ordering::Acquire);
+            if packet != 0 {
+                return packet;
+            }
+            backoff.snooze();
+        }
+    }
+
+    /// Waits until an operation is selected and returns it.
+    ///
+    /// If the deadline is reached, `Selected::Aborted` will be selected.
+    #[inline]
+    pub fn wait_until(&self, deadline: Option<Instant>) -> Selected {
+        // Spin for a short time, waiting until an operation is selected.
+        let mut backoff = Backoff::new();
+        loop {
+            let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
+            if sel != Selected::Waiting {
+                return sel;
+            }
+
+            if !backoff.snooze() {
+                break;
+            }
+        }
+
+        loop {
+            // Check whether an operation has been selected.
+            let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
+            if sel != Selected::Waiting {
+                return sel;
+            }
+
+            // If there's a deadline, park the current thread until the deadline is reached.
+            if let Some(end) = deadline {
+                let now = Instant::now();
+
+                if now < end {
+                    thread::park_timeout(end - now);
+                } else {
+                    // The deadline has been reached. Try aborting select.
+                    return match self.try_select(Selected::Aborted) {
+                        Ok(()) => Selected::Aborted,
+                        Err(s) => s,
+                    };
+                }
+            } else {
+                thread::park();
+            }
+        }
+    }
+
+    /// Unparks the thread this context belongs to.
+    #[inline]
+    pub fn unpark(&self) {
+        self.inner.thread.unpark();
+    }
+
+    /// Returns the id of the thread this context belongs to.
+    #[inline]
+    pub fn thread_id(&self) -> ThreadId {
+        self.inner.thread_id
+    }
+}
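`wait_until` is a two-phase wait: spin briefly through `Backoff`, then park, re-checking the selected flag after every wakeup because an `unpark` token may already be buffered or the wakeup may be spurious. A self-contained sketch of the same shape, with a plain `AtomicBool` standing in for the `select` word (names illustrative):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::{Duration, Instant};

/// Spins briefly, then parks until `flag` is set or `deadline` passes.
/// Returns `true` if the flag was observed set, `false` on timeout.
fn wait_until(flag: &AtomicBool, deadline: Option<Instant>) -> bool {
    // Phase 1: short spin, like `Backoff::snooze`.
    for _ in 0..64 {
        if flag.load(Ordering::Acquire) {
            return true;
        }
        std::hint::spin_loop();
    }
    // Phase 2: park, re-checking after every (possibly spurious) wakeup.
    loop {
        if flag.load(Ordering::Acquire) {
            return true;
        }
        match deadline {
            Some(end) => {
                let now = Instant::now();
                if now >= end {
                    return false; // timed out, akin to `Selected::Aborted`
                }
                thread::park_timeout(end - now);
            }
            None => thread::park(),
        }
    }
}

fn main() {
    let flag = Arc::new(AtomicBool::new(false));
    let waiter = thread::current();
    let f = flag.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(10));
        f.store(true, Ordering::Release);
        waiter.unpark(); // like `Context::unpark`
    });
    assert!(wait_until(&flag, Some(Instant::now() + Duration::from_secs(5))));
}
```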
diff --git a/crossbeam-channel/src/err.rs b/crossbeam-channel/src/err.rs
new file mode 100644
index 000000000..1a122d7ae
--- /dev/null
+++ b/crossbeam-channel/src/err.rs
@@ -0,0 +1,367 @@
+use std::error;
+use std::fmt;
+
+/// An error returned from the [`send`] method.
+///
+/// The message could not be sent because the channel is disconnected.
+///
+/// The error contains the message so it can be recovered.
+///
+/// [`send`]: struct.Sender.html#method.send
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct SendError<T>(pub T);
+
+/// An error returned from the [`try_send`] method.
+///
+/// The error contains the message being sent so it can be recovered.
+///
+/// [`try_send`]: struct.Sender.html#method.try_send
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub enum TrySendError<T> {
+    /// The message could not be sent because the channel is full.
+    ///
+    /// If this is a zero-capacity channel, then the error indicates that there was no receiver
+    /// available to receive the message at the time.
+    Full(T),
+
+    /// The message could not be sent because the channel is disconnected.
+    Disconnected(T),
+}
+
+/// An error returned from the [`send_timeout`] method.
+///
+/// The error contains the message being sent so it can be recovered.
+///
+/// [`send_timeout`]: struct.Sender.html#method.send_timeout
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub enum SendTimeoutError<T> {
+    /// The message could not be sent because the channel is full and the operation timed out.
+    ///
+    /// If this is a zero-capacity channel, then the error indicates that there was no receiver
+    /// available to receive the message and the operation timed out.
+    Timeout(T),
+
+    /// The message could not be sent because the channel is disconnected.
+    Disconnected(T),
+}
+
+/// An error returned from the [`recv`] method.
+///
+/// A message could not be received because the channel is empty and disconnected.
+///
+/// [`recv`]: struct.Receiver.html#method.recv
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct RecvError;
+
+/// An error returned from the [`try_recv`] method.
+///
+/// [`try_recv`]: struct.Receiver.html#method.try_recv
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub enum TryRecvError {
+    /// A message could not be received because the channel is empty.
+    ///
+    /// If this is a zero-capacity channel, then the error indicates that there was no sender
+    /// available to send a message at the time.
+    Empty,
+
+    /// The message could not be received because the channel is empty and disconnected.
+    Disconnected,
+}
+
+/// An error returned from the [`recv_timeout`] method.
+///
+/// [`recv_timeout`]: struct.Receiver.html#method.recv_timeout
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub enum RecvTimeoutError {
+    /// A message could not be received because the channel is empty and the operation timed out.
+    ///
+    /// If this is a zero-capacity channel, then the error indicates that there was no sender
+    /// available to send a message and the operation timed out.
+    Timeout,
+
+    /// The message could not be received because the channel is empty and disconnected.
+    Disconnected,
+}
+
+/// An error returned from the [`try_select`] method.
+///
+/// Failed because none of the channel operations were ready.
+///
+/// [`try_select`]: struct.Select.html#method.try_select
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct TrySelectError;
+
+/// An error returned from the [`select_timeout`] method.
+///
+/// Failed because none of the channel operations became ready before the timeout.
+///
+/// [`select_timeout`]: struct.Select.html#method.select_timeout
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct SelectTimeoutError;
+
+impl<T> fmt::Debug for SendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        "SendError(..)".fmt(f)
+    }
+}
+
+impl<T> fmt::Display for SendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        "sending on a disconnected channel".fmt(f)
+    }
+}
+
+impl<T: Send> error::Error for SendError<T> {
+    fn description(&self) -> &str {
+        "sending on a disconnected channel"
+    }
+
+    fn cause(&self) -> Option<&error::Error> {
+        None
+    }
+}
+
+impl<T> SendError<T> {
+    /// Unwraps the message.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use crossbeam_channel::unbounded;
+    ///
+    /// let (s, r) = unbounded();
+    /// drop(r);
+    ///
+    /// if let Err(err) = s.send("foo") {
+    ///     assert_eq!(err.into_inner(), "foo");
+    /// }
+    /// ```
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            TrySendError::Full(..) => "Full(..)".fmt(f),
+            TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f),
+        }
+    }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            TrySendError::Full(..) => "sending on a full channel".fmt(f),
+            TrySendError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
+        }
+    }
+}
+
+impl<T: Send> error::Error for TrySendError<T> {
+    fn description(&self) -> &str {
+        match *self {
+            TrySendError::Full(..) => "sending on a full channel",
+            TrySendError::Disconnected(..)
=> "sending on a disconnected channel", + } + } + + fn cause(&self) -> Option<&error::Error> { + None + } +} + +impl From> for TrySendError { + fn from(err: SendError) -> TrySendError { + match err { + SendError(t) => TrySendError::Disconnected(t), + } + } +} + +impl TrySendError { + /// Unwraps the message. + /// + /// # Examples + /// + /// ```rust + /// use crossbeam_channel::bounded; + /// + /// let (s, r) = bounded(0); + /// + /// if let Err(err) = s.try_send("foo") { + /// assert_eq!(err.into_inner(), "foo"); + /// } + /// ``` + pub fn into_inner(self) -> T { + match self { + TrySendError::Full(v) => v, + TrySendError::Disconnected(v) => v, + } + } +} + +impl fmt::Debug for SendTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "SendTimeoutError(..)".fmt(f) + } +} + +impl fmt::Display for SendTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f), + SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f), + } + } +} + +impl error::Error for SendTimeoutError { + fn description(&self) -> &str { + "sending on an empty and disconnected channel" + } + + fn cause(&self) -> Option<&error::Error> { + None + } +} + +impl From> for SendTimeoutError { + fn from(err: SendError) -> SendTimeoutError { + match err { + SendError(e) => SendTimeoutError::Disconnected(e), + } + } +} + +impl SendTimeoutError { + /// Unwraps the message. + /// + /// # Examples + /// + /// ```rust + /// use std::time::Duration; + /// use crossbeam_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// + /// if let Err(err) = s.send_timeout("foo", Duration::from_secs(1)) { + /// assert_eq!(err.into_inner(), "foo"); + /// } + /// ``` + pub fn into_inner(self) -> T { + match self { + SendTimeoutError::Timeout(v) => v, + SendTimeoutError::Disconnected(v) => v, + } + } +} + +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "receiving on an empty and disconnected channel".fmt(f) + } +} + +impl error::Error for RecvError { + fn description(&self) -> &str { + "receiving on an empty and disconnected channel" + } + + fn cause(&self) -> Option<&error::Error> { + None + } +} + +impl fmt::Display for TryRecvError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + TryRecvError::Empty => "receiving on an empty channel".fmt(f), + TryRecvError::Disconnected => "receiving on an empty and disconnected channel".fmt(f), + } + } +} + +impl error::Error for TryRecvError { + fn description(&self) -> &str { + match *self { + TryRecvError::Empty => "receiving on an empty channel", + TryRecvError::Disconnected => "receiving on an empty and disconnected channel", + } + } + + fn cause(&self) -> Option<&error::Error> { + None + } +} + +impl From for TryRecvError { + fn from(err: RecvError) -> TryRecvError { + match err { + RecvError => TryRecvError::Disconnected, + } + } +} + +impl fmt::Display for RecvTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + RecvTimeoutError::Timeout => "timed out waiting on receive operation".fmt(f), + RecvTimeoutError::Disconnected => "channel is empty and disconnected".fmt(f), + } + } +} + +impl error::Error for RecvTimeoutError { + fn description(&self) -> &str { + match *self { + RecvTimeoutError::Timeout => "timed out waiting on receive operation", + RecvTimeoutError::Disconnected => "channel is empty and disconnected", + } + } + + fn 
cause(&self) -> Option<&error::Error> { + None + } +} + +impl From for RecvTimeoutError { + fn from(err: RecvError) -> RecvTimeoutError { + match err { + RecvError => RecvTimeoutError::Disconnected, + } + } +} + +impl fmt::Display for TrySelectError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "all operations in select would block".fmt(f) + } +} + +impl error::Error for TrySelectError { + fn description(&self) -> &str { + "all operations in select would block" + } + + fn cause(&self) -> Option<&error::Error> { + None + } +} + +impl fmt::Display for SelectTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "timed out waiting on select".fmt(f) + } +} + +impl error::Error for SelectTimeoutError { + fn description(&self) -> &str { + "timed out waiting on select" + } + + fn cause(&self) -> Option<&error::Error> { + None + } +} diff --git a/crossbeam-channel/src/flavors/after.rs b/crossbeam-channel/src/flavors/after.rs new file mode 100644 index 000000000..be13e2494 --- /dev/null +++ b/crossbeam-channel/src/flavors/after.rs @@ -0,0 +1,212 @@ +//! Channel that delivers a message after a certain amount of time. +//! +//! Messages cannot be sent into this kind of channel; they are materialized on demand. + +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::thread; +use std::time::{Duration, Instant}; + +use context::Context; +use err::{RecvTimeoutError, TryRecvError}; +use select::{Operation, SelectHandle, Token}; +use utils; + +/// Result of a receive operation. +pub type AfterToken = Option; + +/// Channel that delivers a message after a certain amount of time. +pub struct Channel { + /// The instant at which the message will be delivered. + delivery_time: Instant, + + /// `true` if the message has been received. + received: Arc, +} + +impl Channel { + /// Creates a channel that delivers a message after a certain duration of time. + #[inline] + pub fn new(dur: Duration) -> Self { + Channel { + delivery_time: Instant::now() + dur, + received: Arc::new(AtomicBool::new(false)), + } + } + + /// Attempts to receive a message without blocking. + #[inline] + pub fn try_recv(&self) -> Result { + // We use relaxed ordering because this is just an optional optimistic check. + if self.received.load(Ordering::Relaxed) { + // The message has already been received. + return Err(TryRecvError::Empty); + } + + if Instant::now() < self.delivery_time { + // The message was not delivered yet. + return Err(TryRecvError::Empty); + } + + // Try receiving the message if it is still available. + if !self.received.swap(true, Ordering::SeqCst) { + // Success! Return delivery time as the message. + Ok(self.delivery_time) + } else { + // The message was already received. + Err(TryRecvError::Empty) + } + } + + /// Receives a message from the channel. + #[inline] + pub fn recv(&self, deadline: Option) -> Result { + // We use relaxed ordering because this is just an optional optimistic check. + if self.received.load(Ordering::Relaxed) { + // The message has already been received. + utils::sleep_until(deadline); + return Err(RecvTimeoutError::Timeout); + } + + // Wait until the message is received or the deadline is reached. + loop { + let now = Instant::now(); + + // Check if we can receive the next message. + if now >= self.delivery_time { + break; + } + + // Check if the deadline has been reached. 
+ if let Some(d) = deadline { + if now >= d { + return Err(RecvTimeoutError::Timeout); + } + + thread::sleep(self.delivery_time.min(d) - now); + } else { + thread::sleep(self.delivery_time - now); + } + } + + // Try receiving the message if it is still available. + if !self.received.swap(true, Ordering::SeqCst) { + // Success! Return the message, which is the instant at which it was delivered. + Ok(self.delivery_time) + } else { + // The message was already received. Block forever. + utils::sleep_until(None); + unreachable!() + } + } + + /// Reads a message from the channel. + #[inline] + pub unsafe fn read(&self, token: &mut Token) -> Result { + token.after.ok_or(()) + } + + /// Returns `true` if the channel is empty. + #[inline] + pub fn is_empty(&self) -> bool { + // We use relaxed ordering because this is just an optional optimistic check. + if self.received.load(Ordering::Relaxed) { + return true; + } + + // If the delivery time hasn't been reached yet, the channel is empty. + if Instant::now() < self.delivery_time { + return true; + } + + // The delivery time has been reached. The channel is empty only if the message has already + // been received. + self.received.load(Ordering::SeqCst) + } + + /// Returns `true` if the channel is full. + #[inline] + pub fn is_full(&self) -> bool { + !self.is_empty() + } + + /// Returns the number of messages in the channel. + #[inline] + pub fn len(&self) -> usize { + if self.is_empty() { + 0 + } else { + 1 + } + } + + /// Returns the capacity of the channel. + #[inline] + pub fn capacity(&self) -> Option { + Some(1) + } +} + +impl Clone for Channel { + #[inline] + fn clone(&self) -> Channel { + Channel { + delivery_time: self.delivery_time, + received: self.received.clone(), + } + } +} + +impl SelectHandle for Channel { + #[inline] + fn try(&self, token: &mut Token) -> bool { + match self.try_recv() { + Ok(msg) => { + token.after = Some(msg); + true + } + Err(TryRecvError::Disconnected) => { + token.after = None; + true + } + Err(TryRecvError::Empty) => { + false + } + } + } + + #[inline] + fn retry(&self, token: &mut Token) -> bool { + self.try(token) + } + + #[inline] + fn deadline(&self) -> Option { + Some(self.delivery_time) + } + + #[inline] + fn register(&self, _token: &mut Token, _oper: Operation, _cx: &Context) -> bool { + true + } + + #[inline] + fn unregister(&self, _oper: Operation) {} + + #[inline] + fn accept(&self, token: &mut Token, _cx: &Context) -> bool { + self.try(token) + } + + #[inline] + fn state(&self) -> usize { + // Return 1 if the deadline has been reached and 0 otherwise. + if self.received.load(Ordering::SeqCst) { + 1 + } else if Instant::now() < self.delivery_time { + 0 + } else { + 1 + } + } +} diff --git a/crossbeam-channel/src/flavors/array.rs b/crossbeam-channel/src/flavors/array.rs new file mode 100644 index 000000000..06c21e3f4 --- /dev/null +++ b/crossbeam-channel/src/flavors/array.rs @@ -0,0 +1,638 @@ +//! Bounded channel based on a preallocated array. +//! +//! This flavor has a fixed, positive capacity. +//! +//! # Copyright +//! +//! The implementation is based on Dmitry Vyukov's bounded MPMC queue. +//! +//! Author: Dmitry Vyukov +//! License: http://www.1024cores.net/home/code-license +//! Sources: +//! - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue +//! - https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub +//! +//! ```text +//! Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. +//! +//! 
Redistribution and use in source and binary forms, with or without modification, are permitted +//! provided that the following conditions are met: +//! +//! 1. Redistributions of source code must retain the above copyright notice, this list of +//! conditions and the following disclaimer. +//! +//! 2. Redistributions in binary form must reproduce the above copyright notice, this list +//! of conditions and the following disclaimer in the documentation and/or other materials +//! provided with the distribution. +//! +//! THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +//! INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +//! PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE +//! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +//! BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +//! OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +//! STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +//! +//! The views and conclusions contained in the software and documentation are those of the authors +//! and should not be interpreted as representing official policies, either expressed or implied, +//! of Dmitry Vyukov. +//! ``` + +use std::cell::UnsafeCell; +use std::marker::PhantomData; +use std::mem; +use std::ptr; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::time::Instant; + +use crossbeam_utils::CachePadded; + +use context::Context; +use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; +use select::{Operation, SelectHandle, Selected, Token}; +use utils::Backoff; +use waker::SyncWaker; + +/// A slot in a channel. +struct Slot { + /// The current stamp. + /// + /// If the stamp equals the tail, this node will be next written to. If it equals the head, + /// this node will be next read from. + stamp: AtomicUsize, + + /// The message in this slot. + /// + /// If the lap in the stamp is odd, this value contains a message. Otherwise, it is empty. + msg: UnsafeCell, +} + +/// The token type for the array flavor. +pub struct ArrayToken { + /// Slot to read from or write to. + slot: *const u8, + + /// Stamp to store into the slot after reading or writing. + stamp: usize, +} + +impl Default for ArrayToken { + #[inline] + fn default() -> Self { + ArrayToken { + slot: ptr::null(), + stamp: 0, + } + } +} + +/// Bounded channel based on a preallocated array. +pub struct Channel { + /// The head of the channel. + /// + /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a + /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. + /// The lap in the head is always an odd number. + /// + /// Messages are popped from the head of the channel. + head: CachePadded, + + /// The tail of the channel. + /// + /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a + /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. + /// The lap in the tail is always an even number. + /// + /// Messages are pushed into the tail of the channel. + tail: CachePadded, + + /// The buffer holding slots. 
+ buffer: *mut Slot, + + /// The channel capacity. + cap: usize, + + /// A stamp with the value of `{ lap: 1, index: 0 }`. + one_lap: usize, + + /// Equals `true` when the channel is disconnected. + is_disconnected: AtomicBool, + + /// Senders waiting while the channel is full. + senders: SyncWaker, + + /// Receivers waiting while the channel is empty and not disconnected. + receivers: SyncWaker, + + /// Indicates that dropping a `Channel` may drop values of type `T`. + _marker: PhantomData, +} + +impl Channel { + /// Creates a bounded channel of capacity `cap`. + /// + /// # Panics + /// + /// Panics if the capacity is not in the range `1 ..= usize::max_value() / 4`. + pub fn with_capacity(cap: usize) -> Self { + assert!(cap > 0, "capacity must be positive"); + + // Make sure there are at least two most significant bits to encode laps. If we can't + // reserve two bits, then panic. In that case, the buffer is likely too large to allocate + // anyway. + let cap_limit = usize::max_value() / 4; + assert!( + cap <= cap_limit, + "channel capacity is too large: {} > {}", + cap, + cap_limit + ); + + // One lap is the smallest power of two greater than or equal to `cap`. + let one_lap = cap.next_power_of_two(); + + // Head is initialized to `{ lap: 1, index: 0 }`. + // Tail is initialized to `{ lap: 0, index: 0 }`. + let head = one_lap; + let tail = 0; + + // Allocate a buffer of `cap` slots. + let buffer = { + let mut v = Vec::>::with_capacity(cap); + let ptr = v.as_mut_ptr(); + mem::forget(v); + ptr + }; + + // Initialize stamps in the slots. + for i in 0..cap { + unsafe { + // Set the stamp to `{ lap: 0, index: i }`. + let slot = buffer.add(i); + ptr::write(&mut (*slot).stamp, AtomicUsize::new(i)); + } + } + + Channel { + buffer, + cap, + one_lap, + is_disconnected: AtomicBool::new(false), + head: CachePadded::new(AtomicUsize::new(head)), + tail: CachePadded::new(AtomicUsize::new(tail)), + senders: SyncWaker::new(), + receivers: SyncWaker::new(), + _marker: PhantomData, + } + } + + /// Returns a receiver handle to the channel. + pub fn receiver(&self) -> Receiver { + Receiver(self) + } + + /// Returns a sender handle to the channel. + pub fn sender(&self) -> Sender { + Sender(self) + } + + /// Attempts to reserve a slot for sending a message. + fn start_send(&self, token: &mut Token) -> bool { + // If the channel is disconnected, return early. + if self.is_disconnected() { + token.array.slot = ptr::null(); + token.array.stamp = 0; + return true; + } + + let mut backoff = Backoff::new(); + + loop { + // Load the tail and deconstruct it. + let tail = self.tail.load(Ordering::SeqCst); + let index = tail & (self.one_lap - 1); + let lap = tail & !(self.one_lap - 1); + + // Inspect the corresponding slot. + let slot = unsafe { &*self.buffer.add(index) }; + let stamp = slot.stamp.load(Ordering::Acquire); + + // If the tail and the stamp match, we may attempt to push. + if tail == stamp { + let new_tail = if index + 1 < self.cap { + // Same lap, incremented index. + // Set to `{ lap: lap, index: index + 1 }`. + tail + 1 + } else { + // Two laps forward, index wraps around to zero. + // Set to `{ lap: lap.wrapping_add(2), index: 0 }`. + lap.wrapping_add(self.one_lap.wrapping_mul(2)) + }; + + // Try moving the tail. + if self + .tail + .compare_exchange_weak(tail, new_tail, Ordering::SeqCst, Ordering::Relaxed) + .is_ok() + { + // Prepare the token for the follow-up call to `write`. 
+                    token.array.slot = slot as *const Slot<T> as *const u8;
+                    token.array.stamp = stamp.wrapping_add(self.one_lap);
+                    return true;
+                }
+            // But if the slot lags one lap behind the tail...
+            } else if stamp.wrapping_add(self.one_lap) == tail {
+                let head = self.head.load(Ordering::SeqCst);
+
+                // ...and if the head lags one lap behind the tail as well...
+                if head.wrapping_add(self.one_lap) == tail {
+                    // ...then the channel is full.
+                    return false;
+                }
+            }
+
+            backoff.spin();
+        }
+    }
+
+    /// Writes a message into the channel.
+    pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
+        // If there is no slot, the channel is disconnected.
+        if token.array.slot.is_null() {
+            return Err(msg);
+        }
+
+        let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
+
+        // Write the message into the slot and update the stamp.
+        slot.msg.get().write(msg);
+        slot.stamp.store(token.array.stamp, Ordering::Release);
+
+        // Wake a sleeping receiver.
+        self.receivers.wake_one();
+        Ok(())
+    }
+
+    /// Attempts to reserve a slot for receiving a message.
+    fn start_recv(&self, token: &mut Token) -> bool {
+        let mut backoff = Backoff::new();
+
+        loop {
+            // Load the head and deconstruct it.
+            let head = self.head.load(Ordering::SeqCst);
+            let index = head & (self.one_lap - 1);
+            let lap = head & !(self.one_lap - 1);
+
+            // Inspect the corresponding slot.
+            let slot = unsafe { &*self.buffer.add(index) };
+            let stamp = slot.stamp.load(Ordering::Acquire);
+
+            // If the head and the stamp match, we may attempt to pop.
+            if head == stamp {
+                let new = if index + 1 < self.cap {
+                    // Same lap, incremented index.
+                    // Set to `{ lap: lap, index: index + 1 }`.
+                    head + 1
+                } else {
+                    // Two laps forward, index wraps around to zero.
+                    // Set to `{ lap: lap.wrapping_add(2), index: 0 }`.
+                    lap.wrapping_add(self.one_lap.wrapping_mul(2))
+                };
+
+                // Try moving the head.
+                if self
+                    .head
+                    .compare_exchange_weak(head, new, Ordering::SeqCst, Ordering::Relaxed)
+                    .is_ok()
+                {
+                    // Prepare the token for the follow-up call to `read`.
+                    token.array.slot = slot as *const Slot<T> as *const u8;
+                    token.array.stamp = stamp.wrapping_add(self.one_lap);
+                    return true;
+                }
+            // But if the slot lags one lap behind the head...
+            } else if stamp.wrapping_add(self.one_lap) == head {
+                let tail = self.tail.load(Ordering::SeqCst);
+
+                // ...and if the tail lags one lap behind the head as well, that means the channel
+                // is empty.
+                if tail.wrapping_add(self.one_lap) == head {
+                    // If the channel is disconnected...
+                    if self.is_disconnected() {
+                        // ...and still empty...
+                        if self.tail.load(Ordering::SeqCst) == tail {
+                            // ...then receive an error.
+                            token.array.slot = ptr::null();
+                            token.array.stamp = 0;
+                            return true;
+                        }
+                    } else {
+                        // Otherwise, the receive operation is not ready.
+                        return false;
+                    }
+                }
+            }
+
+            backoff.spin();
+        }
+    }
+
+    /// Reads a message from the channel.
+    pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
+        if token.array.slot.is_null() {
+            // The channel is disconnected.
+            return Err(());
+        }
+
+        let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
+
+        // Read the message from the slot and update the stamp.
+        let msg = slot.msg.get().read();
+        slot.stamp.store(token.array.stamp, Ordering::Release);
+
+        // Wake a sleeping sender.
+        self.senders.wake_one();
+        Ok(msg)
+    }
+
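The `start_send`/`write` and `start_recv`/`read` pairs above are the two-phase halves that the public methods below compose: reserve a slot via CAS on the stamp, then move the message. For reference, a small usage sketch through the crate's public API (assuming the `bounded` constructor re-exported from `lib.rs`):

```rust
use crossbeam_channel::{bounded, TryRecvError, TrySendError};

fn main() {
    let (s, r) = bounded(1);
    s.try_send(1).unwrap(); // reserves a slot, then writes
    // Full: the head lags exactly one lap behind the tail.
    assert_eq!(s.try_send(2), Err(TrySendError::Full(2)));
    assert_eq!(r.try_recv(), Ok(1));
    // Empty again: the tail lags one lap behind the head.
    assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
```

+    /// Attempts to send a message into the channel.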
+ pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { + let token = &mut Token::default(); + if self.start_send(token) { + unsafe { + self.write(token, msg).map_err(TrySendError::Disconnected) + } + } else { + Err(TrySendError::Full(msg)) + } + } + + /// Sends a message into the channel. + pub fn send(&self, msg: T, deadline: Option) -> Result<(), SendTimeoutError> { + let token = &mut Token::default(); + loop { + // Try sending a message several times. + let mut backoff = Backoff::new(); + loop { + if self.start_send(token) { + let res = unsafe { self.write(token, msg) }; + return res.map_err(SendTimeoutError::Disconnected); + } + if !backoff.snooze() { + break; + } + } + + Context::with(|cx| { + // Prepare for blocking until a receiver wakes us up. + let oper = Operation::hook(token); + self.senders.register(oper, cx); + + // Has the channel become ready just now? + if !self.is_full() || self.is_disconnected() { + let _ = cx.try_select(Selected::Aborted); + } + + // Block the current thread. + let sel = cx.wait_until(deadline); + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted | Selected::Disconnected => { + self.senders.unregister(oper).unwrap(); + } + Selected::Operation(_) => {} + } + }); + + if let Some(d) = deadline { + if Instant::now() >= d { + return Err(SendTimeoutError::Timeout(msg)); + } + } + } + } + + /// Attempts to receive a message without blocking. + pub fn try_recv(&self) -> Result { + let token = &mut Token::default(); + + if self.start_recv(token) { + unsafe { + self.read(token).map_err(|_| TryRecvError::Disconnected) + } + } else { + Err(TryRecvError::Empty) + } + } + + /// Receives a message from the channel. + pub fn recv(&self, deadline: Option) -> Result { + let token = &mut Token::default(); + loop { + // Try receiving a message several times. + let mut backoff = Backoff::new(); + loop { + if self.start_recv(token) { + let res = unsafe { self.read(token) }; + return res.map_err(|_| RecvTimeoutError::Disconnected); + } + if !backoff.snooze() { + break; + } + } + + Context::with(|cx| { + // Prepare for blocking until a sender wakes us up. + let oper = Operation::hook(token); + self.receivers.register(oper, cx); + + // Has the channel become ready just now? + if !self.is_empty() || self.is_disconnected() { + let _ = cx.try_select(Selected::Aborted); + } + + // Block the current thread. + let sel = cx.wait_until(deadline); + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted | Selected::Disconnected => { + self.receivers.unregister(oper).unwrap(); + // If the channel was disconnected, we still have to check for remaining + // messages. + } + Selected::Operation(_) => {} + } + }); + + if let Some(d) = deadline { + if Instant::now() >= d { + return Err(RecvTimeoutError::Timeout); + } + } + } + } + + /// Returns the current number of messages inside the channel. + pub fn len(&self) -> usize { + loop { + // Load the tail, then load the head. + let tail = self.tail.load(Ordering::SeqCst); + let head = self.head.load(Ordering::SeqCst); + + // If the tail didn't change, we've got consistent values to work with. + if self.tail.load(Ordering::SeqCst) == tail { + let hix = head & (self.one_lap - 1); + let tix = tail & (self.one_lap - 1); + + return if hix < tix { + tix - hix + } else if hix > tix { + self.cap - hix + tix + } else if tail.wrapping_add(self.one_lap) == head { + 0 + } else { + self.cap + }; + } + } + } + + /// Returns the capacity of the channel. 
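`send` and `recv` above take an optional deadline and convert its expiry into `SendTimeoutError::Timeout`/`RecvTimeoutError::Timeout`; the public `send_timeout` and `recv_timeout` wrappers pass `Instant::now() + timeout` down to them. A usage sketch via the public API:

```rust
use std::time::Duration;
use crossbeam_channel::{bounded, RecvTimeoutError};

fn main() {
    let (s, r) = bounded::<i32>(1);
    // Empty channel: the deadline passes and recv aborts with Timeout.
    assert_eq!(
        r.recv_timeout(Duration::from_millis(10)),
        Err(RecvTimeoutError::Timeout)
    );
    s.send(7).unwrap();
    assert_eq!(r.recv_timeout(Duration::from_millis(10)), Ok(7));
}
```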
+ pub fn capacity(&self) -> Option { + Some(self.cap) + } + + /// Disconnects the channel and wakes up all blocked receivers. + pub fn disconnect(&self) { + if !self.is_disconnected.swap(true, Ordering::SeqCst) { + self.senders.disconnect(); + self.receivers.disconnect(); + } + } + + /// Returns `true` if the channel is disconnected. + pub fn is_disconnected(&self) -> bool { + self.is_disconnected.load(Ordering::SeqCst) + } + + /// Returns `true` if the channel is empty. + pub fn is_empty(&self) -> bool { + let head = self.head.load(Ordering::SeqCst); + let tail = self.tail.load(Ordering::SeqCst); + + // Is the tail lagging one lap behind head? + // + // Note: If the head changes just before we load the tail, that means there was a moment + // when the channel was not empty, so it is safe to just return `false`. + tail.wrapping_add(self.one_lap) == head + } + + /// Returns `true` if the channel is full. + pub fn is_full(&self) -> bool { + let tail = self.tail.load(Ordering::SeqCst); + let head = self.head.load(Ordering::SeqCst); + + // Is the head lagging one lap behind tail? + // + // Note: If the tail changes just before we load the head, that means there was a moment + // when the channel was not full, so it is safe to just return `false`. + head.wrapping_add(self.one_lap) == tail + } +} + +impl Drop for Channel { + fn drop(&mut self) { + // Get the index of the head. + let hix = self.head.load(Ordering::Relaxed) & (self.one_lap - 1); + + // Loop over all slots that hold a message and drop them. + for i in 0..self.len() { + // Compute the index of the next slot holding a message. + let index = if hix + i < self.cap { + hix + i + } else { + hix + i - self.cap + }; + + unsafe { + self.buffer.add(index).drop_in_place(); + } + } + + // Finally, deallocate the buffer, but don't run any destructors. + unsafe { + Vec::from_raw_parts(self.buffer, 0, self.cap); + } + } +} + +/// Receiver handle to a channel. +pub struct Receiver<'a, T: 'a>(&'a Channel); + +/// Sender handle to a channel. 
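`len`, `is_empty`, `is_full`, and `Drop` above all decode stamps the same way: the low bits of a stamp are a buffer index and the high bits a lap counter, with `one_lap` the smallest power of two greater than or equal to `cap`. A tiny standalone illustration of that packing (values picked arbitrarily):

```rust
fn main() {
    let cap = 5usize;
    let one_lap = cap.next_power_of_two(); // 8: the low 3 bits hold the index

    // A stamp for `{ lap: 3, index: 4 }`:
    let stamp = 3 * one_lap + 4;
    let index = stamp & (one_lap - 1);
    let lap = stamp & !(one_lap - 1);
    assert_eq!(index, 4);
    assert_eq!(lap, 3 * one_lap);

    // The channel starts out empty: head is `{ lap: 1, index: 0 }`,
    // tail is `{ lap: 0, index: 0 }`, so the tail lags exactly one lap.
    let head = one_lap;
    let tail = 0;
    assert!(tail.wrapping_add(one_lap) == head);
}
```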
+pub struct Sender<'a, T: 'a>(&'a Channel); + +impl<'a, T> SelectHandle for Receiver<'a, T> { + fn try(&self, token: &mut Token) -> bool { + self.0.start_recv(token) + } + + fn retry(&self, token: &mut Token) -> bool { + self.0.start_recv(token) + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, _token: &mut Token, oper: Operation, cx: &Context) -> bool { + self.0.receivers.register(oper, cx); + self.0.is_empty() && !self.0.is_disconnected() + } + + fn unregister(&self, oper: Operation) { + self.0.receivers.unregister(oper); + } + + fn accept(&self, token: &mut Token, _cx: &Context) -> bool { + self.0.start_recv(token) + } + + fn state(&self) -> usize { + self.0.tail.load(Ordering::SeqCst) + } +} + +impl<'a, T> SelectHandle for Sender<'a, T> { + fn try(&self, token: &mut Token) -> bool { + self.0.start_send(token) + } + + fn retry(&self, token: &mut Token) -> bool { + self.0.start_send(token) + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, _token: &mut Token, oper: Operation, cx: &Context) -> bool { + self.0.senders.register(oper, cx); + self.0.is_full() && !self.0.is_disconnected() + } + + fn unregister(&self, oper: Operation) { + self.0.senders.unregister(oper); + } + + fn accept(&self, token: &mut Token, _cx: &Context) -> bool { + self.0.start_send(token) + } + + fn state(&self) -> usize { + self.0.head.load(Ordering::SeqCst) + } +} diff --git a/crossbeam-channel/src/flavors/list.rs b/crossbeam-channel/src/flavors/list.rs new file mode 100644 index 000000000..aa1875bb9 --- /dev/null +++ b/crossbeam-channel/src/flavors/list.rs @@ -0,0 +1,594 @@ +//! Unbounded channel implemented as a linked list. + +use std::cell::UnsafeCell; +use std::marker::PhantomData; +use std::mem::{self, ManuallyDrop}; +use std::ptr; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::time::Instant; + +use crossbeam_epoch::{self as epoch, Atomic, Guard, Owned, Shared}; +use crossbeam_utils::CachePadded; + +use context::Context; +use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; +use select::{Operation, SelectHandle, Selected, Token}; +use utils::Backoff; +use waker::SyncWaker; + +// TODO(stjepang): Once we bump the minimum required Rust version to 1.28 or newer, reapply the +// following changes by @kleimkuhler: +// +// 1. https://github.com/crossbeam-rs/crossbeam-channel/pull/100 +// 2. https://github.com/crossbeam-rs/crossbeam-channel/pull/101 + +/// The maximum number of messages a block can hold. +const BLOCK_CAP: usize = 32; + +/// A slot in a block. +struct Slot { + /// The message. + msg: UnsafeCell>, + + /// Equals `true` if the message is ready for reading. + ready: AtomicBool, +} + +/// The token type for the list flavor. +pub struct ListToken { + /// Slot to read from or write to. + slot: *const u8, + + /// Guard keeping alive the block that contains the slot. + guard: Option, +} + +impl Default for ListToken { + #[inline] + fn default() -> Self { + ListToken { + slot: ptr::null(), + guard: None, + } + } +} + +/// A block in a linked list. +/// +/// Each block in the list can hold up to `BLOCK_CAP` messages. +struct Block { + /// The start index of this block. + /// + /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`. + start_index: usize, + + /// The next block in the linked list. + next: Atomic>, + + /// Slots for messages. + slots: [UnsafeCell>; BLOCK_CAP], +} + +impl Block { + /// Creates an empty block that starts at `start_index`. 
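The `Block` type above amortizes allocation: messages live in `BLOCK_CAP`-sized blocks chained through `next`, so the allocator is touched once per 32 messages rather than once per message, and a global index maps to a (block, offset) pair. A single-threaded sketch of that layout, with `Box` standing in for the epoch-protected `Atomic` pointers of the real concurrent version (illustrative only):

```rust
const BLOCK_CAP: usize = 32;

struct Block<T> {
    start_index: usize,
    slots: Vec<Option<T>>, // simplified: the real code uses uninitialized slots
    next: Option<Box<Block<T>>>,
}

impl<T> Block<T> {
    fn new(start_index: usize) -> Block<T> {
        Block {
            start_index,
            slots: (0..BLOCK_CAP).map(|_| None).collect(),
            next: None,
        }
    }
}

fn main() {
    // Pushing the 33rd message (index 32) lands in a second block.
    let mut head = Block::new(0);
    head.next = Some(Box::new(Block::new(BLOCK_CAP)));

    let index = 32usize;
    let block = head.next.as_mut().unwrap();
    let offset = index.wrapping_sub(block.start_index); // 0
    block.slots[offset] = Some("hello");
    assert_eq!(block.slots[0].take(), Some("hello"));
}
```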
+ fn new(start_index: usize) -> Block { + Block { + start_index, + slots: unsafe { mem::zeroed() }, + next: Atomic::null(), + } + } +} + +/// Position in the channel (index and block). +/// +/// This struct describes the current position of the head or the tail in a linked list. +struct Position { + /// The index in the channel. + index: AtomicUsize, + + /// The block in the linked list. + block: Atomic>, +} + +/// Unbounded channel implemented as a linked list. +/// +/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are +/// represented as numbers of type `usize` and wrap on overflow. +/// +/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and +/// improve cache efficiency. +pub struct Channel { + /// The head of the channel. + head: CachePadded>, + + /// The tail of the channel. + tail: CachePadded>, + + /// Equals `true` when the channel is disconnected. + is_disconnected: AtomicBool, + + /// Receivers waiting while the channel is empty and not disconnected. + receivers: SyncWaker, + + /// Indicates that dropping a `Channel` may drop values of type `T`. + _marker: PhantomData, +} + +impl Channel { + /// Creates a new unbounded channel. + pub fn new() -> Self { + let channel = Channel { + head: CachePadded::new(Position { + index: AtomicUsize::new(0), + block: Atomic::null(), + }), + tail: CachePadded::new(Position { + index: AtomicUsize::new(0), + block: Atomic::null(), + }), + is_disconnected: AtomicBool::new(false), + receivers: SyncWaker::new(), + _marker: PhantomData, + }; + + // Allocate an empty block for the first batch of messages. + let block = unsafe { Owned::new(Block::new(0)).into_shared(epoch::unprotected()) }; + channel.head.block.store(block, Ordering::Relaxed); + channel.tail.block.store(block, Ordering::Relaxed); + + channel + } + + /// Returns a receiver handle to the channel. + pub fn receiver(&self) -> Receiver { + Receiver(self) + } + + /// Returns a sender handle to the channel. + pub fn sender(&self) -> Sender { + Sender(self) + } + + /// Attempts to reserve a slot for sending a message. + fn start_send(&self, token: &mut Token) -> bool { + // If the channel is disconnected, return early. + if self.is_disconnected() { + token.list.slot = ptr::null(); + return true; + } + + let guard = epoch::pin(); + let mut backoff = Backoff::new(); + + loop { + // These two load operations don't have to be `SeqCst`. If they happen to retrieve + // stale values, the following CAS will fail or won't even be attempted. + let tail_ptr = self.tail.block.load(Ordering::Acquire, &guard); + let tail = unsafe { tail_ptr.deref() }; + let tail_index = self.tail.index.load(Ordering::Relaxed); + + // Calculate the index of the corresponding slot in the block. + let offset = tail_index.wrapping_sub(tail.start_index); + + // Advance the current index one slot forward. + let new_index = tail_index.wrapping_add(1); + + // A closure that installs a block following `tail` in case it hasn't been yet. + let install_next_block = || { + let current = tail + .next + .compare_and_set( + Shared::null(), + Owned::new(Block::new(tail.start_index.wrapping_add(BLOCK_CAP))), + Ordering::AcqRel, + &guard, + ).unwrap_or_else(|err| err.current); + + let _ = + self.tail + .block + .compare_and_set(tail_ptr, current, Ordering::Release, &guard); + }; + + // If `tail_index` is pointing into `tail`... + if offset < BLOCK_CAP { + // Try moving the tail index forward. 
+ if self + .tail + .index + .compare_exchange_weak( + tail_index, + new_index, + Ordering::SeqCst, + Ordering::Relaxed, + ).is_ok() + { + // If this was the last slot in the block, install a new block. + if offset + 1 == BLOCK_CAP { + install_next_block(); + } + + unsafe { + let slot = tail.slots.get_unchecked(offset).get(); + token.list.slot = slot as *const Slot as *const u8; + } + break; + } + + backoff.spin(); + } else if offset == BLOCK_CAP { + // Help install the next block. + install_next_block(); + } + } + + token.list.guard = Some(guard); + true + } + + /// Writes a message into the channel. + pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { + // If there is no slot, the channel is disconnected. + if token.list.slot.is_null() { + return Err(msg); + } + + let slot = &*(token.list.slot as *const Slot); + let _guard: Guard = token.list.guard.take().unwrap(); + + // Write the message into the slot. + (*slot).msg.get().write(ManuallyDrop::new(msg)); + (*slot).ready.store(true, Ordering::Release); + + // Wake a sleeping receiver. + self.receivers.wake_one(); + Ok(()) + } + + /// Attempts to reserve a slot for receiving a message. + fn start_recv(&self, token: &mut Token) -> bool { + let guard = epoch::pin(); + let mut backoff = Backoff::new(); + + loop { + // Loading the head block doesn't have to be a `SeqCst` operation. If we get a stale + // value, the following CAS will fail or not even be attempted. Loading the head index + // must be `SeqCst` because we need the up-to-date value when checking whether the + // channel is empty. + let head_ptr = self.head.block.load(Ordering::Acquire, &guard); + let head = unsafe { head_ptr.deref() }; + let head_index = self.head.index.load(Ordering::SeqCst); + + // Calculate the index of the corresponding slot in the block. + let offset = head_index.wrapping_sub(head.start_index); + + // Advance the current index one slot forward. + let new_index = head_index.wrapping_add(1); + + // A closure that installs a block following `head` in case it hasn't been yet. + let install_next_block = || { + let current = head + .next + .compare_and_set( + Shared::null(), + Owned::new(Block::new(head.start_index.wrapping_add(BLOCK_CAP))), + Ordering::AcqRel, + &guard, + ).unwrap_or_else(|err| err.current); + + let _ = + self.head + .block + .compare_and_set(head_ptr, current, Ordering::Release, &guard); + }; + + // If `head_index` is pointing into `head`... + if offset < BLOCK_CAP { + let slot = unsafe { &*head.slots.get_unchecked(offset).get() }; + + // If this slot does not contain a message... + if !slot.ready.load(Ordering::Relaxed) { + let tail_index = self.tail.index.load(Ordering::SeqCst); + + // If the tail equals the head, that means the channel is empty. + if tail_index == head_index { + // If the channel is disconnected... + if self.is_disconnected() { + // ...and still empty... + if self.tail.index.load(Ordering::SeqCst) == tail_index { + // ...then receive an error. + token.list.slot = ptr::null(); + return true; + } + } else { + // Otherwise, the receive operation is not ready. + return false; + } + } + } + + // Try moving the head index forward. + if self + .head + .index + .compare_exchange_weak( + head_index, + new_index, + Ordering::SeqCst, + Ordering::Relaxed, + ).is_ok() + { + // If this was the last slot in the block, install a new block and destroy the + // old one. 
+ if offset + 1 == BLOCK_CAP { + install_next_block(); + unsafe { + guard.defer_destroy(head_ptr); + } + } + + token.list.slot = slot as *const Slot as *const u8; + break; + } + + backoff.spin(); + } else if offset == BLOCK_CAP { + // Help install the next block. + install_next_block(); + } + } + + token.list.guard = Some(guard); + true + } + + /// Reads a message from the channel. + pub unsafe fn read(&self, token: &mut Token) -> Result { + if token.list.slot.is_null() { + // The channel is disconnected. + return Err(()); + } + + let slot = &*(token.list.slot as *const Slot); + let _guard: Guard = token.list.guard.take().unwrap(); + + // Wait until the message becomes ready. + let mut backoff = Backoff::new(); + while !slot.ready.load(Ordering::Acquire) { + backoff.snooze(); + } + + // Read the message. + let m = slot.msg.get().read(); + let msg = ManuallyDrop::into_inner(m); + Ok(msg) + } + + /// Attempts to send a message into the channel. + pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { + self.send(msg, None).map_err(|err| { + match err { + SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg), + SendTimeoutError::Timeout(_) => unreachable!(), + } + }) + } + + /// Sends a message into the channel. + pub fn send(&self, msg: T, _deadline: Option) -> Result<(), SendTimeoutError> { + let token = &mut Token::default(); + assert!(self.start_send(token)); + unsafe { + self.write(token, msg).map_err(SendTimeoutError::Disconnected) + } + } + + /// Attempts to receive a message without blocking. + pub fn try_recv(&self) -> Result { + let token = &mut Token::default(); + + if self.start_recv(token) { + unsafe { + self.read(token).map_err(|_| TryRecvError::Disconnected) + } + } else { + Err(TryRecvError::Empty) + } + } + + /// Receives a message from the channel. + pub fn recv(&self, deadline: Option) -> Result { + let token = &mut Token::default(); + loop { + // Try receiving a message several times. + let mut backoff = Backoff::new(); + loop { + if self.start_recv(token) { + unsafe { + return self.read(token).map_err(|_| RecvTimeoutError::Disconnected); + } + } + if !backoff.snooze() { + break; + } + } + + // Prepare for blocking until a sender wakes us up. + Context::with(|cx| { + let oper = Operation::hook(token); + self.receivers.register(oper, cx); + + // Has the channel become ready just now? + if !self.is_empty() || self.is_disconnected() { + let _ = cx.try_select(Selected::Aborted); + } + + // Block the current thread. + let sel = cx.wait_until(deadline); + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted | Selected::Disconnected => { + self.receivers.unregister(oper).unwrap(); + // If the channel was disconnected, we still have to check for remaining + // messages. + } + Selected::Operation(_) => {} + } + }); + + if let Some(d) = deadline { + if Instant::now() >= d { + return Err(RecvTimeoutError::Timeout); + } + } + } + } + + /// Returns the current number of messages inside the channel. + pub fn len(&self) -> usize { + loop { + // Load the tail index, then load the head index. + let tail_index = self.tail.index.load(Ordering::SeqCst); + let head_index = self.head.index.load(Ordering::SeqCst); + + // If the tail index didn't change, we've got consistent indices to work with. + if self.tail.index.load(Ordering::SeqCst) == tail_index { + return tail_index.wrapping_sub(head_index); + } + } + } + + /// Returns the capacity of the channel. 
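As with the array flavor, the blocking `recv` above is what backs the public API; a short usage sketch with the crate's `unbounded` constructor:

```rust
use crossbeam_channel::unbounded;

fn main() {
    let (s, r) = unbounded();
    for i in 0..100 {
        s.send(i).unwrap(); // never blocks: capacity() is None
    }
    assert_eq!(r.len(), 100); // tail index minus head index
    assert_eq!(r.recv(), Ok(0));
}
```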
+ pub fn capacity(&self) -> Option { + None + } + + /// Disconnects the channel and wakes up all blocked receivers. + pub fn disconnect(&self) { + if !self.is_disconnected.swap(true, Ordering::SeqCst) { + self.receivers.disconnect(); + } + } + + /// Returns `true` if the channel is disconnected. + pub fn is_disconnected(&self) -> bool { + self.is_disconnected.load(Ordering::SeqCst) + } + + /// Returns `true` if the channel is empty. + pub fn is_empty(&self) -> bool { + let head_index = self.head.index.load(Ordering::SeqCst); + let tail_index = self.tail.index.load(Ordering::SeqCst); + head_index == tail_index + } + + /// Returns `true` if the channel is full. + pub fn is_full(&self) -> bool { + false + } +} + +impl Drop for Channel { + fn drop(&mut self) { + // Get the tail and head indices. + let tail_index = self.tail.index.load(Ordering::Relaxed); + let mut head_index = self.head.index.load(Ordering::Relaxed); + + unsafe { + let mut head_ptr = self + .head + .block + .load(Ordering::Relaxed, epoch::unprotected()); + + // Manually drop all messages between `head_index` and `tail_index` and destroy the + // heap-allocated nodes along the way. + while head_index != tail_index { + let head = head_ptr.deref(); + let offset = head_index.wrapping_sub(head.start_index); + + let slot = &mut *head.slots.get_unchecked(offset).get(); + ManuallyDrop::drop(&mut (*slot).msg.get().read()); + + if offset + 1 == BLOCK_CAP { + let next = head.next.load(Ordering::Relaxed, epoch::unprotected()); + drop(head_ptr.into_owned()); + head_ptr = next; + } + + head_index = head_index.wrapping_add(1); + } + + // If there is one last remaining block in the end, destroy it. + if !head_ptr.is_null() { + drop(head_ptr.into_owned()); + } + } + } +} + +/// Receiver handle to a channel. +pub struct Receiver<'a, T: 'a>(&'a Channel); + +/// Sender handle to a channel. +pub struct Sender<'a, T: 'a>(&'a Channel); + +impl<'a, T> SelectHandle for Receiver<'a, T> { + fn try(&self, token: &mut Token) -> bool { + self.0.start_recv(token) + } + + fn retry(&self, token: &mut Token) -> bool { + self.0.start_recv(token) + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, _token: &mut Token, oper: Operation, cx: &Context) -> bool { + self.0.receivers.register(oper, cx); + self.0.is_empty() && !self.0.is_disconnected() + } + + fn unregister(&self, oper: Operation) { + self.0.receivers.unregister(oper); + } + + fn accept(&self, token: &mut Token, _cx: &Context) -> bool { + self.0.start_recv(token) + } + + fn state(&self) -> usize { + self.0.tail.index.load(Ordering::SeqCst) + } +} + +impl<'a, T> SelectHandle for Sender<'a, T> { + fn try(&self, token: &mut Token) -> bool { + self.0.start_send(token) + } + + fn retry(&self, token: &mut Token) -> bool { + self.0.start_send(token) + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, _token: &mut Token, _oper: Operation, _cx: &Context) -> bool { + !self.0.is_disconnected() + } + + fn unregister(&self, _oper: Operation) {} + + fn accept(&self, token: &mut Token, _cx: &Context) -> bool { + self.0.start_send(token) + } + + fn state(&self) -> usize { + self.0.head.index.load(Ordering::SeqCst) + } +} diff --git a/crossbeam-channel/src/flavors/mod.rs b/crossbeam-channel/src/flavors/mod.rs new file mode 100644 index 000000000..ebd0cfa54 --- /dev/null +++ b/crossbeam-channel/src/flavors/mod.rs @@ -0,0 +1,17 @@ +//! Channel flavors. +//! +//! There are six flavors: +//! +//! 1. `after` - Channel that delivers a message after a certain amount of time. +//! 2. 
`array` - Bounded channel based on a preallocated array. +//! 3. `list` - Unbounded channel implemented as a linked list. +//! 4. `never` - Channel that never delivers messages. +//! 5. `tick` - Channel that delivers messages periodically. +//! 6. `zero` - Zero-capacity channel. + +pub mod after; +pub mod array; +pub mod list; +pub mod never; +pub mod tick; +pub mod zero; diff --git a/crossbeam-channel/src/flavors/never.rs b/crossbeam-channel/src/flavors/never.rs new file mode 100644 index 000000000..b4e2de7f2 --- /dev/null +++ b/crossbeam-channel/src/flavors/never.rs @@ -0,0 +1,114 @@ +//! Channel that never delivers messages. +//! +//! Messages cannot be sent into this kind of channel. + +use std::marker::PhantomData; +use std::time::Instant; + +use context::Context; +use err::{RecvTimeoutError, TryRecvError}; +use select::{Operation, SelectHandle, Token}; +use utils; + +/// This flavor doesn't need a token. +pub type NeverToken = (); + +/// Channel that never delivers messages. +pub struct Channel { + _marker: PhantomData, +} + +impl Channel { + /// Creates a channel that never delivers messages. + #[inline] + pub fn new() -> Self { + Channel { + _marker: PhantomData, + } + } + + /// Attempts to receive a message without blocking. + #[inline] + pub fn try_recv(&self) -> Result { + Err(TryRecvError::Empty) + } + + /// Receives a message from the channel. + #[inline] + pub fn recv(&self, deadline: Option) -> Result { + utils::sleep_until(deadline); + Err(RecvTimeoutError::Timeout) + } + + /// Reads a message from the channel. + #[inline] + pub unsafe fn read(&self, _token: &mut Token) -> Result { + Err(()) + } + + /// Returns `true` if the channel is empty. + #[inline] + pub fn is_empty(&self) -> bool { + true + } + + /// Returns `true` if the channel is full. + #[inline] + pub fn is_full(&self) -> bool { + true + } + + /// Returns the number of messages in the channel. + #[inline] + pub fn len(&self) -> usize { + 0 + } + + /// Returns the capacity of the channel. + #[inline] + pub fn capacity(&self) -> Option { + Some(0) + } +} + +impl Clone for Channel { + #[inline] + fn clone(&self) -> Channel { + Channel::new() + } +} + +impl SelectHandle for Channel { + #[inline] + fn try(&self, _token: &mut Token) -> bool { + false + } + + #[inline] + fn retry(&self, _token: &mut Token) -> bool { + false + } + + #[inline] + fn deadline(&self) -> Option { + None + } + + #[inline] + fn register(&self, _token: &mut Token, _oper: Operation, _cx: &Context) -> bool { + true + } + + #[inline] + fn unregister(&self, _oper: Operation) {} + + #[inline] + fn accept(&self, _token: &mut Token, _cx: &Context) -> bool { + false + } + + #[inline] + fn state(&self) -> usize { + 0 + } +} diff --git a/crossbeam-channel/src/flavors/tick.rs b/crossbeam-channel/src/flavors/tick.rs new file mode 100644 index 000000000..f29541146 --- /dev/null +++ b/crossbeam-channel/src/flavors/tick.rs @@ -0,0 +1,199 @@ +//! Channel that delivers messages periodically. +//! +//! Messages cannot be sent into this kind of channel; they are materialized on demand. + +use std::num::Wrapping; +use std::sync::Arc; +use std::thread; +use std::time::{Duration, Instant}; + +use parking_lot::Mutex; + +use context::Context; +use err::{RecvTimeoutError, TryRecvError}; +use select::{Operation, SelectHandle, Token}; + +/// Result of a receive operation. +pub type TickToken = Option; + +/// Channel state. +struct Inner { + /// The instant at which the next message will be delivered. 
+ next_tick: Instant, + + /// The index of the next message to be received. + index: Wrapping, +} + +/// Channel that delivers messages periodically. +pub struct Channel { + /// The state of the channel. + // TODO: Use `Arc>` here once we implement `AtomicCell`. + inner: Arc>, + + /// The time interval in which messages get delivered. + duration: Duration, +} + +impl Channel { + /// Creates a channel that delivers messages periodically. + #[inline] + pub fn new(dur: Duration) -> Self { + Channel { + inner: Arc::new(Mutex::new(Inner { + next_tick: Instant::now() + dur, + index: Wrapping(0), + })), + duration: dur, + } + } + + /// Attempts to receive a message without blocking. + #[inline] + pub fn try_recv(&self) -> Result { + let mut inner = self.inner.lock(); + let now = Instant::now(); + + // If the next tick time has been reached, we can receive the next message. + if now >= inner.next_tick { + let msg = inner.next_tick; + inner.next_tick = now + self.duration; + inner.index += Wrapping(1); + Ok(msg) + } else { + Err(TryRecvError::Empty) + } + } + + /// Receives a message from the channel. + #[inline] + pub fn recv(&self, deadline: Option) -> Result { + loop { + // Compute the time to sleep until the next message or the deadline. + let offset = { + let mut inner = self.inner.lock(); + let now = Instant::now(); + + // Check if we can receive the next message. + if now >= inner.next_tick { + let msg = inner.next_tick; + inner.next_tick = now + self.duration; + inner.index += Wrapping(1); + return Ok(msg); + } + + // Check if the operation deadline has been reached. + if let Some(d) = deadline { + if now >= d { + return Err(RecvTimeoutError::Timeout); + } + + inner.next_tick.min(d) - now + } else { + inner.next_tick - now + } + }; + + thread::sleep(offset); + } + } + + /// Reads a message from the channel. + #[inline] + pub unsafe fn read(&self, token: &mut Token) -> Result { + token.tick.ok_or(()) + } + + /// Returns `true` if the channel is empty. + #[inline] + pub fn is_empty(&self) -> bool { + let inner = self.inner.lock(); + Instant::now() < inner.next_tick + } + + /// Returns `true` if the channel is full. + #[inline] + pub fn is_full(&self) -> bool { + !self.is_empty() + } + + /// Returns the number of messages in the channel. + #[inline] + pub fn len(&self) -> usize { + if self.is_empty() { + 0 + } else { + 1 + } + } + + /// Returns the capacity of the channel. + #[inline] + pub fn capacity(&self) -> Option { + Some(1) + } +} + +impl Clone for Channel { + #[inline] + fn clone(&self) -> Channel { + Channel { + inner: self.inner.clone(), + duration: self.duration, + } + } +} + +impl SelectHandle for Channel { + #[inline] + fn try(&self, token: &mut Token) -> bool { + match self.try_recv() { + Ok(msg) => { + token.tick = Some(msg); + true + } + Err(TryRecvError::Disconnected) => { + token.tick = None; + true + } + Err(TryRecvError::Empty) => { + false + } + } + } + + #[inline] + fn retry(&self, token: &mut Token) -> bool { + self.try(token) + } + + #[inline] + fn deadline(&self) -> Option { + Some(self.inner.lock().next_tick) + } + + #[inline] + fn register(&self, _token: &mut Token, _oper: Operation, _cx: &Context) -> bool { + true + } + + #[inline] + fn unregister(&self, _oper: Operation) {} + + #[inline] + fn accept(&self, token: &mut Token, _cx: &Context) -> bool { + self.try(token) + } + + #[inline] + fn state(&self) -> usize { + // Return the index of the next message to be delivered to the channel. 
+ let inner = self.inner.lock(); + let index = if Instant::now() < inner.next_tick { + inner.index + } else { + inner.index + Wrapping(1) + }; + index.0 + } +} diff --git a/crossbeam-channel/src/flavors/zero.rs b/crossbeam-channel/src/flavors/zero.rs new file mode 100644 index 000000000..cf5909f9e --- /dev/null +++ b/crossbeam-channel/src/flavors/zero.rs @@ -0,0 +1,540 @@ +//! Zero-capacity channel. +//! +//! This kind of channel is also known as *rendezvous* channel. + +use std::cell::UnsafeCell; +use std::marker::PhantomData; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::thread; +use std::time::Instant; + +use parking_lot::Mutex; + +use context::Context; +use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; +use select::{Operation, SelectHandle, Selected, Token}; +use utils::Backoff; +use waker::Waker; + +/// A pointer to a packet. +pub type ZeroToken = usize; + +/// A slot for passing one message from a sender to a receiver. +struct Packet { + /// Equals `true` if the packet is allocated on the stack. + on_stack: bool, + + /// Equals `true` once the packet is ready for reading or writing. + ready: AtomicBool, + + /// The message. + msg: UnsafeCell>, +} + +impl Packet { + /// Creates an empty packet on the stack. + fn empty_on_stack() -> Packet { + Packet { + on_stack: true, + ready: AtomicBool::new(false), + msg: UnsafeCell::new(None), + } + } + + /// Creates an empty packet on the heap. + fn empty_on_heap() -> Box> { + Box::new(Packet { + on_stack: false, + ready: AtomicBool::new(false), + msg: UnsafeCell::new(None), + }) + } + + /// Creates a packet on the stack, containing a message. + fn message_on_stack(msg: T) -> Packet { + Packet { + on_stack: true, + ready: AtomicBool::new(false), + msg: UnsafeCell::new(Some(msg)), + } + } + + /// Waits until the packet becomes ready for reading or writing. + fn wait_ready(&self) { + let mut backoff = Backoff::new(); + while !self.ready.load(Ordering::Acquire) { + backoff.snooze(); + } + } +} + +/// Inner representation of a zero-capacity channel. +struct Inner { + /// Senders waiting to pair up with a receive operation. + senders: Waker, + + /// Receivers waiting to pair up with a send operation. + receivers: Waker, + + /// Equals `true` when the channel is disconnected. + is_disconnected: bool, +} + +/// Zero-capacity channel. +pub struct Channel { + /// Inner representation of the channel. + inner: Mutex, + + /// Indicates that dropping a `Channel` may drop values of type `T`. + _marker: PhantomData, +} + +impl Channel { + /// Constructs a new zero-capacity channel. + pub fn new() -> Self { + Channel { + inner: Mutex::new(Inner { + senders: Waker::new(), + receivers: Waker::new(), + is_disconnected: false, + }), + _marker: PhantomData, + } + } + + /// Returns a receiver handle to the channel. + pub fn receiver(&self) -> Receiver { + Receiver(self) + } + + /// Returns a sender handle to the channel. + pub fn sender(&self) -> Sender { + Sender(self) + } + + /// Attempts to reserve a slot for sending a message. + fn start_send(&self, token: &mut Token, short_pause: bool) -> bool { + let mut inner = self.inner.lock(); + + // If there's a waiting receiver, pair up with it. + if let Some(operation) = inner.receivers.wake_one() { + token.zero = operation.packet; + return true; + } else if inner.is_disconnected { + token.zero = 0; + return true; + } + + if !short_pause { + return false; + } + + Context::with(|cx| { + // Register this send operation so that another receiver can pair up with it. 
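+            // The packet lives on the heap because a receiver may complete the transfer
+            // after this call returns; the `Aborted` and `Disconnected` arms below reclaim
+            // it if no receiver shows up in time.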
+ let oper = Operation::hook(token); + let packet = Box::into_raw(Packet::::empty_on_heap()); + inner + .senders + .register_with_packet(oper, packet as usize, cx); + drop(inner); + + // Yield to give receivers a chance to pair up with this operation. + thread::yield_now(); + + let sel = match cx.try_select(Selected::Aborted) { + Ok(()) => Selected::Aborted, + Err(s) => s, + }; + + match sel { + Selected::Waiting => unreachable!(), + Selected::Disconnected => { + // Unregister and destroy the packet. + let operation = self.inner.lock().senders.unregister(oper).unwrap(); + unsafe { + drop(Box::from_raw(operation.packet as *mut Packet)); + } + + // All receivers have just been dropped. + token.zero = 0; + true + } + Selected::Aborted => { + // Unregister and destroy the packet. + let operation = self.inner.lock().senders.unregister(oper).unwrap(); + unsafe { + drop(Box::from_raw(operation.packet as *mut Packet)); + } + false + } + Selected::Operation(_) => { + // Success! A receiver has paired up with this operation. + token.zero = cx.wait_packet(); + true + } + } + }) + } + + /// Writes a message into the packet. + pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { + // If there is no packet, the channel is disconnected. + if token.zero == 0 { + return Err(msg); + } + + let packet = &*(token.zero as *const Packet); + packet.msg.get().write(Some(msg)); + packet.ready.store(true, Ordering::Release); + Ok(()) + } + + /// Attempts to pair up with a sender. + fn start_recv(&self, token: &mut Token, short_pause: bool) -> bool { + let mut inner = self.inner.lock(); + + // If there's a waiting sender, pair up with it. + if let Some(operation) = inner.senders.wake_one() { + token.zero = operation.packet; + return true; + } else if inner.is_disconnected { + token.zero = 0; + return true; + } + + if !short_pause { + return false; + } + + Context::with(|cx| { + // Register this receive operation so that another sender can pair up with it. + let oper = Operation::hook(token); + let packet = Box::into_raw(Packet::::empty_on_heap()); + inner + .receivers + .register_with_packet(oper, packet as usize, cx); + drop(inner); + + // Yield to give senders a chance to pair up with this operation. + thread::yield_now(); + + let sel = match cx.try_select(Selected::Aborted) { + Ok(()) => Selected::Aborted, + Err(s) => s, + }; + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted => { + // Unregister and destroy the packet. + let operation = self.inner.lock().receivers.unregister(oper).unwrap(); + unsafe { + drop(Box::from_raw(operation.packet as *mut Packet)); + } + false + } + Selected::Disconnected => { + // Unregister and destroy the packet. + let operation = self.inner.lock().receivers.unregister(oper).unwrap(); + unsafe { + drop(Box::from_raw(operation.packet as *mut Packet)); + } + + // All senders have just been dropped. + token.zero = 0; + true + } + Selected::Operation(_) => { + // Success! A sender has paired up with this operation. + token.zero = cx.wait_packet(); + true + } + } + }) + } + + /// Reads a message from the packet. + pub unsafe fn read(&self, token: &mut Token) -> Result { + // If there is no packet, the channel is disconnected. + if token.zero == 0 { + return Err(()); + } + + let packet = &*(token.zero as *const Packet); + + if packet.on_stack { + // The message has been in the packet from the beginning, so there is no need to wait + // for it. 
However, after reading the message, we need to set `ready` to `true` in + // order to signal that the packet can be destroyed. + let msg = packet.msg.get().replace(None).unwrap(); + packet.ready.store(true, Ordering::Release); + Ok(msg) + } else { + // Wait until the message becomes available, then read it and destroy the + // heap-allocated packet. + packet.wait_ready(); + let msg = packet.msg.get().replace(None).unwrap(); + drop(Box::from_raw(packet as *const Packet as *mut Packet)); + Ok(msg) + } + } + + /// Attempts to send a message into the channel. + pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { + let token = &mut Token::default(); + let mut inner = self.inner.lock(); + + // If there's a waiting receiver, pair up with it. + if let Some(operation) = inner.receivers.wake_one() { + token.zero = operation.packet; + drop(inner); + unsafe { + self.write(token, msg).ok().unwrap(); + } + Ok(()) + } else if inner.is_disconnected { + Err(TrySendError::Disconnected(msg)) + } else { + Err(TrySendError::Full(msg)) + } + } + + /// Sends a message into the channel. + pub fn send(&self, msg: T, deadline: Option) -> Result<(), SendTimeoutError> { + let token = &mut Token::default(); + let mut inner = self.inner.lock(); + + // If there's a waiting receiver, pair up with it. + if let Some(operation) = inner.receivers.wake_one() { + token.zero = operation.packet; + drop(inner); + unsafe { + self.write(token, msg).ok().unwrap(); + } + return Ok(()); + } + + if inner.is_disconnected { + return Err(SendTimeoutError::Disconnected(msg)); + } + + Context::with(|cx| { + // Prepare for blocking until a receiver wakes us up. + let oper = Operation::hook(token); + let packet = Packet::::message_on_stack(msg); + inner + .senders + .register_with_packet(oper, &packet as *const Packet as usize, cx); + drop(inner); + + // Block the current thread. + let sel = cx.wait_until(deadline); + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted => { + self.inner.lock().senders.unregister(oper).unwrap(); + let msg = unsafe { packet.msg.get().replace(None).unwrap() }; + Err(SendTimeoutError::Timeout(msg)) + } + Selected::Disconnected => { + self.inner.lock().senders.unregister(oper).unwrap(); + let msg = unsafe { packet.msg.get().replace(None).unwrap() }; + Err(SendTimeoutError::Disconnected(msg)) + } + Selected::Operation(_) => { + // Wait until the message is read, then drop the packet. + packet.wait_ready(); + Ok(()) + } + } + }) + } + + /// Attempts to receive a message without blocking. + pub fn try_recv(&self) -> Result { + let token = &mut Token::default(); + let mut inner = self.inner.lock(); + + // If there's a waiting sender, pair up with it. + if let Some(operation) = inner.senders.wake_one() { + token.zero = operation.packet; + drop(inner); + unsafe { + self.read(token).map_err(|_| TryRecvError::Disconnected) + } + } else if inner.is_disconnected { + Err(TryRecvError::Disconnected) + } else { + Err(TryRecvError::Empty) + } + } + + /// Receives a message from the channel. + pub fn recv(&self, deadline: Option) -> Result { + let token = &mut Token::default(); + let mut inner = self.inner.lock(); + + // If there's a waiting sender, pair up with it. 
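+        // Fast path: a sender is already parked here with its message, so the rendezvous
+        // completes without blocking this thread.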
+ if let Some(operation) = inner.senders.wake_one() { + token.zero = operation.packet; + drop(inner); + unsafe { + return self.read(token).map_err(|_| RecvTimeoutError::Disconnected); + } + } + + if inner.is_disconnected { + return Err(RecvTimeoutError::Disconnected); + } + + Context::with(|cx| { + // Prepare for blocking until a sender wakes us up. + let oper = Operation::hook(token); + let packet = Packet::::empty_on_stack(); + inner + .receivers + .register_with_packet(oper, &packet as *const Packet as usize, cx); + drop(inner); + + // Block the current thread. + let sel = cx.wait_until(deadline); + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted => { + self.inner.lock().receivers.unregister(oper).unwrap(); + Err(RecvTimeoutError::Timeout) + } + Selected::Disconnected => { + self.inner.lock().receivers.unregister(oper).unwrap(); + Err(RecvTimeoutError::Disconnected) + } + Selected::Operation(_) => { + // Wait until the message is provided, then read it. + packet.wait_ready(); + unsafe { Ok(packet.msg.get().replace(None).unwrap()) } + } + } + }) + } + + /// Disconnects the channel and wakes up all blocked receivers. + pub fn disconnect(&self) { + let mut inner = self.inner.lock(); + + if !inner.is_disconnected { + inner.is_disconnected = true; + inner.senders.disconnect(); + inner.receivers.disconnect(); + } + } + + /// Returns the current number of messages inside the channel. + pub fn len(&self) -> usize { + 0 + } + + /// Returns the capacity of the channel. + pub fn capacity(&self) -> Option { + Some(0) + } + + /// Returns `true` if the channel is empty. + pub fn is_empty(&self) -> bool { + true + } + + /// Returns `true` if the channel is full. + pub fn is_full(&self) -> bool { + true + } +} + +/// Receiver handle to a channel. +pub struct Receiver<'a, T: 'a>(&'a Channel); + +/// Sender handle to a channel. 
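+/// Like `Receiver` above, this is the zero-capacity flavor's internal `SelectHandle`, not
+/// the public `Sender` type exported from the crate root.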
+pub struct Sender<'a, T: 'a>(&'a Channel); + +impl<'a, T> SelectHandle for Receiver<'a, T> { + fn try(&self, token: &mut Token) -> bool { + self.0.start_recv(token, false) + } + + fn retry(&self, token: &mut Token) -> bool { + self.0.start_recv(token, true) + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, _token: &mut Token, oper: Operation, cx: &Context) -> bool { + let packet = Box::into_raw(Packet::::empty_on_heap()); + + let mut inner = self.0.inner.lock(); + inner + .receivers + .register_with_packet(oper, packet as usize, cx); + !inner.senders.can_wake_one() && !inner.is_disconnected + } + + fn unregister(&self, oper: Operation) { + if let Some(operation) = self.0.inner.lock().receivers.unregister(oper) { + unsafe { + drop(Box::from_raw(operation.packet as *mut Packet)); + } + } + } + + fn accept(&self, token: &mut Token, cx: &Context) -> bool { + token.zero = cx.wait_packet(); + true + } + + fn state(&self) -> usize { + self.0.inner.lock().senders.register_count() + } +} + +impl<'a, T> SelectHandle for Sender<'a, T> { + fn try(&self, token: &mut Token) -> bool { + self.0.start_send(token, false) + } + + fn retry(&self, token: &mut Token) -> bool { + self.0.start_send(token, true) + } + + fn deadline(&self) -> Option { + None + } + + fn register(&self, _token: &mut Token, oper: Operation, cx: &Context) -> bool { + let packet = Box::into_raw(Packet::::empty_on_heap()); + + let mut inner = self.0.inner.lock(); + inner + .senders + .register_with_packet(oper, packet as usize, cx); + !inner.receivers.can_wake_one() && !inner.is_disconnected + } + + fn unregister(&self, oper: Operation) { + if let Some(operation) = self.0.inner.lock().senders.unregister(oper) { + unsafe { + drop(Box::from_raw(operation.packet as *mut Packet)); + } + } + } + + fn accept(&self, token: &mut Token, cx: &Context) -> bool { + token.zero = cx.wait_packet(); + true + } + + fn state(&self) -> usize { + self.0.inner.lock().receivers.register_count() + } +} diff --git a/crossbeam-channel/src/lib.rs b/crossbeam-channel/src/lib.rs new file mode 100644 index 000000000..f17af54c4 --- /dev/null +++ b/crossbeam-channel/src/lib.rs @@ -0,0 +1,374 @@ +#![warn(missing_docs, missing_debug_implementations)] + +//! Multi-producer multi-consumer channels for message passing. +//! +//! This library is an alternative to [`std::sync::mpsc`] with more features and better +//! performance. +//! +//! # Hello, world! +//! +//! ``` +//! use crossbeam_channel::unbounded; +//! +//! // Create a channel of unbounded capacity. +//! let (s, r) = unbounded(); +//! +//! // Send a message into the channel. +//! s.send("Hello, world!").unwrap(); +//! +//! // Receive the message from the channel. +//! assert_eq!(r.recv(), Ok("Hello, world!")); +//! ``` +//! +//! # Channel types +//! +//! Channels can be created using two functions: +//! +//! * [`bounded`] creates a channel of bounded capacity, i.e. there is a limit to how many messages +//! it can hold at a time. +//! +//! * [`unbounded`] creates a channel of unbounded capacity, i.e. it can hold any number of +//! messages at a time. +//! +//! Both functions return a [`Sender`] and a [`Receiver`], which represent the two opposite sides +//! of a channel. +//! +//! Creating a bounded channel: +//! +//! ``` +//! use crossbeam_channel::bounded; +//! +//! // Create a channel that can hold at most 5 messages at a time. +//! let (s, r) = bounded(5); +//! +//! // Can send only 5 messages without blocking. +//! for i in 0..5 { +//! s.send(i).unwrap(); +//! } +//! +//! 
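+//! // All five slots are now occupied.
+//! assert_eq!(r.len(), 5);
+//!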
// Another call to `send` would block because the channel is full. +//! // s.send(5).unwrap(); +//! ``` +//! +//! Creating an unbounded channel: +//! +//! ``` +//! use crossbeam_channel::unbounded; +//! +//! // Create an unbounded channel. +//! let (s, r) = unbounded(); +//! +//! // Can send any number of messages into the channel without blocking. +//! for i in 0..1000 { +//! s.send(i).unwrap(); +//! } +//! ``` +//! +//! A special case is zero-capacity channel, which cannot hold any messages. Instead, send and +//! receive operations must appear at the same time in order to pair up and pass the message over: +//! +//! ``` +//! use std::thread; +//! use crossbeam_channel::bounded; +//! +//! // Create a zero-capacity channel. +//! let (s, r) = bounded(0); +//! +//! // Sending blocks until a receive operation appears on the other side. +//! thread::spawn(move || s.send("Hi!").unwrap()); +//! +//! // Receiving blocks until a send operation appears on the other side. +//! assert_eq!(r.recv(), Ok("Hi!")); +//! ``` +//! +//! # Sharing channels +//! +//! Senders and receivers can be cloned and sent to other threads: +//! +//! ``` +//! use std::thread; +//! use crossbeam_channel::bounded; +//! +//! let (s1, r1) = bounded(0); +//! let (s2, r2) = (s1.clone(), r1.clone()); +//! +//! // Spawn a thread that receives a message and then sends one. +//! thread::spawn(move || { +//! r2.recv().unwrap(); +//! s2.send(2).unwrap(); +//! }); +//! +//! // Send a message and then receive one. +//! s1.send(1).unwrap(); +//! r1.recv().unwrap(); +//! ``` +//! +//! Note that cloning only creates a new handle to the same sending or receiving side. It does not +//! create a separate stream of messages in any way: +//! +//! ``` +//! use crossbeam_channel::unbounded; +//! +//! let (s1, r1) = unbounded(); +//! let (s2, r2) = (s1.clone(), r1.clone()); +//! let (s3, r3) = (s2.clone(), r2.clone()); +//! +//! s1.send(10).unwrap(); +//! s2.send(20).unwrap(); +//! s3.send(30).unwrap(); +//! +//! assert_eq!(r3.recv(), Ok(10)); +//! assert_eq!(r1.recv(), Ok(20)); +//! assert_eq!(r2.recv(), Ok(30)); +//! ``` +//! +//! It's also possible to share senders and receivers by reference: +//! +//! ``` +//! # extern crate crossbeam; +//! # extern crate crossbeam_channel; +//! # fn main() { +//! use std::thread; +//! use crossbeam; +//! use crossbeam_channel::bounded; +//! +//! let (s, r) = bounded(0); +//! +//! crossbeam::scope(|scope| { +//! // Spawn a thread that receives a message and then sends one. +//! scope.spawn(|| { +//! r.recv().unwrap(); +//! s.send(2).unwrap(); +//! }); +//! +//! // Send a message and then receive one. +//! s.send(1).unwrap(); +//! r.recv().unwrap(); +//! }); +//! # } +//! ``` +//! +//! # Disconnection +//! +//! When all senders or all receivers associated with a channel get dropped, the channel becomes +//! disconnected. No more messages can be sent, but any remaining messages can still be received. +//! Send and receive operations on a disconnected channel never block. +//! +//! ``` +//! use crossbeam_channel::{unbounded, RecvError}; +//! +//! let (s, r) = unbounded(); +//! s.send(1).unwrap(); +//! s.send(2).unwrap(); +//! s.send(3).unwrap(); +//! +//! // The only sender is dropped, disconnecting the channel. +//! drop(s); +//! +//! // The remaining messages can be received. +//! assert_eq!(r.recv(), Ok(1)); +//! assert_eq!(r.recv(), Ok(2)); +//! assert_eq!(r.recv(), Ok(3)); +//! +//! // There are no more messages in the channel. +//! assert!(r.is_empty()); +//! +//! 
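+//! // A non-blocking `try_recv` now reports `TryRecvError::Disconnected` rather than
+//! // `TryRecvError::Empty`.
+//!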
// Note that calling `r.recv()` does not block. +//! // Instead, `Err(RecvError)` is returned immediately. +//! assert_eq!(r.recv(), Err(RecvError)); +//! ``` +//! +//! # Blocking operations +//! +//! Send and receive operations come in three flavors: +//! +//! * Non-blocking (returns immediately with success or failure). +//! * Blocking (waits until the operation succeeds or the channel becomes disconnected). +//! * Blocking with a timeout (blocks only for a certain duration of time). +//! +//! A simple example showing the difference between non-blocking and blocking operations: +//! +//! ``` +//! use crossbeam_channel::{bounded, RecvError, TryRecvError}; +//! +//! let (s, r) = bounded(1); +//! +//! // Send a message into the channel. +//! s.send("foo").unwrap(); +//! +//! // This call would block because the channel is full. +//! // s.send("bar").unwrap(); +//! +//! // Receive the message. +//! assert_eq!(r.recv(), Ok("foo")); +//! +//! // This call would block because the channel is empty. +//! // r.recv(); +//! +//! // Try receiving a message without blocking. +//! assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +//! +//! // Disconnect the channel. +//! drop(s); +//! +//! // This call doesn't block because the channel is now disconnected. +//! assert_eq!(r.recv(), Err(RecvError)); +//! ``` +//! +//! # Iteration +//! +//! Receivers can be used as iterators. For example, method [`iter`] creates an iterator that +//! receives messages until the channel becomes empty and disconnected. Note that iteration may +//! block waiting for next message to arrive. +//! +//! ``` +//! use std::thread; +//! use crossbeam_channel::unbounded; +//! +//! let (s, r) = unbounded(); +//! +//! thread::spawn(move || { +//! s.send(1).unwrap(); +//! s.send(2).unwrap(); +//! s.send(3).unwrap(); +//! drop(s); // Disconnect the channel. +//! }); +//! +//! // Collect all messages from the channel. +//! // Note that the call to `collect` blocks until the sender is dropped. +//! let v: Vec<_> = r.iter().collect(); +//! +//! assert_eq!(v, [1, 2, 3]); +//! ``` +//! +//! A non-blocking iterator can be created using [`try_iter`], which receives all available +//! messages without blocking: +//! +//! ``` +//! use crossbeam_channel::unbounded; +//! +//! let (s, r) = unbounded(); +//! s.send(1).unwrap(); +//! s.send(2).unwrap(); +//! s.send(3).unwrap(); +//! // No need to drop the sender. +//! +//! // Receive all messages currently in the channel. +//! let v: Vec<_> = r.try_iter().collect(); +//! +//! assert_eq!(v, [1, 2, 3]); +//! ``` +//! +//! # Selection +//! +//! The [`select!`] macro allows you to define a set of channel operations, wait until any one of +//! them becomes ready, and finally execute it. If multiple operations are ready at the same time, +//! a random one among them is selected. +//! +//! It is also possible to define a `default` case that gets executed if none of the operations are +//! ready, either right away or for a certain duration of time. +//! +//! An operation is considered to be ready if it doesn't have to block. Note that it is ready even +//! when it will simply return an error because the channel is disconnected. +//! +//! An example of receiving a message from two channels: +//! +//! ``` +//! # #[macro_use] +//! # extern crate crossbeam_channel; +//! # fn main() { +//! use std::thread; +//! use std::time::Duration; +//! use crossbeam_channel::unbounded; +//! +//! let (s1, r1) = unbounded(); +//! let (s2, r2) = unbounded(); +//! +//! thread::spawn(move || s1.send(10).unwrap()); +//! 
thread::spawn(move || s2.send(20).unwrap()); +//! +//! // At most one of these two receive operations will be executed. +//! select! { +//! recv(r1) -> msg => assert_eq!(msg, Ok(10)), +//! recv(r2) -> msg => assert_eq!(msg, Ok(20)), +//! default(Duration::from_secs(1)) => println!("timed out"), +//! } +//! # } +//! ``` +//! +//! If you need to select over a dynamically created list of channel operations, use [`Select`] +//! instead. The [`select!`] macro is just a convenience wrapper around [`Select`]. +//! +//! # Extra channels +//! +//! Three functions can create special kinds of channels, all of which return just a [`Receiver`] +//! handle: +//! +//! * [`after`] creates a channel that delivers a single message after a certain duration of time. +//! * [`tick`] creates a channel that delivers messages periodically. +//! * [`never`] creates a channel that never delivers messages. +//! +//! These channels are very efficient because messages get lazily generated on receive operations. +//! +//! An example that prints elapsed time every 50 milliseconds for the duration of 1 second: +//! +//! ``` +//! # #[macro_use] +//! # extern crate crossbeam_channel; +//! # fn main() { +//! use std::time::{Duration, Instant}; +//! use crossbeam_channel::{after, tick}; +//! +//! let start = Instant::now(); +//! let ticker = tick(Duration::from_millis(50)); +//! let timeout = after(Duration::from_secs(1)); +//! +//! loop { +//! select! { +//! recv(ticker) -> _ => println!("elapsed: {:?}", start.elapsed()), +//! recv(timeout) -> _ => break, +//! } +//! } +//! # } +//! ``` +//! +//! [`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html +//! [`unbounded`]: fn.unbounded.html +//! [`bounded`]: fn.bounded.html +//! [`after`]: fn.after.html +//! [`tick`]: fn.tick.html +//! [`never`]: fn.never.html +//! [`send`]: struct.Sender.html#method.send +//! [`recv`]: struct.Receiver.html#method.recv +//! [`iter`]: struct.Receiver.html#method.iter +//! [`try_iter`]: struct.Receiver.html#method.try_iter +//! [`select!`]: macro.select.html +//! [`Select`]: struct.Select.html +//! [`Sender`]: struct.Sender.html +//! [`Receiver`]: struct.Receiver.html + +extern crate crossbeam_epoch; +extern crate crossbeam_utils; +extern crate parking_lot; +extern crate rand; +extern crate smallvec; + +mod channel; +mod context; +mod err; +mod flavors; +mod select; +mod select_macro; +mod utils; +mod waker; + +pub use channel::{Receiver, Sender}; +pub use channel::{bounded, unbounded}; +pub use channel::{after, never, tick}; +pub use channel::{IntoIter, Iter, TryIter}; + +pub use select::{Select, SelectedOperation}; + +pub use err::{RecvError, RecvTimeoutError, TryRecvError}; +pub use err::{SendError, SendTimeoutError, TrySendError}; +pub use err::{SelectTimeoutError, TrySelectError}; diff --git a/crossbeam-channel/src/select.rs b/crossbeam-channel/src/select.rs new file mode 100644 index 000000000..09fc5d9fa --- /dev/null +++ b/crossbeam-channel/src/select.rs @@ -0,0 +1,813 @@ +//! Interface to the select mechanism. + +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::time::{Duration, Instant}; + +use channel::{self, Receiver, Sender}; +use context::Context; +use err::{RecvError, SelectTimeoutError, SendError, TrySelectError}; +use smallvec::SmallVec; +use utils; + +use flavors; + +/// Temporary data that gets initialized during select or a blocking operation, and is consumed by +/// `read` or `write`. +/// +/// Each field contains data associated with a specific channel flavor. 
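+/// Only the field for the flavor of the channel that was actually selected is meaningful;
+/// the rest keep their `Default` values.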
+#[derive(Default)] +pub struct Token { + pub after: flavors::after::AfterToken, + pub array: flavors::array::ArrayToken, + pub list: flavors::list::ListToken, + pub never: flavors::never::NeverToken, + pub tick: flavors::tick::TickToken, + pub zero: flavors::zero::ZeroToken, +} + +/// Identifier associated with an operation by a specific thread on a specific channel. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Operation(usize); + +impl Operation { + /// Creates an operation identifier from a mutable reference. + /// + /// This function essentially just turns the address of the reference into a number. The + /// reference should point to a variable that is specific to the thread and the operation, + /// and is alive for the entire duration of select or blocking operation. + #[inline] + pub fn hook(r: &mut T) -> Operation { + let val = r as *mut T as usize; + // Make sure that the pointer address doesn't equal the numerical representation of + // `Selected::{Waiting, Aborted, Disconnected}`. + assert!(val > 2); + Operation(val) + } +} + +/// Current state of a select or a blocking operation. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum Selected { + /// Still waiting for an operation. + Waiting, + + /// The attempt to block the current thread has been aborted. + Aborted, + + /// A channel was disconnected. + Disconnected, + + /// An operation became ready. + Operation(Operation), +} + +impl From for Selected { + #[inline] + fn from(val: usize) -> Selected { + match val { + 0 => Selected::Waiting, + 1 => Selected::Aborted, + 2 => Selected::Disconnected, + oper => Selected::Operation(Operation(oper)), + } + } +} + +impl Into for Selected { + #[inline] + fn into(self) -> usize { + match self { + Selected::Waiting => 0, + Selected::Aborted => 1, + Selected::Disconnected => 2, + Selected::Operation(Operation(val)) => val, + } + } +} + +/// A receiver or a sender that can participate in select. +/// +/// This is a handle that assists select in executing the operation, registration, deciding on the +/// appropriate deadline for blocking, etc. +pub trait SelectHandle { + /// Attempts to execute the operation and returns `true` on success. + fn try(&self, token: &mut Token) -> bool; + + /// Attempts to execute the operation again and returns `true` on success. + /// + /// Retries are allowed to take a little bit more time than the initial try. + fn retry(&self, token: &mut Token) -> bool; + + /// Returns a deadline for the operation, if there is one. + fn deadline(&self) -> Option; + + /// Registers the operation. + fn register(&self, token: &mut Token, oper: Operation, cx: &Context) -> bool; + + /// Unregisters the operation. + fn unregister(&self, oper: Operation); + + /// Attempts to execute the selected operation. + fn accept(&self, token: &mut Token, cx: &Context) -> bool; + + /// Returns the current state of the opposite side of the channel. + /// + /// This is typically represented by the current message index at the opposite side of the + /// channel. + /// + /// For example, by calling `state()`, the receiving side can check how much activity the + /// sending side has had and viceversa. 
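+    ///
+    /// The non-blocking select path snapshots this value for every handle and keeps
+    /// retrying the operations only while some snapshot changes between passes.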
+ fn state(&self) -> usize; +} + +impl<'a, T: SelectHandle> SelectHandle for &'a T { + fn try(&self, token: &mut Token) -> bool { + (**self).try(token) + } + + fn retry(&self, token: &mut Token) -> bool { + (**self).retry(token) + } + + fn deadline(&self) -> Option { + (**self).deadline() + } + + fn register(&self, token: &mut Token, oper: Operation, cx: &Context) -> bool { + (**self).register(token, oper, cx) + } + + fn unregister(&self, oper: Operation) { + (**self).unregister(oper); + } + + fn accept(&self, token: &mut Token, cx: &Context) -> bool { + (**self).accept(token, cx) + } + + fn state(&self) -> usize { + (**self).state() + } +} + +/// Determines when a select operation should time out. +#[derive(Clone, Copy, Eq, PartialEq)] +enum Timeout { + /// Try firing operations without blocking. + Now, + + /// Block forever. + Never, + + /// Time out after an instant in time. + At(Instant), +} + +/// Runs until one of the operations is fired, potentially blocking the current thread. +/// +/// Successful receive operations will have to be followed up by `channel::read()` and successful +/// send operations by `channel::write()`. +fn run_select( + handles: &mut [(&S, usize, *const u8)], + timeout: Timeout, +) -> Option<(Token, usize, *const u8)> +where + S: SelectHandle + ?Sized, +{ + if handles.is_empty() { + // Wait until the timeout and return. + match timeout { + Timeout::Now => return None, + Timeout::Never => { + utils::sleep_until(None); + unreachable!(); + } + Timeout::At(when) => { + utils::sleep_until(Some(when)); + return None; + } + } + } + + // Create a token, which serves as a temporary variable that gets initialized in this function + // and is later used by a call to `channel::read()` or `channel::write()` that completes the + // selected operation. + let mut token = Token::default(); + + // Is this is a non-blocking select? + if timeout == Timeout::Now { + if handles.len() <= 1 { + // Try firing the operations without blocking. + for &(handle, i, ptr) in handles.iter() { + if handle.try(&mut token) { + return Some((token, i, ptr)); + } + } + + return None; + } + + // Shuffle the operations for fairness. + utils::shuffle(handles); + + let mut states = SmallVec::<[usize; 4]>::with_capacity(handles.len()); + + // Snapshot the channel states of all operations. + for &(handle, _, _) in handles.iter() { + states.push(handle.state()); + } + + loop { + // Try firing the operations. + for &(handle, i, ptr) in handles.iter() { + if handle.try(&mut token) { + return Some((token, i, ptr)); + } + } + + let mut changed = false; + + // Update the channel states and check whether any have been changed. + for (&(handle, _, _), state) in handles.iter().zip(states.iter_mut()) { + let current = handle.state(); + + if *state != current { + *state = current; + changed = true; + } + } + + // If none of the states have changed, selection failed. + if !changed { + return None; + } + } + } + + loop { + // Shuffle the operations for fairness. + if handles.len() >= 2 { + utils::shuffle(handles); + } + + // Try firing the operations without blocking. + for &(handle, i, ptr) in handles.iter() { + if handle.try(&mut token) { + return Some((token, i, ptr)); + } + } + + // Before blocking, try firing the operations one more time. Retries are permitted to take + // a little bit more time than the initial tries, but they still mustn't block. + for &(handle, i, ptr) in handles.iter() { + if handle.retry(&mut token) { + return Some((token, i, ptr)); + } + } + + // Prepare for blocking. 
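+        // All operations register themselves under this thread's `Context`; the first
+        // event to resolve it (a paired operation, disconnection, or the deadline) wins,
+        // and every other registration is rolled back afterwards.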
+ let res = Context::with(|cx| { + let mut sel = Selected::Waiting; + let mut registered_count = 0; + + // Register all operations. + for (handle, _, _) in handles.iter_mut() { + registered_count += 1; + + // If registration returns `false`, that means the operation has just become ready. + if !handle.register(&mut token, Operation::hook::<&S>(handle), cx) { + // Try aborting select. + sel = match cx.try_select(Selected::Aborted) { + Ok(()) => Selected::Aborted, + Err(s) => s, + }; + break; + } + + // If another thread has already selected one of the operations, stop registration. + sel = cx.selected(); + if sel != Selected::Waiting { + break; + } + } + + if sel == Selected::Waiting { + // Check with each operation for how long we're allowed to block, and compute the + // earliest deadline. + let mut deadline: Option = match timeout { + Timeout::Now => unreachable!(), + Timeout::Never => None, + Timeout::At(when) => Some(when), + }; + for &(handle, _, _) in handles.iter() { + if let Some(x) = handle.deadline() { + deadline = deadline.map(|y| x.min(y)).or(Some(x)); + } + } + + // Block the current thread. + sel = cx.wait_until(deadline); + } + + // Unregister all registered operations. + for (handle, _, _) in handles.iter_mut().take(registered_count) { + handle.unregister(Operation::hook::<&S>(handle)); + } + + match sel { + Selected::Waiting => unreachable!(), + Selected::Aborted => {} + Selected::Disconnected | Selected::Operation(_) => { + // Find the selected operation. + for (handle, i, ptr) in handles.iter_mut() { + // Is this the selected operation? + if sel == Selected::Operation(Operation::hook::<&S>(handle)) { + // Try firing this operation. + if handle.accept(&mut token, cx) { + return Some((*i, *ptr)); + } + } + } + } + } + + None + }); + + // Return if an operation was fired. + if let Some((i, ptr)) = res { + return Some((token, i, ptr)); + } + + // Check for timeout. + match timeout { + Timeout::Now => unreachable!(), + Timeout::Never => {}, + Timeout::At(when) => { + if Instant::now() >= when { + // Fall back to one final non-blocking select. This is needed to make the whole + // select invocation appear from the outside as a single operation. + return run_select(handles, Timeout::Now); + } + } + }; + } +} + +/// Selects from a set of channel operations. +/// +/// `Select` allows you to define a set of channel operations, wait until any one of them becomes +/// ready, and finally execute it. If multiple operations are ready at the same time, a random one +/// among them is selected. +/// +/// An operation is considered to be ready if it doesn't have to block. Note that it is ready even +/// when it will simply return an error because the channel is disconnected. +/// +/// The [`select!`] macro is a convenience wrapper around `Select`. However, it cannot select over a +/// dynamically created list of channel operations. +/// +/// [`select!`]: macro.select.html +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use crossbeam_channel::{unbounded, Select}; +/// +/// let (s1, r1) = unbounded(); +/// let (s2, r2) = unbounded(); +/// s1.send(10).unwrap(); +/// +/// let mut sel = Select::new(); +/// let oper1 = sel.recv(&r1); +/// let oper2 = sel.send(&s2); +/// +/// // Both operations are initially ready, so a random one will be executed. 
+/// let oper = sel.select(); +/// match oper.index() { +/// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), +/// i if i == oper2 => assert_eq!(oper.send(&s2, 20), Ok(())), +/// _ => unreachable!(), +/// } +/// ``` +pub struct Select<'a> { + /// A list of senders and receivers participating in selection. + handles: SmallVec<[(&'a SelectHandle, usize, *const u8); 4]>, +} + +unsafe impl<'a> Send for Select<'a> {} +unsafe impl<'a> Sync for Select<'a> {} + +impl<'a> Select<'a> { + /// Creates an empty list of channel operations for selection. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::Select; + /// + /// let mut sel = Select::new(); + /// + /// // The list of operations is empty, which means no operation can be selected. + /// assert!(sel.try_select().is_err()); + /// ``` + pub fn new() -> Select<'a> { + Select { + handles: SmallVec::new(), + } + } + + /// Adds a send operation. + /// + /// Returns the index of the added operation. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use crossbeam_channel::{unbounded, Select}; + /// + /// let (s1, r1) = unbounded::(); + /// let (s2, r2) = unbounded::(); + /// let (s3, r3) = unbounded::(); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.send(&s1); + /// let oper2 = sel.send(&s2); + /// let oper3 = sel.send(&s3); + /// + /// assert_eq!(oper1, 0); + /// assert_eq!(oper2, 1); + /// assert_eq!(oper3, 2); + /// ``` + pub fn send(&mut self, s: &'a Sender) -> usize { + let i = self.handles.len(); + let ptr = s as *const Sender<_> as *const u8; + self.handles.push((s, i, ptr)); + i + } + + /// Adds a receive operation. + /// + /// Returns the index of the added operation. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use crossbeam_channel::{unbounded, Select}; + /// + /// let (s1, r1) = unbounded::(); + /// let (s2, r2) = unbounded::(); + /// let (s3, r3) = unbounded::(); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.recv(&r1); + /// let oper2 = sel.recv(&r2); + /// let oper3 = sel.recv(&r3); + /// + /// assert_eq!(oper1, 0); + /// assert_eq!(oper2, 1); + /// assert_eq!(oper3, 2); + /// ``` + pub fn recv(&mut self, r: &'a Receiver) -> usize { + let i = self.handles.len(); + let ptr = r as *const Receiver<_> as *const u8; + self.handles.push((r, i, ptr)); + i + } + + /// Attempts to execute one of the operations without blocking. + /// + /// If an operation is ready, it is selected and returned. If multiple operations are ready at + /// the same time, a random one among them is selected. If none of the operations are ready, an + /// error is returned. + /// + /// An operation is considered to be ready if it doesn't have to block. Note that it is ready + /// even when it will simply return an error because the channel is disconnected. + /// + /// The selected operation must be completed with [`SelectedOperation::send`] + /// or [`SelectedOperation::recv`]. + /// + /// [`SelectedOperation::send`]: struct.SelectedOperation.html#method.send + /// [`SelectedOperation::recv`]: struct.SelectedOperation.html#method.recv + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use crossbeam_channel::{unbounded, Select}; + /// + /// let (s1, r1) = unbounded(); + /// let (s2, r2) = unbounded(); + /// + /// s1.send(10).unwrap(); + /// s2.send(20).unwrap(); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.recv(&r1); + /// let oper2 = sel.recv(&r2); + /// + /// // Both operations are initially ready, so a random one will be executed. 
+ /// let oper = sel.try_select(); + /// match oper { + /// Err(_) => panic!("both operations should be ready"), + /// Ok(oper) => match oper.index() { + /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), + /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), + /// _ => unreachable!(), + /// } + /// } + /// ``` + pub fn try_select(&mut self) -> Result, TrySelectError> { + match run_select(&mut self.handles, Timeout::Now) { + None => Err(TrySelectError), + Some((token, index, ptr)) => Ok(SelectedOperation { + token, + index, + ptr, + _marker: PhantomData, + }), + } + } + + /// Blocks until one of the operations becomes ready. + /// + /// Once an operation becomes ready, it is selected and returned. + /// + /// An operation is considered to be ready if it doesn't have to block. Note that it is ready + /// even when it will simply return an error because the channel is disconnected. + /// + /// The selected operation must be completed with [`SelectedOperation::send`] + /// or [`SelectedOperation::recv`]. + /// + /// [`SelectedOperation::send`]: struct.SelectedOperation.html#method.send + /// [`SelectedOperation::recv`]: struct.SelectedOperation.html#method.recv + /// + /// # Panics + /// + /// Panics if no operations have been added to `Select`. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::{unbounded, Select}; + /// + /// let (s1, r1) = unbounded(); + /// let (s2, r2) = unbounded(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_secs(1)); + /// s1.send(10).unwrap(); + /// }); + /// thread::spawn(move || s2.send(20).unwrap()); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.recv(&r1); + /// let oper2 = sel.recv(&r2); + /// + /// // The second operation will be selected because it becomes ready first. + /// let oper = sel.select(); + /// match oper.index() { + /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), + /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), + /// _ => unreachable!(), + /// } + /// ``` + pub fn select(&mut self) -> SelectedOperation<'_> { + if self.handles.is_empty() { + panic!("no operations have been added to `Select`"); + } + + let (token, index, ptr) = run_select(&mut self.handles, Timeout::Never).unwrap(); + SelectedOperation { + token, + index, + ptr, + _marker: PhantomData, + } + } + + /// Waits until one of the operations becomes ready, but only for a limited time. + /// + /// If an operation becomes ready, it is selected and returned. If multiple operations are + /// ready at the same time, a random one among them is selected. If none of the operations + /// become ready for the specified duration, an error is returned. + /// + /// An operation is considered to be ready if it doesn't have to block. Note that it is ready + /// even when it will simply return an error because the channel is disconnected. + /// + /// The selected operation must be completed with [`SelectedOperation::send`] + /// or [`SelectedOperation::recv`]. 
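+    /// Dropping the returned `SelectedOperation` without completing the operation panics.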
+ /// + /// [`SelectedOperation::send`]: struct.SelectedOperation.html#method.send + /// [`SelectedOperation::recv`]: struct.SelectedOperation.html#method.recv + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use crossbeam_channel::{unbounded, Select}; + /// + /// let (s1, r1) = unbounded(); + /// let (s2, r2) = unbounded(); + /// + /// thread::spawn(move || { + /// thread::sleep(Duration::from_secs(1)); + /// s1.send(10).unwrap(); + /// }); + /// thread::spawn(move || s2.send(20).unwrap()); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.recv(&r1); + /// let oper2 = sel.recv(&r2); + /// + /// // The second operation will be selected because it becomes ready first. + /// let oper = sel.select_timeout(Duration::from_millis(500)); + /// match oper { + /// Err(_) => panic!("should not have timed out"), + /// Ok(oper) => match oper.index() { + /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), + /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), + /// _ => unreachable!(), + /// } + /// } + /// ``` + pub fn select_timeout( + &mut self, + timeout: Duration, + ) -> Result, SelectTimeoutError> { + let timeout = Timeout::At(Instant::now() + timeout); + + match run_select(&mut self.handles, timeout) { + None => Err(SelectTimeoutError), + Some((token, index, ptr)) => Ok(SelectedOperation { + token, + index, + ptr, + _marker: PhantomData, + }), + } + } +} + +impl<'a> Clone for Select<'a> { + fn clone(&self) -> Select<'a> { + Select { + handles: self.handles.clone(), + } + } +} + +impl<'a> fmt::Debug for Select<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Select").finish() + } +} + +/// A selected operation that needs to be completed. +/// +/// To complete the operation, call [`send`] or [`recv`]. +/// +/// # Panics +/// +/// Forgetting to complete the operation is an error and might lead to deadlocks. If a +/// `SelectedOperation` is dropped without completion, a panic occurs. +/// +/// [`send`]: struct.SelectedOperation.html#method.send +/// [`recv`]: struct.SelectedOperation.html#method.recv +#[must_use] +pub struct SelectedOperation<'a> { + /// Token needed to complete the operation. + token: Token, + + /// The index of the selected operation. + index: usize, + + /// The address of the selected `Sender` or `Receiver`. + ptr: *const u8, + + /// Indicates that a `Select<'a>` is mutably borrowed. + _marker: PhantomData<&'a mut Select<'a>>, +} + +impl<'a> SelectedOperation<'a> { + /// Returns the index of the selected operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{bounded, Select}; + /// + /// let (s1, r1) = bounded::<()>(0); + /// let (s2, r2) = bounded::<()>(0); + /// let (s3, r3) = bounded::<()>(1); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.send(&s1); + /// let oper2 = sel.recv(&r2); + /// let oper3 = sel.send(&s3); + /// + /// // Only the last operation is ready. + /// let oper = sel.select(); + /// assert_eq!(oper.index(), 2); + /// assert_eq!(oper.index(), oper3); + /// + /// // Complete the operation. + /// oper.send(&s3, ()).unwrap(); + /// ``` + pub fn index(&self) -> usize { + self.index + } + + /// Completes the send operation. + /// + /// The passed [`Sender`] reference must be the same one that was used in [`Select::send`] + /// when the operation was added. + /// + /// # Panics + /// + /// Panics if an incorrect [`Sender`] reference is passed. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{bounded, Select, SendError}; + /// + /// let (s, r) = bounded::(0); + /// drop(r); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.send(&s); + /// + /// let oper = sel.select(); + /// assert_eq!(oper.index(), oper1); + /// assert_eq!(oper.send(&s, 10), Err(SendError(10))); + /// ``` + /// + /// [`Sender`]: struct.Sender.html + /// [`Select::send`]: struct.Select.html#method.send + pub fn send(mut self, s: &Sender, msg: T) -> Result<(), SendError> { + assert!( + s as *const Sender as *const u8 == self.ptr, + "passed a sender that wasn't selected", + ); + let res = unsafe { channel::write(s, &mut self.token, msg) }; + mem::forget(self); + res.map_err(SendError) + } + + /// Completes the receive operation. + /// + /// The passed [`Receiver`] reference must be the same one that was used in [`Select::recv`] + /// when the operation was added. + /// + /// # Panics + /// + /// Panics if an incorrect [`Receiver`] reference is passed. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_channel::{bounded, Select, RecvError}; + /// + /// let (s, r) = bounded::(0); + /// drop(s); + /// + /// let mut sel = Select::new(); + /// let oper1 = sel.recv(&r); + /// + /// let oper = sel.select(); + /// assert_eq!(oper.index(), oper1); + /// assert_eq!(oper.recv(&r), Err(RecvError)); + /// ``` + /// + /// [`Receiver`]: struct.Receiver.html + /// [`Select::recv`]: struct.Select.html#method.recv + pub fn recv(mut self, r: &Receiver) -> Result { + assert!( + r as *const Receiver as *const u8 == self.ptr, + "passed a receiver that wasn't selected", + ); + let res = unsafe { channel::read(r, &mut self.token) }; + mem::forget(self); + res.map_err(|_| RecvError) + } +} + +impl<'a> fmt::Debug for SelectedOperation<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SelectedOperation").finish() + } +} + +impl<'a> Drop for SelectedOperation<'a> { + fn drop(&mut self) { + panic!("dropped `SelectedOperation` without completing the operation"); + } +} diff --git a/crossbeam-channel/src/select_macro.rs b/crossbeam-channel/src/select_macro.rs new file mode 100644 index 000000000..c732472b6 --- /dev/null +++ b/crossbeam-channel/src/select_macro.rs @@ -0,0 +1,1163 @@ +//! The `select!` macro. + +/// A simple wrapper around the standard macros. +/// +/// This is just an ugly workaround until it becomes possible to import macros with `use` +/// statements. +/// +/// TODO(stjepang): Once we bump the minimum required Rust version to 1.30 or newer, we should: +/// +/// 1. Remove all `#[macro_export(local_inner_macros)]` lines. +/// 2. Replace `crossbeam_channel_delegate` with direct macro invocations. +#[doc(hidden)] +#[macro_export] +macro_rules! crossbeam_channel_delegate { + (concat($($args:tt)*)) => { + concat!($($args)*) + }; + (stringify($($args:tt)*)) => { + stringify!($($args)*) + }; + (unreachable($($args:tt)*)) => { + unreachable!($($args)*) + }; + (compile_error($($args:tt)*)) => { + compile_error!($($args)*) + }; +} + +/// A helper macro for `select!` to hide the long list of macro patterns from the documentation. +/// +/// The macro consists of two stages: +/// 1. Parsing +/// 2. Code generation +/// +/// The parsing stage consists of these subparts: +/// 1. `@list`: Turns a list of tokens into a list of cases. +/// 2. `@list_errorN`: Diagnoses the syntax error. +/// 3. `@case`: Parses a single case and verifies its argument list. +/// +/// The codegen stage consists of these subparts: +/// 1. 
`@init`: Attempts to optimize `select!` away and initializes a `Select`. +/// 2. `@add`: Adds send/receive operations to the `Select` and starts selection. +/// 3. `@complete`: Completes the selected send/receive operation. +/// +/// If the parsing stage encounters a syntax error or the codegen stage ends up with too many +/// cases to process, the macro fails with a compile-time error. +#[doc(hidden)] +#[macro_export(local_inner_macros)] +macro_rules! crossbeam_channel_internal { + // The list is empty. Now check the arguments of each processed case. + (@list + () + ($($head:tt)*) + ) => { + crossbeam_channel_internal!( + @case + ($($head)*) + () + () + ) + }; + // If necessary, insert an empty argument list after `default`. + (@list + (default => $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_internal!( + @list + (default() => $($tail)*) + ($($head)*) + ) + }; + // But print an error if `default` is followed by a `->`. + (@list + (default -> $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_delegate!(compile_error( + "expected `=>` after `default` case, found `->`" + )) + }; + // Print an error if there's an `->` after the argument list in the default case. + (@list + (default $args:tt -> $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_delegate!(compile_error( + "expected `=>` after `default` case, found `->`" + )) + }; + // Print an error if there is a missing result in a recv case. + (@list + (recv($($args:tt)*) => $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_delegate!(compile_error( + "expected `->` after `recv` case, found `=>`" + )) + }; + // Print an error if there is a missing result in a send case. + (@list + (send($($args:tt)*) => $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_delegate!(compile_error( + "expected `->` after `send` operation, found `=>`" + )) + }; + // Make sure the arrow and the result are not repeated. + (@list + ($case:ident $args:tt -> $res:tt -> $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_delegate!(compile_error("expected `=>`, found `->`")) + }; + // Print an error if there is a semicolon after the block. + (@list + ($case:ident $args:tt $(-> $res:pat)* => $body:block; $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_delegate!(compile_error( + "did you mean to put a comma instead of the semicolon after `}`?" + )) + }; + // The first case is separated by a comma. + (@list + ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr, $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_internal!( + @list + ($($tail)*) + ($($head)* $case ($($args)*) $(-> $res)* => { $body },) + ) + }; + // Don't require a comma after the case if it has a proper block. + (@list + ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:block $($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_internal!( + @list + ($($tail)*) + ($($head)* $case ($($args)*) $(-> $res)* => { $body },) + ) + }; + // Only one case remains. + (@list + ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr) + ($($head:tt)*) + ) => { + crossbeam_channel_internal!( + @list + () + ($($head)* $case ($($args)*) $(-> $res)* => { $body },) + ) + }; + // Accept a trailing comma at the end of the list. + (@list + ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr,) + ($($head:tt)*) + ) => { + crossbeam_channel_internal!( + @list + () + ($($head)* $case ($($args)*) $(-> $res)* => { $body },) + ) + }; + // Diagnose and print an error. 
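+    // This catch-all arm matches only when no well-formed case applied; the staged
+    // `@list_errorN` rules below re-examine the leftover tokens to emit a precise message.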
+ (@list + ($($tail:tt)*) + ($($head:tt)*) + ) => { + crossbeam_channel_internal!(@list_error1 $($tail)*) + }; + // Stage 1: check the case type. + (@list_error1 recv $($tail:tt)*) => { + crossbeam_channel_internal!(@list_error2 recv $($tail)*) + }; + (@list_error1 send $($tail:tt)*) => { + crossbeam_channel_internal!(@list_error2 send $($tail)*) + }; + (@list_error1 default $($tail:tt)*) => { + crossbeam_channel_internal!(@list_error2 default $($tail)*) + }; + (@list_error1 $t:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected one of `recv`, `send`, or `default`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + (@list_error1 $($tail:tt)*) => { + crossbeam_channel_internal!(@list_error2 $($tail)*); + }; + // Stage 2: check the argument list. + (@list_error2 $case:ident) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "missing argument list after `", + crossbeam_channel_delegate!(stringify($case)), + "`", + )) + )) + }; + (@list_error2 $case:ident => $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "missing argument list after `", + crossbeam_channel_delegate!(stringify($case)), + "`", + )) + )) + }; + (@list_error2 $($tail:tt)*) => { + crossbeam_channel_internal!(@list_error3 $($tail)*) + }; + // Stage 3: check the `=>` and what comes after it. + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "missing `=>` after `", + crossbeam_channel_delegate!(stringify($case)), + "` case", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* =>) => { + crossbeam_channel_delegate!(compile_error( + "expected expression after `=>`" + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $body:expr; $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "did you mean to put a comma instead of the semicolon after `", + crossbeam_channel_delegate!(stringify($body)), + "`?", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => recv($($a:tt)*) $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + "expected an expression after `=>`" + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => send($($a:tt)*) $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + "expected an expression after `=>`" + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => default($($a:tt)*) $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + "expected an expression after `=>`" + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident($($a:tt)*) $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "did you mean to put a comma after `", + crossbeam_channel_delegate!(stringify($f)), + "(", + crossbeam_channel_delegate!(stringify($($a)*)), + ")`?", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident!($($a:tt)*) $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "did you mean to put a comma after `", + crossbeam_channel_delegate!(stringify($f)), + "!(", + crossbeam_channel_delegate!(stringify($($a)*)), + ")`?", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident![$($a:tt)*] $($tail:tt)*) => { + 
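+        // Same missing-comma diagnosis as the arms above, specialized to `macro![...]`.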
crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "did you mean to put a comma after `", + crossbeam_channel_delegate!(stringify($f)), + "![", + crossbeam_channel_delegate!(stringify($($a)*)), + "]`?", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident!{$($a:tt)*} $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "did you mean to put a comma after `", + crossbeam_channel_delegate!(stringify($f)), + "!{", + crossbeam_channel_delegate!(stringify($($a)*)), + "}`?", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $body:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "did you mean to put a comma after `", + crossbeam_channel_delegate!(stringify($body)), + "`?", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) -> => $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error("missing pattern after `->`")) + }; + (@list_error3 $case:ident($($args:tt)*) $t:tt $(-> $r:pat)* => $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected `->`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + (@list_error3 $case:ident($($args:tt)*) -> $t:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected a pattern, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + (@list_error3 recv($($args:tt)*) $t:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected `->`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + (@list_error3 send($($args:tt)*) $t:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected `->`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + (@list_error3 recv $args:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected an argument list after `recv`, found `", + crossbeam_channel_delegate!(stringify($args)), + "`", + )) + )) + }; + (@list_error3 send $args:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected an argument list after `send`, found `", + crossbeam_channel_delegate!(stringify($args)), + "`", + )) + )) + }; + (@list_error3 default $args:tt $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected an argument list or `=>` after `default`, found `", + crossbeam_channel_delegate!(stringify($args)), + "`", + )) + )) + }; + (@list_error3 $($tail:tt)*) => { + crossbeam_channel_internal!(@list_error4 $($tail)*) + }; + // Stage 4: fail with a generic error message. + (@list_error4 $($tail:tt)*) => { + crossbeam_channel_delegate!(compile_error("invalid syntax")) + }; + + // Success! All cases were parsed. + (@case + () + $cases:tt + $default:tt + ) => { + crossbeam_channel_internal!( + @init + $cases + $default + ) + }; + + // Check the format of a recv case. + (@case + (recv($r:expr) -> $res:pat => $body:tt, $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + ($($cases)* recv($r) -> $res => $body,) + $default + ) + }; + // Allow trailing comma... 
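+    // ...inside the argument list: `recv(r,)` is recorded as `recv(r)`.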
+ (@case + (recv($r:expr,) -> $res:pat => $body:tt, $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + ($($cases)* recv($r) -> $res => $body,) + $default + ) + }; + // Print an error if the argument list is invalid. + (@case + (recv($($args:tt)*) -> $res:pat => $body:tt, $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "invalid argument list in `recv(", + crossbeam_channel_delegate!(stringify($($args)*)), + ")`", + )) + )) + }; + // Print an error if there is no argument list. + (@case + (recv $t:tt $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected an argument list after `recv`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + + // Check the format of a send case. + (@case + (send($s:expr, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + ($($cases)* send($s, $m) -> $res => $body,) + $default + ) + }; + // Allow trailing comma... + (@case + (send($s:expr, $m:expr,) -> $res:pat => $body:tt, $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + ($($cases)* send($s, $m) -> $res => $body,) + $default + ) + }; + // Print an error if the argument list is invalid. + (@case + (send($($args:tt)*) -> $res:pat => $body:tt, $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "invalid argument list in `send(", + crossbeam_channel_delegate!(stringify($($args)*)), + ")`", + )) + )) + }; + // Print an error if there is no argument list. + (@case + (send $t:tt $($tail:tt)*) + ($($cases:tt)*) + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected an argument list after `send`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + + // Check the format of a default case. + (@case + (default() => $body:tt, $($tail:tt)*) + $cases:tt + () + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + $cases + (default() => $body,) + ) + }; + // Check the format of a default case with timeout. + (@case + (default($timeout:expr) => $body:tt, $($tail:tt)*) + $cases:tt + () + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + $cases + (default($timeout) => $body,) + ) + }; + // Allow trailing comma... + (@case + (default($timeout:expr,) => $body:tt, $($tail:tt)*) + $cases:tt + () + ) => { + crossbeam_channel_internal!( + @case + ($($tail)*) + $cases + (default($timeout) => $body,) + ) + }; + // Check for duplicate default cases... + (@case + (default $($tail:tt)*) + $cases:tt + ($($def:tt)+) + ) => { + crossbeam_channel_delegate!(compile_error( + "there can be only one `default` case in a `select!` block" + )) + }; + // Print an error if the argument list is invalid. + (@case + (default($($args:tt)*) => $body:tt, $($tail:tt)*) + $cases:tt + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "invalid argument list in `default(", + crossbeam_channel_delegate!(stringify($($args)*)), + ")`", + )) + )) + }; + // Print an error if there is an unexpected token after `default`. 
+ (@case + (default $t:tt $($tail:tt)*) + $cases:tt + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected an argument list or `=>` after `default`, found `", + crossbeam_channel_delegate!(stringify($t)), + "`", + )) + )) + }; + + // The case was not consumed, therefore it must be invalid. + (@case + ($case:ident $($tail:tt)*) + $cases:tt + $default:tt + ) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "expected one of `recv`, `send`, or `default`, found `", + crossbeam_channel_delegate!(stringify($case)), + "`", + )) + )) + }; + + // Optimize `select!` into `try_recv()`. + (@init + (recv($r:expr) -> $res:pat => $recv_body:tt,) + (default() => $default_body:tt,) + ) => {{ + match $r { + ref _r => { + let _r: &$crate::Receiver<_> = _r; + match _r.try_recv() { + ::std::result::Result::Err($crate::TryRecvError::Empty) => { + $default_body + } + _res => { + let _res = _res.map_err(|_| $crate::RecvError); + let $res = _res; + $recv_body + } + } + } + } + }}; + // Optimize `select!` into `recv()`. + (@init + (recv($r:expr) -> $res:pat => $body:tt,) + () + ) => {{ + match $r { + ref _r => { + let _r: &$crate::Receiver<_> = _r; + let _res = _r.recv(); + let $res = _res; + $body + } + } + }}; + // Optimize `select!` into `recv_timeout()`. + (@init + (recv($r:expr) -> $res:pat => $recv_body:tt,) + (default($timeout:expr) => $default_body:tt,) + ) => {{ + match $r { + ref _r => { + let _r: &$crate::Receiver<_> = _r; + match _r.recv_timeout($timeout) { + ::std::result::Result::Err($crate::RecvTimeoutError::Timeout) => { + $default_body + } + _res => { + let _res = _res.map_err(|_| $crate::RecvError); + let $res = _res; + $recv_body + } + } + } + } + }};
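+ + // For illustration (roughly, not the exact token-for-token expansion), the `try_recv()` rule above turns + // `select! { recv(r) -> msg => handle(msg), default => on_empty() }` into + // `match r.try_recv() { Err(TryRecvError::Empty) => on_empty(), res => { let msg = res.map_err(|_| RecvError); handle(msg) } }`, + // so this common shape never constructs a `Select` at all.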
+ + // // Optimize the non-blocking case with two receive operations. + // (@init + // (recv($r1:expr) -> $res1:pat => $recv_body1:tt,) + // (recv($r2:expr) -> $res2:pat => $recv_body2:tt,) + // (default() => $default_body:tt,) + // ) => {{ + // match $r1 { + // ref _r1 => { + // let _r1: &$crate::Receiver<_> = _r1; + // + // match $r2 { + // ref _r2 => { + // let _r2: &$crate::Receiver<_> = _r2; + // + // // TODO(stjepang): Implement this optimization. + // } + // } + // } + // } + // }}; + // // Optimize the blocking case with two receive operations. + // (@init + // (recv($r1:expr) -> $res1:pat => $body1:tt,) + // (recv($r2:expr) -> $res2:pat => $body2:tt,) + // () + // ) => {{ + // match $r1 { + // ref _r1 => { + // let _r1: &$crate::Receiver<_> = _r1; + // + // match $r2 { + // ref _r2 => { + // let _r2: &$crate::Receiver<_> = _r2; + // + // // TODO(stjepang): Implement this optimization. + // } + // } + // } + // } + // }}; + // // Optimize the case with two receive operations and a timeout. + // (@init + // (recv($r1:expr) -> $res1:pat => $recv_body1:tt,) + // (recv($r2:expr) -> $res2:pat => $recv_body2:tt,) + // (default($timeout:expr) => $default_body:tt,) + // ) => {{ + // match $r1 { + // ref _r1 => { + // let _r1: &$crate::Receiver<_> = _r1; + // + // match $r2 { + // ref _r2 => { + // let _r2: &$crate::Receiver<_> = _r2; + // + // // TODO(stjepang): Implement this optimization. + // } + // } + // } + // } + // }}; + + // // Optimize `select!` into `try_send()`. + // (@init + // (send($s:expr, $m:expr) -> $res:pat => $send_body:tt,) + // (default() => $default_body:tt,) + // ) => {{ + // match $s { + // ref _s => { + // let _s: &$crate::Sender<_> = _s; + // // TODO(stjepang): Implement this optimization. + // } + // } + // }}; + // // Optimize `select!` into `send()`. + // (@init + // (send($s:expr, $m:expr) -> $res:pat => $body:tt,) + // () + // ) => {{ + // match $s { + // ref _s => { + // let _s: &$crate::Sender<_> = _s; + // // TODO(stjepang): Implement this optimization. + // } + // } + // }}; + // // Optimize `select!` into `send_timeout()`. + // (@init + // (send($s:expr, $m:expr) -> $res:pat => $send_body:tt,) + // (default($timeout:expr) => $default_body:tt,) + // ) => {{ + // match $s { + // ref _s => { + // let _s: &$crate::Sender<_> = _s; + // // TODO(stjepang): Implement this optimization. + // } + // } + // }}; + + // Create a `Select` and add operations to it. + (@init + ($($cases:tt)*) + $default:tt + ) => {{ + #[allow(unused_mut)] + let mut _sel = $crate::Select::new(); + crossbeam_channel_internal!( + @add + _sel + ($($cases)*) + $default + ( + (0usize _oper0) + (1usize _oper1) + (2usize _oper2) + (3usize _oper3) + (4usize _oper4) + (5usize _oper5) + (6usize _oper6) + (7usize _oper7) + (8usize _oper8) + (9usize _oper9) + (10usize _oper10) + (11usize _oper11) + (12usize _oper12) + (13usize _oper13) + (14usize _oper14) + (15usize _oper15) + (16usize _oper16) + (17usize _oper17) + (18usize _oper18) + (19usize _oper19) + (20usize _oper20) + (21usize _oper21) + (22usize _oper22) + (23usize _oper23) + (24usize _oper24) + (25usize _oper25) + (26usize _oper26) + (27usize _oper27) + (28usize _oper28) + (29usize _oper29) + (30usize _oper30) + (31usize _oper31) + ) + () + ) + }}; + + // Run blocking selection. + (@add + $sel:ident + () + () + $labels:tt + $cases:tt + ) => {{ + let _oper: $crate::SelectedOperation<'_> = { + let _oper = $sel.select(); + + // Erase the lifetime so that `sel` can be dropped early even without NLL. + #[allow(unsafe_code)] + unsafe { ::std::mem::transmute(_oper) } + }; + + crossbeam_channel_internal! { + @complete + $sel + _oper + $cases + } + }}; + // Run non-blocking selection. + (@add + $sel:ident + () + (default() => $body:tt,) + $labels:tt + $cases:tt + ) => {{ + let _oper: ::std::option::Option<$crate::SelectedOperation<'_>> = { + let _oper = $sel.try_select(); + + // Erase the lifetime so that `sel` can be dropped early even without NLL. + #[allow(unsafe_code)] + unsafe { ::std::mem::transmute(_oper) } + }; + + match _oper { + None => { + ::std::mem::drop($sel); + $body + } + Some(_oper) => { + crossbeam_channel_internal! { + @complete + $sel + _oper + $cases + } + } + } + }}; + // Run selection with a timeout. + (@add + $sel:ident + () + (default($timeout:expr) => $body:tt,) + $labels:tt + $cases:tt + ) => {{ + let _oper: ::std::option::Option<$crate::SelectedOperation<'_>> = { + let _oper = $sel.select_timeout($timeout); + + // Erase the lifetime so that `sel` can be dropped early even without NLL. + #[allow(unsafe_code)] + unsafe { ::std::mem::transmute(_oper) } + }; + + match _oper { + ::std::option::Option::None => { + ::std::mem::drop($sel); + $body + } + ::std::option::Option::Some(_oper) => { + crossbeam_channel_internal! { + @complete + $sel + _oper + $cases + } + } + } + }}; + // Have we used up all labels? + (@add + $sel:ident + $input:tt + $default:tt + () + $cases:tt + ) => { + crossbeam_channel_delegate!(compile_error("too many operations in a `select!` block")) + }; + // Add a receive operation to `sel`.
+ (@add + $sel:ident + (recv($r:expr) -> $res:pat => $body:tt, $($tail:tt)*) + $default:tt + (($i:tt $var:ident) $($labels:tt)*) + ($($cases:tt)*) + ) => {{ + match $r { + ref _r => { + #[allow(unsafe_code)] + let $var: &$crate::Receiver<_> = unsafe { + let _r: &$crate::Receiver<_> = _r; + + // Erase the lifetime so that `sel` can be dropped early even without NLL. + unsafe fn unbind<'a, T>(x: &T) -> &'a T { + ::std::mem::transmute(x) + } + unbind(_r) + }; + $sel.recv($var); + + crossbeam_channel_internal!( + @add + $sel + ($($tail)*) + $default + ($($labels)*) + ($($cases)* [$i] recv($var) -> $res => $body,) + ) + } + } + }}; + // Add a send operation to `sel`. + (@add + $sel:ident + (send($s:expr, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) + $default:tt + (($i:tt $var:ident) $($labels:tt)*) + ($($cases:tt)*) + ) => {{ + match $s { + ref _s => { + #[allow(unsafe_code)] + let $var: &$crate::Sender<_> = unsafe { + let _s: &$crate::Sender<_> = _s; + + // Erase the lifetime so that `sel` can be dropped early even without NLL. + unsafe fn unbind<'a, T>(x: &T) -> &'a T { + ::std::mem::transmute(x) + } + unbind(_s) + }; + $sel.send($var); + + crossbeam_channel_internal!( + @add + $sel + ($($tail)*) + $default + ($($labels)*) + ($($cases)* [$i] send($var, $m) -> $res => $body,) + ) + } + } + }}; + + // Complete a receive operation. + (@complete + $sel:ident + $oper:ident + ([$i:tt] recv($r:ident) -> $res:pat => $body:tt, $($tail:tt)*) + ) => {{ + if $oper.index() == $i { + let _res = $oper.recv($r); + ::std::mem::drop($sel); + + let $res = _res; + $body + } else { + crossbeam_channel_internal! { + @complete + $sel + $oper + ($($tail)*) + } + } + }}; + // Complete a send operation. + (@complete + $sel:ident + $oper:ident + ([$i:tt] send($s:ident, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) + ) => {{ + if $oper.index() == $i { + let _res = $oper.send($s, $m); + ::std::mem::drop($sel); + + let $res = _res; + $body + } else { + crossbeam_channel_internal! { + @complete + $sel + $oper + ($($tail)*) + } + } + }}; + // Panic if we don't identify the selected case, but this should never happen. + (@complete + $sel:ident + $oper:ident + () + ) => {{ + crossbeam_channel_delegate!(unreachable( + "internal error in crossbeam-channel: invalid case" + )) + }}; + + // Catches a bug within this macro (should not happen). + (@$($tokens:tt)*) => { + crossbeam_channel_delegate!(compile_error( + crossbeam_channel_delegate!(concat( + "internal error in crossbeam-channel: ", + crossbeam_channel_delegate!(stringify(@$($tokens)*)), + )) + )) + }; + + // The entry points. + () => { + crossbeam_channel_delegate!(compile_error("empty `select!` block")) + }; + ($($case:ident $(($($args:tt)*))* => $body:expr $(,)*)*) => { + crossbeam_channel_internal!( + @list + ($($case $(($($args)*))* => { $body },)*) + () + ) + }; + ($($tokens:tt)*) => { + crossbeam_channel_internal!( + @list + ($($tokens)*) + () + ) + }; +} + +/// Selects from a set of channel operations. +/// +/// This macro allows you to define a set of channel operations, wait until any one of them becomes +/// ready, and finally execute it. If multiple operations are ready at the same time, a random one +/// among them is selected. +/// +/// It is also possible to define a `default` case that gets executed if none of the operations are +/// ready, either right away or for a certain duration of time. +/// +/// An operation is considered to be ready if it doesn't have to block. 
Note that it is ready even +/// when it will simply return an error because the channel is disconnected. +/// +/// The `select!` macro is a convenience wrapper around [`Select`]. However, it cannot select over a +/// dynamically created list of channel operations. +/// +/// [`Select`]: struct.Select.html +/// +/// # Examples +/// +/// Block until a send or a receive operation becomes ready: +/// +/// ``` +/// # #[macro_use] +/// # extern crate crossbeam_channel; +/// # fn main() { +/// use std::thread; +/// use crossbeam_channel::unbounded; +/// +/// let (s1, r1) = unbounded(); +/// let (s2, r2) = unbounded(); +/// s1.send(10).unwrap(); +/// +/// // Since both operations are initially ready, a random one will be executed. +/// select! { +/// recv(r1) -> msg => assert_eq!(msg, Ok(10)), +/// send(s2, 20) -> res => { +/// assert_eq!(res, Ok(())); +/// assert_eq!(r2.recv(), Ok(20)); +/// } +/// } +/// # } +/// ``` +/// +/// Select from a set of operations without blocking: +/// +/// ``` +/// # #[macro_use] +/// # extern crate crossbeam_channel; +/// # fn main() { +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_channel::unbounded; +/// +/// let (s1, r1) = unbounded(); +/// let (s2, r2) = unbounded(); +/// +/// thread::spawn(move || { +/// thread::sleep(Duration::from_secs(1)); +/// s1.send(10).unwrap(); +/// }); +/// thread::spawn(move || { +/// thread::sleep(Duration::from_millis(500)); +/// s2.send(20).unwrap(); +/// }); +/// +/// // Neither of the operations is initially ready. +/// select! { +/// recv(r1) -> msg => panic!(), +/// recv(r2) -> msg => panic!(), +/// default => println!("not ready"), +/// } +/// # } +/// ``` +/// +/// Wait on a set of operations with a timeout: +/// +/// ``` +/// # #[macro_use] +/// # extern crate crossbeam_channel; +/// # fn main() { +/// use std::thread; +/// use std::time::Duration; +/// use crossbeam_channel::unbounded; +/// +/// let (s1, r1) = unbounded(); +/// let (s2, r2) = unbounded(); +/// +/// thread::spawn(move || { +/// thread::sleep(Duration::from_secs(1)); +/// s1.send(10).unwrap(); +/// }); +/// thread::spawn(move || { +/// thread::sleep(Duration::from_millis(500)); +/// s2.send(10).unwrap(); +/// }); +/// +/// // Neither of the two operations will become ready within 100 milliseconds. +/// select! { +/// recv(r1) -> msg => panic!(), +/// recv(r2) -> msg => panic!(), +/// default(Duration::from_millis(100)) => println!("timed out"), +/// } +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! select { + ($($tokens:tt)*) => { + crossbeam_channel_internal!( + $($tokens)* + ) + }; +} diff --git a/crossbeam-channel/src/utils.rs b/crossbeam-channel/src/utils.rs new file mode 100644 index 000000000..a8ab2b759 --- /dev/null +++ b/crossbeam-channel/src/utils.rs @@ -0,0 +1,107 @@ +//! Miscellaneous utilities. + +use std::cell::Cell; +use std::num::Wrapping; +use std::sync::atomic; +use std::thread; +use std::time::{Duration, Instant}; + +use rand; + +/// A counter that performs exponential backoff in spin loops. +pub struct Backoff(u32); + +impl Backoff { + /// Creates a new `Backoff`. + #[inline] + pub fn new() -> Self { + Backoff(0) + } + + /// Backs off in a spin loop. + /// + /// This method may yield the current processor. Use it in lock-free retry loops.
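+ /// + /// A minimal usage sketch (`try_pop` here stands for some hypothetical lock-free operation): + /// + /// ```ignore + /// let mut backoff = Backoff::new(); + /// let item = loop { + /// if let Some(item) = try_pop() { + /// break item; + /// } + /// backoff.spin(); + /// }; + /// ```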
+ #[inline] + pub fn spin(&mut self) { + for _ in 0..1 << self.0.min(6) { + atomic::spin_loop_hint(); + } + self.0 = self.0.wrapping_add(1); + } + + /// Backs off in a wait loop. + /// + /// Returns `true` if snoozing has reached a threshold where we should consider parking the + /// thread instead. + /// + /// This method may yield the current processor or the current thread. Use it when waiting on a + /// resource. + #[inline] + pub fn snooze(&mut self) -> bool { + if self.0 <= 6 { + for _ in 0..1 << self.0 { + atomic::spin_loop_hint(); + } + } else { + thread::yield_now(); + } + + self.0 = self.0.wrapping_add(1); + self.0 <= 10 + } +} + +/// Randomly shuffles a slice. +pub fn shuffle<T>(v: &mut [T]) { + let len = v.len(); + if len <= 1 { + return; + } + + thread_local! { + static RNG: Cell<Wrapping<u32>> = { + let init = rand::random::<u32>() | 1; + Cell::new(Wrapping(init)) + }; + } + + let _ = RNG.try_with(|rng| { + for i in 1..len { + // This is the 32-bit variant of Xorshift. + // + // Source: https://en.wikipedia.org/wiki/Xorshift + let mut x = rng.get(); + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + rng.set(x); + + let x = x.0; + let n = i + 1; + + // This is a fast alternative to `let j = x % n`. + // + // Author: Daniel Lemire + // Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
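+ // (Intuitively, `x / 2^32` is a uniform fraction of the full range, and + // `(x * n) >> 32` scales it into `0..n`; e.g. `x = 2^31` and `n = 10` + // yield `j = 5`.)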
+ let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize; + + v.swap(i, j); + } + }); +} + +/// Sleeps until the deadline, or forever if the deadline isn't specified. +pub fn sleep_until(deadline: Option<Instant>) { + loop { + match deadline { + None => thread::sleep(Duration::from_secs(1000)), + Some(d) => { + let now = Instant::now(); + if now >= d { + break; + } + thread::sleep(d - now); + } + } + } +} diff --git a/crossbeam-channel/src/waker.rs b/crossbeam-channel/src/waker.rs new file mode 100644 index 000000000..8cfffa1d9 --- /dev/null +++ b/crossbeam-channel/src/waker.rs @@ -0,0 +1,251 @@ +//! Waking mechanism for threads blocked on channel operations. + +use std::collections::VecDeque; +use std::num::Wrapping; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::{self, ThreadId}; + +use parking_lot::Mutex; + +use context::Context; +use select::{Operation, Selected}; + +/// Represents a thread blocked on a specific channel operation. +pub struct Entry { + /// Context associated with the thread owning this operation. + pub context: Context, + + /// The operation. + pub oper: Operation, + + /// Optional packet. + pub packet: usize, +} + +/// A queue of threads blocked on channel operations. +/// +/// This data structure is used by threads to register blocking operations and get woken up once +/// an operation becomes ready. +pub struct Waker { + /// The list of registered blocking operations. + entries: VecDeque<Entry>, + + /// The number of calls to `register` and `register_with_packet`. + register_count: Wrapping<usize>, +} + +impl Waker { + /// Creates a new `Waker`. + #[inline] + pub fn new() -> Self { + Waker { + entries: VecDeque::new(), + register_count: Wrapping(0), + } + } + + /// Registers the current thread with an operation. + #[inline] + pub fn register(&mut self, oper: Operation, cx: &Context) { + self.register_with_packet(oper, 0, cx); + } + + /// Registers the current thread with an operation and a packet. + #[inline] + pub fn register_with_packet(&mut self, oper: Operation, packet: usize, cx: &Context) { + self.entries.push_back(Entry { + context: cx.clone(), + oper, + packet, + }); + self.register_count += Wrapping(1); + } + + /// Unregisters an operation previously registered by the current thread. + #[inline] + pub fn unregister(&mut self, oper: Operation) -> Option<Entry> { + if let Some((i, _)) = self.entries + .iter() + .enumerate() + .find(|&(_, entry)| entry.oper == oper) + { + let entry = self.entries.remove(i); + Self::maybe_shrink(&mut self.entries); + entry + } else { + None + } + } + + /// Attempts to find one thread (not the current one), select its operation, and wake it up. + #[inline] + pub fn wake_one(&mut self) -> Option<Entry> { + if !self.entries.is_empty() { + let thread_id = current_thread_id(); + + for i in 0..self.entries.len() { + // Does the entry belong to a different thread? + if self.entries[i].context.thread_id() != thread_id { + // Try selecting this operation. + let sel = Selected::Operation(self.entries[i].oper); + let res = self.entries[i].context.try_select(sel); + + if res.is_ok() { + // Provide the packet. + self.entries[i].context.store_packet(self.entries[i].packet); + // Wake the thread up. + self.entries[i].context.unpark(); + + // Remove the entry from the queue to keep it clean and improve + // performance. + let entry = self.entries.remove(i).unwrap(); + Self::maybe_shrink(&mut self.entries); + return Some(entry); + } + } + } + } + + None + } + + /// Notifies all threads that the channel is disconnected. + #[inline] + pub fn disconnect(&mut self) { + for entry in self.entries.iter() { + if entry.context.try_select(Selected::Disconnected).is_ok() { + // Wake the thread up. + // + // Here we don't remove the entry from the queue. Registered threads must + // unregister from the waker by themselves. They might also want to recover the + // packet value and destroy it, if necessary. + entry.context.unpark(); + } + } + } + + /// Returns `true` if there is an entry which can be woken up by the current thread. + #[inline] + pub fn can_wake_one(&self) -> bool { + if self.entries.is_empty() { + false + } else { + let thread_id = current_thread_id(); + + self.entries.iter().any(|entry| { + entry.context.thread_id() != thread_id + && entry.context.selected() == Selected::Waiting + }) + } + } + + /// Returns the number of entries in the queue. + #[inline] + pub fn len(&self) -> usize { + self.entries.len() + } + + #[inline] + pub fn register_count(&self) -> usize { + self.register_count.0 + } + + /// Shrinks the internal queue if its capacity is much larger than length. + #[inline] + fn maybe_shrink(entries: &mut VecDeque<Entry>) { + if entries.capacity() > 32 && entries.len() < entries.capacity() / 4 { + let mut v = VecDeque::with_capacity(entries.capacity() / 2); + v.extend(entries.drain(..)); + *entries = v; + } + } +} + +impl Drop for Waker { + #[inline] + fn drop(&mut self) { + debug_assert!(self.entries.is_empty()); + } +} + +/// A waker that can be shared among threads without locking. + +/// +/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization. +pub struct SyncWaker { + /// The inner `Waker`. + inner: Mutex<Waker>, + + /// Number of operations in the waker. + len: AtomicUsize, +}
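+ +// The `len` counter mirrors the queue length outside the mutex so that +// `unregister` and `wake_one` below can skip taking the lock entirely +// whenever no operations are registered.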
+ +impl SyncWaker { + /// Creates a new `SyncWaker`. + #[inline] + pub fn new() -> Self { + SyncWaker { + inner: Mutex::new(Waker::new()), + len: AtomicUsize::new(0), + } + } + + /// Registers the current thread with an operation. + #[inline] + pub fn register(&self, oper: Operation, cx: &Context) { + let mut inner = self.inner.lock(); + inner.register(oper, cx); + self.len.store(inner.len(), Ordering::SeqCst); + } + + /// Unregisters an operation previously registered by the current thread. + #[inline] + pub fn unregister(&self, oper: Operation) -> Option<Entry> { + if self.len.load(Ordering::SeqCst) > 0 { + let mut inner = self.inner.lock(); + let entry = inner.unregister(oper); + self.len.store(inner.len(), Ordering::SeqCst); + entry + } else { + None + } + } + + /// Attempts to find one thread (not the current one), select its operation, and wake it up. + #[inline] + pub fn wake_one(&self) -> Option<Entry> { + if self.len.load(Ordering::SeqCst) > 0 { + let mut inner = self.inner.lock(); + let entry = inner.wake_one(); + self.len.store(inner.len(), Ordering::SeqCst); + entry + } else { + None + } + } + + /// Notifies all threads that the channel is disconnected. + pub fn disconnect(&self) { + self.inner.lock().disconnect(); + } +} + +impl Drop for SyncWaker { + #[inline] + fn drop(&mut self) { + debug_assert_eq!(self.inner.lock().len(), 0); + debug_assert_eq!(self.len.load(Ordering::SeqCst), 0); + } +} + +/// Returns the id of the current thread. +#[inline] +fn current_thread_id() -> ThreadId { + thread_local! { + /// Cached thread-local id. + static THREAD_ID: ThreadId = thread::current().id(); + } + + THREAD_ID + .try_with(|id| *id) + .unwrap_or_else(|_| thread::current().id()) +} diff --git a/crossbeam-channel/tests/after.rs b/crossbeam-channel/tests/after.rs new file mode 100644 index 000000000..dd868031d --- /dev/null +++ b/crossbeam-channel/tests/after.rs @@ -0,0 +1,331 @@ +//! Tests for the after channel flavor. + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; +extern crate rand; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::thread; +use std::time::{Duration, Instant}; + +use crossbeam_channel::{after, Select, TryRecvError}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn fire() { + let start = Instant::now(); + let r = after(ms(50)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + thread::sleep(ms(100)); + + let fired = r.try_recv().unwrap(); + assert!(start < fired); + assert!(fired - start >= ms(50)); + + let now = Instant::now(); + assert!(fired < now); + assert!(now - fired >= ms(50)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + + select! { + recv(r) -> _ => panic!(), + default => {} + } + + select!
{ + recv(r) -> _ => panic!(), + recv(after(ms(200))) -> _ => {} + } +} + +#[test] +fn capacity() { + const COUNT: usize = 10; + + for i in 0..COUNT { + let r = after(ms(i as u64)); + assert_eq!(r.capacity(), Some(1)); + } +} + +#[test] +fn len_empty_full() { + let r = after(ms(50)); + + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + thread::sleep(ms(100)); + + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), true); + + r.try_recv().unwrap(); + + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); +} + +#[test] +fn try_recv() { + let r = after(ms(200)); + assert!(r.try_recv().is_err()); + + thread::sleep(ms(100)); + assert!(r.try_recv().is_err()); + + thread::sleep(ms(200)); + assert!(r.try_recv().is_ok()); + assert!(r.try_recv().is_err()); + + thread::sleep(ms(200)); + assert!(r.try_recv().is_err()); +} + +#[test] +fn recv() { + let start = Instant::now(); + let r = after(ms(50)); + + let fired = r.recv().unwrap(); + assert!(start < fired); + assert!(fired - start >= ms(50)); + + let now = Instant::now(); + assert!(fired < now); + assert!(now - fired < fired - start); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn recv_timeout() { + let start = Instant::now(); + let r = after(ms(200)); + + assert!(r.recv_timeout(ms(100)).is_err()); + let now = Instant::now(); + assert!(now - start >= ms(100)); + assert!(now - start <= ms(150)); + + let fired = r.recv_timeout(ms(200)).unwrap(); + assert!(fired - start >= ms(200)); + assert!(fired - start <= ms(250)); + + assert!(r.recv_timeout(ms(200)).is_err()); + let now = Instant::now(); + assert!(now - start >= ms(400)); + assert!(now - start <= ms(450)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn recv_two() { + let r1 = after(ms(50)); + let r2 = after(ms(50)); + + crossbeam::scope(|scope| { + scope.spawn(|| { + select! { + recv(r1) -> _ => {} + recv(r2) -> _ => {} + } + }); + scope.spawn(|| { + select! { + recv(r1) -> _ => {} + recv(r2) -> _ => {} + } + }); + }); +} + +#[test] +fn recv_race() { + select! { + recv(after(ms(50))) -> _ => {} + recv(after(ms(100))) -> _ => panic!(), + } + + select! { + recv(after(ms(100))) -> _ => panic!(), + recv(after(ms(50))) -> _ => {} + } +} + +#[test] +fn stress_default() { + const COUNT: usize = 10; + + for _ in 0..COUNT { + select! { + recv(after(ms(0))) -> _ => {} + default => panic!(), + } + } + + for _ in 0..COUNT { + select! 
{ + recv(after(ms(100))) -> _ => panic!(), + default => {} + } + } +} + +#[test] +fn select_shared() { + const THREADS: usize = 4; + const COUNT: usize = 1000; + const TIMEOUT_MS: u64 = 100; + + let v = (0..COUNT).map(|i| after(ms(i as u64 / TIMEOUT_MS / 2))) + .collect::<Vec<_>>(); + let hits = AtomicUsize::new(0); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + let v: Vec<&_> = v.iter().collect(); + + loop { + let timeout = after(ms(TIMEOUT_MS)); + let mut sel = Select::new(); + for r in &v { + sel.recv(r); + } + let oper_timeout = sel.recv(&timeout); + + let oper = sel.select(); + match oper.index() { + i if i == oper_timeout => { + oper.recv(&timeout).unwrap(); + break; + } + i => { + oper.recv(&v[i]).unwrap(); + hits.fetch_add(1, Ordering::SeqCst); + } + } + } + }); + } + }); + + assert_eq!(hits.load(Ordering::SeqCst), COUNT); +} + +#[test] +fn select_cloned() { + const THREADS: usize = 4; + const COUNT: usize = 1000; + const TIMEOUT_MS: u64 = 100; + + let v = (0..COUNT).map(|i| after(ms(i as u64 / TIMEOUT_MS / 2))) + .collect::<Vec<_>>(); + let hits = AtomicUsize::new(0); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + loop { + let timeout = after(ms(TIMEOUT_MS)); + let mut sel = Select::new(); + for r in &v { + sel.recv(r); + } + let oper_timeout = sel.recv(&timeout); + + let oper = sel.select(); + match oper.index() { + i if i == oper_timeout => { + oper.recv(&timeout).unwrap(); + break; + } + i => { + oper.recv(&v[i]).unwrap(); + hits.fetch_add(1, Ordering::SeqCst); + } + } + } + }); + } + }); + + assert_eq!(hits.load(Ordering::SeqCst), COUNT); +} + +#[test] +fn stress_clone() { + const RUNS: usize = 1000; + const THREADS: usize = 10; + const COUNT: usize = 50; + + for i in 0..RUNS { + let r = after(ms(i as u64)); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + let r = r.clone(); + let _ = r.try_recv(); + + for _ in 0..COUNT { + drop(r.clone()); + thread::yield_now(); + } + }); + } + }); + } +} + +#[test] +fn fairness() { + const COUNT: usize = 1000; + + for &dur in &[0, 1] { + let mut hits = [0usize; 2]; + + for _ in 0..COUNT { + select! { + recv(after(ms(dur))) -> _ => hits[0] += 1, + recv(after(ms(dur))) -> _ => hits[1] += 1, + } + } + + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + } +} + +#[test] +fn fairness_duplicates() { + const COUNT: usize = 1000; + + for &dur in &[0, 1] { + let mut hits = [0usize; 5]; + + for _ in 0..COUNT { + let r = after(ms(dur)); + select! { + recv(r) -> _ => hits[0] += 1, + recv(r) -> _ => hits[1] += 1, + recv(r) -> _ => hits[2] += 1, + recv(r) -> _ => hits[3] += 1, + recv(r) -> _ => hits[4] += 1, + } + } +
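+ // All five cases receive from the same channel, so each should win + // roughly COUNT / 5 times; the assertion below only demands at least + // half of that, leaving room for random variation.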
+ assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + } +} diff --git a/crossbeam-channel/tests/array.rs b/crossbeam-channel/tests/array.rs new file mode 100644 index 000000000..0a485ebe6 --- /dev/null +++ b/crossbeam-channel/tests/array.rs @@ -0,0 +1,557 @@ +//! Tests for the array channel flavor. + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; +extern crate rand; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::thread; +use std::time::Duration; + +use crossbeam_channel::bounded; +use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; +use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; +use rand::{thread_rng, Rng}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = bounded(1); + s.send(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + s.send(8).unwrap(); + assert_eq!(r.recv(), Ok(8)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); +} + +#[test] +fn capacity() { + for i in 1..10 { + let (s, r) = bounded::<()>(i); + assert_eq!(s.capacity(), Some(i)); + assert_eq!(r.capacity(), Some(i)); + } +} + +#[test] +fn len_empty_full() { + let (s, r) = bounded(2); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + s.send(()).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); + + s.send(()).unwrap(); + + assert_eq!(s.len(), 2); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), true); + assert_eq!(r.len(), 2); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), true); + + r.recv().unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); +} + +#[test] +fn try_recv() { + let (s, r) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + thread::sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + thread::sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + s.send(7).unwrap(); + }); + }); +} + +#[test] +fn recv() { + let (s, r) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv(), Ok(7)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(8)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(9)); + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + s.send(7).unwrap(); + s.send(8).unwrap(); + s.send(9).unwrap(); + }); + }); +} + +#[test] +fn recv_timeout() { + let (s, r) = bounded::<i32>(100); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); + assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); + assert_eq!( + r.recv_timeout(ms(1000)), + Err(RecvTimeoutError::Disconnected) + ); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + s.send(7).unwrap(); + }); + }); +} + +#[test] +fn try_send() { + let (s, r) = bounded(1); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(s.try_send(1), Ok(())); + assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); + thread::sleep(ms(1500)); + assert_eq!(s.try_send(3), Ok(())); + thread::sleep(ms(500)); + assert_eq!(s.try_send(4), Err(TrySendError::Disconnected(4))); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + assert_eq!(r.try_recv(), Ok(1)); +
assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + assert_eq!(r.recv(), Ok(3)); + }); + }); +} + +#[test] +fn send() { + let (s, r) = bounded(1); + + crossbeam::scope(|scope| { + scope.spawn(|| { + s.send(7).unwrap(); + thread::sleep(ms(1000)); + s.send(8).unwrap(); + thread::sleep(ms(1000)); + s.send(9).unwrap(); + thread::sleep(ms(1000)); + s.send(10).unwrap(); + }); + scope.spawn(|| { + thread::sleep(ms(1500)); + assert_eq!(r.recv(), Ok(7)); + assert_eq!(r.recv(), Ok(8)); + assert_eq!(r.recv(), Ok(9)); + }); + }); +} + +#[test] +fn send_timeout() { + let (s, r) = bounded(2); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(s.send_timeout(1, ms(1000)), Ok(())); + assert_eq!(s.send_timeout(2, ms(1000)), Ok(())); + assert_eq!( + s.send_timeout(3, ms(500)), + Err(SendTimeoutError::Timeout(3)) + ); + thread::sleep(ms(1000)); + assert_eq!(s.send_timeout(4, ms(1000)), Ok(())); + thread::sleep(ms(1000)); + assert_eq!(s.send(5), Err(SendError(5))); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(1)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(2)); + assert_eq!(r.recv(), Ok(4)); + }); + }); +} + +#[test] +fn send_after_disconnect() { + let (s, r) = bounded(100); + + s.send(1).unwrap(); + s.send(2).unwrap(); + s.send(3).unwrap(); + + drop(r); + + assert_eq!(s.send(4), Err(SendError(4))); + assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5))); + assert_eq!(s.send_timeout(6, ms(500)), Err(SendTimeoutError::Disconnected(6))); +} + +#[test] +fn recv_after_disconnect() { + let (s, r) = bounded(100); + + s.send(1).unwrap(); + s.send(2).unwrap(); + s.send(3).unwrap(); + + drop(s); + + assert_eq!(r.recv(), Ok(1)); + assert_eq!(r.recv(), Ok(2)); + assert_eq!(r.recv(), Ok(3)); + assert_eq!(r.recv(), Err(RecvError)); +} + +#[test] +fn len() { + const COUNT: usize = 25_000; + const CAP: usize = 1000; + + let (s, r) = bounded(CAP); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for _ in 0..CAP / 10 { + for i in 0..50 { + s.send(i).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for i in 0..50 { + r.recv().unwrap(); + assert_eq!(r.len(), 50 - i - 1); + } + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for i in 0..CAP { + s.send(i).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for _ in 0..CAP { + r.recv().unwrap(); + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + assert_eq!(r.recv(), Ok(i)); + let len = r.len(); + assert!(len <= CAP); + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + s.send(i).unwrap(); + let len = s.len(); + assert!(len <= CAP); + } + }); + }); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); +} + +#[test] +fn disconnect_wakes_sender() { + let (s, r) = bounded(1); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(s.send(()), Ok(())); + assert_eq!(s.send(()), Err(SendError(()))); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(r); + }); + }); +} + +#[test] +fn disconnect_wakes_receiver() { + let (s, r) = bounded::<()>(1); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(s); + }); + }); +} + +#[test] +fn spsc() { + const COUNT: usize = 100_000; + + let (s, r) = bounded(3); + + crossbeam::scope(|scope| { + scope.spawn(move || { + for i in 0..COUNT { + assert_eq!(r.recv(), Ok(i)); + } + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + for i in 
0..COUNT { + s.send(i).unwrap(); + } + }); + }); +} + +#[test] +fn mpmc() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = bounded::<usize>(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..COUNT { + let n = r.recv().unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + }); + } + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..COUNT { + s.send(i).unwrap(); + } + }); + } + }); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[test] +fn stress_timeout_two_threads() { + const COUNT: usize = 100; + + let (s, r) = bounded(2); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(50)); + } + loop { + if let Ok(()) = s.send_timeout(i, ms(10)) { + break; + } + } + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(50)); + } + loop { + if let Ok(x) = r.recv_timeout(ms(10)) { + assert_eq!(x, i); + break; + } + } + } + }); + }); +} + +#[test] +fn drops() { + const RUNS: usize = 100; + + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + let mut rng = thread_rng(); + + for _ in 0..RUNS { + let steps = rng.gen_range(0, 10_000); + let additional = rng.gen_range(0, 50); + + DROPS.store(0, Ordering::SeqCst); + let (s, r) = bounded::<DropCounter>(50); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..steps { + r.recv().unwrap(); + } + }); + + scope.spawn(|| { + for _ in 0..steps { + s.send(DropCounter).unwrap(); + } + }); + }); + + for _ in 0..additional { + s.send(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(s); + drop(r); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} + +#[test] +fn linearizable() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = bounded(THREADS); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..COUNT { + s.send(0).unwrap(); + r.try_recv().unwrap(); + } + }); + } + }); +} + +#[test] +fn fairness() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded::<()>(COUNT); + let (s2, r2) = bounded::<()>(COUNT); + + for _ in 0..COUNT { + s1.send(()).unwrap(); + s2.send(()).unwrap(); + } + + let mut hits = [0usize; 2]; + for _ in 0..COUNT { + select! { + recv(r1) -> _ => hits[0] += 1, + recv(r2) -> _ => hits[1] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); +} + +#[test] +fn fairness_duplicates() { + const COUNT: usize = 10_000; + + let (s, r) = bounded::<()>(COUNT); + + for _ in 0..COUNT { + s.send(()).unwrap(); + } + + let mut hits = [0usize; 5]; + for _ in 0..COUNT { + select! { + recv(r) -> _ => hits[0] += 1, + recv(r) -> _ => hits[1] += 1, + recv(r) -> _ => hits[2] += 1, + recv(r) -> _ => hits[3] += 1, + recv(r) -> _ => hits[4] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); +} + +#[test] +fn recv_in_send() { + let (s, _r) = bounded(1); + s.send(()).unwrap(); + + #[allow(unreachable_code)] + { + select! { + send(s, panic!()) -> _ => panic!(), + default => {} + } + } + + let (s, r) = bounded(2); + s.send(()).unwrap(); + + select!
{ + send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {} + } +} diff --git a/crossbeam-channel/tests/golang.rs b/crossbeam-channel/tests/golang.rs new file mode 100644 index 000000000..8dc4d824c --- /dev/null +++ b/crossbeam-channel/tests/golang.rs @@ -0,0 +1,1053 @@ +//! Tests copied from Go and manually rewritten in Rust. +//! +//! # Copyright +//! +//! The original tests can be found in the Go source distribution. +//! +//! Authors: https://golang.org/AUTHORS +//! License: https://golang.org/LICENSE +//! Source: https://github.com/golang/go +//! +//! ```text +//! Copyright (c) 2009 The Go Authors. All rights reserved. +//! +//! Redistribution and use in source and binary forms, with or without +//! modification, are permitted provided that the following conditions are +//! met: +//! +//! * Redistributions of source code must retain the above copyright +//! notice, this list of conditions and the following disclaimer. +//! * Redistributions in binary form must reproduce the above +//! copyright notice, this list of conditions and the following disclaimer +//! in the documentation and/or other materials provided with the +//! distribution. +//! * Neither the name of Google Inc. nor the names of its +//! contributors may be used to endorse or promote products derived from +//! this software without specific prior written permission. +//! +//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +//! "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +//! LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +//! A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +//! OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +//! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +//! LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +//! DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +//! THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +//! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +//! 
``` + +#[macro_use] +extern crate crossbeam_channel; +extern crate parking_lot; + +use std::any::Any; +use std::collections::HashMap; +use std::sync::Arc; +use std::thread; +use std::time::Duration; + +use crossbeam_channel::{bounded, Receiver, Select, Sender}; +use parking_lot::{Condvar, Mutex}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +struct Chan<T> { + inner: Arc<Mutex<ChanInner<T>>>, +} + +struct ChanInner<T> { + s: Option<Sender<T>>, + r: Receiver<T>, +} + +impl<T> Clone for Chan<T> { + fn clone(&self) -> Chan<T> { + Chan { + inner: self.inner.clone(), + } + } +} + +impl<T> Chan<T> { + fn send(&self, msg: T) { + let s = self.inner + .lock() + .s + .as_ref() + .expect("sending into closed channel") + .clone(); + let _ = s.send(msg); + } + + fn try_recv(&self) -> Option<T> { + let r = self.inner + .lock() + .r + .clone(); + r.try_recv().ok() + } + + fn recv(&self) -> Option<T> { + let r = self.inner + .lock() + .r + .clone(); + r.recv().ok() + } + + fn close(&self) { + self.inner + .lock() + .s + .take() + .expect("channel already closed"); + } + + fn rx(&self) -> Receiver<T> { + self.inner + .lock() + .r + .clone() + } + + fn tx(&self) -> Sender<T> { + match self.inner.lock().s.as_ref() { + None => { + let (s, r) = bounded(0); + std::mem::forget(r); + s + } + Some(s) => s.clone(), + } + } +} + +impl<T> Iterator for Chan<T> { + type Item = T; + + fn next(&mut self) -> Option<T> { + self.recv() + } +} + +impl<'a, T> IntoIterator for &'a Chan<T> { + type Item = T; + type IntoIter = Chan<T>; + + fn into_iter(self) -> Self::IntoIter { + self.clone() + } +} + +fn make<T>(cap: usize) -> Chan<T> { + let (s, r) = bounded(cap); + Chan { + inner: Arc::new(Mutex::new(ChanInner { + s: Some(s), + r, + })), + } +} + +#[derive(Clone)] +struct WaitGroup(Arc<WaitGroupInner>); + +struct WaitGroupInner { + cond: Condvar, + count: Mutex<i32>, +} + +impl WaitGroup { + fn new() -> WaitGroup { + WaitGroup(Arc::new(WaitGroupInner { + cond: Condvar::new(), + count: Mutex::new(0), + })) + } + + fn add(&self, delta: i32) { + let mut count = self.0.count.lock(); + *count += delta; + assert!(*count >= 0); + self.0.cond.notify_all(); + } + + fn done(&self) { + self.add(-1); + } + + fn wait(&self) { + let mut count = self.0.count.lock(); + while *count > 0 { + self.0.cond.wait(&mut count); + } + } +} + +struct Defer<F: FnOnce()> { + f: Option<Box<F>>, +} + +impl<F: FnOnce()> Drop for Defer<F> { + fn drop(&mut self) { + let f = self.f.take().unwrap(); + let mut f = Some(f); + let mut f = move || f.take().unwrap()(); + f(); + } +} + +macro_rules! defer { + ($body:expr) => { + let _defer = Defer { + f: Some(Box::new(|| $body)), + }; + }; +} + +macro_rules! go { + (@parse ref $v:ident, $($tail:tt)*) => {{ + let ref $v = $v; + go!(@parse $($tail)*) + }}; + (@parse move $v:ident, $($tail:tt)*) => {{ + let $v = $v; + go!(@parse $($tail)*) + }}; + (@parse $v:ident, $($tail:tt)*) => {{ + let $v = $v.clone(); + go!(@parse $($tail)*) + }}; + (@parse $body:expr) => { + ::std::thread::spawn(move || { + let res = ::std::panic::catch_unwind(::std::panic::AssertUnwindSafe(|| { + $body + })); + if res.is_err() { + eprintln!("goroutine panicked: {:?}", res); + ::std::process::abort(); + } + }) + }; + (@parse $($tail:tt)*) => { + compile_error!("invalid `go!` syntax") + }; + ($($tail:tt)*) => {{ + go!(@parse $($tail)*) + }}; +}
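+ +// A note on the harness above: `go!(a, b, { body })` clones `a` and `b`, moves +// the clones into a new thread, and runs `body` there, aborting the whole +// process if the body panics. It is a rough stand-in for a Go goroutine +// closing over channel values.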
+ +// https://github.com/golang/go/blob/master/test/chan/doubleselect.go +mod doubleselect { + use super::*; + + const ITERATIONS: i32 = 10_000; + + fn sender(n: i32, c1: Chan<i32>, c2: Chan<i32>, c3: Chan<i32>, c4: Chan<i32>) { + defer! { c1.close() } + defer! { c2.close() } + defer! { c3.close() } + defer! { c4.close() } + + for i in 0..n { + select! { + send(c1.tx(), i) -> _ => {} + send(c2.tx(), i) -> _ => {} + send(c3.tx(), i) -> _ => {} + send(c4.tx(), i) -> _ => {} + } + } + } + + fn mux(out: Chan<i32>, inp: Chan<i32>, done: Chan<bool>) { + for v in inp { + out.send(v); + } + done.send(true); + } + + fn recver(inp: Chan<i32>) { + let mut seen = HashMap::new(); + + for v in &inp { + if seen.contains_key(&v) { + panic!("got duplicate value for {}", v); + } + seen.insert(v, true); + } + } + + #[test] + fn main() { + let c1 = make::<i32>(0); + let c2 = make::<i32>(0); + let c3 = make::<i32>(0); + let c4 = make::<i32>(0); + let done = make::<bool>(0); + let cmux = make::<i32>(0); + + go!(c1, c2, c3, c4, sender(ITERATIONS, c1, c2, c3, c4)); + go!(cmux, c1, done, mux(cmux, c1, done)); + go!(cmux, c2, done, mux(cmux, c2, done)); + go!(cmux, c3, done, mux(cmux, c3, done)); + go!(cmux, c4, done, mux(cmux, c4, done)); + go!(done, cmux, { + done.recv(); + done.recv(); + done.recv(); + done.recv(); + cmux.close(); + }); + recver(cmux); + } +} + +// https://github.com/golang/go/blob/master/test/chan/fifo.go +mod fifo { + use super::*; + + const N: i32 = 10; + + #[test] + fn asynch_fifo() { + let ch = make::<i32>(N as usize); + for i in 0..N { + ch.send(i); + } + for i in 0..N { + if ch.recv() != Some(i) { + panic!("bad receive"); + } + } + } + + fn chain(ch: Chan<i32>, val: i32, inp: Chan<i32>, out: Chan<i32>) { + inp.recv(); + if ch.recv() != Some(val) { + panic!(val); + } + out.send(1); + }
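+ + // Each `chain` stage runs once: it waits for its predecessor to signal + // `inp`, checks that it reads its own index `val` from `ch`, and then + // signals its successor through `out`. `synch_fifo` below links N such + // stages together.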
+ #[test] + fn synch_fifo() { + let ch = make::<i32>(0); + let mut inp = make::<i32>(0); + let start = inp.clone(); + + for i in 0..N { + let out = make::<i32>(0); + go!(ch, i, inp, out, chain(ch, i, inp, out)); + inp = out; + } + + start.send(0); + for i in 0..N { + ch.send(i); + } + inp.recv(); + } +} + +// https://github.com/golang/go/blob/master/test/chan/nonblock.go +mod nonblock { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/select.go +mod select { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/select2.go +mod select2 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/select3.go +mod select3 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/select4.go +mod select4 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/select5.go +mod select5 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/select6.go +mod select6 { + // TODO + use super::*; + + #[test] + fn main() { + let c1 = make::<bool>(0); + let c2 = make::<bool>(0); + let c3 = make::<bool>(0); + + go!(c1, c1.recv()); + go!(c1, c2, c3, { + select! { + recv(c1.rx()) -> _ => panic!("dummy"), + recv(c2.rx()) -> _ => c3.send(true), + } + c1.recv(); + }); + go!(c2, c2.send(true)); + + c3.recv(); + c1.send(true); + c1.send(true); + } +} + +// https://github.com/golang/go/blob/master/test/chan/select7.go +mod select7 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/sieve1.go +mod sieve1 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/sieve2.go +mod sieve2 { + // TODO +} + +// https://github.com/golang/go/blob/master/test/chan/zerosize.go +mod zerosize { + use super::*; + + #[test] + fn zero_size_struct() { + struct ZeroSize; + let _ = make::<ZeroSize>(0); + } + + #[test] + fn zero_size_array() { + let _ = make::<[u8; 0]>(0); + } +} + +// https://github.com/golang/go/blob/master/src/runtime/chan_test.go +mod chan_test { + use super::*; + + #[test] + fn test_chan() { + const N: i32 = 200; + + for cap in 0..N { + { + // Ensure that receive from empty chan blocks. + let c = make::<i32>(cap as usize); + + let recv1 = Arc::new(Mutex::new(false)); + go!(c, recv1, { + c.recv(); + *recv1.lock() = true; + }); + + let recv2 = Arc::new(Mutex::new(false)); + go!(c, recv2, { + c.recv(); + *recv2.lock() = true; + }); + + thread::sleep(ms(1)); + + if *recv1.lock() || *recv2.lock() { + panic!(); + } + + // Ensure that non-blocking receive does not block. + select! { + recv(c.rx()) -> _ => panic!(), + default => {} + } + select! { + recv(c.rx()) -> _ => panic!(), + default => {} + } + + c.send(0); + c.send(0); + } + + { + // Ensure that send to full chan blocks. + let c = make::<i32>(cap as usize); + for i in 0..cap { + c.send(i); + } + + let sent = Arc::new(Mutex::new(0)); + go!(sent, c, { + c.send(0); + *sent.lock() = 1; + }); + + thread::sleep(ms(1)); + + if *sent.lock() != 0 { + panic!(); + } + + // Ensure that non-blocking send does not block. + select! { + send(c.tx(), 0) -> _ => panic!(), + default => {} + } + c.recv(); + } + + { + // Ensure that we receive 0 from closed chan. + let c = make::<i32>(cap as usize); + for i in 0..cap { + c.send(i); + } + c.close(); + + for i in 0..cap { + let v = c.recv(); + if v != Some(i) { + panic!(); + } + } + + if c.recv() != None { + panic!(); + } + if c.try_recv() != None { + panic!(); + } + } + + { + // Ensure that close unblocks receive. + let c = make::<i32>(cap as usize); + let done = make::<bool>(0); + + go!(c, done, { + let v = c.try_recv(); + done.send(v.is_some()); + }); + + thread::sleep(ms(1)); + c.close(); + + if !done.recv().unwrap() { + // panic!(); + } + } + + { + // Send 100 integers, + // ensure that we receive them non-corrupted in FIFO order. + let c = make::<i32>(cap as usize); + go!(c, { + for i in 0..100 { + c.send(i); + } + }); + for i in 0..100 { + if c.recv() != Some(i) { + panic!(); + } + } + + // Same, but using recv2. + go!(c, { + for i in 0..100 { + c.send(i); + } + }); + for i in 0..100 { + if c.recv() != Some(i) { + panic!(); + } + } + } + } + } + + #[test] + fn test_nonblock_recv_race() { + const N: usize = 1000; + + for _ in 0..N { + let c = make::<i32>(1); + c.send(1); + + let t = go!(c, { + select! { + recv(c.rx()) -> _ => {} + default => panic!("chan is not ready"), + } + }); + + c.close(); + c.recv(); + t.join().unwrap(); + } + } + + #[test] + fn test_nonblock_select_race() { + const N: usize = 1000; + + let done = make::<bool>(1); + for _ in 0..N { + let c1 = make::<i32>(1); + let c2 = make::<i32>(1); + c1.send(1); + + go!(c1, c2, done, { + select! { + recv(c1.rx()) -> _ => {} + recv(c2.rx()) -> _ => {} + default => { + done.send(false); + return; + } + } + done.send(true); + }); + + c2.send(1); + select! { + recv(c1.rx()) -> _ => {} + default => {} + } + if !done.recv().unwrap() { + panic!("no chan is ready"); + } + } + } + + #[test] + fn test_nonblock_select_race2() { + const N: usize = 1000; + + let done = make::<bool>(1); + for _ in 0..N { + let c1 = make::<i32>(1); + let c2 = make::<i32>(0); + c1.send(1); + + go!(c1, c2, done, { + select! { + recv(c1.rx()) -> _ => {} + recv(c2.rx()) -> _ => {} + default => { + done.send(false); + return; + } + } + done.send(true); + }); + + c2.close(); + select! { + recv(c1.rx()) -> _ => {} + default => {} + } + if !done.recv().unwrap() { + panic!("no chan is ready"); + } + } + }
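+ + // The two `select!` blocks in the test below list the same send and recv + // arms in opposite orders, so across iterations each operation takes a + // turn at being registered first.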
+ + #[test] + fn test_self_select() { + // Ensure that send/recv on the same chan in select + // does not crash nor deadlock. + + for &cap in &[0, 10] { + let wg = WaitGroup::new(); + wg.add(2); + let c = make::<i32>(cap); + + for p in 0..2 { + let p = p; + go!(wg, p, c, { + defer! { wg.done() } + for i in 0..1000 { + if p == 0 || i % 2 == 0 { + select! { + send(c.tx(), p) -> _ => {} + recv(c.rx()) -> v => { + if cap == 0 && v.ok() == Some(p) { + panic!("self receive"); + } + } + } + } else { + select! { + recv(c.rx()) -> v => { + if cap == 0 && v.ok() == Some(p) { + panic!("self receive"); + } + } + send(c.tx(), p) -> _ => {} + } + } + } + }); + } + wg.wait(); + } + } + + #[test] + fn test_select_stress() { + let c = vec![ + make::<i32>(0), + make::<i32>(0), + make::<i32>(2), + make::<i32>(3), + ]; + + const N: usize = 10000; + + // There are 4 goroutines that send N values on each of the chans, + // + 4 goroutines that receive N values on each of the chans, + // + 1 goroutine that sends N values on each of the chans in a single select, + // + 1 goroutine that receives N values on each of the chans in a single select. + // All these sends, receives and selects interact chaotically at runtime, + // but we are careful that this whole construct does not deadlock. + let wg = WaitGroup::new(); + wg.add(10); + + for k in 0..4 { + go!(k, c, wg, { + for _ in 0..N { + c[k].send(0); + } + wg.done(); + }); + go!(k, c, wg, { + for _ in 0..N { + c[k].recv(); + } + wg.done(); + }); + } + + go!(c, wg, { + let mut n = [0; 4]; + let mut c1 = c.iter() + .map(|c| Some(c.rx().clone())) + .collect::<Vec<_>>(); + + for _ in 0..4 * N { + let index = { + let mut sel = Select::new(); + let mut opers = [!0; 4]; + for &i in &[3, 2, 0, 1] { + if let Some(c) = &c1[i] { + opers[i] = sel.recv(c); + } + } + + let oper = sel.select(); + let mut index = !0; + for i in 0..4 { + if opers[i] == oper.index() { + index = i; + let _ = oper.recv(c1[i].as_ref().unwrap()); + break; + } + } + index + }; + + n[index] += 1; + if n[index] == N { + c1[index] = None; + } + } + wg.done(); + }); + + go!(c, wg, { + let mut n = [0; 4]; + let mut c1 = c.iter() + .map(|c| Some(c.tx().clone())) + .collect::<Vec<_>>(); + + for _ in 0..4 * N { + let index = { + let mut sel = Select::new(); + let mut opers = [!0; 4]; + for &i in &[0, 1, 2, 3] { + if let Some(c) = &c1[i] { + opers[i] = sel.send(c); + } + } + + let oper = sel.select(); + let mut index = !0; + for i in 0..4 { + if opers[i] == oper.index() { + index = i; + let _ = oper.send(c1[i].as_ref().unwrap(), 0); + break; + } + } + index + }; + + n[index] += 1; + if n[index] == N { + c1[index] = None; + } + } + wg.done(); + }); + + wg.wait(); + } + + #[test] + fn test_select_fairness() { + const TRIALS: usize = 10000; + + let c1 = make::<i32>(TRIALS + 1); + let c2 = make::<i32>(TRIALS + 1); + + for _ in 0..TRIALS + 1 { + c1.send(1); + c2.send(2); + } + + let c3 = make::<i32>(0); + let c4 = make::<i32>(0); + let out = make::<i32>(0); + let done = make::<i32>(0); + let wg = WaitGroup::new(); + + wg.add(1); + go!(wg, c1, c2, c3, c4, out, done, { + defer! { wg.done() }; + loop { + let b; + select! { + recv(c3.rx()) -> m => b = m.unwrap(), + recv(c4.rx()) -> m => b = m.unwrap(), + recv(c1.rx()) -> m => b = m.unwrap(), + recv(c2.rx()) -> m => b = m.unwrap(), + } + select! { + send(out.tx(), b) -> _ => {} + recv(done.rx()) -> _ => return, + } + } + }); + + let (mut cnt1, mut cnt2) = (0, 0); + for _ in 0..TRIALS { + match out.recv() { + Some(1) => cnt1 += 1, + Some(2) => cnt2 += 1, + b => panic!("unexpected value {:?} on channel", b), + } + } + + // If the select in the goroutine is fair, + // cnt1 and cnt2 should be about the same value. + // With 10,000 trials, the expected margin of error at + // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)).
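+ // Concretely, 4.4172 / (2 * 100) is about 0.022, so the observed + // frequency `cnt1 / TRIALS` may deviate from 0.5 by at most roughly + // 2.2 percentage points before the check below fails.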
+ + let r = cnt1 as f64 / TRIALS as f64; + let e = (r - 0.5).abs(); + + if e > 4.4172 / (2.0 * (TRIALS as f64).sqrt()) { + panic!( + "unfair select: in {} trials, results were {}, {}", + TRIALS, + cnt1, + cnt2, + ); + } + + done.close(); + wg.wait(); + } + + #[test] + fn test_chan_send_interface() { + struct Mt; + + let c = make::>(1); + c.send(Box::new(Mt)); + + select! { + send(c.tx(), Box::new(Mt)) -> _ => {} + default => {} + } + + select! { + send(c.tx(), Box::new(Mt)) -> _ => {} + send(c.tx(), Box::new(Mt)) -> _ => {} + default => {} + } + } + + #[test] + fn test_pseudo_random_send() { + const N: usize = 100; + + for cap in 0..N { + let c = make::(cap); + let l = Arc::new(Mutex::new(vec![0i32; N])); + let done = make::(0); + + go!(c, done, l, { + let mut l = l.lock(); + for i in 0..N { + thread::yield_now(); + l[i] = c.recv().unwrap(); + } + done.send(true); + }); + + for _ in 0..N { + select! { + send(c.tx(), 1) -> _ => {} + send(c.tx(), 0) -> _ => {} + } + } + done.recv(); + + let mut n0 = 0; + let mut n1 = 0; + for &i in l.lock().iter() { + n0 += (i + 1) % 2; + n1 += i; + } + + if n0 <= N as i32 / 10 || n1 <= N as i32 / 10 { + panic!( + "Want pseudorandom, got {} zeros and {} ones (chan cap {})", + n0, + n1, + cap, + ); + } + } + } + + #[test] + fn test_multi_consumer() { + const NWORK: usize = 23; + const NITER: usize = 271828; + + let pn = [2, 3, 7, 11, 13, 17, 19, 23, 27, 31]; + + let q = make::(NWORK * 3); + let r = make::(NWORK * 3); + + let wg = WaitGroup::new(); + for i in 0..NWORK { + wg.add(1); + let w = i; + go!(q, r, wg, pn, { + for v in &q { + if pn[w % pn.len()] == v { + thread::yield_now(); + } + r.send(v); + } + wg.done(); + }); + } + + let expect = Arc::new(Mutex::new(0)); + go!(q, r, expect, wg, pn, { + for i in 0..NITER { + let v = pn[i % pn.len()]; + *expect.lock() += v; + q.send(v); + } + q.close(); + wg.wait(); + r.close(); + }); + + let mut n = 0; + let mut s = 0; + for v in &r { + n += 1; + s += v; + } + + if n != NITER || s != *expect.lock() { + panic!(); + } + } + + #[test] + fn test_select_duplicate_channel() { + // This test makes sure we can queue a G on + // the same channel multiple times. + let c = make::(0); + let d = make::(0); + let e = make::(0); + + go!(c, d, e, { + select! { + recv(c.rx()) -> _ => {} + recv(d.rx()) -> _ => {} + recv(e.rx()) -> _ => {} + } + e.send(9); + }); + thread::sleep(ms(1)); + + go!(c, c.recv()); + thread::sleep(ms(1)); + + d.send(7); + e.recv(); + c.send(8); + } +} + +// https://github.com/golang/go/blob/master/test/closedchan.go +mod closedchan { + // TODO +} + +// https://github.com/golang/go/blob/master/src/runtime/chanbarrier_test.go +mod chanbarrier_test { + // TODO +} + +// https://github.com/golang/go/blob/master/src/runtime/race/testdata/chan_test.go +mod race_chan_test { + // TODO +} + +// https://github.com/golang/go/blob/master/test/ken/chan.go +mod chan { + // TODO +} + +// https://github.com/golang/go/blob/master/test/ken/chan1.go +mod chan1 { + // TODO +} diff --git a/crossbeam-channel/tests/iter.rs b/crossbeam-channel/tests/iter.rs new file mode 100644 index 000000000..3eee149e5 --- /dev/null +++ b/crossbeam-channel/tests/iter.rs @@ -0,0 +1,110 @@ +//! Tests for iteration over receivers. 
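+//!
+//! Note: `Receiver::iter()` blocks while the channel is empty and ends only
+//! once the channel becomes disconnected, whereas `try_iter()` yields the
+//! currently buffered messages without ever blocking.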
+
+extern crate crossbeam;
+extern crate crossbeam_channel;
+
+use crossbeam_channel::unbounded;
+
+#[test]
+fn nested_recv_iter() {
+    let (s, r) = unbounded::<i32>();
+    let (total_s, total_r) = unbounded::<i32>();
+
+    crossbeam::scope(|scope| {
+        scope.spawn(move || {
+            let mut acc = 0;
+            for x in r.iter() {
+                acc += x;
+            }
+            total_s.send(acc).unwrap();
+        });
+
+        s.send(3).unwrap();
+        s.send(1).unwrap();
+        s.send(2).unwrap();
+        drop(s);
+        assert_eq!(total_r.recv().unwrap(), 6);
+    });
+}
+
+#[test]
+fn recv_iter_break() {
+    let (s, r) = unbounded::<i32>();
+    let (count_s, count_r) = unbounded();
+
+    crossbeam::scope(|scope| {
+        scope.spawn(move || {
+            let mut count = 0;
+            for x in r.iter() {
+                if count >= 3 {
+                    break;
+                } else {
+                    count += x;
+                }
+            }
+            count_s.send(count).unwrap();
+        });
+
+        s.send(2).unwrap();
+        s.send(2).unwrap();
+        s.send(2).unwrap();
+        let _ = s.send(2);
+        drop(s);
+        assert_eq!(count_r.recv().unwrap(), 4);
+    })
+}
+
+#[test]
+fn recv_try_iter() {
+    let (request_s, request_r) = unbounded();
+    let (response_s, response_r) = unbounded();
+
+    crossbeam::scope(|scope| {
+        scope.spawn(move || {
+            let mut count = 0;
+            loop {
+                for x in response_r.try_iter() {
+                    count += x;
+                    if count == 6 {
+                        assert_eq!(count, 6);
+                        return;
+                    }
+                }
+                request_s.send(()).unwrap();
+            }
+        });
+
+        for _ in request_r.iter() {
+            if response_s.send(2).is_err() {
+                break;
+            }
+        }
+    })
+}
+
+#[test]
+fn recv_into_iter_owned() {
+    let mut iter = {
+        let (s, r) = unbounded::<i32>();
+        s.send(1).unwrap();
+        s.send(2).unwrap();
+        r.into_iter()
+    };
+
+    assert_eq!(iter.next().unwrap(), 1);
+    assert_eq!(iter.next().unwrap(), 2);
+    assert_eq!(iter.next().is_none(), true);
+}
+
+#[test]
+fn recv_into_iter_borrowed() {
+    let (s, r) = unbounded::<i32>();
+    s.send(1).unwrap();
+    s.send(2).unwrap();
+    drop(s);
+
+    let mut iter = (&r).into_iter();
+    assert_eq!(iter.next().unwrap(), 1);
+    assert_eq!(iter.next().unwrap(), 2);
+    assert_eq!(iter.next().is_none(), true);
+}
diff --git a/crossbeam-channel/tests/list.rs b/crossbeam-channel/tests/list.rs
new file mode 100644
index 000000000..ae75c1ccb
--- /dev/null
+++ b/crossbeam-channel/tests/list.rs
@@ -0,0 +1,438 @@
+//! Tests for the list channel flavor.
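+//!
+//! The list flavor is the linked-list-based implementation behind unbounded
+//! channels, which is why every channel in this file is created with
+//! `unbounded()`.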
+ +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; +extern crate rand; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::thread; +use std::time::Duration; + +use crossbeam_channel::{unbounded}; +use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; +use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; +use rand::{thread_rng, Rng}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = unbounded(); + s.try_send(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + s.send(8).unwrap(); + assert_eq!(r.recv(), Ok(8)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); +} + +#[test] +fn capacity() { + let (s, r) = unbounded::<()>(); + assert_eq!(s.capacity(), None); + assert_eq!(r.capacity(), None); +} + +#[test] +fn len_empty_full() { + let (s, r) = unbounded(); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + s.send(()).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); + + r.recv().unwrap(); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); +} + +#[test] +fn try_recv() { + let (s, r) = unbounded(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + thread::sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + thread::sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + s.send(7).unwrap(); + }); + }); +} + +#[test] +fn recv() { + let (s, r) = unbounded(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv(), Ok(7)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(8)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(9)); + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + s.send(7).unwrap(); + s.send(8).unwrap(); + s.send(9).unwrap(); + }); + }); +} + +#[test] +fn recv_timeout() { + let (s, r) = unbounded::(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); + assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); + assert_eq!( + r.recv_timeout(ms(1000)), + Err(RecvTimeoutError::Disconnected) + ); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + s.send(7).unwrap(); + }); + }); +} + +#[test] +fn try_send() { + let (s, r) = unbounded(); + for i in 0..1000 { + assert_eq!(s.try_send(i), Ok(())); + } + + drop(r); + assert_eq!(s.try_send(777), Err(TrySendError::Disconnected(777))); +} + +#[test] +fn send() { + let (s, r) = unbounded(); + for i in 0..1000 { + assert_eq!(s.send(i), Ok(())); + } + + drop(r); + assert_eq!(s.send(777), Err(SendError(777))); +} + +#[test] +fn send_timeout() { + let (s, r) = unbounded(); + for i in 0..1000 { + assert_eq!(s.send_timeout(i, ms(i as u64)), Ok(())); + } + + drop(r); + assert_eq!(s.send_timeout(777, ms(0)), Err(SendTimeoutError::Disconnected(777))); +} + +#[test] +fn send_after_disconnect() { + let (s, r) = unbounded(); + + 
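+    // Queue up a few messages, then drop the receiver: every send flavor
+    // must report disconnection afterwards.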
s.send(1).unwrap(); + s.send(2).unwrap(); + s.send(3).unwrap(); + + drop(r); + + assert_eq!(s.send(4), Err(SendError(4))); + assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5))); + assert_eq!(s.send_timeout(6, ms(0)), Err(SendTimeoutError::Disconnected(6))); +} + +#[test] +fn recv_after_disconnect() { + let (s, r) = unbounded(); + + s.send(1).unwrap(); + s.send(2).unwrap(); + s.send(3).unwrap(); + + drop(s); + + assert_eq!(r.recv(), Ok(1)); + assert_eq!(r.recv(), Ok(2)); + assert_eq!(r.recv(), Ok(3)); + assert_eq!(r.recv(), Err(RecvError)); +} + +#[test] +fn len() { + let (s, r) = unbounded(); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for i in 0..50 { + s.send(i).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for i in 0..50 { + r.recv().unwrap(); + assert_eq!(r.len(), 50 - i - 1); + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); +} + +#[test] +fn disconnect_wakes_receiver() { + let (s, r) = unbounded::<()>(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(s); + }); + }); +} + +#[test] +fn spsc() { + const COUNT: usize = 100_000; + + let (s, r) = unbounded(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + for i in 0..COUNT { + assert_eq!(r.recv(), Ok(i)); + } + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + for i in 0..COUNT { + s.send(i).unwrap(); + } + }); + }); +} + +#[test] +fn mpmc() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = unbounded::(); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..COUNT { + let n = r.recv().unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + }); + } + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..COUNT { + s.send(i).unwrap(); + } + }); + } + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[test] +fn stress_timeout_two_threads() { + const COUNT: usize = 100; + + let (s, r) = unbounded(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(50)); + } + s.send(i).unwrap(); + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(50)); + } + loop { + if let Ok(x) = r.recv_timeout(ms(10)) { + assert_eq!(x, i); + break; + } + } + } + }); + }); +} + +#[test] +fn drops() { + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + let mut rng = thread_rng(); + + for _ in 0..100 { + let steps = rng.gen_range(0, 10_000); + let additional = rng.gen_range(0, 1000); + + DROPS.store(0, Ordering::SeqCst); + let (s, r) = unbounded::(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..steps { + r.recv().unwrap(); + } + }); + + scope.spawn(|| { + for _ in 0..steps { + s.send(DropCounter).unwrap(); + } + }); + }); + + for _ in 0..additional { + s.try_send(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(s); + drop(r); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} + +#[test] +fn linearizable() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = unbounded(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { 
+                for _ in 0..COUNT {
+                    s.send(0).unwrap();
+                    r.try_recv().unwrap();
+                }
+            });
+        }
+    });
+}
+
+#[test]
+fn fairness() {
+    const COUNT: usize = 10_000;
+
+    let (s1, r1) = unbounded::<()>();
+    let (s2, r2) = unbounded::<()>();
+
+    for _ in 0..COUNT {
+        s1.send(()).unwrap();
+        s2.send(()).unwrap();
+    }
+
+    let mut hits = [0usize; 2];
+    for _ in 0..COUNT {
+        select! {
+            recv(r1) -> _ => hits[0] += 1,
+            recv(r2) -> _ => hits[1] += 1,
+        }
+    }
+    assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
+}
+
+#[test]
+fn fairness_duplicates() {
+    const COUNT: usize = 10_000;
+
+    let (s, r) = unbounded();
+
+    for _ in 0..COUNT {
+        s.send(()).unwrap();
+    }
+
+    let mut hits = [0usize; 5];
+    for _ in 0..COUNT {
+        select! {
+            recv(r) -> _ => hits[0] += 1,
+            recv(r) -> _ => hits[1] += 1,
+            recv(r) -> _ => hits[2] += 1,
+            recv(r) -> _ => hits[3] += 1,
+            recv(r) -> _ => hits[4] += 1,
+        }
+    }
+    assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
+}
+
+#[test]
+fn recv_in_send() {
+    let (s, r) = unbounded();
+    s.send(()).unwrap();
+
+    select! {
+        send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {}
+    }
+}
diff --git a/crossbeam-channel/tests/mpsc.rs b/crossbeam-channel/tests/mpsc.rs
new file mode 100644
index 000000000..cd447e630
--- /dev/null
+++ b/crossbeam-channel/tests/mpsc.rs
@@ -0,0 +1,1893 @@
+//! Tests copied from `std::sync::mpsc`.
+//!
+//! # Copyright
+//!
+//! This is a copy of tests for the `std::sync::mpsc` channels from the standard library, but
+//! modified to work with `crossbeam-channel` instead.
+//!
+//! Minor tweaks were needed to make the tests compile:
+//!
+//! - Replace `box` syntax with `Box::new`.
+//! - Replace all uses of `Select` with `select!`.
+//! - Change the imports.
+//!
+//! License: https://github.com/rust-lang/rust/blob/master/COPYRIGHT
+//! Source: https://github.com/rust-lang/rust/tree/master/src/libstd/sync/mpsc
+//!
+//! ```text
+//! Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+//! file at the top-level directory of this distribution and at
+//! http://rust-lang.org/COPYRIGHT.
+//!
+//! Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+//! http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+//! <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+//! option. This file may not be copied, modified, or distributed
+//! except according to those terms.
+//! ```
+
+#[macro_use]
+extern crate crossbeam_channel as cc;
+
+use std::sync::mpsc::{RecvError, RecvTimeoutError, TryRecvError};
+use std::sync::mpsc::{SendError, TrySendError};
+use std::time::Duration;
+
+pub struct Sender<T> {
+    pub inner: cc::Sender<T>,
+}
+
+impl<T> Sender<T> {
+    pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+        self.inner.send(t).map_err(|cc::SendError(m)| SendError(m))
+    }
+}
+
+impl<T> Clone for Sender<T> {
+    fn clone(&self) -> Sender<T> {
+        Sender {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+pub struct SyncSender<T> {
+    pub inner: cc::Sender<T>,
+}
+
+impl<T> SyncSender<T> {
+    pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+        self.inner.send(t).map_err(|cc::SendError(m)| SendError(m))
+    }
+
+    pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+        self.inner.try_send(t).map_err(|err| {
+            match err {
+                cc::TrySendError::Full(m) => TrySendError::Full(m),
+                cc::TrySendError::Disconnected(m) => TrySendError::Disconnected(m),
+            }
+        })
+    }
+}
+
+impl<T> Clone for SyncSender<T> {
+    fn clone(&self) -> SyncSender<T> {
+        SyncSender {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+pub struct Receiver<T> {
+    pub inner: cc::Receiver<T>,
+}
+
+impl<T> Receiver<T> {
+    pub fn try_recv(&self) -> Result<T, TryRecvError> {
+        self.inner.try_recv().map_err(|err| {
+            match err {
+                cc::TryRecvError::Empty => TryRecvError::Empty,
+                cc::TryRecvError::Disconnected => TryRecvError::Disconnected,
+            }
+        })
+    }
+
+    pub fn recv(&self) -> Result<T, RecvError> {
+        self.inner.recv().map_err(|_| RecvError)
+    }
+
+    pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> {
+        self.inner.recv_timeout(timeout).map_err(|err| {
+            match err {
+                cc::RecvTimeoutError::Timeout => RecvTimeoutError::Timeout,
+                cc::RecvTimeoutError::Disconnected => RecvTimeoutError::Disconnected,
+            }
+        })
+    }
+
+    pub fn iter(&self) -> Iter<T> {
+        Iter { inner: self }
+    }
+
+    pub fn try_iter(&self) -> TryIter<T> {
+        TryIter { inner: self }
+    }
+}
+
+impl<'a, T> IntoIterator for &'a Receiver<T> {
+    type Item = T;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Iter<'a, T> {
+        self.iter()
+    }
+}
+
+impl<T> IntoIterator for Receiver<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+
+    fn into_iter(self) -> IntoIter<T> {
+        IntoIter { inner: self }
+    }
+}
+
+pub struct TryIter<'a, T: 'a> {
+    inner: &'a Receiver<T>,
+}
+
+impl<'a, T> Iterator for TryIter<'a, T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.inner.try_recv().ok()
+    }
+}
+
+pub struct Iter<'a, T: 'a> {
+    inner: &'a Receiver<T>,
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.inner.recv().ok()
+    }
+}
+
+pub struct IntoIter<T> {
+    inner: Receiver<T>,
+}
+
+impl<T> Iterator for IntoIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.inner.recv().ok()
+    }
+}
+
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+    let (s, r) = cc::unbounded();
+    let s = Sender { inner: s };
+    let r = Receiver { inner: r };
+    (s, r)
+}
+
+pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
+    let (s, r) = cc::bounded(bound);
+    let s = SyncSender { inner: s };
+    let r = Receiver { inner: r };
+    (s, r)
+}
+
+macro_rules! select {
+    (
+        $($name:pat = $rx:ident.$meth:ident() => $code:expr),+
+    ) => ({
+        crossbeam_channel_internal!
{ + $( + recv(($rx).inner) -> res => { + let $name = res.map_err(|_| ::std::sync::mpsc::RecvError); + $code + } + )+ + } + }) +} + +// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/mod.rs +mod channel_tests { + use super::*; + + use std::env; + use std::thread; + use std::time::{Duration, Instant}; + + pub fn stress_factor() -> usize { + match env::var("RUST_TEST_STRESS") { + Ok(val) => val.parse().unwrap(), + Err(..) => 1, + } + } + + #[test] + fn smoke() { + let (tx, rx) = channel::(); + tx.send(1).unwrap(); + assert_eq!(rx.recv().unwrap(), 1); + } + + #[test] + fn drop_full() { + let (tx, _rx) = channel::>(); + tx.send(Box::new(1)).unwrap(); + } + + #[test] + fn drop_full_shared() { + let (tx, _rx) = channel::>(); + drop(tx.clone()); + drop(tx.clone()); + tx.send(Box::new(1)).unwrap(); + } + + #[test] + fn smoke_shared() { + let (tx, rx) = channel::(); + tx.send(1).unwrap(); + assert_eq!(rx.recv().unwrap(), 1); + let tx = tx.clone(); + tx.send(1).unwrap(); + assert_eq!(rx.recv().unwrap(), 1); + } + + #[test] + fn smoke_threads() { + let (tx, rx) = channel::(); + let _t = thread::spawn(move|| { + tx.send(1).unwrap(); + }); + assert_eq!(rx.recv().unwrap(), 1); + } + + #[test] + fn smoke_port_gone() { + let (tx, rx) = channel::(); + drop(rx); + assert!(tx.send(1).is_err()); + } + + #[test] + fn smoke_shared_port_gone() { + let (tx, rx) = channel::(); + drop(rx); + assert!(tx.send(1).is_err()) + } + + #[test] + fn smoke_shared_port_gone2() { + let (tx, rx) = channel::(); + drop(rx); + let tx2 = tx.clone(); + drop(tx); + assert!(tx2.send(1).is_err()); + } + + #[test] + fn port_gone_concurrent() { + let (tx, rx) = channel::(); + let _t = thread::spawn(move|| { + rx.recv().unwrap(); + }); + while tx.send(1).is_ok() {} + } + + #[test] + fn port_gone_concurrent_shared() { + let (tx, rx) = channel::(); + let tx2 = tx.clone(); + let _t = thread::spawn(move|| { + rx.recv().unwrap(); + }); + while tx.send(1).is_ok() && tx2.send(1).is_ok() {} + } + + #[test] + fn smoke_chan_gone() { + let (tx, rx) = channel::(); + drop(tx); + assert!(rx.recv().is_err()); + } + + #[test] + fn smoke_chan_gone_shared() { + let (tx, rx) = channel::<()>(); + let tx2 = tx.clone(); + drop(tx); + drop(tx2); + assert!(rx.recv().is_err()); + } + + #[test] + fn chan_gone_concurrent() { + let (tx, rx) = channel::(); + let _t = thread::spawn(move|| { + tx.send(1).unwrap(); + tx.send(1).unwrap(); + }); + while rx.recv().is_ok() {} + } + + #[test] + fn stress() { + let (tx, rx) = channel::(); + let t = thread::spawn(move|| { + for _ in 0..10000 { tx.send(1).unwrap(); } + }); + for _ in 0..10000 { + assert_eq!(rx.recv().unwrap(), 1); + } + t.join().ok().unwrap(); + } + + #[test] + fn stress_shared() { + const AMT: u32 = 10000; + const NTHREADS: u32 = 8; + let (tx, rx) = channel::(); + + let t = thread::spawn(move|| { + for _ in 0..AMT * NTHREADS { + assert_eq!(rx.recv().unwrap(), 1); + } + match rx.try_recv() { + Ok(..) 
=> panic!(), + _ => {} + } + }); + + for _ in 0..NTHREADS { + let tx = tx.clone(); + thread::spawn(move|| { + for _ in 0..AMT { tx.send(1).unwrap(); } + }); + } + drop(tx); + t.join().ok().unwrap(); + } + + #[test] + fn send_from_outside_runtime() { + let (tx1, rx1) = channel::<()>(); + let (tx2, rx2) = channel::(); + let t1 = thread::spawn(move|| { + tx1.send(()).unwrap(); + for _ in 0..40 { + assert_eq!(rx2.recv().unwrap(), 1); + } + }); + rx1.recv().unwrap(); + let t2 = thread::spawn(move|| { + for _ in 0..40 { + tx2.send(1).unwrap(); + } + }); + t1.join().ok().unwrap(); + t2.join().ok().unwrap(); + } + + #[test] + fn recv_from_outside_runtime() { + let (tx, rx) = channel::(); + let t = thread::spawn(move|| { + for _ in 0..40 { + assert_eq!(rx.recv().unwrap(), 1); + } + }); + for _ in 0..40 { + tx.send(1).unwrap(); + } + t.join().ok().unwrap(); + } + + #[test] + fn no_runtime() { + let (tx1, rx1) = channel::(); + let (tx2, rx2) = channel::(); + let t1 = thread::spawn(move|| { + assert_eq!(rx1.recv().unwrap(), 1); + tx2.send(2).unwrap(); + }); + let t2 = thread::spawn(move|| { + tx1.send(1).unwrap(); + assert_eq!(rx2.recv().unwrap(), 2); + }); + t1.join().ok().unwrap(); + t2.join().ok().unwrap(); + } + + #[test] + fn oneshot_single_thread_close_port_first() { + // Simple test of closing without sending + let (_tx, rx) = channel::(); + drop(rx); + } + + #[test] + fn oneshot_single_thread_close_chan_first() { + // Simple test of closing without sending + let (tx, _rx) = channel::(); + drop(tx); + } + + #[test] + fn oneshot_single_thread_send_port_close() { + // Testing that the sender cleans up the payload if receiver is closed + let (tx, rx) = channel::>(); + drop(rx); + assert!(tx.send(Box::new(0)).is_err()); + } + + #[test] + fn oneshot_single_thread_recv_chan_close() { + // Receiving on a closed chan will panic + let res = thread::spawn(move|| { + let (tx, rx) = channel::(); + drop(tx); + rx.recv().unwrap(); + }).join(); + // What is our res? 
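+        // `res` is the `join()` result: the closure panicked on `unwrap()`,
+        // so it must be `Err`.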
+ assert!(res.is_err()); + } + + #[test] + fn oneshot_single_thread_send_then_recv() { + let (tx, rx) = channel::>(); + tx.send(Box::new(10)).unwrap(); + assert!(*rx.recv().unwrap() == 10); + } + + #[test] + fn oneshot_single_thread_try_send_open() { + let (tx, rx) = channel::(); + assert!(tx.send(10).is_ok()); + assert!(rx.recv().unwrap() == 10); + } + + #[test] + fn oneshot_single_thread_try_send_closed() { + let (tx, rx) = channel::(); + drop(rx); + assert!(tx.send(10).is_err()); + } + + #[test] + fn oneshot_single_thread_try_recv_open() { + let (tx, rx) = channel::(); + tx.send(10).unwrap(); + assert!(rx.recv() == Ok(10)); + } + + #[test] + fn oneshot_single_thread_try_recv_closed() { + let (tx, rx) = channel::(); + drop(tx); + assert!(rx.recv().is_err()); + } + + #[test] + fn oneshot_single_thread_peek_data() { + let (tx, rx) = channel::(); + assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); + tx.send(10).unwrap(); + assert_eq!(rx.try_recv(), Ok(10)); + } + + #[test] + fn oneshot_single_thread_peek_close() { + let (tx, rx) = channel::(); + drop(tx); + assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); + assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); + } + + #[test] + fn oneshot_single_thread_peek_open() { + let (_tx, rx) = channel::(); + assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); + } + + #[test] + fn oneshot_multi_task_recv_then_send() { + let (tx, rx) = channel::>(); + let _t = thread::spawn(move|| { + assert!(*rx.recv().unwrap() == 10); + }); + + tx.send(Box::new(10)).unwrap(); + } + + #[test] + fn oneshot_multi_task_recv_then_close() { + let (tx, rx) = channel::>(); + let _t = thread::spawn(move|| { + drop(tx); + }); + let res = thread::spawn(move|| { + assert!(*rx.recv().unwrap() == 10); + }).join(); + assert!(res.is_err()); + } + + #[test] + fn oneshot_multi_thread_close_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = channel::(); + let _t = thread::spawn(move|| { + drop(rx); + }); + drop(tx); + } + } + + #[test] + fn oneshot_multi_thread_send_close_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = channel::(); + let _t = thread::spawn(move|| { + drop(rx); + }); + let _ = thread::spawn(move|| { + tx.send(1).unwrap(); + }).join(); + } + } + + #[test] + fn oneshot_multi_thread_recv_close_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = channel::(); + thread::spawn(move|| { + let res = thread::spawn(move|| { + rx.recv().unwrap(); + }).join(); + assert!(res.is_err()); + }); + let _t = thread::spawn(move|| { + thread::spawn(move|| { + drop(tx); + }); + }); + } + } + + #[test] + fn oneshot_multi_thread_send_recv_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = channel::>(); + let _t = thread::spawn(move|| { + tx.send(Box::new(10)).unwrap(); + }); + assert!(*rx.recv().unwrap() == 10); + } + } + + #[test] + fn stream_send_recv_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = channel(); + + send(tx, 0); + recv(rx, 0); + + fn send(tx: Sender>, i: i32) { + if i == 10 { return } + + thread::spawn(move|| { + tx.send(Box::new(i)).unwrap(); + send(tx, i + 1); + }); + } + + fn recv(rx: Receiver>, i: i32) { + if i == 10 { return } + + thread::spawn(move|| { + assert!(*rx.recv().unwrap() == i); + recv(rx, i + 1); + }); + } + } + } + + #[test] + fn oneshot_single_thread_recv_timeout() { + let (tx, rx) = channel(); + tx.send(()).unwrap(); + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Err(RecvTimeoutError::Timeout)); + 
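+        // A timeout must not break the channel; it stays usable afterwards.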
tx.send(()).unwrap(); + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); + } + + #[test] + fn stress_recv_timeout_two_threads() { + let (tx, rx) = channel(); + let stress = stress_factor() + 100; + let timeout = Duration::from_millis(100); + + thread::spawn(move || { + for i in 0..stress { + if i % 2 == 0 { + thread::sleep(timeout * 2); + } + tx.send(1usize).unwrap(); + } + }); + + let mut recv_count = 0; + loop { + match rx.recv_timeout(timeout) { + Ok(n) => { + assert_eq!(n, 1usize); + recv_count += 1; + } + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + } + + assert_eq!(recv_count, stress); + } + + #[test] + fn recv_timeout_upgrade() { + let (tx, rx) = channel::<()>(); + let timeout = Duration::from_millis(1); + let _tx_clone = tx.clone(); + + let start = Instant::now(); + assert_eq!(rx.recv_timeout(timeout), Err(RecvTimeoutError::Timeout)); + assert!(Instant::now() >= start + timeout); + } + + #[test] + fn stress_recv_timeout_shared() { + let (tx, rx) = channel(); + let stress = stress_factor() + 100; + + for i in 0..stress { + let tx = tx.clone(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(i as u64 * 10)); + tx.send(1usize).unwrap(); + }); + } + + drop(tx); + + let mut recv_count = 0; + loop { + match rx.recv_timeout(Duration::from_millis(10)) { + Ok(n) => { + assert_eq!(n, 1usize); + recv_count += 1; + } + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + } + + assert_eq!(recv_count, stress); + } + + #[test] + fn recv_a_lot() { + // Regression test that we don't run out of stack in scheduler context + let (tx, rx) = channel(); + for _ in 0..10000 { tx.send(()).unwrap(); } + for _ in 0..10000 { rx.recv().unwrap(); } + } + + #[test] + fn shared_recv_timeout() { + let (tx, rx) = channel(); + let total = 5; + for _ in 0..total { + let tx = tx.clone(); + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + } + + for _ in 0..total { rx.recv().unwrap(); } + + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Err(RecvTimeoutError::Timeout)); + tx.send(()).unwrap(); + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); + } + + #[test] + fn shared_chan_stress() { + let (tx, rx) = channel(); + let total = stress_factor() + 100; + for _ in 0..total { + let tx = tx.clone(); + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + } + + for _ in 0..total { + rx.recv().unwrap(); + } + } + + #[test] + fn test_nested_recv_iter() { + let (tx, rx) = channel::(); + let (total_tx, total_rx) = channel::(); + + let _t = thread::spawn(move|| { + let mut acc = 0; + for x in rx.iter() { + acc += x; + } + total_tx.send(acc).unwrap(); + }); + + tx.send(3).unwrap(); + tx.send(1).unwrap(); + tx.send(2).unwrap(); + drop(tx); + assert_eq!(total_rx.recv().unwrap(), 6); + } + + #[test] + fn test_recv_iter_break() { + let (tx, rx) = channel::(); + let (count_tx, count_rx) = channel(); + + let _t = thread::spawn(move|| { + let mut count = 0; + for x in rx.iter() { + if count >= 3 { + break; + } else { + count += x; + } + } + count_tx.send(count).unwrap(); + }); + + tx.send(2).unwrap(); + tx.send(2).unwrap(); + tx.send(2).unwrap(); + let _ = tx.send(2); + drop(tx); + assert_eq!(count_rx.recv().unwrap(), 4); + } + + #[test] + fn test_recv_try_iter() { + let (request_tx, request_rx) = channel(); + let (response_tx, response_rx) = channel(); + + // Request `x`s until we have `6`. 
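+        // The worker drains whatever is buffered with `try_iter()` (which
+        // never blocks) and sends another request while the sum is below 6.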
+ let t = thread::spawn(move|| { + let mut count = 0; + loop { + for x in response_rx.try_iter() { + count += x; + if count == 6 { + return count; + } + } + request_tx.send(()).unwrap(); + } + }); + + for _ in request_rx.iter() { + if response_tx.send(2).is_err() { + break; + } + } + + assert_eq!(t.join().unwrap(), 6); + } + + #[test] + fn test_recv_into_iter_owned() { + let mut iter = { + let (tx, rx) = channel::(); + tx.send(1).unwrap(); + tx.send(2).unwrap(); + + rx.into_iter() + }; + assert_eq!(iter.next().unwrap(), 1); + assert_eq!(iter.next().unwrap(), 2); + assert_eq!(iter.next().is_none(), true); + } + + #[test] + fn test_recv_into_iter_borrowed() { + let (tx, rx) = channel::(); + tx.send(1).unwrap(); + tx.send(2).unwrap(); + drop(tx); + let mut iter = (&rx).into_iter(); + assert_eq!(iter.next().unwrap(), 1); + assert_eq!(iter.next().unwrap(), 2); + assert_eq!(iter.next().is_none(), true); + } + + #[test] + fn try_recv_states() { + let (tx1, rx1) = channel::(); + let (tx2, rx2) = channel::<()>(); + let (tx3, rx3) = channel::<()>(); + let _t = thread::spawn(move|| { + rx2.recv().unwrap(); + tx1.send(1).unwrap(); + tx3.send(()).unwrap(); + rx2.recv().unwrap(); + drop(tx1); + tx3.send(()).unwrap(); + }); + + assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); + tx2.send(()).unwrap(); + rx3.recv().unwrap(); + assert_eq!(rx1.try_recv(), Ok(1)); + assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); + tx2.send(()).unwrap(); + rx3.recv().unwrap(); + assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected)); + } + + // This bug used to end up in a livelock inside of the Receiver destructor + // because the internal state of the Shared packet was corrupted + #[test] + fn destroy_upgraded_shared_port_when_sender_still_active() { + let (tx, rx) = channel(); + let (tx2, rx2) = channel(); + let _t = thread::spawn(move|| { + rx.recv().unwrap(); // wait on a oneshot + drop(rx); // destroy a shared + tx2.send(()).unwrap(); + }); + // make sure the other thread has gone to sleep + for _ in 0..5000 { thread::yield_now(); } + + // upgrade to a shared chan and send a message + let t = tx.clone(); + drop(tx); + t.send(()).unwrap(); + + // wait for the child thread to exit before we exit + rx2.recv().unwrap(); + } + + #[test] + fn issue_32114() { + let (tx, _) = channel(); + let _ = tx.send(123); + assert_eq!(tx.send(123), Err(SendError(123))); + } +} + +// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/mod.rs +mod sync_channel_tests { + use super::*; + + use std::env; + use std::thread; + use std::time::Duration; + + pub fn stress_factor() -> usize { + match env::var("RUST_TEST_STRESS") { + Ok(val) => val.parse().unwrap(), + Err(..) 
=> 1, + } + } + + #[test] + fn smoke() { + let (tx, rx) = sync_channel::(1); + tx.send(1).unwrap(); + assert_eq!(rx.recv().unwrap(), 1); + } + + #[test] + fn drop_full() { + let (tx, _rx) = sync_channel::>(1); + tx.send(Box::new(1)).unwrap(); + } + + #[test] + fn smoke_shared() { + let (tx, rx) = sync_channel::(1); + tx.send(1).unwrap(); + assert_eq!(rx.recv().unwrap(), 1); + let tx = tx.clone(); + tx.send(1).unwrap(); + assert_eq!(rx.recv().unwrap(), 1); + } + + #[test] + fn recv_timeout() { + let (tx, rx) = sync_channel::(1); + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Err(RecvTimeoutError::Timeout)); + tx.send(1).unwrap(); + assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(1)); + } + + #[test] + fn smoke_threads() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { + tx.send(1).unwrap(); + }); + assert_eq!(rx.recv().unwrap(), 1); + } + + #[test] + fn smoke_port_gone() { + let (tx, rx) = sync_channel::(0); + drop(rx); + assert!(tx.send(1).is_err()); + } + + #[test] + fn smoke_shared_port_gone2() { + let (tx, rx) = sync_channel::(0); + drop(rx); + let tx2 = tx.clone(); + drop(tx); + assert!(tx2.send(1).is_err()); + } + + #[test] + fn port_gone_concurrent() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { + rx.recv().unwrap(); + }); + while tx.send(1).is_ok() {} + } + + #[test] + fn port_gone_concurrent_shared() { + let (tx, rx) = sync_channel::(0); + let tx2 = tx.clone(); + let _t = thread::spawn(move|| { + rx.recv().unwrap(); + }); + while tx.send(1).is_ok() && tx2.send(1).is_ok() {} + } + + #[test] + fn smoke_chan_gone() { + let (tx, rx) = sync_channel::(0); + drop(tx); + assert!(rx.recv().is_err()); + } + + #[test] + fn smoke_chan_gone_shared() { + let (tx, rx) = sync_channel::<()>(0); + let tx2 = tx.clone(); + drop(tx); + drop(tx2); + assert!(rx.recv().is_err()); + } + + #[test] + fn chan_gone_concurrent() { + let (tx, rx) = sync_channel::(0); + thread::spawn(move|| { + tx.send(1).unwrap(); + tx.send(1).unwrap(); + }); + while rx.recv().is_ok() {} + } + + #[test] + fn stress() { + let (tx, rx) = sync_channel::(0); + thread::spawn(move|| { + for _ in 0..10000 { tx.send(1).unwrap(); } + }); + for _ in 0..10000 { + assert_eq!(rx.recv().unwrap(), 1); + } + } + + #[test] + fn stress_recv_timeout_two_threads() { + let (tx, rx) = sync_channel::(0); + + thread::spawn(move|| { + for _ in 0..10000 { tx.send(1).unwrap(); } + }); + + let mut recv_count = 0; + loop { + match rx.recv_timeout(Duration::from_millis(1)) { + Ok(v) => { + assert_eq!(v, 1); + recv_count += 1; + }, + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + } + + assert_eq!(recv_count, 10000); + } + + #[test] + fn stress_recv_timeout_shared() { + const AMT: u32 = 1000; + const NTHREADS: u32 = 8; + let (tx, rx) = sync_channel::(0); + let (dtx, drx) = sync_channel::<()>(0); + + thread::spawn(move|| { + let mut recv_count = 0; + loop { + match rx.recv_timeout(Duration::from_millis(10)) { + Ok(v) => { + assert_eq!(v, 1); + recv_count += 1; + }, + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + } + + assert_eq!(recv_count, AMT * NTHREADS); + assert!(rx.try_recv().is_err()); + + dtx.send(()).unwrap(); + }); + + for _ in 0..NTHREADS { + let tx = tx.clone(); + thread::spawn(move|| { + for _ in 0..AMT { tx.send(1).unwrap(); } + }); + } + + drop(tx); + + drx.recv().unwrap(); + } + + #[test] + fn stress_shared() { + const AMT: u32 = 1000; + const NTHREADS: u32 = 8; + let (tx, rx) = 
sync_channel::(0); + let (dtx, drx) = sync_channel::<()>(0); + + thread::spawn(move|| { + for _ in 0..AMT * NTHREADS { + assert_eq!(rx.recv().unwrap(), 1); + } + match rx.try_recv() { + Ok(..) => panic!(), + _ => {} + } + dtx.send(()).unwrap(); + }); + + for _ in 0..NTHREADS { + let tx = tx.clone(); + thread::spawn(move|| { + for _ in 0..AMT { tx.send(1).unwrap(); } + }); + } + drop(tx); + drx.recv().unwrap(); + } + + #[test] + fn oneshot_single_thread_close_port_first() { + // Simple test of closing without sending + let (_tx, rx) = sync_channel::(0); + drop(rx); + } + + #[test] + fn oneshot_single_thread_close_chan_first() { + // Simple test of closing without sending + let (tx, _rx) = sync_channel::(0); + drop(tx); + } + + #[test] + fn oneshot_single_thread_send_port_close() { + // Testing that the sender cleans up the payload if receiver is closed + let (tx, rx) = sync_channel::>(0); + drop(rx); + assert!(tx.send(Box::new(0)).is_err()); + } + + #[test] + fn oneshot_single_thread_recv_chan_close() { + // Receiving on a closed chan will panic + let res = thread::spawn(move|| { + let (tx, rx) = sync_channel::(0); + drop(tx); + rx.recv().unwrap(); + }).join(); + // What is our res? + assert!(res.is_err()); + } + + #[test] + fn oneshot_single_thread_send_then_recv() { + let (tx, rx) = sync_channel::>(1); + tx.send(Box::new(10)).unwrap(); + assert!(*rx.recv().unwrap() == 10); + } + + #[test] + fn oneshot_single_thread_try_send_open() { + let (tx, rx) = sync_channel::(1); + assert_eq!(tx.try_send(10), Ok(())); + assert!(rx.recv().unwrap() == 10); + } + + #[test] + fn oneshot_single_thread_try_send_closed() { + let (tx, rx) = sync_channel::(0); + drop(rx); + assert_eq!(tx.try_send(10), Err(TrySendError::Disconnected(10))); + } + + #[test] + fn oneshot_single_thread_try_send_closed2() { + let (tx, _rx) = sync_channel::(0); + assert_eq!(tx.try_send(10), Err(TrySendError::Full(10))); + } + + #[test] + fn oneshot_single_thread_try_recv_open() { + let (tx, rx) = sync_channel::(1); + tx.send(10).unwrap(); + assert!(rx.recv() == Ok(10)); + } + + #[test] + fn oneshot_single_thread_try_recv_closed() { + let (tx, rx) = sync_channel::(0); + drop(tx); + assert!(rx.recv().is_err()); + } + + #[test] + fn oneshot_single_thread_try_recv_closed_with_data() { + let (tx, rx) = sync_channel::(1); + tx.send(10).unwrap(); + drop(tx); + assert_eq!(rx.try_recv(), Ok(10)); + assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); + } + + #[test] + fn oneshot_single_thread_peek_data() { + let (tx, rx) = sync_channel::(1); + assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); + tx.send(10).unwrap(); + assert_eq!(rx.try_recv(), Ok(10)); + } + + #[test] + fn oneshot_single_thread_peek_close() { + let (tx, rx) = sync_channel::(0); + drop(tx); + assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); + assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); + } + + #[test] + fn oneshot_single_thread_peek_open() { + let (_tx, rx) = sync_channel::(0); + assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); + } + + #[test] + fn oneshot_multi_task_recv_then_send() { + let (tx, rx) = sync_channel::>(0); + let _t = thread::spawn(move|| { + assert!(*rx.recv().unwrap() == 10); + }); + + tx.send(Box::new(10)).unwrap(); + } + + #[test] + fn oneshot_multi_task_recv_then_close() { + let (tx, rx) = sync_channel::>(0); + let _t = thread::spawn(move|| { + drop(tx); + }); + let res = thread::spawn(move|| { + assert!(*rx.recv().unwrap() == 10); + }).join(); + assert!(res.is_err()); + } + + #[test] + fn 
oneshot_multi_thread_close_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { + drop(rx); + }); + drop(tx); + } + } + + #[test] + fn oneshot_multi_thread_send_close_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { + drop(rx); + }); + let _ = thread::spawn(move || { + tx.send(1).unwrap(); + }).join(); + } + } + + #[test] + fn oneshot_multi_thread_recv_close_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { + let res = thread::spawn(move|| { + rx.recv().unwrap(); + }).join(); + assert!(res.is_err()); + }); + let _t = thread::spawn(move|| { + thread::spawn(move|| { + drop(tx); + }); + }); + } + } + + #[test] + fn oneshot_multi_thread_send_recv_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = sync_channel::>(0); + let _t = thread::spawn(move|| { + tx.send(Box::new(10)).unwrap(); + }); + assert!(*rx.recv().unwrap() == 10); + } + } + + #[test] + fn stream_send_recv_stress() { + for _ in 0..stress_factor() { + let (tx, rx) = sync_channel::>(0); + + send(tx, 0); + recv(rx, 0); + + fn send(tx: SyncSender>, i: i32) { + if i == 10 { return } + + thread::spawn(move|| { + tx.send(Box::new(i)).unwrap(); + send(tx, i + 1); + }); + } + + fn recv(rx: Receiver>, i: i32) { + if i == 10 { return } + + thread::spawn(move|| { + assert!(*rx.recv().unwrap() == i); + recv(rx, i + 1); + }); + } + } + } + + #[test] + fn recv_a_lot() { + // Regression test that we don't run out of stack in scheduler context + let (tx, rx) = sync_channel(10000); + for _ in 0..10000 { tx.send(()).unwrap(); } + for _ in 0..10000 { rx.recv().unwrap(); } + } + + #[test] + fn shared_chan_stress() { + let (tx, rx) = sync_channel(0); + let total = stress_factor() + 100; + for _ in 0..total { + let tx = tx.clone(); + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + } + + for _ in 0..total { + rx.recv().unwrap(); + } + } + + #[test] + fn test_nested_recv_iter() { + let (tx, rx) = sync_channel::(0); + let (total_tx, total_rx) = sync_channel::(0); + + let _t = thread::spawn(move|| { + let mut acc = 0; + for x in rx.iter() { + acc += x; + } + total_tx.send(acc).unwrap(); + }); + + tx.send(3).unwrap(); + tx.send(1).unwrap(); + tx.send(2).unwrap(); + drop(tx); + assert_eq!(total_rx.recv().unwrap(), 6); + } + + #[test] + fn test_recv_iter_break() { + let (tx, rx) = sync_channel::(0); + let (count_tx, count_rx) = sync_channel(0); + + let _t = thread::spawn(move|| { + let mut count = 0; + for x in rx.iter() { + if count >= 3 { + break; + } else { + count += x; + } + } + count_tx.send(count).unwrap(); + }); + + tx.send(2).unwrap(); + tx.send(2).unwrap(); + tx.send(2).unwrap(); + let _ = tx.try_send(2); + drop(tx); + assert_eq!(count_rx.recv().unwrap(), 4); + } + + #[test] + fn try_recv_states() { + let (tx1, rx1) = sync_channel::(1); + let (tx2, rx2) = sync_channel::<()>(1); + let (tx3, rx3) = sync_channel::<()>(1); + let _t = thread::spawn(move|| { + rx2.recv().unwrap(); + tx1.send(1).unwrap(); + tx3.send(()).unwrap(); + rx2.recv().unwrap(); + drop(tx1); + tx3.send(()).unwrap(); + }); + + assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); + tx2.send(()).unwrap(); + rx3.recv().unwrap(); + assert_eq!(rx1.try_recv(), Ok(1)); + assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); + tx2.send(()).unwrap(); + rx3.recv().unwrap(); + assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected)); + } + + // This bug used to end up in a livelock inside of 
the Receiver destructor + // because the internal state of the Shared packet was corrupted + #[test] + fn destroy_upgraded_shared_port_when_sender_still_active() { + let (tx, rx) = sync_channel::<()>(0); + let (tx2, rx2) = sync_channel::<()>(0); + let _t = thread::spawn(move|| { + rx.recv().unwrap(); // wait on a oneshot + drop(rx); // destroy a shared + tx2.send(()).unwrap(); + }); + // make sure the other thread has gone to sleep + for _ in 0..5000 { thread::yield_now(); } + + // upgrade to a shared chan and send a message + let t = tx.clone(); + drop(tx); + t.send(()).unwrap(); + + // wait for the child thread to exit before we exit + rx2.recv().unwrap(); + } + + #[test] + fn send1() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { rx.recv().unwrap(); }); + assert_eq!(tx.send(1), Ok(())); + } + + #[test] + fn send2() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { drop(rx); }); + assert!(tx.send(1).is_err()); + } + + #[test] + fn send3() { + let (tx, rx) = sync_channel::(1); + assert_eq!(tx.send(1), Ok(())); + let _t =thread::spawn(move|| { drop(rx); }); + assert!(tx.send(1).is_err()); + } + + #[test] + fn send4() { + let (tx, rx) = sync_channel::(0); + let tx2 = tx.clone(); + let (done, donerx) = channel(); + let done2 = done.clone(); + let _t = thread::spawn(move|| { + assert!(tx.send(1).is_err()); + done.send(()).unwrap(); + }); + let _t = thread::spawn(move|| { + assert!(tx2.send(2).is_err()); + done2.send(()).unwrap(); + }); + drop(rx); + donerx.recv().unwrap(); + donerx.recv().unwrap(); + } + + #[test] + fn try_send1() { + let (tx, _rx) = sync_channel::(0); + assert_eq!(tx.try_send(1), Err(TrySendError::Full(1))); + } + + #[test] + fn try_send2() { + let (tx, _rx) = sync_channel::(1); + assert_eq!(tx.try_send(1), Ok(())); + assert_eq!(tx.try_send(1), Err(TrySendError::Full(1))); + } + + #[test] + fn try_send3() { + let (tx, rx) = sync_channel::(1); + assert_eq!(tx.try_send(1), Ok(())); + drop(rx); + assert_eq!(tx.try_send(1), Err(TrySendError::Disconnected(1))); + } + + #[test] + fn issue_15761() { + fn repro() { + let (tx1, rx1) = sync_channel::<()>(3); + let (tx2, rx2) = sync_channel::<()>(3); + + let _t = thread::spawn(move|| { + rx1.recv().unwrap(); + tx2.try_send(()).unwrap(); + }); + + tx1.try_send(()).unwrap(); + rx2.recv().unwrap(); + } + + for _ in 0..100 { + repro() + } + } +} + +// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/select.rs +mod select_tests { + use super::*; + + use std::thread; + + #[test] + fn smoke() { + let (tx1, rx1) = channel::(); + let (tx2, rx2) = channel::(); + tx1.send(1).unwrap(); + select! { + foo = rx1.recv() => { assert_eq!(foo.unwrap(), 1); }, + _bar = rx2.recv() => { panic!() } + } + tx2.send(2).unwrap(); + select! { + _foo = rx1.recv() => { panic!() }, + bar = rx2.recv() => { assert_eq!(bar.unwrap(), 2) } + } + drop(tx1); + select! { + foo = rx1.recv() => { assert!(foo.is_err()); }, + _bar = rx2.recv() => { panic!() } + } + drop(tx2); + select! { + bar = rx2.recv() => { assert!(bar.is_err()); } + } + } + + #[test] + fn smoke2() { + let (_tx1, rx1) = channel::(); + let (_tx2, rx2) = channel::(); + let (_tx3, rx3) = channel::(); + let (_tx4, rx4) = channel::(); + let (tx5, rx5) = channel::(); + tx5.send(4).unwrap(); + select! 
{ + _foo = rx1.recv() => { panic!("1") }, + _foo = rx2.recv() => { panic!("2") }, + _foo = rx3.recv() => { panic!("3") }, + _foo = rx4.recv() => { panic!("4") }, + foo = rx5.recv() => { assert_eq!(foo.unwrap(), 4); } + } + } + + #[test] + fn closed() { + let (_tx1, rx1) = channel::(); + let (tx2, rx2) = channel::(); + drop(tx2); + + select! { + _a1 = rx1.recv() => { panic!() }, + a2 = rx2.recv() => { assert!(a2.is_err()); } + } + } + + #[test] + fn unblocks() { + let (tx1, rx1) = channel::(); + let (_tx2, rx2) = channel::(); + let (tx3, rx3) = channel::(); + + let _t = thread::spawn(move|| { + for _ in 0..20 { thread::yield_now(); } + tx1.send(1).unwrap(); + rx3.recv().unwrap(); + for _ in 0..20 { thread::yield_now(); } + }); + + select! { + a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, + _b = rx2.recv() => { panic!() } + } + tx3.send(1).unwrap(); + select! { + a = rx1.recv() => { assert!(a.is_err()) }, + _b = rx2.recv() => { panic!() } + } + } + + #[test] + fn both_ready() { + let (tx1, rx1) = channel::(); + let (tx2, rx2) = channel::(); + let (tx3, rx3) = channel::<()>(); + + let _t = thread::spawn(move|| { + for _ in 0..20 { thread::yield_now(); } + tx1.send(1).unwrap(); + tx2.send(2).unwrap(); + rx3.recv().unwrap(); + }); + + select! { + a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, + a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } + } + select! { + a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, + a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } + } + assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); + assert_eq!(rx2.try_recv(), Err(TryRecvError::Empty)); + tx3.send(()).unwrap(); + } + + #[test] + fn stress() { + const AMT: i32 = 10000; + let (tx1, rx1) = channel::(); + let (tx2, rx2) = channel::(); + let (tx3, rx3) = channel::<()>(); + + let _t = thread::spawn(move|| { + for i in 0..AMT { + if i % 2 == 0 { + tx1.send(i).unwrap(); + } else { + tx2.send(i).unwrap(); + } + rx3.recv().unwrap(); + } + }); + + for i in 0..AMT { + select! { + i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1.unwrap()); }, + i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2.unwrap()); } + } + tx3.send(()).unwrap(); + } + } + + #[allow(unused_must_use)] + #[test] + fn cloning() { + let (tx1, rx1) = channel::(); + let (_tx2, rx2) = channel::(); + let (tx3, rx3) = channel::<()>(); + + let _t = thread::spawn(move|| { + rx3.recv().unwrap(); + tx1.clone(); + assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); + tx1.send(2).unwrap(); + rx3.recv().unwrap(); + }); + + tx3.send(()).unwrap(); + select! { + _i1 = rx1.recv() => {}, + _i2 = rx2.recv() => panic!() + } + tx3.send(()).unwrap(); + } + + #[allow(unused_must_use)] + #[test] + fn cloning2() { + let (tx1, rx1) = channel::(); + let (_tx2, rx2) = channel::(); + let (tx3, rx3) = channel::<()>(); + + let _t = thread::spawn(move|| { + rx3.recv().unwrap(); + tx1.clone(); + assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); + tx1.send(2).unwrap(); + rx3.recv().unwrap(); + }); + + tx3.send(()).unwrap(); + select! { + _i1 = rx1.recv() => {}, + _i2 = rx2.recv() => panic!() + } + tx3.send(()).unwrap(); + } + + #[test] + fn cloning3() { + let (tx1, rx1) = channel::<()>(); + let (tx2, rx2) = channel::<()>(); + let (tx3, rx3) = channel::<()>(); + let _t = thread::spawn(move|| { + select! 
{ + _ = rx1.recv() => panic!(), + _ = rx2.recv() => {} + } + tx3.send(()).unwrap(); + }); + + for _ in 0..1000 { thread::yield_now(); } + drop(tx1.clone()); + tx2.send(()).unwrap(); + rx3.recv().unwrap(); + } + + #[test] + fn preflight1() { + let (tx, rx) = channel(); + tx.send(()).unwrap(); + select! { + _n = rx.recv() => {} + } + } + + #[test] + fn preflight2() { + let (tx, rx) = channel(); + tx.send(()).unwrap(); + tx.send(()).unwrap(); + select! { + _n = rx.recv() => {} + } + } + + #[test] + fn preflight3() { + let (tx, rx) = channel(); + drop(tx.clone()); + tx.send(()).unwrap(); + select! { + _n = rx.recv() => {} + } + } + + #[test] + fn preflight4() { + let (tx, rx) = channel(); + tx.send(()).unwrap(); + select! { + _ = rx.recv() => {} + } + } + + #[test] + fn preflight5() { + let (tx, rx) = channel(); + tx.send(()).unwrap(); + tx.send(()).unwrap(); + select! { + _ = rx.recv() => {} + } + } + + #[test] + fn preflight6() { + let (tx, rx) = channel(); + drop(tx.clone()); + tx.send(()).unwrap(); + select! { + _ = rx.recv() => {} + } + } + + #[test] + fn preflight7() { + let (tx, rx) = channel::<()>(); + drop(tx); + select! { + _ = rx.recv() => {} + } + } + + #[test] + fn preflight8() { + let (tx, rx) = channel(); + tx.send(()).unwrap(); + drop(tx); + rx.recv().unwrap(); + select! { + _ = rx.recv() => {} + } + } + + #[test] + fn preflight9() { + let (tx, rx) = channel(); + drop(tx.clone()); + tx.send(()).unwrap(); + drop(tx); + rx.recv().unwrap(); + select! { + _ = rx.recv() => {} + } + } + + #[test] + fn oneshot_data_waiting() { + let (tx1, rx1) = channel(); + let (tx2, rx2) = channel(); + let _t = thread::spawn(move|| { + select! { + _n = rx1.recv() => {} + } + tx2.send(()).unwrap(); + }); + + for _ in 0..100 { thread::yield_now() } + tx1.send(()).unwrap(); + rx2.recv().unwrap(); + } + + #[test] + fn stream_data_waiting() { + let (tx1, rx1) = channel(); + let (tx2, rx2) = channel(); + tx1.send(()).unwrap(); + tx1.send(()).unwrap(); + rx1.recv().unwrap(); + rx1.recv().unwrap(); + let _t = thread::spawn(move|| { + select! { + _n = rx1.recv() => {} + } + tx2.send(()).unwrap(); + }); + + for _ in 0..100 { thread::yield_now() } + tx1.send(()).unwrap(); + rx2.recv().unwrap(); + } + + #[test] + fn shared_data_waiting() { + let (tx1, rx1) = channel(); + let (tx2, rx2) = channel(); + drop(tx1.clone()); + tx1.send(()).unwrap(); + rx1.recv().unwrap(); + let _t = thread::spawn(move|| { + select! { + _n = rx1.recv() => {} + } + tx2.send(()).unwrap(); + }); + + for _ in 0..100 { thread::yield_now() } + tx1.send(()).unwrap(); + rx2.recv().unwrap(); + } + + #[test] + fn sync1() { + let (tx, rx) = sync_channel::(1); + tx.send(1).unwrap(); + select! { + n = rx.recv() => { assert_eq!(n.unwrap(), 1); } + } + } + + #[test] + fn sync2() { + let (tx, rx) = sync_channel::(0); + let _t = thread::spawn(move|| { + for _ in 0..100 { thread::yield_now() } + tx.send(1).unwrap(); + }); + select! { + n = rx.recv() => { assert_eq!(n.unwrap(), 1); } + } + } + + #[test] + fn sync3() { + let (tx1, rx1) = sync_channel::(0); + let (tx2, rx2): (Sender, Receiver) = channel(); + let _t = thread::spawn(move|| { tx1.send(1).unwrap(); }); + let _t = thread::spawn(move|| { tx2.send(2).unwrap(); }); + select! 
{
+            n = rx1.recv() => {
+                let n = n.unwrap();
+                assert_eq!(n, 1);
+                assert_eq!(rx2.recv().unwrap(), 2);
+            },
+            n = rx2.recv() => {
+                let n = n.unwrap();
+                assert_eq!(n, 2);
+                assert_eq!(rx1.recv().unwrap(), 1);
+            }
+        }
+    }
+}
diff --git a/crossbeam-channel/tests/never.rs b/crossbeam-channel/tests/never.rs
new file mode 100644
index 000000000..b4a3bace1
--- /dev/null
+++ b/crossbeam-channel/tests/never.rs
@@ -0,0 +1,101 @@
+//! Tests for the never channel flavor.
+
+extern crate crossbeam;
+#[macro_use]
+extern crate crossbeam_channel;
+extern crate rand;
+
+use std::thread;
+use std::time::{Duration, Instant};
+
+use crossbeam_channel::{never, tick, unbounded};
+
+fn ms(ms: u64) -> Duration {
+    Duration::from_millis(ms)
+}
+
+#[test]
+fn smoke() {
+    select! {
+        recv(never::<i32>()) -> _ => panic!(),
+        default => {}
+    }
+}
+
+#[test]
+fn optional() {
+    let (s, r) = unbounded::<i32>();
+    s.send(1).unwrap();
+    s.send(2).unwrap();
+
+    let mut r = Some(&r);
+    select! {
+        recv(r.unwrap_or(&never())) -> _ => {}
+        default => panic!(),
+    }
+
+    r = None;
+    select! {
+        recv(r.unwrap_or(&never())) -> _ => panic!(),
+        default => {}
+    }
+}
+
+#[test]
+fn tick_n() {
+    let mut r = tick(ms(100));
+    let mut step = 0;
+
+    loop {
+        select! {
+            recv(r) -> _ => step += 1,
+            default(ms(500)) => break,
+        }
+
+        if step == 10 {
+            r = never();
+        }
+    }
+
+    assert_eq!(step, 10);
+}
+
+#[test]
+fn capacity() {
+    let r = never::<i32>();
+    assert_eq!(r.capacity(), Some(0));
+}
+
+#[test]
+fn len_empty_full() {
+    let r = never::<i32>();
+    assert_eq!(r.len(), 0);
+    assert_eq!(r.is_empty(), true);
+    assert_eq!(r.is_full(), true);
+}
+
+#[test]
+fn try_recv() {
+    let r = never::<i32>();
+    assert!(r.try_recv().is_err());
+
+    thread::sleep(ms(100));
+    assert!(r.try_recv().is_err());
+}
+
+#[test]
+fn recv_timeout() {
+    let start = Instant::now();
+    let r = never::<i32>();
+
+    assert!(r.recv_timeout(ms(100)).is_err());
+    let now = Instant::now();
+    assert!(now - start >= ms(100));
+    assert!(now - start <= ms(150));
+
+    assert!(r.recv_timeout(ms(100)).is_err());
+    let now = Instant::now();
+    assert!(now - start >= ms(200));
+    assert!(now - start <= ms(250));
+}
diff --git a/crossbeam-channel/tests/select.rs b/crossbeam-channel/tests/select.rs
new file mode 100644
index 000000000..0a967e2dd
--- /dev/null
+++ b/crossbeam-channel/tests/select.rs
@@ -0,0 +1,1287 @@
+//! Tests for the `Select` struct.
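+//!
+//! `Select` is used in two phases: operations are registered with `recv()`
+//! and `send()`, one of them is picked by `select()`, `try_select()`, or
+//! `select_timeout()`, and the winner must then be completed against the
+//! same channel. A minimal sketch of the pattern these tests repeat
+//! (assuming a receiver `r` that already holds a message):
+//!
+//! ```ignore
+//! let mut sel = Select::new();
+//! let oper1 = sel.recv(&r);
+//! let oper = sel.select();
+//! assert_eq!(oper.index(), oper1);
+//! let _msg = oper.recv(&r);
+//! ```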
+ +extern crate crossbeam; +extern crate crossbeam_channel; + +use std::any::Any; +use std::cell::Cell; +use std::thread; +use std::time::{Duration, Instant}; + +use crossbeam_channel::{after, bounded, unbounded, tick, Receiver, Select, TryRecvError}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke1() { + let (s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + s1.send(1).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), + i if i == oper2 => panic!(), + _ => unreachable!(), + } + + s2.send(2).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => panic!(), + i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), + _ => unreachable!(), + } +} + +#[test] +fn smoke2() { + let (_s1, r1) = unbounded::(); + let (_s2, r2) = unbounded::(); + let (_s3, r3) = unbounded::(); + let (_s4, r4) = unbounded::(); + let (s5, r5) = unbounded::(); + + s5.send(5).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper3 = sel.recv(&r3); + let oper4 = sel.recv(&r4); + let oper5 = sel.recv(&r5); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => panic!(), + i if i == oper2 => panic!(), + i if i == oper3 => panic!(), + i if i == oper4 => panic!(), + i if i == oper5 => assert_eq!(oper.recv(&r5), Ok(5)), + _ => unreachable!(), + } +} + +#[test] +fn disconnected() { + let (s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + drop(s1); + thread::sleep(ms(500)); + s2.send(5).unwrap(); + }); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r1).is_err()), + i if i == oper2 => panic!(), + _ => unreachable!(), + } + } + + r2.recv().unwrap(); + }); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r1).is_err()), + i if i == oper2 => panic!(), + _ => unreachable!(), + } + } + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + drop(s2); + }); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r2).is_err()), + i if i == oper2 => panic!(), + _ => unreachable!(), + } + } + }); +} + +#[test] +fn default() { + let (s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + let mut sel = Select::new(); + let _oper1 = sel.recv(&r1); + let _oper2 = sel.recv(&r2); + let oper = sel.try_select(); + match oper { + Err(_) => {} + Ok(_) => panic!(), + } + + drop(s1); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.try_select(); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r1).is_err()), + i if i == oper2 => panic!(), + _ => unreachable!() + } + } + + s2.send(2).unwrap(); + + let mut sel = 
Select::new(); + let oper1 = sel.recv(&r2); + let oper = sel.try_select(); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert_eq!(oper.recv(&r2), Ok(2)), + _ => unreachable!() + } + } + + let mut sel = Select::new(); + let _oper1 = sel.recv(&r2); + let oper = sel.try_select(); + match oper { + Err(_) => {} + Ok(_) => panic!(), + } + + let mut sel = Select::new(); + let oper = sel.try_select(); + match oper { + Err(_) => {} + Ok(_) => panic!(), + } +} + +#[test] +fn timeout() { + let (_s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(1500)); + s2.send(2).unwrap(); + }); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + i if i == oper1 => panic!(), + i if i == oper2 => panic!(), + _ => unreachable!(), + } + } + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => panic!(), + i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), + _ => unreachable!(), + } + } + }); + + crossbeam::scope(|scope| { + let (s, r) = unbounded::(); + + scope.spawn(move || { + thread::sleep(ms(500)); + drop(s); + }); + + let mut sel = Select::new(); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => { + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.try_select(); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r).is_err()), + _ => unreachable!(), + } + } + } + Ok(_) => unreachable!(), + } + }); +} + +#[test] +fn default_when_disconnected() { + let (_, r) = unbounded::(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.try_select(); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r).is_err()), + _ => unreachable!(), + } + } + + let (_, r) = unbounded::(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.recv(&r).is_err()), + _ => unreachable!(), + } + } + + let (s, _) = bounded::(0); + + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper = sel.try_select(); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.send(&s, 0).is_err()), + _ => unreachable!(), + } + } + + let (s, _) = bounded::(0); + + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => assert!(oper.send(&s, 0).is_err()), + _ => unreachable!(), + } + } +} + +#[test] +fn default_only() { + let start = Instant::now(); + + let mut sel = Select::new(); + let oper = sel.try_select(); + assert!(oper.is_err()); + let now = Instant::now(); + assert!(now - start <= ms(50)); + + let start = Instant::now(); + let mut sel = Select::new(); + let oper = sel.select_timeout(ms(500)); + assert!(oper.is_err()); + let now = Instant::now(); + assert!(now - start >= ms(450)); + assert!(now - start <= ms(550)); +} + + +#[test] +fn unblocks() { + let (s1, r1) = bounded::(0); 
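+ // Both channels are zero-capacity, so an operation can complete only when another + // thread arrives at the matching side; the helpers spawned below unblock each select.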
+ let (s2, r2) = bounded::(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + s2.send(2).unwrap(); + }); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => panic!(), + i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), + _ => unreachable!(), + } + } + }); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + assert_eq!(r1.recv().unwrap(), 1); + }); + + let mut sel = Select::new(); + let oper1 = sel.send(&s1); + let oper2 = sel.send(&s2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + i if i == oper1 => oper.send(&s1, 1).unwrap(), + i if i == oper2 => panic!(), + _ => unreachable!(), + } + } + }); +} + +#[test] +fn both_ready() { + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + s1.send(1).unwrap(); + assert_eq!(r2.recv().unwrap(), 2); + }); + + for _ in 0..2 { + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.send(&s2); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), + i if i == oper2 => oper.send(&s2, 2).unwrap(), + _ => unreachable!(), + } + } + }); +} + +#[test] +fn loop_try() { + const RUNS: usize = 20; + + for _ in 0..RUNS { + let (s1, r1) = bounded::(0); + let (s2, r2) = bounded::(0); + let (s_end, r_end) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + loop { + let mut done = false; + + let mut sel = Select::new(); + let oper1 = sel.send(&s1); + let oper = sel.try_select(); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + i if i == oper1 => { + let _ = oper.send(&s1, 1); + done = true; + } + _ => unreachable!(), + } + } + if done { + break; + } + + let mut sel = Select::new(); + let oper1 = sel.recv(&r_end); + let oper = sel.try_select(); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + i if i == oper1 => { + let _ = oper.recv(&r_end); + done = true; + } + _ => unreachable!(), + } + } + if done { + break; + } + } + }); + + scope.spawn(|| { + loop { + if let Ok(x) = r2.try_recv() { + assert_eq!(x, 2); + break; + } + + let mut done = false; + let mut sel = Select::new(); + let oper1 = sel.recv(&r_end); + let oper = sel.try_select(); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + i if i == oper1 => { + let _ = oper.recv(&r_end); + done = true; + } + _ => unreachable!(), + } + } + if done { + break; + } + } + }); + + scope.spawn(|| { + thread::sleep(ms(500)); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.send(&s2); + let oper = sel.select_timeout(ms(1000)); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), + i if i == oper2 => assert!(oper.send(&s2, 2).is_ok()), + _ => unreachable!(), + } + } + + drop(s_end); + }); + }); + } +} + +#[test] +fn cloning1() { + crossbeam::scope(|scope| { + let (s1, r1) = unbounded::(); + let (_s2, r2) = unbounded::(); + let (s3, r3) = unbounded::<()>(); + + scope.spawn(move || { + r3.recv().unwrap(); + drop(s1.clone()); + assert!(r3.try_recv().is_err()); + s1.send(1).unwrap(); + r3.recv().unwrap(); + }); + + s3.send(()).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = 
sel.recv(&r2); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => drop(oper.recv(&r1)), + i if i == oper2 => drop(oper.recv(&r2)), + _ => unreachable!(), + } + + s3.send(()).unwrap(); + }); +} + +#[test] +fn cloning2() { + let (s1, r1) = unbounded::<()>(); + let (s2, r2) = unbounded::<()>(); + let (_s3, _r3) = unbounded::<()>(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => panic!(), + i if i == oper2 => drop(oper.recv(&r2)), + _ => unreachable!(), + } + }); + + thread::sleep(ms(500)); + drop(s1.clone()); + s2.send(()).unwrap(); + }) +} + +#[test] +fn preflight1() { + let (s, r) = unbounded(); + s.send(()).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => drop(oper.recv(&r)), + _ => unreachable!(), + } +} + +#[test] +fn preflight2() { + let (s, r) = unbounded(); + drop(s.clone()); + s.send(()).unwrap(); + drop(s); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => assert_eq!(oper.recv(&r), Ok(())), + _ => unreachable!(), + } + + assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); +} + +#[test] +fn preflight3() { + let (s, r) = unbounded(); + drop(s.clone()); + s.send(()).unwrap(); + drop(s); + r.recv().unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => assert!(oper.recv(&r).is_err()), + _ => unreachable!(), + } +} + +#[test] +fn duplicate_operations() { + let (s, r) = unbounded::(); + let hit = vec![Cell::new(false); 4]; + + while hit.iter().map(|h| h.get()).any(|hit| !hit) { + let mut sel = Select::new(); + let oper0 = sel.recv(&r); + let oper1 = sel.recv(&r); + let oper2 = sel.send(&s); + let oper3 = sel.send(&s); + let oper = sel.select(); + match oper.index() { + i if i == oper0 => { + assert!(oper.recv(&r).is_ok()); + hit[0].set(true); + } + i if i == oper1 => { + assert!(oper.recv(&r).is_ok()); + hit[1].set(true); + } + i if i == oper2 => { + assert!(oper.send(&s, 0).is_ok()); + hit[2].set(true); + } + i if i == oper3 => { + assert!(oper.send(&s, 0).is_ok()); + hit[3].set(true); + } + _ => unreachable!(), + } + } +} + +#[test] +fn nesting() { + let (s, r) = unbounded::(); + + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => { + assert!(oper.send(&s, 0).is_ok()); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => { + assert_eq!(oper.recv(&r), Ok(0)); + + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => { + assert!(oper.send(&s, 1).is_ok()); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => { + assert_eq!(oper.recv(&r), Ok(1)); + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } +} + +#[test] +fn stress_recv() { + const COUNT: usize = 10_000; + + let (s1, r1) = unbounded(); + let (s2, r2) = bounded(5); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + s1.send(i).unwrap(); + r3.recv().unwrap(); + 
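+ // r3 is used as an acknowledgement channel: the producer waits for the consumer to + // confirm each message before sending on the other flavor, keeping both sides in lockstep.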
+ s2.send(i).unwrap(); + r3.recv().unwrap(); + } + }); + + for i in 0..COUNT { + for _ in 0..2 { + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), + ix if ix == oper2 => assert_eq!(oper.recv(&r2), Ok(i)), + _ => unreachable!(), + } + + s3.send(()).unwrap(); + } + } + }); +} + +#[test] +fn stress_send() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + assert_eq!(r1.recv().unwrap(), i); + assert_eq!(r2.recv().unwrap(), i); + r3.recv().unwrap(); + } + }); + + for i in 0..COUNT { + for _ in 0..2 { + let mut sel = Select::new(); + let oper1 = sel.send(&s1); + let oper2 = sel.send(&s2); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert!(oper.send(&s1, i).is_ok()), + ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), + _ => unreachable!(), + } + } + s3.send(()).unwrap(); + } + }); +} + +#[test] +fn stress_mixed() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + s1.send(i).unwrap(); + assert_eq!(r2.recv().unwrap(), i); + r3.recv().unwrap(); + } + }); + + for i in 0..COUNT { + for _ in 0..2 { + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.send(&s2); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), + ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), + _ => unreachable!(), + } + } + s3.send(()).unwrap(); + } + }); +} + +#[test] +fn stress_timeout_two_threads() { + const COUNT: usize = 20; + + let (s, r) = bounded(2); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(500)); + } + + let mut done = false; + while !done { + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper = sel.select_timeout(ms(100)); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + ix if ix == oper1 => { + assert!(oper.send(&s, i).is_ok()); + break; + } + _ => unreachable!(), + } + } + } + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(500)); + } + + let mut done = false; + while !done { + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select_timeout(ms(100)); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + ix if ix == oper1 => { + assert_eq!(oper.recv(&r), Ok(i)); + done = true; + } + _ => unreachable!(), + } + } + } + } + }); + }); +} + +#[test] +fn send_recv_same_channel() { + let (s, r) = bounded::(0); + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper2 = sel.recv(&r); + let oper = sel.select_timeout(ms(100)); + match oper { + Err(_) => {} + Ok(oper) => match oper.index() { + ix if ix == oper1 => panic!(), + ix if ix == oper2 => panic!(), + _ => unreachable!(), + } + } + + let (s, r) = unbounded::(); + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper2 = sel.recv(&r); + let oper = sel.select_timeout(ms(100)); + match oper { + Err(_) => panic!(), + Ok(oper) => match oper.index() { + ix if ix == oper1 => assert!(oper.send(&s, 0).is_ok()), + ix if ix == oper2 => panic!(), + _ => unreachable!(), + } + } +} + +#[test] +fn matching() { + const THREADS: 
usize = 44; + + let (s, r) = &bounded::(0); + + crossbeam::scope(|scope| { + for i in 0..THREADS { + scope.spawn(move || { + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper2 = sel.send(&s); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), + ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), + _ => unreachable!(), + } + }); + } + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn matching_with_leftover() { + const THREADS: usize = 55; + + let (s, r) = &bounded::(0); + + crossbeam::scope(|scope| { + for i in 0..THREADS { + scope.spawn(move || { + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper2 = sel.send(&s); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), + ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), + _ => unreachable!(), + } + }); + } + s.send(!0).unwrap(); + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn channel_through_channel() { + const COUNT: usize = 1000; + + type T = Box; + + for cap in 0..3 { + let (s, r) = bounded::(cap); + + crossbeam::scope(|scope| { + scope.spawn(move || { + let mut s = s; + + for _ in 0..COUNT { + let (new_s, new_r) = bounded(cap); + let mut new_r: T = Box::new(Some(new_r)); + + { + let mut sel = Select::new(); + let oper1 = sel.send(&s); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert!(oper.send(&s, new_r).is_ok()), + _ => unreachable!(), + } + } + + s = new_s; + } + }); + + scope.spawn(move || { + let mut r = r; + + for _ in 0..COUNT { + let new = { + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => { + oper.recv(&r) + .unwrap() + .downcast_mut::>>() + .unwrap() + .take() + .unwrap() + } + _ => unreachable!(), + } + }; + r = new; + } + }); + }); + } +} + +#[test] +fn linearizable_try() { + const COUNT: usize = 100_000; + + for step in 0..2 { + let (start_s, start_r) = bounded::<()>(0); + let (end_s, end_r) = bounded::<()>(0); + + let ((s1, r1), (s2, r2)) = if step == 0 { + (bounded::(1), bounded::(1)) + } else { + (unbounded::(), unbounded::()) + }; + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..COUNT { + start_s.send(()).unwrap(); + + s1.send(1).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.try_select(); + match oper { + Err(_) => unreachable!(), + Ok(oper) => match oper.index() { + ix if ix == oper1 => assert!(oper.recv(&r1).is_ok()), + ix if ix == oper2 => assert!(oper.recv(&r2).is_ok()), + _ => unreachable!(), + } + } + + end_s.send(()).unwrap(); + let _ = r2.try_recv(); + } + }); + + for _ in 0..COUNT { + start_r.recv().unwrap(); + + s2.send(1).unwrap(); + let _ = r1.try_recv(); + + end_r.recv().unwrap(); + } + }); + } +} + +#[test] +fn linearizable_timeout() { + const COUNT: usize = 100_000; + + for step in 0..2 { + let (start_s, start_r) = bounded::<()>(0); + let (end_s, end_r) = bounded::<()>(0); + + let ((s1, r1), (s2, r2)) = if step == 0 { + (bounded::(1), bounded::(1)) + } else { + (unbounded::(), unbounded::()) + }; + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..COUNT { + start_s.send(()).unwrap(); + + s1.send(1).unwrap(); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper = sel.select_timeout(ms(0)); + match oper { + Err(_) => unreachable!(), + 
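+ // A message was sent on s1 just above, so even a zero timeout must find an operation + // ready; timing out here would mean a completed send was not yet visible to select, + // i.e. a linearizability violation.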
Ok(oper) => match oper.index() { + ix if ix == oper1 => assert!(oper.recv(&r1).is_ok()), + ix if ix == oper2 => assert!(oper.recv(&r2).is_ok()), + _ => unreachable!(), + } + } + + end_s.send(()).unwrap(); + let _ = r2.try_recv(); + } + }); + + for _ in 0..COUNT { + start_r.recv().unwrap(); + + s2.send(1).unwrap(); + let _ = r1.try_recv(); + + end_r.recv().unwrap(); + } + }); + } +} + +#[test] +fn fairness1() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded::<()>(COUNT); + let (s2, r2) = unbounded::<()>(); + + for _ in 0..COUNT { + s1.send(()).unwrap(); + s2.send(()).unwrap(); + } + + let hits = vec![Cell::new(0usize); 4]; + for _ in 0..COUNT { + let after = after(ms(0)); + let tick = tick(ms(0)); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper3 = sel.recv(&after); + let oper4 = sel.recv(&tick); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => { + oper.recv(&r1).unwrap(); + hits[0].set(hits[0].get() + 1); + } + i if i == oper2 => { + oper.recv(&r2).unwrap(); + hits[1].set(hits[1].get() + 1); + } + i if i == oper3 => { + oper.recv(&after).unwrap(); + hits[2].set(hits[2].get() + 1); + } + i if i == oper4 => { + oper.recv(&tick).unwrap(); + hits[3].set(hits[3].get() + 1); + } + _ => unreachable!(), + } + } + assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2)); +} + +#[test] +fn fairness2() { + const COUNT: usize = 10_000; + + let (s1, r1) = unbounded::<()>(); + let (s2, r2) = bounded::<()>(1); + let (s3, r3) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..COUNT { + let mut sel = Select::new(); + let mut oper1 = None; + let mut oper2 = None; + if s1.is_empty() { + oper1 = Some(sel.send(&s1)); + } + if s2.is_empty() { + oper2 = Some(sel.send(&s2)); + } + let oper3 = sel.send(&s3); + let oper = sel.select(); + match oper.index() { + i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()), + i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()), + i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()), + _ => unreachable!(), + } + } + }); + + let hits = vec![Cell::new(0usize); 3]; + for _ in 0..COUNT { + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.recv(&r2); + let oper3 = sel.recv(&r3); + let oper = sel.select(); + match oper.index() { + i if i == oper1 => { + oper.recv(&r1).unwrap(); + hits[0].set(hits[0].get() + 1); + } + i if i == oper2 => { + oper.recv(&r2).unwrap(); + hits[1].set(hits[1].get() + 1); + } + i if i == oper3 => { + oper.recv(&r3).unwrap(); + hits[2].set(hits[2].get() + 1); + } + _ => unreachable!(), + } + } + assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 10)); + }); +} + +#[test] +fn sync_and_clone() { + const THREADS: usize = 20; + + let (s, r) = &bounded::(0); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper2 = sel.send(&s); + let sel = &sel; + + crossbeam::scope(|scope| { + for i in 0..THREADS { + scope.spawn(move || { + let mut sel = sel.clone(); + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), + ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), + _ => unreachable!(), + } + }); + } + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn send_and_clone() { + const THREADS: usize = 20; + + let (s, r) = &bounded::(0); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r); + let oper2 = sel.send(&s); + + crossbeam::scope(|scope| { + for i in 0..THREADS { + let mut sel = 
sel.clone(); + scope.spawn(move || { + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), + ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), + _ => unreachable!(), + } + }); + } + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn reuse() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + s1.send(i).unwrap(); + assert_eq!(r2.recv().unwrap(), i); + r3.recv().unwrap(); + } + }); + + let mut sel = Select::new(); + let oper1 = sel.recv(&r1); + let oper2 = sel.send(&s2); + + for i in 0..COUNT { + for _ in 0..2 { + let oper = sel.select(); + match oper.index() { + ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), + ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), + _ => unreachable!(), + } + } + s3.send(()).unwrap(); + } + }); +} diff --git a/crossbeam-channel/tests/select_macro.rs b/crossbeam-channel/tests/select_macro.rs new file mode 100644 index 000000000..371c82c20 --- /dev/null +++ b/crossbeam-channel/tests/select_macro.rs @@ -0,0 +1,1415 @@ +//! Tests for the `select!` macro. + +#![deny(unsafe_code)] + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; + +use std::any::Any; +use std::cell::Cell; +use std::ops::Deref; +use std::thread; +use std::time::{Duration, Instant}; + +use crossbeam_channel::{after, bounded, never, unbounded, tick}; +use crossbeam_channel::{Sender, Receiver, RecvError, SendError, TryRecvError}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke1() { + let (s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + s1.send(1).unwrap(); + + select! { + recv(r1) -> v => assert_eq!(v, Ok(1)), + recv(r2) -> _ => panic!(), + } + + s2.send(2).unwrap(); + + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> v => assert_eq!(v, Ok(2)), + } +} + +#[test] +fn smoke2() { + let (_s1, r1) = unbounded::(); + let (_s2, r2) = unbounded::(); + let (_s3, r3) = unbounded::(); + let (_s4, r4) = unbounded::(); + let (s5, r5) = unbounded::(); + + s5.send(5).unwrap(); + + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> _ => panic!(), + recv(r3) -> _ => panic!(), + recv(r4) -> _ => panic!(), + recv(r5) -> v => assert_eq!(v, Ok(5)), + } +} + +#[test] +fn disconnected() { + let (s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + drop(s1); + thread::sleep(ms(500)); + s2.send(5).unwrap(); + }); + + select! { + recv(r1) -> v => assert!(v.is_err()), + recv(r2) -> _ => panic!(), + default(ms(1000)) => panic!(), + } + + r2.recv().unwrap(); + }); + + select! { + recv(r1) -> v => assert!(v.is_err()), + recv(r2) -> _ => panic!(), + default(ms(1000)) => panic!(), + } + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + drop(s2); + }); + + select! { + recv(r2) -> v => assert!(v.is_err()), + default(ms(1000)) => panic!(), + } + }); +} + +#[test] +fn default() { + let (s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> _ => panic!(), + default => {} + } + + drop(s1); + + select! { + recv(r1) -> v => assert!(v.is_err()), + recv(r2) -> _ => panic!(), + default => panic!(), + } + + s2.send(2).unwrap(); + + select! { + recv(r2) -> v => assert_eq!(v, Ok(2)), + default => panic!(), + } + + select! { + recv(r2) -> _ => panic!(), + default => {}, + } + + select! 
{ + default => {}, + } +} + +#[test] +fn timeout() { + let (_s1, r1) = unbounded::(); + let (s2, r2) = unbounded::(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(1500)); + s2.send(2).unwrap(); + }); + + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> _ => panic!(), + default(ms(1000)) => {}, + } + + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> v => assert_eq!(v, Ok(2)), + default(ms(1000)) => panic!(), + } + }); + + crossbeam::scope(|scope| { + let (s, r) = unbounded::(); + + scope.spawn(move || { + thread::sleep(ms(500)); + drop(s); + }); + + select! { + default(ms(1000)) => { + select! { + recv(r) -> v => assert!(v.is_err()), + default => panic!(), + } + } + } + }); +} + +#[test] +fn default_when_disconnected() { + let (_, r) = unbounded::(); + + select! { + recv(r) -> res => assert!(res.is_err()), + default => panic!(), + } + + let (_, r) = unbounded::(); + + select! { + recv(r) -> res => assert!(res.is_err()), + default(ms(1000)) => panic!(), + } + + let (s, _) = bounded::(0); + + select! { + send(s, 0) -> res => assert!(res.is_err()), + default => panic!(), + } + + let (s, _) = bounded::(0); + + select! { + send(s, 0) -> res => assert!(res.is_err()), + default(ms(1000)) => panic!(), + } +} + +#[test] +fn default_only() { + let start = Instant::now(); + select! { + default => {} + } + let now = Instant::now(); + assert!(now - start <= ms(50)); + + let start = Instant::now(); + select! { + default(ms(500)) => {} + } + let now = Instant::now(); + assert!(now - start >= ms(450)); + assert!(now - start <= ms(550)); +} + +#[test] +fn unblocks() { + let (s1, r1) = bounded::(0); + let (s2, r2) = bounded::(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + s2.send(2).unwrap(); + }); + + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> v => assert_eq!(v, Ok(2)), + default(ms(1000)) => panic!(), + } + }); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + assert_eq!(r1.recv().unwrap(), 1); + }); + + select! { + send(s1, 1) -> _ => {}, + send(s2, 2) -> _ => panic!(), + default(ms(1000)) => panic!(), + } + }); +} + +#[test] +fn both_ready() { + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(500)); + s1.send(1).unwrap(); + assert_eq!(r2.recv().unwrap(), 2); + }); + + for _ in 0..2 { + select! { + recv(r1) -> v => assert_eq!(v, Ok(1)), + send(s2, 2) -> _ => {}, + } + } + }); +} + +#[test] +fn loop_try() { + const RUNS: usize = 20; + + for _ in 0..RUNS { + let (s1, r1) = bounded::(0); + let (s2, r2) = bounded::(0); + let (s_end, r_end) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + loop { + select! { + send(s1, 1) -> _ => break, + default => {} + } + + select! { + recv(r_end) -> _ => break, + default => {} + } + } + }); + + scope.spawn(|| { + loop { + if let Ok(x) = r2.try_recv() { + assert_eq!(x, 2); + break; + } + + select! { + recv(r_end) -> _ => break, + default => {} + } + } + }); + + scope.spawn(|| { + thread::sleep(ms(500)); + + select! 
{ + recv(r1) -> v => assert_eq!(v, Ok(1)), + send(s2, 2) -> _ => {}, + default(ms(500)) => panic!(), + } + + drop(s_end); + }); + }); + } +} + +#[test] +fn cloning1() { + crossbeam::scope(|scope| { + let (s1, r1) = unbounded::(); + let (_s2, r2) = unbounded::(); + let (s3, r3) = unbounded::<()>(); + + scope.spawn(move || { + r3.recv().unwrap(); + drop(s1.clone()); + assert_eq!(r3.try_recv(), Err(TryRecvError::Empty)); + s1.send(1).unwrap(); + r3.recv().unwrap(); + }); + + s3.send(()).unwrap(); + + select! { + recv(r1) -> _ => {}, + recv(r2) -> _ => {}, + } + + s3.send(()).unwrap(); + }); +} + +#[test] +fn cloning2() { + let (s1, r1) = unbounded::<()>(); + let (s2, r2) = unbounded::<()>(); + let (_s3, _r3) = unbounded::<()>(); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + recv(r1) -> _ => panic!(), + recv(r2) -> _ => {}, + } + }); + + thread::sleep(ms(500)); + drop(s1.clone()); + s2.send(()).unwrap(); + }) +} + +#[test] +fn preflight1() { + let (s, r) = unbounded(); + s.send(()).unwrap(); + + select! { + recv(r) -> _ => {} + } +} + +#[test] +fn preflight2() { + let (s, r) = unbounded(); + drop(s.clone()); + s.send(()).unwrap(); + drop(s); + + select! { + recv(r) -> v => assert!(v.is_ok()), + } + assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); +} + +#[test] +fn preflight3() { + let (s, r) = unbounded(); + drop(s.clone()); + s.send(()).unwrap(); + drop(s); + r.recv().unwrap(); + + select! { + recv(r) -> v => assert!(v.is_err()) + } +} + +#[test] +fn duplicate_operations() { + let (s, r) = unbounded::(); + let mut hit = [false; 4]; + + while hit.iter().any(|hit| !hit) { + select! { + recv(r) -> _ => hit[0] = true, + recv(r) -> _ => hit[1] = true, + send(s, 0) -> _ => hit[2] = true, + send(s, 0) -> _ => hit[3] = true, + } + } +} + +#[test] +fn nesting() { + let (s, r) = unbounded::(); + + select! { + send(s, 0) -> _ => { + select! { + recv(r) -> v => { + assert_eq!(v, Ok(0)); + select! { + send(s, 1) -> _ => { + select! { + recv(r) -> v => { + assert_eq!(v, Ok(1)); + } + } + } + } + } + } + } + } +} + +#[test] +#[should_panic(expected = "send panicked")] +fn panic_sender() { + fn get() -> Sender { + panic!("send panicked") + } + + #[allow(unreachable_code)] + { + select! { + send(get(), panic!()) -> _ => {} + } + } +} + +#[test] +#[should_panic(expected = "recv panicked")] +fn panic_receiver() { + fn get() -> Receiver { + panic!("recv panicked") + } + + select! { + recv(get()) -> _ => {} + } +} + +#[test] +fn stress_recv() { + const COUNT: usize = 10_000; + + let (s1, r1) = unbounded(); + let (s2, r2) = bounded(5); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + s1.send(i).unwrap(); + r3.recv().unwrap(); + + s2.send(i).unwrap(); + r3.recv().unwrap(); + } + }); + + for i in 0..COUNT { + for _ in 0..2 { + select! { + recv(r1) -> v => assert_eq!(v, Ok(i)), + recv(r2) -> v => assert_eq!(v, Ok(i)), + } + + s3.send(()).unwrap(); + } + } + }); +} + +#[test] +fn stress_send() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + assert_eq!(r1.recv().unwrap(), i); + assert_eq!(r2.recv().unwrap(), i); + r3.recv().unwrap(); + } + }); + + for i in 0..COUNT { + for _ in 0..2 { + select! 
{ + send(s1, i) -> _ => {}, + send(s2, i) -> _ => {}, + } + } + s3.send(()).unwrap(); + } + }); +} + +#[test] +fn stress_mixed() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded(0); + let (s2, r2) = bounded(0); + let (s3, r3) = bounded(100); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + s1.send(i).unwrap(); + assert_eq!(r2.recv().unwrap(), i); + r3.recv().unwrap(); + } + }); + + for i in 0..COUNT { + for _ in 0..2 { + select! { + recv(r1) -> v => assert_eq!(v, Ok(i)), + send(s2, i) -> _ => {}, + } + } + s3.send(()).unwrap(); + } + }); +} + +#[test] +fn stress_timeout_two_threads() { + const COUNT: usize = 20; + + let (s, r) = bounded(2); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(500)); + } + + loop { + select! { + send(s, i) -> _ => break, + default(ms(100)) => {} + } + } + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(500)); + } + + loop { + select! { + recv(r) -> v => { + assert_eq!(v, Ok(i)); + break; + } + default(ms(100)) => {} + } + } + } + }); + }); +} + +#[test] +fn send_recv_same_channel() { + let (s, r) = bounded::(0); + select! { + send(s, 0) -> _ => panic!(), + recv(r) -> _ => panic!(), + default(ms(500)) => {} + } + + let (s, r) = unbounded::(); + select! { + send(s, 0) -> _ => {}, + recv(r) -> _ => panic!(), + default(ms(500)) => panic!(), + } +} + +#[test] +fn matching() { + const THREADS: usize = 44; + + let (s, r) = &bounded::(0); + + crossbeam::scope(|scope| { + for i in 0..THREADS { + scope.spawn(move || { + select! { + recv(r) -> v => assert_ne!(v.unwrap(), i), + send(s, i) -> _ => {}, + } + }); + } + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn matching_with_leftover() { + const THREADS: usize = 55; + + let (s, r) = &bounded::(0); + + crossbeam::scope(|scope| { + for i in 0..THREADS { + scope.spawn(move || { + select! { + recv(r) -> v => assert_ne!(v.unwrap(), i), + send(s, i) -> _ => {}, + } + }); + } + s.send(!0).unwrap(); + }); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn channel_through_channel() { + const COUNT: usize = 1000; + + type T = Box; + + for cap in 0..3 { + let (s, r) = bounded::(cap); + + crossbeam::scope(|scope| { + scope.spawn(move || { + let mut s = s; + + for _ in 0..COUNT { + let (new_s, new_r) = bounded(cap); + let mut new_r: T = Box::new(Some(new_r)); + + select! { + send(s, new_r) -> _ => {} + } + + s = new_s; + } + }); + + scope.spawn(move || { + let mut r = r; + + for _ in 0..COUNT { + r = select! { + recv(r) -> mut msg => { + msg.unwrap() + .downcast_mut::>>() + .unwrap() + .take() + .unwrap() + } + } + } + }); + }); + } +} + +#[test] +fn linearizable_default() { + const COUNT: usize = 100_000; + + for step in 0..2 { + let (start_s, start_r) = bounded::<()>(0); + let (end_s, end_r) = bounded::<()>(0); + + let ((s1, r1), (s2, r2)) = if step == 0 { + (bounded::(1), bounded::(1)) + } else { + (unbounded::(), unbounded::()) + }; + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..COUNT { + start_s.send(()).unwrap(); + + s1.send(1).unwrap(); + select! 
{ + recv(r1) -> _ => {} + recv(r2) -> _ => {} + default => unreachable!() + } + + end_s.send(()).unwrap(); + let _ = r2.try_recv(); + } + }); + + for _ in 0..COUNT { + start_r.recv().unwrap(); + + s2.send(1).unwrap(); + let _ = r1.try_recv(); + + end_r.recv().unwrap(); + } + }); + } +} + +#[test] +fn linearizable_timeout() { + const COUNT: usize = 100_000; + + for step in 0..2 { + let (start_s, start_r) = bounded::<()>(0); + let (end_s, end_r) = bounded::<()>(0); + + let ((s1, r1), (s2, r2)) = if step == 0 { + (bounded::(1), bounded::(1)) + } else { + (unbounded::(), unbounded::()) + }; + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..COUNT { + start_s.send(()).unwrap(); + + s1.send(1).unwrap(); + select! { + recv(r1) -> _ => {} + recv(r2) -> _ => {} + default(ms(0)) => unreachable!() + } + + end_s.send(()).unwrap(); + let _ = r2.try_recv(); + } + }); + + for _ in 0..COUNT { + start_r.recv().unwrap(); + + s2.send(1).unwrap(); + let _ = r1.try_recv(); + + end_r.recv().unwrap(); + } + }); + } +} + +#[test] +fn fairness1() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded::<()>(COUNT); + let (s2, r2) = unbounded::<()>(); + + for _ in 0..COUNT { + s1.send(()).unwrap(); + s2.send(()).unwrap(); + } + + let mut hits = [0usize; 4]; + for _ in 0..COUNT { + select! { + recv(r1) -> _ => hits[0] += 1, + recv(r2) -> _ => hits[1] += 1, + recv(after(ms(0))) -> _ => hits[2] += 1, + recv(tick(ms(0))) -> _ => hits[3] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); +} + +#[test] +fn fairness2() { + const COUNT: usize = 10_000; + + let (s1, r1) = unbounded::<()>(); + let (s2, r2) = bounded::<()>(1); + let (s3, r3) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + let (hole, _r) = bounded(0); + + for _ in 0..COUNT { + let s1 = if s1.is_empty() { &s1 } else { &hole }; + let s2 = if s2.is_empty() { &s2 } else { &hole }; + + select! { + send(s1, ()) -> res => assert!(res.is_ok()), + send(s2, ()) -> res => assert!(res.is_ok()), + send(s3, ()) -> res => assert!(res.is_ok()), + } + } + }); + + let hits = vec![Cell::new(0usize); 3]; + for _ in 0..COUNT { + select! { + recv(r1) -> _ => hits[0].set(hits[0].get() + 1), + recv(r2) -> _ => hits[1].set(hits[1].get() + 1), + recv(r3) -> _ => hits[2].set(hits[2].get() + 1), + } + } + assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 10)); + }); +} + +#[test] +fn fairness_recv() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded::<()>(COUNT); + let (s2, r2) = unbounded::<()>(); + + for _ in 0..COUNT { + s1.send(()).unwrap(); + s2.send(()).unwrap(); + } + + let mut hits = [0usize; 2]; + while hits[0] + hits[1] < COUNT { + select! { + recv(r1) -> _ => hits[0] += 1, + recv(r2) -> _ => hits[1] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / 4)); +} + +#[test] +fn fairness_send() { + const COUNT: usize = 10_000; + + let (s1, _r1) = bounded::<()>(COUNT); + let (s2, _r2) = unbounded::<()>(); + + let mut hits = [0usize; 2]; + for _ in 0..COUNT { + select! { + send(s1, ()) -> _ => hits[0] += 1, + send(s2, ()) -> _ => hits[1] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / 4)); +} + +#[test] +fn references() { + let (s, r) = unbounded::(); + select! { + send(s, 0) -> _ => {} + recv(r) -> _ => {} + } + select! { + send(&&&&s, 0) -> _ => {} + recv(&&&&r) -> _ => {} + } + select! { + recv(Some(&r).unwrap_or(&never())) -> _ => {}, + default => {} + } + select! 
{ + recv(Some(r).unwrap_or(never())) -> _ => {}, + default => {} + } +} + +#[test] +fn case_blocks() { + let (s, r) = unbounded::(); + + select! { + recv(r) -> _ => 3.0, + recv(r) -> _ => loop { + unreachable!() + }, + recv(r) -> _ => match 7 + 3 { + _ => unreachable!() + }, + default => 7. + }; + + select! { + recv(r) -> msg => if msg.is_ok() { + unreachable!() + }, + default => () + } + + drop(s); +} + +#[test] +fn move_handles() { + let (s, r) = unbounded::(); + select! { + recv((move || r)()) -> _ => {} + send((move || s)(), 0) -> _ => {} + } +} + +#[test] +fn infer_types() { + let (s, r) = unbounded(); + select! { + recv(r) -> _ => {} + default => {} + } + s.send(()).unwrap(); + + let (s, r) = unbounded(); + select! { + send(s, ()) -> _ => {} + } + r.recv().unwrap(); +} + +#[test] +fn default_syntax() { + let (s, r) = bounded::(0); + + select! { + recv(r) -> _ => panic!(), + default => {} + } + select! { + send(s, 0) -> _ => panic!(), + default() => {} + } + select! { + default => {} + } + select! { + default() => {} + } +} + +#[test] +fn same_variable_name() { + let (_, r) = unbounded::(); + select! { + recv(r) -> r => assert!(r.is_err()), + } +} + +#[test] +fn handles_on_heap() { + let (s, r) = unbounded::(); + let (s, r) = (Box::new(s), Box::new(r)); + + select! { + send(*s, 0) -> _ => {} + recv(*r) -> _ => {} + default => {} + } + + drop(s); + drop(r); +} + +#[test] +fn once_blocks() { + let (s, r) = unbounded::(); + + let once = Box::new(()); + select! { + send(s, 0) -> _ => drop(once), + } + + let once = Box::new(()); + select! { + recv(r) -> _ => drop(once), + } + + let once1 = Box::new(()); + let once2 = Box::new(()); + select! { + send(s, 0) -> _ => drop(once1), + default => drop(once2), + } + + let once1 = Box::new(()); + let once2 = Box::new(()); + select! { + recv(r) -> _ => drop(once1), + default => drop(once2), + } + + let once1 = Box::new(()); + let once2 = Box::new(()); + select! { + recv(r) -> _ => drop(once1), + send(s, 0) -> _ => drop(once2), + } +} + +#[test] +fn once_receiver() { + let (_, r) = unbounded::(); + + let once = Box::new(()); + let get = move || { + drop(once); + r + }; + + select! { + recv(get()) -> _ => {} + } +} + +#[test] +fn once_sender() { + let (s, _) = unbounded::(); + + let once = Box::new(()); + let get = move || { + drop(once); + s + }; + + select! { + send(get(), 5) -> _ => {} + } +} + +#[test] +fn parse_nesting() { + let (_, r) = unbounded::(); + + select! { + recv(r) -> _ => {} + recv(r) -> _ => { + select! { + recv(r) -> _ => {} + recv(r) -> _ => { + select! { + recv(r) -> _ => {} + recv(r) -> _ => { + select! { + default => {} + } + } + } + } + } + } + } +} + +#[test] +fn evaluate() { + let (s, r) = unbounded::(); + + let v = select! { + recv(r) -> _ => "foo".into(), + send(s, 0) -> _ => "bar".to_owned(), + default => "baz".to_string(), + }; + assert_eq!(v, "bar"); + + let v = select! { + recv(r) -> _ => "foo".into(), + default => "baz".to_string(), + }; + assert_eq!(v, "foo"); + + let v = select! { + recv(r) -> _ => "foo".into(), + default => "baz".to_string(), + }; + assert_eq!(v, "baz"); +} + +#[test] +fn deref() { + use crossbeam_channel as cc; + + struct Sender(cc::Sender); + struct Receiver(cc::Receiver); + + impl Deref for Receiver { + type Target = cc::Receiver; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl Deref for Sender { + type Target = cc::Sender; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + let (s, r) = bounded::(0); + let (s, r) = (Sender(s), Receiver(r)); + + select! 
{ + send(s, 0) -> _ => panic!(), + recv(r) -> _ => panic!(), + default => {} + } +} + +#[test] +fn result_types() { + let (s, _) = bounded::(0); + let (_, r) = bounded::(0); + + select! { + recv(r) -> res => drop::>(res), + } + select! { + recv(r) -> res => drop::>(res), + default => {} + } + select! { + recv(r) -> res => drop::>(res), + default(ms(0)) => {} + } + + select! { + send(s, 0) -> res => drop::>>(res), + } + select! { + send(s, 0) -> res => drop::>>(res), + default => {} + } + select! { + send(s, 0) -> res => drop::>>(res), + default(ms(0)) => {} + } + + select! { + send(s, 0) -> res => drop::>>(res), + recv(r) -> res => drop::>(res), + } +} + +#[test] +fn try_recv() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + recv(r) -> _ => panic!(), + default => {} + } + thread::sleep(ms(1500)); + select! { + recv(r) -> v => assert_eq!(v, Ok(7)), + default => panic!(), + } + thread::sleep(ms(500)); + select! { + recv(r) -> v => assert_eq!(v, Err(RecvError)), + default => panic!(), + } + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + select! { + send(s, 7) -> res => res.unwrap(), + } + }); + }); +} + +#[test] +fn recv() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + recv(r) -> v => assert_eq!(v, Ok(7)), + } + thread::sleep(ms(1000)); + select! { + recv(r) -> v => assert_eq!(v, Ok(8)), + } + thread::sleep(ms(1000)); + select! { + recv(r) -> v => assert_eq!(v, Ok(9)), + } + select! { + recv(r) -> v => assert_eq!(v, Err(RecvError)), + } + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + select! { + send(s, 7) -> res => res.unwrap(), + } + select! { + send(s, 8) -> res => res.unwrap(), + } + select! { + send(s, 9) -> res => res.unwrap(), + } + }); + }); +} + +#[test] +fn recv_timeout() { + let (s, r) = bounded::(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + recv(r) -> _ => panic!(), + default(ms(1000)) => {} + } + select! { + recv(r) -> v => assert_eq!(v, Ok(7)), + default(ms(1000)) => panic!(), + } + select! { + recv(r) -> v => assert_eq!(v, Err(RecvError)), + default(ms(1000)) => panic!(), + } + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + select! { + send(s, 7) -> res => res.unwrap(), + } + }); + }); +} + +#[test] +fn try_send() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + send(s, 7) -> _ => panic!(), + default => {} + } + thread::sleep(ms(1500)); + select! { + send(s, 8) -> res => res.unwrap(), + default => panic!(), + } + thread::sleep(ms(500)); + select! { + send(s, 8) -> res => assert_eq!(res, Err(SendError(8))), + default => panic!(), + } + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + select! { + recv(r) -> v => assert_eq!(v, Ok(8)), + } + }); + }); +} + +#[test] +fn send() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + send(s, 7) -> res => res.unwrap(), + } + thread::sleep(ms(1000)); + select! { + send(s, 8) -> res => res.unwrap(), + } + thread::sleep(ms(1000)); + select! { + send(s, 9) -> res => res.unwrap(), + } + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + select! { + recv(r) -> v => assert_eq!(v, Ok(7)), + } + select! { + recv(r) -> v => assert_eq!(v, Ok(8)), + } + select! { + recv(r) -> v => assert_eq!(v, Ok(9)), + } + }); + }); +} + +#[test] +fn send_timeout() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! 
{ + send(s, 7) -> _ => panic!(), + default(ms(1000)) => {} + } + select! { + send(s, 8) -> res => res.unwrap(), + default(ms(1000)) => panic!(), + } + select! { + send(s, 9) -> res => assert_eq!(res, Err(SendError(9))), + default(ms(1000)) => panic!(), + } + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + select! { + recv(r) -> v => assert_eq!(v, Ok(8)), + } + }); + }); +} + +#[test] +fn disconnect_wakes_sender() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + send(s, ()) -> res => assert_eq!(res, Err(SendError(()))), + } + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(r); + }); + }); +} + +#[test] +fn disconnect_wakes_receiver() { + let (s, r) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + select! { + recv(r) -> res => assert_eq!(res, Err(RecvError)), + } + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(s); + }); + }); +} diff --git a/crossbeam-channel/tests/thread_locals.rs b/crossbeam-channel/tests/thread_locals.rs new file mode 100644 index 000000000..236857f62 --- /dev/null +++ b/crossbeam-channel/tests/thread_locals.rs @@ -0,0 +1,53 @@ +//! Tests that make sure accessing thread-locals while exiting the thread doesn't cause panics. + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; + +use std::thread; +use std::time::Duration; + +use crossbeam_channel::unbounded; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn use_while_exiting() { + struct Foo; + + impl Drop for Foo { + fn drop(&mut self) { + // A blocking operation after the thread-locals have been dropped. This will attempt to + // use the thread-locals and must not panic. + let (_s, r) = unbounded::<()>(); + select! { + recv(r) -> _ => {} + default(ms(100)) => {} + } + } + } + + thread_local! { + static FOO: Foo = Foo; + } + + let (s, r) = unbounded::<()>(); + + crossbeam::scope(|scope| { + scope.spawn(|| { + // First initialize `FOO`, then the thread-locals related to crossbeam-channel and + // crossbeam-epoch. + FOO.with(|_| ()); + r.recv().unwrap(); + // At thread exit, the crossbeam-related thread-locals get dropped first and `FOO` is + // dropped last. + }); + + scope.spawn(|| { + thread::sleep(ms(100)); + s.send(()).unwrap(); + }); + }); +} diff --git a/crossbeam-channel/tests/tick.rs b/crossbeam-channel/tests/tick.rs new file mode 100644 index 000000000..04fb35b68 --- /dev/null +++ b/crossbeam-channel/tests/tick.rs @@ -0,0 +1,291 @@ +//! Tests for the tick channel flavor. + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; +extern crate rand; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::thread; +use std::time::{Duration, Instant}; + +use crossbeam_channel::{after, tick, TryRecvError}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn fire() { + let start = Instant::now(); + let r = tick(ms(50)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + thread::sleep(ms(100)); + + let fired = r.try_recv().unwrap(); + assert!(start < fired); + assert!(fired - start >= ms(50)); + + let now = Instant::now(); + assert!(fired < now); + assert!(now - fired >= ms(50)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + + select! { + recv(r) -> _ => panic!(), + default => {} + } + + select! 
{ + recv(r) -> _ => {} + recv(tick(ms(200))) -> _ => panic!(), + } +} + +#[test] +fn intervals() { + let start = Instant::now(); + let r = tick(ms(50)); + + let t1 = r.recv().unwrap(); + assert!(start + ms(50) <= t1); + assert!(start + ms(100) > t1); + + thread::sleep(ms(300)); + let t2 = r.try_recv().unwrap(); + assert!(start + ms(100) <= t2); + assert!(start + ms(150) > t2); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + let t3 = r.recv().unwrap(); + assert!(start + ms(400) <= t3); + assert!(start + ms(450) > t3); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn capacity() { + const COUNT: usize = 10; + + for i in 0..COUNT { + let r = tick(ms(i as u64)); + assert_eq!(r.capacity(), Some(1)); + } +} + +#[test] +fn len_empty_full() { + let r = tick(ms(50)); + + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + thread::sleep(ms(100)); + + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), true); + + r.try_recv().unwrap(); + + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); +} + +#[test] +fn try_recv() { + let r = tick(ms(200)); + assert!(r.try_recv().is_err()); + + thread::sleep(ms(100)); + assert!(r.try_recv().is_err()); + + thread::sleep(ms(200)); + assert!(r.try_recv().is_ok()); + assert!(r.try_recv().is_err()); + + thread::sleep(ms(200)); + assert!(r.try_recv().is_ok()); + assert!(r.try_recv().is_err()); +} + +#[test] +fn recv() { + let start = Instant::now(); + let r = tick(ms(50)); + + let fired = r.recv().unwrap(); + assert!(start < fired); + assert!(fired - start >= ms(50)); + + let now = Instant::now(); + assert!(fired < now); + assert!(now - fired < fired - start); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn recv_timeout() { + let start = Instant::now(); + let r = tick(ms(200)); + + assert!(r.recv_timeout(ms(100)).is_err()); + let now = Instant::now(); + assert!(now - start >= ms(100)); + assert!(now - start <= ms(150)); + + let fired = r.recv_timeout(ms(200)).unwrap(); + assert!(fired - start >= ms(200)); + assert!(fired - start <= ms(250)); + + assert!(r.recv_timeout(ms(100)).is_err()); + let now = Instant::now(); + assert!(now - start >= ms(300)); + assert!(now - start <= ms(350)); + + let fired = r.recv_timeout(ms(200)).unwrap(); + assert!(fired - start >= ms(400)); + assert!(fired - start <= ms(450)); +} + +#[test] +fn recv_two() { + let r1 = tick(ms(50)); + let r2 = tick(ms(50)); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..10 { + select! { + recv(r1) -> _ => {} + recv(r2) -> _ => {} + } + } + }); + scope.spawn(|| { + for _ in 0..10 { + select! { + recv(r1) -> _ => {} + recv(r2) -> _ => {} + } + } + }); + }); +} + +#[test] +fn recv_race() { + select! { + recv(tick(ms(50))) -> _ => {} + recv(tick(ms(100))) -> _ => panic!(), + } + + select! { + recv(tick(ms(100))) -> _ => panic!(), + recv(tick(ms(50))) -> _ => {} + } +} + +#[test] +fn stress_default() { + const COUNT: usize = 10; + + for _ in 0..COUNT { + select! { + recv(tick(ms(0))) -> _ => {} + default => panic!(), + } + } + + for _ in 0..COUNT { + select! { + recv(tick(ms(100))) -> _ => panic!(), + default => {} + } + } +} + +#[test] +fn select() { + const THREADS: usize = 4; + + let hits = AtomicUsize::new(0); + let r1 = tick(ms(200)); + let r2 = tick(ms(300)); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + let timeout = after(ms(1100)); + loop { + select! 
{ + recv(r1) -> _ => { + hits.fetch_add(1, Ordering::SeqCst); + } + recv(r2) -> _ => { + hits.fetch_add(1, Ordering::SeqCst); + } + recv(timeout) -> _ => break + } + } + }); + } + }); + + assert_eq!(hits.load(Ordering::SeqCst), 8); +} + +#[test] +fn fairness() { + const COUNT: usize = 30; + + for &dur in &[0, 1] { + let mut hits = [0usize; 2]; + + for _ in 0..COUNT { + let r1 = tick(ms(dur)); + let r2 = tick(ms(dur)); + + for _ in 0..COUNT { + select! { + recv(r1) -> _ => hits[0] += 1, + recv(r2) -> _ => hits[1] += 1, + } + } + } + + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + } +} + +#[test] +fn fairness_duplicates() { + const COUNT: usize = 30; + + for &dur in &[0, 1] { + let mut hits = [0usize; 5]; + + for _ in 0..COUNT { + let r = tick(ms(dur)); + + for _ in 0..COUNT { + select! { + recv(r) -> _ => hits[0] += 1, + recv(r) -> _ => hits[1] += 1, + recv(r) -> _ => hits[2] += 1, + recv(r) -> _ => hits[3] += 1, + recv(r) -> _ => hits[4] += 1, + } + } + } + + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + } +} diff --git a/crossbeam-channel/tests/zero.rs b/crossbeam-channel/tests/zero.rs new file mode 100644 index 000000000..e0752539b --- /dev/null +++ b/crossbeam-channel/tests/zero.rs @@ -0,0 +1,457 @@ +//! Tests for the zero channel flavor. + +extern crate crossbeam; +#[macro_use] +extern crate crossbeam_channel; +extern crate rand; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::thread; +use std::time::Duration; + +use crossbeam_channel::{bounded}; +use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; +use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; +use rand::{thread_rng, Rng}; + +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = bounded(0); + assert_eq!(s.try_send(7), Err(TrySendError::Full(7))); + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn capacity() { + let (s, r) = bounded::<()>(0); + assert_eq!(s.capacity(), Some(0)); + assert_eq!(r.capacity(), Some(0)); +} + +#[test] +fn len_empty_full() { + let (s, r) = bounded(0); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), true); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), true); + + crossbeam::scope(|scope| { + scope.spawn(|| s.send(0).unwrap()); + scope.spawn(|| r.recv().unwrap()); + }); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), true); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), true); +} + +#[test] +fn try_recv() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + thread::sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + thread::sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + s.send(7).unwrap(); + }); + }); +} + +#[test] +fn recv() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv(), Ok(7)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(8)); + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(9)); + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + s.send(7).unwrap(); + s.send(8).unwrap(); + s.send(9).unwrap(); + }); + }); +} + +#[test] +fn recv_timeout() { + let (s, r) = bounded::(0); + + 
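+ // A zero-capacity channel has no buffer to drain: recv can succeed only while a sender + // is blocked on the other side, so the first recv_timeout below must expire.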
crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); + assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); + assert_eq!( + r.recv_timeout(ms(1000)), + Err(RecvTimeoutError::Disconnected) + ); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + s.send(7).unwrap(); + }); + }); +} + +#[test] +fn try_send() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(s.try_send(7), Err(TrySendError::Full(7))); + thread::sleep(ms(1500)); + assert_eq!(s.try_send(8), Ok(())); + thread::sleep(ms(500)); + assert_eq!(s.try_send(9), Err(TrySendError::Disconnected(9))); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + assert_eq!(r.recv(), Ok(8)); + }); + }); +} + +#[test] +fn send() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + s.send(7).unwrap(); + thread::sleep(ms(1000)); + s.send(8).unwrap(); + thread::sleep(ms(1000)); + s.send(9).unwrap(); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + assert_eq!(r.recv(), Ok(7)); + assert_eq!(r.recv(), Ok(8)); + assert_eq!(r.recv(), Ok(9)); + }); + }); +} + +#[test] +fn send_timeout() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!( + s.send_timeout(7, ms(1000)), + Err(SendTimeoutError::Timeout(7)) + ); + assert_eq!(s.send_timeout(8, ms(1000)), Ok(())); + assert_eq!( + s.send_timeout(9, ms(1000)), + Err(SendTimeoutError::Disconnected(9)) + ); + }); + scope.spawn(move || { + thread::sleep(ms(1500)); + assert_eq!(r.recv(), Ok(8)); + }); + }); +} + +#[test] +fn len() { + const COUNT: usize = 25_000; + + let (s, r) = bounded(0); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + assert_eq!(r.recv(), Ok(i)); + assert_eq!(r.len(), 0); + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + s.send(i).unwrap(); + assert_eq!(s.len(), 0); + } + }); + }); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); +} + +#[test] +fn disconnect_wakes_sender() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(s.send(()), Err(SendError(()))); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(r); + }); + }); +} + +#[test] +fn disconnect_wakes_receiver() { + let (s, r) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + thread::sleep(ms(1000)); + drop(s); + }); + }); +} + +#[test] +fn spsc() { + const COUNT: usize = 100_000; + + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(move || { + for i in 0..COUNT { + assert_eq!(r.recv(), Ok(i)); + } + assert_eq!(r.recv(), Err(RecvError)); + }); + scope.spawn(move || { + for i in 0..COUNT { + s.send(i).unwrap(); + } + }); + }); +} + +#[test] +fn mpmc() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = bounded::(0); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + crossbeam::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + for _ in 0..COUNT { + let n = r.recv().unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + }); + } + for _ in 0..THREADS { + scope.spawn(|| { + for i in 0..COUNT { + s.send(i).unwrap(); + } + }); + } + }); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[test] +fn stress_timeout_two_threads() { + const COUNT: usize = 100; + + let (s, r) = bounded(0); + + 
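+ // Each side alternates 50 ms sleeps with 10 ms timeouts, so most send/recv attempts + // expire before the two threads finally meet, repeatedly exercising the timeout path.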
crossbeam::scope(|scope| { + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(50)); + } + loop { + if let Ok(()) = s.send_timeout(i, ms(10)) { + break; + } + } + } + }); + + scope.spawn(|| { + for i in 0..COUNT { + if i % 2 == 0 { + thread::sleep(ms(50)); + } + loop { + if let Ok(x) = r.recv_timeout(ms(10)) { + assert_eq!(x, i); + break; + } + } + } + }); + }); +} + +#[test] +fn drops() { + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + let mut rng = thread_rng(); + + for _ in 0..100 { + let steps = rng.gen_range(0, 3_000); + + DROPS.store(0, Ordering::SeqCst); + let (s, r) = bounded::(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + for _ in 0..steps { + r.recv().unwrap(); + } + }); + + scope.spawn(|| { + for _ in 0..steps { + s.send(DropCounter).unwrap(); + } + }); + }); + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(s); + drop(r); + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + } +} + +#[test] +fn fairness() { + const COUNT: usize = 10_000; + + let (s1, r1) = bounded::<()>(0); + let (s2, r2) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + let mut hits = [0usize; 2]; + for _ in 0..COUNT { + select! { + recv(r1) -> _ => hits[0] += 1, + recv(r2) -> _ => hits[1] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + }); + + let mut hits = [0usize; 2]; + for _ in 0..COUNT { + select! { + send(s1, ()) -> _ => hits[0] += 1, + send(s2, ()) -> _ => hits[1] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + }); +} + +#[test] +fn fairness_duplicates() { + const COUNT: usize = 10_000; + + let (s, r) = bounded::<()>(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + let mut hits = [0usize; 5]; + for _ in 0..COUNT { + select! { + recv(r) -> _ => hits[0] += 1, + recv(r) -> _ => hits[1] += 1, + recv(r) -> _ => hits[2] += 1, + recv(r) -> _ => hits[3] += 1, + recv(r) -> _ => hits[4] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + }); + + let mut hits = [0usize; 5]; + for _ in 0..COUNT { + select! { + send(s, ()) -> _ => hits[0] += 1, + send(s, ()) -> _ => hits[1] += 1, + send(s, ()) -> _ => hits[2] += 1, + send(s, ()) -> _ => hits[3] += 1, + send(s, ()) -> _ => hits[4] += 1, + } + } + assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); + }); +} + +#[test] +fn recv_in_send() { + let (s, r) = bounded(0); + + crossbeam::scope(|scope| { + scope.spawn(|| { + thread::sleep(ms(100)); + r.recv() + }); + + scope.spawn(|| { + thread::sleep(ms(500)); + s.send(()).unwrap(); + }); + + select! { + send(s, r.recv().unwrap()) -> _ => {} + } + }); +} diff --git a/crossbeam-deque/CHANGELOG.md b/crossbeam-deque/CHANGELOG.md new file mode 100644 index 000000000..8d9782839 --- /dev/null +++ b/crossbeam-deque/CHANGELOG.md @@ -0,0 +1,63 @@ +# Version 0.6.1 + +- Change a few `Relaxed` orderings to `Release` in order to fix false positives by tsan. + +# Version 0.6.0 + +- Add `Stealer::steal_many` for batched stealing. +- Change the return type of `pop` to `Pop` so that spinning can be handled manually. + +# Version 0.5.2 + +- Update `crossbeam-utils` to `0.5.0`. + +# Version 0.5.1 + +- Minor optimizations. + +# Version 0.5.0 + +- Add two deque constructors : `fifo()` and `lifo()`. +- Update `rand` to `0.5.3`. +- Rename `Deque` to `Worker`. +- Return `Option` from `Stealer::steal`. 
+- Remove methods `Deque::len` and `Stealer::len`. +- Remove method `Deque::stealer`. +- Remove method `Deque::steal`. + +# Version 0.4.1 + +- Update `crossbeam-epoch` to `0.5.0`. + +# Version 0.4.0 + +- Update `crossbeam-epoch` to `0.4.2`. +- Update `crossbeam-utils` to `0.4.0`. +- Require minimum Rust version 1.25. + +# Version 0.3.1 + +- Add `Deque::capacity`. +- Add `Deque::min_capacity`. +- Add `Deque::shrink_to_fit`. +- Update `crossbeam-epoch` to `0.3.0`. +- Support Rust 1.20. +- Shrink the buffer in `Deque::push` if necessary. + +# Version 0.3.0 + +- Update `crossbeam-epoch` to `0.4.0`. +- Drop support for Rust 1.13. + +# Version 0.2.0 + +- Update `crossbeam-epoch` to `0.3.0`. +- Support Rust 1.13. + +# Version 0.1.1 + +- Update `crossbeam-epoch` to `0.2.0`. + +# Version 0.1.0 + +- First implementation of the Chase-Lev deque. diff --git a/crossbeam-deque/Cargo.toml b/crossbeam-deque/Cargo.toml new file mode 100644 index 000000000..b4615552f --- /dev/null +++ b/crossbeam-deque/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "crossbeam-deque" +# When publishing a new version: +# - Update CHANGELOG.md +# - Update README.md +# - Create "crossbeam-deque-X.Y.Z" git tag +version = "0.6.1" +authors = ["The Crossbeam Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/crossbeam-rs/crossbeam-deque" +homepage = "https://github.com/crossbeam-rs/crossbeam-deque" +documentation = "https://docs.rs/crossbeam-deque" +description = "Concurrent work-stealing deque" +keywords = ["chase-lev", "lock-free", "scheduler", "scheduling"] +categories = ["algorithms", "concurrency", "data-structures"] + +[badges] +travis-ci = { repository = "crossbeam-rs/crossbeam-deque" } + +[dependencies] +crossbeam-epoch = "0.5.0" +crossbeam-utils = "0.5.0" + +[dev-dependencies] +rand = "0.5.3" diff --git a/crossbeam-deque/LICENSE-APACHE b/crossbeam-deque/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/crossbeam-deque/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/crossbeam-deque/LICENSE-MIT b/crossbeam-deque/LICENSE-MIT new file mode 100644 index 000000000..25597d583 --- /dev/null +++ b/crossbeam-deque/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/crossbeam-deque/README.md b/crossbeam-deque/README.md new file mode 100644 index 000000000..da4b0851f --- /dev/null +++ b/crossbeam-deque/README.md @@ -0,0 +1,33 @@ +# Concurrent work-stealing deque + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( +https://travis-ci.org/crossbeam-rs/crossbeam) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam-deque) +[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)]( +https://crates.io/crates/crossbeam-deque) +[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)]( +https://docs.rs/crossbeam-deque) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-deque = "0.6" +``` + +Next, add this to your crate: + +```rust +extern crate crossbeam_deque; +``` + +The minimum required Rust version is 1.25. + +## License + +Licensed under the terms of MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. diff --git a/crossbeam-deque/ci/script.sh b/crossbeam-deque/ci/script.sh new file mode 100755 index 000000000..a13d03248 --- /dev/null +++ b/crossbeam-deque/ci/script.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +check_min_version() { + local rustc="`rustc -V | cut -d' ' -f2 | cut -d- -f1`" + if [[ "$rustc" != "`echo -e "$rustc\n$1" | sort -V | tail -n1`" ]]; then + echo "Unsupported Rust version: $1 < $rustc" + exit 0 + fi +} +check_min_version 1.25.0 + +set -ex + +export RUSTFLAGS="-D warnings" + +cargo test diff --git a/crossbeam-deque/src/lib.rs b/crossbeam-deque/src/lib.rs new file mode 100644 index 000000000..236e106d6 --- /dev/null +++ b/crossbeam-deque/src/lib.rs @@ -0,0 +1,859 @@ +//! A concurrent work-stealing deque. +//! +//! This data structure is most commonly used in schedulers. The typical setup involves a number of +//! threads where each thread has its own deque containing tasks. A thread may push tasks into its +//! deque as well as pop tasks from it. Once it runs out of tasks, it may steal some from other +//! threads to help complete tasks more quickly. Therefore, work-stealing deques supports three +//! essential operations: *push*, *pop*, and *steal*. +//! +//! # Types of deques +//! +//! There are two types of deques, differing only in which order tasks get pushed and popped. The +//! two task ordering strategies are: +//! +//! * First-in first-out (FIFO) +//! * Last-in first-out (LIFO) +//! +//! A deque is a buffer with two ends, front and back. In a FIFO deque, tasks are pushed into the +//! back, popped from the front, and stolen from the front. However, in a LIFO deque, tasks are +//! popped from the back instead - that is the only difference. +//! +//! # Workers and stealers +//! +//! There are two functions that construct a deque: [`fifo`] and [`lifo`]. These functions return a +//! [`Worker`] and a [`Stealer`]. The thread which owns the deque is usually called *worker*, while +//! all other threads are *stealers*. +//! +//! [`Worker`] is able to push and pop tasks. It cannot be shared among multiple threads - only +//! one thread owns it. +//! +//! [`Stealer`] can only steal tasks. 
It can be shared among multiple threads by reference or by
+//! cloning. Cloning a [`Stealer`] simply creates another one associated with the same deque.
+//!
+//! # Examples
+//!
+//! ```
+//! use crossbeam_deque::{self as deque, Pop, Steal};
+//! use std::thread;
+//!
+//! // Create a LIFO deque.
+//! let (w, s) = deque::lifo();
+//!
+//! // Push several elements into the back.
+//! w.push(1);
+//! w.push(2);
+//! w.push(3);
+//!
+//! // This is a LIFO deque, which means an element is popped from the back.
+//! // If it was a FIFO deque, `w.pop()` would return `Pop::Data(1)`.
+//! assert_eq!(w.pop(), Pop::Data(3));
+//!
+//! // Create a stealer thread.
+//! thread::spawn(move || {
+//!     assert_eq!(s.steal(), Steal::Data(1));
+//!     assert_eq!(s.steal(), Steal::Data(2));
+//! }).join().unwrap();
+//! ```
+//!
+//! [`Worker`]: struct.Worker.html
+//! [`Stealer`]: struct.Stealer.html
+//! [`fifo`]: fn.fifo.html
+//! [`lifo`]: fn.lifo.html
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+
+use std::cell::Cell;
+use std::cmp;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::Arc;
+use std::sync::atomic::{self, AtomicIsize, Ordering};
+
+use epoch::{Atomic, Owned};
+use utils::CachePadded;
+
+/// Minimum buffer capacity for a deque.
+const MIN_CAP: usize = 32;
+
+/// Maximum number of additional elements that can be stolen in `steal_many`.
+const MAX_BATCH: usize = 128;
+
+/// If a buffer of at least this size is retired, thread-local garbage is flushed so that it gets
+/// deallocated as soon as possible.
+const FLUSH_THRESHOLD_BYTES: usize = 1 << 10;
+
+/// Creates a work-stealing deque with the first-in first-out strategy.
+///
+/// Elements are pushed into the back, popped from the front, and stolen from the front. In other
+/// words, the worker side behaves as a FIFO queue.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_deque::{self as deque, Pop, Steal};
+///
+/// let (w, s) = deque::fifo::<i32>();
+/// w.push(1);
+/// w.push(2);
+/// w.push(3);
+///
+/// assert_eq!(s.steal(), Steal::Data(1));
+/// assert_eq!(w.pop(), Pop::Data(2));
+/// assert_eq!(w.pop(), Pop::Data(3));
+/// ```
+pub fn fifo<T>() -> (Worker<T>, Stealer<T>) {
+    let buffer = Buffer::alloc(MIN_CAP);
+
+    let inner = Arc::new(CachePadded::new(Inner {
+        front: AtomicIsize::new(0),
+        back: AtomicIsize::new(0),
+        buffer: Atomic::new(buffer),
+    }));
+
+    let w = Worker {
+        inner: inner.clone(),
+        cached_buffer: Cell::new(buffer),
+        flavor: Flavor::Fifo,
+        _marker: PhantomData,
+    };
+    let s = Stealer {
+        inner,
+        flavor: Flavor::Fifo,
+    };
+    (w, s)
+}
+
+/// Creates a work-stealing deque with the last-in first-out strategy.
+///
+/// Elements are pushed into the back, popped from the back, and stolen from the front. In other
+/// words, the worker side behaves as a LIFO stack.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_deque::{self as deque, Pop, Steal};
+///
+/// let (w, s) = deque::lifo::<i32>();
+/// w.push(1);
+/// w.push(2);
+/// w.push(3);
+///
+/// assert_eq!(s.steal(), Steal::Data(1));
+/// assert_eq!(w.pop(), Pop::Data(3));
+/// assert_eq!(w.pop(), Pop::Data(2));
+/// ```
+pub fn lifo<T>() -> (Worker<T>, Stealer<T>) {
+    let buffer = Buffer::alloc(MIN_CAP);
+
+    let inner = Arc::new(CachePadded::new(Inner {
+        front: AtomicIsize::new(0),
+        back: AtomicIsize::new(0),
+        buffer: Atomic::new(buffer),
+    }));
+
+    let w = Worker {
+        inner: inner.clone(),
+        cached_buffer: Cell::new(buffer),
+        flavor: Flavor::Lifo,
+        _marker: PhantomData,
+    };
+    let s = Stealer {
+        inner,
+        flavor: Flavor::Lifo,
+    };
+    (w, s)
+}
+
+/// A buffer that holds elements in a deque.
+///
+/// This is just a pointer to the buffer and its length - dropping an instance of this struct will
+/// *not* deallocate the buffer.
+struct Buffer<T> {
+    /// Pointer to the allocated memory.
+    ptr: *mut T,
+
+    /// Capacity of the buffer. Always a power of two.
+    cap: usize,
+}
+
+unsafe impl<T> Send for Buffer<T> {}
+
+impl<T> Buffer<T> {
+    /// Allocates a new buffer with the specified capacity.
+    fn alloc(cap: usize) -> Self {
+        debug_assert_eq!(cap, cap.next_power_of_two());
+
+        let mut v = Vec::with_capacity(cap);
+        let ptr = v.as_mut_ptr();
+        mem::forget(v);
+
+        Buffer { ptr, cap }
+    }
+
+    /// Deallocates the buffer.
+    unsafe fn dealloc(self) {
+        drop(Vec::from_raw_parts(self.ptr, 0, self.cap));
+    }
+
+    /// Returns a pointer to the element at the specified `index`.
+    unsafe fn at(&self, index: isize) -> *mut T {
+        // `self.cap` is always a power of two.
+        self.ptr.offset(index & (self.cap - 1) as isize)
+    }
+
+    /// Writes `value` into the specified `index`.
+    unsafe fn write(&self, index: isize, value: T) {
+        ptr::write_volatile(self.at(index), value)
+    }
+
+    /// Reads a value from the specified `index`.
+    unsafe fn read(&self, index: isize) -> T {
+        ptr::read_volatile(self.at(index))
+    }
+}
+
+impl<T> Clone for Buffer<T> {
+    fn clone(&self) -> Buffer<T> {
+        Buffer {
+            ptr: self.ptr,
+            cap: self.cap,
+        }
+    }
+}
+
+impl<T> Copy for Buffer<T> {}
+
+/// Possible outcomes of a pop operation.
+#[must_use]
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
+pub enum Pop<T> {
+    /// The deque was empty at the time of popping.
+    Empty,
+
+    /// Some data has been successfully popped.
+    Data(T),
+
+    /// Lost the race for popping data to another concurrent steal operation. Try again.
+    Retry,
+}
+
+/// Possible outcomes of a steal operation.
+#[must_use]
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
+pub enum Steal<T> {
+    /// The deque was empty at the time of stealing.
+    Empty,
+
+    /// Some data has been successfully stolen.
+    Data(T),
+
+    /// Lost the race for stealing data to another concurrent steal or pop operation. Try again.
+    Retry,
+}
+
+/// Internal data that is shared between the worker and stealers.
+///
+/// The implementation is based on the following work:
+///
+/// 1. [Chase and Lev. Dynamic circular work-stealing deque. SPAA 2005.][chase-lev]
+/// 2. [Le, Pop, Cohen, and Nardelli. Correct and efficient work-stealing for weak memory models.
+///    PPoPP 2013.][weak-mem]
+/// 3. [Norris and Demsky. CDSchecker: checking concurrent data structures written with C/C++
+///    atomics. OOPSLA 2013.][checker]
+///
+/// [chase-lev]: https://dl.acm.org/citation.cfm?id=1073974
+/// [weak-mem]: https://dl.acm.org/citation.cfm?id=2442524
+/// [checker]: https://dl.acm.org/citation.cfm?id=2509514
+struct Inner<T> {
+    /// The front index.
+    front: AtomicIsize,
+
+    /// The back index.
+    back: AtomicIsize,
+
+    /// The underlying buffer.
+    buffer: Atomic<Buffer<T>>,
+}
+
+impl<T> Drop for Inner<T> {
+    fn drop(&mut self) {
+        // Load the back index, front index, and buffer.
+        let b = self.back.load(Ordering::Relaxed);
+        let f = self.front.load(Ordering::Relaxed);
+
+        unsafe {
+            let buffer = self.buffer.load(Ordering::Relaxed, epoch::unprotected());
+
+            // Go through the buffer from front to back and drop all elements in the deque.
+            let mut i = f;
+            while i != b {
+                ptr::drop_in_place(buffer.deref().at(i));
+                i = i.wrapping_add(1);
+            }
+
+            // Free the memory allocated by the buffer.
+            buffer.into_owned().into_box().dealloc();
+        }
+    }
+}
+
+/// The flavor of a deque: FIFO or LIFO.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+enum Flavor {
+    /// The first-in first-out flavor.
+    Fifo,
+
+    /// The last-in first-out flavor.
+    Lifo,
+}
+
+/// The worker side of a deque.
+///
+/// Workers push elements into the back and pop elements depending on the strategy:
+///
+/// * In FIFO deques, elements are popped from the front.
+/// * In LIFO deques, elements are popped from the back.
+///
+/// A deque has only one worker. Workers are not intended to be shared among multiple threads.
+pub struct Worker<T> {
+    /// A reference to the inner representation of the deque.
+    inner: Arc<CachePadded<Inner<T>>>,
+
+    /// A copy of `inner.buffer` for quick access.
+    cached_buffer: Cell<Buffer<T>>,
+
+    /// The flavor of the deque.
+    flavor: Flavor,
+
+    /// Indicates that the worker cannot be shared among threads.
+    _marker: PhantomData<*mut ()>, // !Send + !Sync
+}
+
+unsafe impl<T: Send> Send for Worker<T> {}
+
+impl<T> Worker<T> {
+    /// Resizes the internal buffer to the new capacity of `new_cap`.
+    #[cold]
+    unsafe fn resize(&self, new_cap: usize) {
+        // Load the back index, front index, and buffer.
+        let b = self.inner.back.load(Ordering::Relaxed);
+        let f = self.inner.front.load(Ordering::Relaxed);
+        let buffer = self.cached_buffer.get();
+
+        // Allocate a new buffer.
+        let new = Buffer::alloc(new_cap);
+        self.cached_buffer.set(new);
+
+        // Copy data from the old buffer to the new one.
+        let mut i = f;
+        while i != b {
+            ptr::copy_nonoverlapping(buffer.at(i), new.at(i), 1);
+            i = i.wrapping_add(1);
+        }
+
+        let guard = &epoch::pin();
+
+        // Replace the old buffer with the new one.
+        let old = self.inner
+            .buffer
+            .swap(Owned::new(new).into_shared(guard), Ordering::Release, guard);
+
+        // Destroy the old buffer later.
+        guard.defer(move || old.into_owned().into_box().dealloc());
+
+        // If the buffer is very large, then flush the thread-local garbage in order to deallocate
+        // it as soon as possible.
+        if mem::size_of::<T>() * new_cap >= FLUSH_THRESHOLD_BYTES {
+            guard.flush();
+        }
+    }
+
+    /// Reserves enough capacity so that `reserve_cap` elements can be pushed without growing the
+    /// buffer.
+    fn reserve(&self, reserve_cap: usize) {
+        if reserve_cap > 0 {
+            // Compute the current length.
+            let b = self.inner.back.load(Ordering::Relaxed);
+            let f = self.inner.front.load(Ordering::SeqCst);
+            let len = b.wrapping_sub(f) as usize;
+
+            // The current capacity.
+            let cap = self.cached_buffer.get().cap;
+
+            // Is there enough capacity to push `reserve_cap` elements?
+            if cap - len < reserve_cap {
+                // Keep doubling the capacity as much as is needed.
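+                // (Capacities stay powers of two, which `Buffer::at` relies on
+                // for cheap index masking.)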
+ let mut new_cap = cap * 2; + while new_cap - len < reserve_cap { + new_cap *= 2; + } + + // Resize the buffer. + unsafe { + self.resize(new_cap); + } + } + } + } + + /// Returns `true` if the deque is empty. + /// + /// ``` + /// use crossbeam_deque as deque; + /// + /// let (w, _) = deque::lifo(); + /// assert!(w.is_empty()); + /// w.push(1); + /// assert!(!w.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + let b = self.inner.back.load(Ordering::Relaxed); + let f = self.inner.front.load(Ordering::SeqCst); + b.wrapping_sub(f) <= 0 + } + + /// Pushes an element into the back of the deque. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque as deque; + /// + /// let (w, _) = deque::lifo(); + /// w.push(1); + /// w.push(2); + /// ``` + pub fn push(&self, value: T) { + // Load the back index, front index, and buffer. + let b = self.inner.back.load(Ordering::Relaxed); + let f = self.inner.front.load(Ordering::Acquire); + let mut buffer = self.cached_buffer.get(); + + // Calculate the length of the deque. + let len = b.wrapping_sub(f); + + // Is the deque full? + if len >= buffer.cap as isize { + // Yes. Grow the underlying buffer. + unsafe { + self.resize(2 * buffer.cap); + } + buffer = self.cached_buffer.get(); + } + + // Write `value` into the slot. + unsafe { + buffer.write(b, value); + } + + atomic::fence(Ordering::Release); + + // Increment the back index. + // + // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data + // races because it doesn't understand fences. + self.inner.back.store(b.wrapping_add(1), Ordering::Release); + } + + /// Pops an element from the deque. + /// + /// Which end of the deque is used depends on the strategy: + /// + /// * If this is a FIFO deque, an element is popped from the front. + /// * If this is a LIFO deque, an element is popped from the back. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::{self as deque, Pop}; + /// + /// let (w, _) = deque::fifo(); + /// w.push(1); + /// w.push(2); + /// + /// assert_eq!(w.pop(), Pop::Data(1)); + /// assert_eq!(w.pop(), Pop::Data(2)); + /// assert_eq!(w.pop(), Pop::Empty); + /// ``` + pub fn pop(&self) -> Pop { + // Load the back and front index. + let b = self.inner.back.load(Ordering::Relaxed); + let f = self.inner.front.load(Ordering::Relaxed); + + // Calculate the length of the deque. + let len = b.wrapping_sub(f); + + // Is the deque empty? + if len <= 0 { + return Pop::Empty; + } + + match self.flavor { + // Pop from the front of the deque. + Flavor::Fifo => { + // Try incrementing the front index to pop the value. + if self.inner + .front + .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) + .is_ok() + { + unsafe { + // Read the popped value. + let buffer = self.cached_buffer.get(); + let data = buffer.read(f); + + // Shrink the buffer if `len - 1` is less than one fourth of the capacity. + if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 { + self.resize(buffer.cap / 2); + } + + return Pop::Data(data); + } + } + + Pop::Retry + } + + // Pop from the back of the deque. + Flavor::Lifo => { + // Decrement the back index. + let b = b.wrapping_sub(1); + self.inner.back.store(b, Ordering::Relaxed); + + atomic::fence(Ordering::SeqCst); + + // Load the front index. + let f = self.inner.front.load(Ordering::Relaxed); + + // Compute the length after the back index was decremented. + let len = b.wrapping_sub(f); + + if len < 0 { + // The deque is empty. Restore the back index to the original value. 
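+                    // (Concurrent steals may have taken the remaining elements
+                    // between the two loads of the front index.)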
+ self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); + Pop::Empty + } else { + // Read the value to be popped. + let buffer = self.cached_buffer.get(); + let mut value = unsafe { Some(buffer.read(b)) }; + + // Are we popping the last element from the deque? + if len == 0 { + // Try incrementing the front index. + if self.inner + .front + .compare_exchange( + f, + f.wrapping_add(1), + Ordering::SeqCst, + Ordering::Relaxed, + ) + .is_err() + { + // Failed. We didn't pop anything. + mem::forget(value.take()); + } + + // Restore the back index to the original value. + self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); + } else { + // Shrink the buffer if `len` is less than one fourth of the capacity. + if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { + unsafe { + self.resize(buffer.cap / 2); + } + } + } + + match value { + None => Pop::Empty, + Some(data) => Pop::Data(data), + } + } + } + } + } +} + +impl fmt::Debug for Worker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Worker {{ ... }}") + } +} + +/// The stealer side of a deque. +/// +/// Stealers can only steal elements from the front of the deque. +/// +/// Stealers are cloneable so that they can be easily shared among multiple threads. +pub struct Stealer { + /// A reference to the inner representation of the deque. + inner: Arc>>, + + /// The flavor of the deque. + flavor: Flavor, +} + +unsafe impl Send for Stealer {} +unsafe impl Sync for Stealer {} + +impl Stealer { + /// Returns `true` if the deque is empty. + /// + /// ``` + /// use crossbeam_deque as deque; + /// + /// let (w, s) = deque::lifo(); + /// assert!(s.is_empty()); + /// w.push(1); + /// assert!(!s.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + let f = self.inner.front.load(Ordering::Acquire); + atomic::fence(Ordering::SeqCst); + let b = self.inner.back.load(Ordering::Acquire); + b.wrapping_sub(f) <= 0 + } + + /// Steals an element from the front of the deque. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::{self as deque, Steal}; + /// + /// let (w, s) = deque::lifo(); + /// w.push(1); + /// w.push(2); + /// + /// assert_eq!(s.steal(), Steal::Data(1)); + /// assert_eq!(s.steal(), Steal::Data(2)); + /// assert_eq!(s.steal(), Steal::Empty); + /// ``` + pub fn steal(&self) -> Steal { + // Load the front index. + let f = self.inner.front.load(Ordering::Acquire); + + // A SeqCst fence is needed here. + // + // If the current thread is already pinned (reentrantly), we must manually issue the + // fence. Otherwise, the following pinning will issue the fence anyway, so we don't + // have to. + if epoch::is_pinned() { + atomic::fence(Ordering::SeqCst); + } + + let guard = &epoch::pin(); + + // Load the back index. + let b = self.inner.back.load(Ordering::Acquire); + + // Is the deque empty? + if b.wrapping_sub(f) <= 0 { + return Steal::Empty; + } + + // Load the buffer and read the value at the front. + let buffer = self.inner.buffer.load(Ordering::Acquire, guard); + let value = unsafe { buffer.deref().read(f) }; + + // Try incrementing the front index to steal the value. + if self.inner + .front + .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) + .is_err() + { + // We didn't steal this value, forget it. + mem::forget(value); + return Steal::Retry; + } + + // Return the stolen value. + Steal::Data(value) + } + + /// Steals elements from the front of the deque. + /// + /// If at least one element can be stolen, it will be returned. 
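+    /// (The first stolen element is returned to the caller rather than pushed into `dest`.)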
Additionally, some of the + /// remaining elements will be stolen and pushed into the back of worker `dest` in order to + /// balance the work among deques. There is no hard guarantee on exactly how many elements will + /// be stolen, but it should be around half of the deque. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::{self as deque, Steal}; + /// + /// let (w1, s1) = deque::fifo(); + /// let (w2, s2) = deque::fifo(); + /// + /// w1.push(1); + /// w1.push(2); + /// w1.push(3); + /// w1.push(4); + /// + /// assert_eq!(s1.steal_many(&w2), Steal::Data(1)); + /// assert_eq!(s2.steal(), Steal::Data(2)); + /// ``` + pub fn steal_many(&self, dest: &Worker) -> Steal { + // Load the front index. + let mut f = self.inner.front.load(Ordering::Acquire); + + // A SeqCst fence is needed here. + // + // If the current thread is already pinned (reentrantly), we must manually issue the + // fence. Otherwise, the following pinning will issue the fence anyway, so we don't + // have to. + if epoch::is_pinned() { + atomic::fence(Ordering::SeqCst); + } + + let guard = &epoch::pin(); + + // Load the back index. + let b = self.inner.back.load(Ordering::Acquire); + + // Is the deque empty? + let len = b.wrapping_sub(f); + if len <= 0 { + return Steal::Empty; + } + + // Reserve capacity for the stolen additional elements. + let additional = cmp::min((len as usize - 1) / 2, MAX_BATCH); + dest.reserve(additional); + let additional = additional as isize; + + // Get the destination buffer and back index. + let dest_buffer = dest.cached_buffer.get(); + let mut dest_b = dest.inner.back.load(Ordering::Relaxed); + + // Load the buffer and read the value at the front. + let buffer = self.inner.buffer.load(Ordering::Acquire, guard); + let value = unsafe { buffer.deref().read(f) }; + + match self.flavor { + // Steal a batch of elements from the front at once. + Flavor::Fifo => { + // Copy the additional elements from the source to the destination buffer. + for i in 0..additional { + unsafe { + let value = buffer.deref().read(f.wrapping_add(i + 1)); + dest_buffer.write(dest_b.wrapping_add(i), value); + } + } + + // Try incrementing the front index to steal the batch. + if self.inner + .front + .compare_exchange( + f, + f.wrapping_add(additional + 1), + Ordering::SeqCst, + Ordering::Relaxed, + ) + .is_err() + { + // We didn't steal this value, forget it. + mem::forget(value); + return Steal::Retry; + } + + atomic::fence(Ordering::Release); + + // Success! Update the back index in the destination deque. + // + // This ordering could be `Relaxed`, but then thread sanitizer would falsely report + // data races because it doesn't understand fences. + dest.inner.back.store(dest_b.wrapping_add(additional), Ordering::Release); + + // Return the first stolen value. + Steal::Data(value) + } + + // Steal a batch of elements from the front one by one. + Flavor::Lifo => { + // Try incrementing the front index to steal the value. + if self.inner + .front + .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) + .is_err() + { + // We didn't steal this value, forget it. + mem::forget(value); + return Steal::Retry; + } + + // Move the front index one step forward. + f = f.wrapping_add(1); + + // Repeat the same procedure for the additional steals. + for _ in 0..additional { + // We've already got the current front index. Now execute the fence to + // synchronize with other threads. + atomic::fence(Ordering::SeqCst); + + // Load the back index. 
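+                    // The worker may have pushed or popped at the back in the
+                    // meantime, so the back index must be re-read on every step.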
+ let b = self.inner.back.load(Ordering::Acquire); + + // Is the deque empty? + if b.wrapping_sub(f) <= 0 { + break; + } + + // Read the value at the front. + let value = unsafe { buffer.deref().read(f) }; + + // Try incrementing the front index to steal the value. + if self.inner + .front + .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) + .is_err() + { + // We didn't steal this value, forget it and break from the loop. + mem::forget(value); + break; + } + + // Write the stolen value into the destination buffer. + unsafe { + dest_buffer.write(dest_b, value); + } + + // Move the source front index and the destination back index one step forward. + f = f.wrapping_add(1); + dest_b = dest_b.wrapping_add(1); + + atomic::fence(Ordering::Release); + + // Update the destination back index. + // + // This ordering could be `Relaxed`, but then thread sanitizer would falsely + // report data races because it doesn't understand fences. + dest.inner.back.store(dest_b, Ordering::Release); + } + + // Return the first stolen value. + Steal::Data(value) + } + } + } +} + +impl Clone for Stealer { + fn clone(&self) -> Stealer { + Stealer { + inner: self.inner.clone(), + flavor: self.flavor, + } + } +} + +impl fmt::Debug for Stealer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Stealer {{ ... }}") + } +} diff --git a/crossbeam-deque/tests/fifo.rs b/crossbeam-deque/tests/fifo.rs new file mode 100644 index 000000000..a12193040 --- /dev/null +++ b/crossbeam-deque/tests/fifo.rs @@ -0,0 +1,375 @@ +extern crate rand; +extern crate crossbeam_deque as deque; +extern crate crossbeam_epoch as epoch; + +use std::sync::{Arc, Mutex}; +use std::sync::atomic::{AtomicBool, AtomicUsize}; +use std::sync::atomic::Ordering::SeqCst; +use std::thread; + +use deque::{Pop, Steal}; +use rand::Rng; + +#[test] +fn smoke() { + let (w, s) = deque::fifo::(); + assert_eq!(w.pop(), Pop::Empty); + assert_eq!(s.steal(), Steal::Empty); + + w.push(1); + assert_eq!(w.pop(), Pop::Data(1)); + assert_eq!(w.pop(), Pop::Empty); + assert_eq!(s.steal(), Steal::Empty); + + w.push(2); + assert_eq!(s.steal(), Steal::Data(2)); + assert_eq!(s.steal(), Steal::Empty); + assert_eq!(w.pop(), Pop::Empty); + + w.push(3); + w.push(4); + w.push(5); + assert_eq!(s.steal(), Steal::Data(3)); + assert_eq!(s.steal(), Steal::Data(4)); + assert_eq!(s.steal(), Steal::Data(5)); + assert_eq!(s.steal(), Steal::Empty); + + w.push(6); + w.push(7); + w.push(8); + w.push(9); + assert_eq!(w.pop(), Pop::Data(6)); + assert_eq!(s.steal(), Steal::Data(7)); + assert_eq!(w.pop(), Pop::Data(8)); + assert_eq!(w.pop(), Pop::Data(9)); + assert_eq!(w.pop(), Pop::Empty); +} + +#[test] +fn steal_push() { + const STEPS: usize = 50_000; + + let (w, s) = deque::fifo(); + let t = thread::spawn(move || { + for i in 0..STEPS { + loop { + if let Steal::Data(v) = s.steal() { + assert_eq!(i, v); + break; + } + } + } + }); + + for i in 0..STEPS { + w.push(i); + } + t.join().unwrap(); +} + +#[test] +fn stampede() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + + let (w, s) = deque::fifo(); + + for i in 0..COUNT { + w.push(Box::new(i + 1)); + } + let remaining = Arc::new(AtomicUsize::new(COUNT)); + + let threads = (0..THREADS) + .map(|_| { + let s = s.clone(); + let remaining = remaining.clone(); + + thread::spawn(move || { + let mut last = 0; + while remaining.load(SeqCst) > 0 { + if let Steal::Data(x) = s.steal() { + assert!(last < *x); + last = *x; + remaining.fetch_sub(1, SeqCst); + } + } + }) + }) + .collect::>(); + + let mut last 
= 0; + while remaining.load(SeqCst) > 0 { + loop { + match w.pop() { + Pop::Data(x) => { + assert!(last < *x); + last = *x; + remaining.fetch_sub(1, SeqCst); + break; + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + + for t in threads { + t.join().unwrap(); + } +} + +fn run_stress() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + + let (w, s) = deque::fifo(); + let done = Arc::new(AtomicBool::new(false)); + let hits = Arc::new(AtomicUsize::new(0)); + + let threads = (0..THREADS) + .map(|_| { + let s = s.clone(); + let done = done.clone(); + let hits = hits.clone(); + + thread::spawn(move || { + let (w2, _) = deque::fifo(); + + while !done.load(SeqCst) { + if let Steal::Data(_) = s.steal() { + hits.fetch_add(1, SeqCst); + } + + if let Steal::Data(_) = s.steal_many(&w2) { + hits.fetch_add(1, SeqCst); + + loop { + match w2.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + } + }) + }) + .collect::>(); + + let mut rng = rand::thread_rng(); + let mut expected = 0; + while expected < COUNT { + if rng.gen_range(0, 3) == 0 { + loop { + match w.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } else { + w.push(expected); + expected += 1; + } + } + + while hits.load(SeqCst) < COUNT { + loop { + match w.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + done.store(true, SeqCst); + + for t in threads { + t.join().unwrap(); + } +} + +#[test] +fn stress() { + run_stress(); +} + +#[test] +fn stress_pinned() { + let _guard = epoch::pin(); + run_stress(); +} + +#[test] +fn no_starvation() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + + let (w, s) = deque::fifo(); + let done = Arc::new(AtomicBool::new(false)); + + let (threads, hits): (Vec<_>, Vec<_>) = (0..THREADS) + .map(|_| { + let s = s.clone(); + let done = done.clone(); + let hits = Arc::new(AtomicUsize::new(0)); + + let t = { + let hits = hits.clone(); + thread::spawn(move || { + let (w2, _) = deque::fifo(); + + while !done.load(SeqCst) { + if let Steal::Data(_) = s.steal() { + hits.fetch_add(1, SeqCst); + } + + if let Steal::Data(_) = s.steal_many(&w2) { + hits.fetch_add(1, SeqCst); + + loop { + match w2.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + } + }) + }; + + (t, hits) + }) + .unzip(); + + let mut rng = rand::thread_rng(); + let mut my_hits = 0; + loop { + for i in 0..rng.gen_range(0, COUNT) { + if rng.gen_range(0, 3) == 0 && my_hits == 0 { + loop { + match w.pop() { + Pop::Data(_) => my_hits += 1, + Pop::Empty => break, + Pop::Retry => {} + } + } + } else { + w.push(i); + } + } + + if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) { + break; + } + } + done.store(true, SeqCst); + + for t in threads { + t.join().unwrap(); + } +} + +#[test] +fn destructors() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + const STEPS: usize = 1000; + + struct Elem(usize, Arc>>); + + impl Drop for Elem { + fn drop(&mut self) { + self.1.lock().unwrap().push(self.0); + } + } + + let (w, s) = deque::fifo(); + + let dropped = Arc::new(Mutex::new(Vec::new())); + let remaining = Arc::new(AtomicUsize::new(COUNT)); + for i in 0..COUNT { + w.push(Elem(i, dropped.clone())); + } + + let threads = (0..THREADS) + .map(|_| { + let remaining = remaining.clone(); + let s = s.clone(); + + thread::spawn(move || { + let (w2, _) = deque::fifo(); + let mut 
cnt = 0; + + while cnt < STEPS { + if let Steal::Data(_) = s.steal() { + cnt += 1; + remaining.fetch_sub(1, SeqCst); + } + + if let Steal::Data(_) = s.steal_many(&w2) { + cnt += 1; + remaining.fetch_sub(1, SeqCst); + + loop { + match w2.pop() { + Pop::Data(_) => { + cnt += 1; + remaining.fetch_sub(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + } + }) + }) + .collect::>(); + + for _ in 0..STEPS { + loop { + match w.pop() { + Pop::Data(_) => { + remaining.fetch_sub(1, SeqCst); + break; + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + + for t in threads { + t.join().unwrap(); + } + + let rem = remaining.load(SeqCst); + assert!(rem > 0); + + { + let mut v = dropped.lock().unwrap(); + assert_eq!(v.len(), COUNT - rem); + v.clear(); + } + + drop((w, s)); + + { + let mut v = dropped.lock().unwrap(); + assert_eq!(v.len(), rem); + v.sort(); + for pair in v.windows(2) { + assert_eq!(pair[0] + 1, pair[1]); + } + } +} diff --git a/crossbeam-deque/tests/lifo.rs b/crossbeam-deque/tests/lifo.rs new file mode 100644 index 000000000..e8f5b4d39 --- /dev/null +++ b/crossbeam-deque/tests/lifo.rs @@ -0,0 +1,375 @@ +extern crate rand; +extern crate crossbeam_deque as deque; +extern crate crossbeam_epoch as epoch; + +use std::sync::{Arc, Mutex}; +use std::sync::atomic::{AtomicBool, AtomicUsize}; +use std::sync::atomic::Ordering::SeqCst; +use std::thread; + +use deque::{Pop, Steal}; +use rand::Rng; + +#[test] +fn smoke() { + let (w, s) = deque::lifo::(); + assert_eq!(w.pop(), Pop::Empty); + assert_eq!(s.steal(), Steal::Empty); + + w.push(1); + assert_eq!(w.pop(), Pop::Data(1)); + assert_eq!(w.pop(), Pop::Empty); + assert_eq!(s.steal(), Steal::Empty); + + w.push(2); + assert_eq!(s.steal(), Steal::Data(2)); + assert_eq!(s.steal(), Steal::Empty); + assert_eq!(w.pop(), Pop::Empty); + + w.push(3); + w.push(4); + w.push(5); + assert_eq!(s.steal(), Steal::Data(3)); + assert_eq!(s.steal(), Steal::Data(4)); + assert_eq!(s.steal(), Steal::Data(5)); + assert_eq!(s.steal(), Steal::Empty); + + w.push(6); + w.push(7); + w.push(8); + w.push(9); + assert_eq!(w.pop(), Pop::Data(9)); + assert_eq!(s.steal(), Steal::Data(6)); + assert_eq!(w.pop(), Pop::Data(8)); + assert_eq!(w.pop(), Pop::Data(7)); + assert_eq!(w.pop(), Pop::Empty); +} + +#[test] +fn steal_push() { + const STEPS: usize = 50_000; + + let (w, s) = deque::lifo(); + let t = thread::spawn(move || { + for i in 0..STEPS { + loop { + if let Steal::Data(v) = s.steal() { + assert_eq!(i, v); + break; + } + } + } + }); + + for i in 0..STEPS { + w.push(i); + } + t.join().unwrap(); +} + +#[test] +fn stampede() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + + let (w, s) = deque::lifo(); + + for i in 0..COUNT { + w.push(Box::new(i + 1)); + } + let remaining = Arc::new(AtomicUsize::new(COUNT)); + + let threads = (0..THREADS) + .map(|_| { + let s = s.clone(); + let remaining = remaining.clone(); + + thread::spawn(move || { + let mut last = 0; + while remaining.load(SeqCst) > 0 { + if let Steal::Data(x) = s.steal() { + assert!(last < *x); + last = *x; + remaining.fetch_sub(1, SeqCst); + } + } + }) + }) + .collect::>(); + + let mut last = COUNT + 1; + while remaining.load(SeqCst) > 0 { + loop { + match w.pop() { + Pop::Data(x) => { + assert!(last > *x); + last = *x; + remaining.fetch_sub(1, SeqCst); + break; + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + + for t in threads { + t.join().unwrap(); + } +} + +fn run_stress() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + + let (w, s) = deque::lifo(); + let done 
= Arc::new(AtomicBool::new(false)); + let hits = Arc::new(AtomicUsize::new(0)); + + let threads = (0..THREADS) + .map(|_| { + let s = s.clone(); + let done = done.clone(); + let hits = hits.clone(); + + thread::spawn(move || { + let (w2, _) = deque::lifo(); + + while !done.load(SeqCst) { + if let Steal::Data(_) = s.steal() { + hits.fetch_add(1, SeqCst); + } + + if let Steal::Data(_) = s.steal_many(&w2) { + hits.fetch_add(1, SeqCst); + + loop { + match w2.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + } + }) + }) + .collect::>(); + + let mut rng = rand::thread_rng(); + let mut expected = 0; + while expected < COUNT { + if rng.gen_range(0, 3) == 0 { + loop { + match w.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } else { + w.push(expected); + expected += 1; + } + } + + while hits.load(SeqCst) < COUNT { + loop { + match w.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + done.store(true, SeqCst); + + for t in threads { + t.join().unwrap(); + } +} + +#[test] +fn stress() { + run_stress(); +} + +#[test] +fn stress_pinned() { + let _guard = epoch::pin(); + run_stress(); +} + +#[test] +fn no_starvation() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + + let (w, s) = deque::lifo(); + let done = Arc::new(AtomicBool::new(false)); + + let (threads, hits): (Vec<_>, Vec<_>) = (0..THREADS) + .map(|_| { + let s = s.clone(); + let done = done.clone(); + let hits = Arc::new(AtomicUsize::new(0)); + + let t = { + let hits = hits.clone(); + thread::spawn(move || { + let (w2, _) = deque::lifo(); + + while !done.load(SeqCst) { + if let Steal::Data(_) = s.steal() { + hits.fetch_add(1, SeqCst); + } + + if let Steal::Data(_) = s.steal_many(&w2) { + hits.fetch_add(1, SeqCst); + + loop { + match w2.pop() { + Pop::Data(_) => { + hits.fetch_add(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + } + }) + }; + + (t, hits) + }) + .unzip(); + + let mut rng = rand::thread_rng(); + let mut my_hits = 0; + loop { + for i in 0..rng.gen_range(0, COUNT) { + if rng.gen_range(0, 3) == 0 && my_hits == 0 { + loop { + match w.pop() { + Pop::Data(_) => my_hits += 1, + Pop::Empty => break, + Pop::Retry => {} + } + } + } else { + w.push(i); + } + } + + if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) { + break; + } + } + done.store(true, SeqCst); + + for t in threads { + t.join().unwrap(); + } +} + +#[test] +fn destructors() { + const THREADS: usize = 8; + const COUNT: usize = 50_000; + const STEPS: usize = 1000; + + struct Elem(usize, Arc>>); + + impl Drop for Elem { + fn drop(&mut self) { + self.1.lock().unwrap().push(self.0); + } + } + + let (w, s) = deque::lifo(); + + let dropped = Arc::new(Mutex::new(Vec::new())); + let remaining = Arc::new(AtomicUsize::new(COUNT)); + for i in 0..COUNT { + w.push(Elem(i, dropped.clone())); + } + + let threads = (0..THREADS) + .map(|_| { + let remaining = remaining.clone(); + let s = s.clone(); + + thread::spawn(move || { + let (w2, _) = deque::lifo(); + let mut cnt = 0; + + while cnt < STEPS { + if let Steal::Data(_) = s.steal() { + cnt += 1; + remaining.fetch_sub(1, SeqCst); + } + + if let Steal::Data(_) = s.steal_many(&w2) { + cnt += 1; + remaining.fetch_sub(1, SeqCst); + + loop { + match w2.pop() { + Pop::Data(_) => { + cnt += 1; + remaining.fetch_sub(1, SeqCst); + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + } + }) + }) + .collect::>(); 
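+    // The worker pops up to STEPS elements as well, so destructors run on both
+    // the worker and the stealer side before the deque is dropped.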
+ + for _ in 0..STEPS { + loop { + match w.pop() { + Pop::Data(_) => { + remaining.fetch_sub(1, SeqCst); + break; + } + Pop::Empty => break, + Pop::Retry => {} + } + } + } + + for t in threads { + t.join().unwrap(); + } + + let rem = remaining.load(SeqCst); + assert!(rem > 0); + + { + let mut v = dropped.lock().unwrap(); + assert_eq!(v.len(), COUNT - rem); + v.clear(); + } + + drop((w, s)); + + { + let mut v = dropped.lock().unwrap(); + assert_eq!(v.len(), rem); + v.sort(); + for pair in v.windows(2) { + assert_eq!(pair[0] + 1, pair[1]); + } + } +} diff --git a/crossbeam-epoch/CHANGELOG.md b/crossbeam-epoch/CHANGELOG.md new file mode 100644 index 000000000..91f70a4a5 --- /dev/null +++ b/crossbeam-epoch/CHANGELOG.md @@ -0,0 +1,62 @@ +# Version 0.6.0 + +- `defer` now requires `F: Send + 'static`. +- Bump the minimum Rust version to 1.26. +- Pinning while TLS is tearing down does not fail anymore. +- Rename `Handle` to `LocalHandle`. +- Add `defer_unchecked` and `defer_destroy`. +- Remove `Clone` impl for `LocalHandle`. + +# Version 0.5.2 + +- Update `crossbeam-utils` to `0.5`. + +# Version 0.5.1 + +- Fix compatibility with the latest Rust nightly. + +# Version 0.5.0 + +- Update `crossbeam-utils` to `0.4`. +- Specify the minimum Rust version to `1.25.0`. + +# Version 0.4.3 + +- Downgrade `crossbeam-utils` to `0.3` because it was a breaking change. + +# Version 0.4.2 + +- Expose the `Pointer` trait. +- Warn missing docs and missing debug impls. +- Update `crossbeam-utils` to `0.4`. + +# Version 0.4.1 + +- Add `Debug` impls for `Collector`, `Handle`, and `Guard`. +- Add `load_consume` to `Atomic`. +- Rename `Collector::handle` to `Collector::register`. +- Remove the `Send` implementation for `Handle` (this was a bug). Only + `Collector`s can be shared among multiple threads, while `Handle`s and + `Guard`s must stay within the thread in which they were created. + +# Version 0.4.0 + +- Update dependencies. +- Remove support for Rust 1.13. + +# Version 0.3.0 + +- Add support for Rust 1.13. +- Improve documentation for CAS. + +# Version 0.2.0 + +- Add method `Owned::into_box`. +- Fix a use-after-free bug in `Local::finalize`. +- Fix an ordering bug in `Global::push_bag`. +- Fix a bug in calculating distance between epochs. +- Remove `impl Into> for Owned`. + +# Version 0.1.0 + +- First version of the new epoch-based GC. diff --git a/crossbeam-epoch/Cargo.toml b/crossbeam-epoch/Cargo.toml new file mode 100644 index 000000000..265e4a669 --- /dev/null +++ b/crossbeam-epoch/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "crossbeam-epoch" +# When publishing a new version: +# - Update CHANGELOG.md +# - Update README.md +# - Create "crossbeam-epoch-X.Y.Z" git tag +version = "0.6.0" +authors = ["The Crossbeam Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/crossbeam-rs/crossbeam-epoch" +homepage = "https://github.com/crossbeam-rs/crossbeam-epoch" +documentation = "https://docs.rs/crossbeam-epoch" +description = "Epoch-based garbage collection" +keywords = ["lock-free", "rcu", "atomic", "garbage"] +categories = ["concurrency", "memory-management"] + +[features] +default = ["use_std"] +use_std = ["lazy_static", "crossbeam-utils/use_std"] +nightly = ["arrayvec/use_union"] +sanitize = [] # Makes it more likely to trigger any potential data races. 
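+# The `sanitize` feature is opt-in, e.g.: `cargo test --features sanitize`.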
+ +[dependencies] +arrayvec = { version = "0.4", default-features = false } +cfg-if = "0.1" +crossbeam-utils = { version = "0.5", default-features = false } +lazy_static = { version = "1", optional = true } +memoffset = { version = "0.2" } +scopeguard = { version = "0.3", default-features = false } + +[dev-dependencies] +rand = "0.5" diff --git a/crossbeam-epoch/LICENSE-APACHE b/crossbeam-epoch/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/crossbeam-epoch/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/crossbeam-epoch/LICENSE-MIT b/crossbeam-epoch/LICENSE-MIT new file mode 100644 index 000000000..25597d583 --- /dev/null +++ b/crossbeam-epoch/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/crossbeam-epoch/README.md b/crossbeam-epoch/README.md new file mode 100644 index 000000000..d5c1e6d04 --- /dev/null +++ b/crossbeam-epoch/README.md @@ -0,0 +1,39 @@ +# Epoch-based garbage collection + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( +https://travis-ci.org/crossbeam-rs/crossbeam) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam-epoch) +[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)]( +https://crates.io/crates/crossbeam-epoch) +[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)]( +https://docs.rs/crossbeam-epoch) + +This crate provides epoch-based garbage collection for use in concurrent data structures. + +If a thread removes a node from a concurrent data structure, other threads +may still have pointers to that node, so it cannot be immediately destructed. +Epoch GC allows deferring destruction until it becomes safe to do so. 
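+
+For example, after swapping a new value into an `Atomic`, the old value can be
+handed to the collector instead of being freed immediately (a minimal sketch;
+`defer_destroy` runs the destructor once no pinned thread can still observe the
+pointer):
+
+```rust
+extern crate crossbeam_epoch as epoch;
+
+use epoch::{Atomic, Owned};
+use std::sync::atomic::Ordering::SeqCst;
+
+fn main() {
+    let a = Atomic::new(1234);
+
+    // Pin the current thread. While the guard is alive, deferred destruction
+    // of anything this thread can still reach is postponed.
+    let guard = &epoch::pin();
+
+    // Publish a new value and retire the old one.
+    let old = a.swap(Owned::new(5678), SeqCst, guard);
+    unsafe {
+        guard.defer_destroy(old);
+    }
+}
+```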
+ +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-epoch = "0.6" +``` + +Next, add this to your crate: + +```rust +extern crate crossbeam_epoch as epoch; +``` + +The minimum required Rust version is 1.26. + +## License + +Licensed under the terms of MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. diff --git a/crossbeam-epoch/benches/defer.rs b/crossbeam-epoch/benches/defer.rs new file mode 100644 index 000000000..8690b3f57 --- /dev/null +++ b/crossbeam-epoch/benches/defer.rs @@ -0,0 +1,73 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate test; + +use epoch::Owned; +use test::Bencher; +use utils::thread::scope; + +#[bench] +fn single_alloc_defer_free(b: &mut Bencher) { + b.iter(|| { + let guard = &epoch::pin(); + let p = Owned::new(1).into_shared(guard); + unsafe { + guard.defer(move || p.into_owned()); + } + }); +} + +#[bench] +fn single_defer(b: &mut Bencher) { + b.iter(|| { + let guard = &epoch::pin(); + unsafe { + guard.defer(move || ()); + } + }); +} + +#[bench] +fn multi_alloc_defer_free(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + let p = Owned::new(1).into_shared(guard); + unsafe { + guard.defer(move || p.into_owned()); + } + } + }); + } + }); + }); +} + +#[bench] +fn multi_defer(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + unsafe { + guard.defer(move || ()); + } + } + }); + } + }); + }); +} diff --git a/crossbeam-epoch/benches/flush.rs b/crossbeam-epoch/benches/flush.rs new file mode 100644 index 000000000..14268fa18 --- /dev/null +++ b/crossbeam-epoch/benches/flush.rs @@ -0,0 +1,51 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate test; + +use std::sync::Barrier; + +use test::Bencher; +use utils::thread::scope; + +#[bench] +fn single_flush(b: &mut Bencher) { + const THREADS: usize = 16; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + epoch::pin(); + start.wait(); + end.wait(); + }); + } + + start.wait(); + b.iter(|| epoch::pin().flush()); + end.wait(); + }); +} + +#[bench] +fn multi_flush(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + guard.flush(); + } + }); + } + }); + }); +} diff --git a/crossbeam-epoch/benches/pin.rs b/crossbeam-epoch/benches/pin.rs new file mode 100644 index 000000000..1f23e840d --- /dev/null +++ b/crossbeam-epoch/benches/pin.rs @@ -0,0 +1,31 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate test; + +use test::Bencher; +use utils::thread::scope; + +#[bench] +fn single_pin(b: &mut Bencher) { + b.iter(|| epoch::pin()); +} + +#[bench] +fn multi_pin(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 100_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + epoch::pin(); + } + }); + } + }); + }); +} diff --git 
a/crossbeam-epoch/ci/script.sh b/crossbeam-epoch/ci/script.sh new file mode 100755 index 000000000..2ab5173d4 --- /dev/null +++ b/crossbeam-epoch/ci/script.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +check_min_version() { + local rustc="`rustc -V | cut -d' ' -f2 | cut -d- -f1`" + if [[ "$rustc" != "`echo -e "$rustc\n$1" | sort -V | tail -n1`" ]]; then + echo "Unsupported Rust version: $1 < $rustc" + exit 0 + fi +} +check_min_version 1.26.0 + +set -ex + +export RUSTFLAGS="-D warnings" + +cargo build --no-default-features +cargo test + +if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then + cargo test --features nightly + + sudo apt-get install -y llvm-3.8 llvm-3.8-dev clang-3.8 clang-3.8-dev + + ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \ + RUSTFLAGS="-Z sanitizer=address" \ + cargo run \ + --release \ + --target x86_64-unknown-linux-gnu \ + --features sanitize,nightly \ + --example sanitize +fi diff --git a/crossbeam-epoch/examples/sanitize.rs b/crossbeam-epoch/examples/sanitize.rs new file mode 100644 index 000000000..d0afa8a88 --- /dev/null +++ b/crossbeam-epoch/examples/sanitize.rs @@ -0,0 +1,70 @@ +extern crate crossbeam_epoch as epoch; +extern crate rand; + +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; +use std::time::{Duration, Instant}; +use std::thread; + +use epoch::{Atomic, Collector, LocalHandle, Owned, Shared}; +use rand::Rng; + +fn worker(a: Arc>, handle: LocalHandle) -> usize { + let mut rng = rand::thread_rng(); + let mut sum = 0; + + if rng.gen() { + thread::sleep(Duration::from_millis(1)); + } + let timeout = Duration::from_millis(rng.gen_range(0, 10)); + let now = Instant::now(); + + while now.elapsed() < timeout { + for _ in 0..100 { + let guard = &handle.pin(); + guard.flush(); + + let val = if rng.gen() { + let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard); + unsafe { + guard.defer_destroy(p); + guard.flush(); + p.deref().load(Relaxed) + } + } else { + let p = a.load(Acquire, guard); + unsafe { + p.deref().fetch_add(sum, Relaxed) + } + }; + + sum = sum.wrapping_add(val); + } + } + + sum +} + +fn main() { + for _ in 0..100 { + let collector = Collector::new(); + let a = Arc::new(Atomic::new(AtomicUsize::new(777))); + + let threads = (0..16) + .map(|_| { + let a = a.clone(); + let c = collector.clone(); + thread::spawn(move || worker(a, c.register())) + }) + .collect::>(); + + for t in threads { + t.join().unwrap(); + } + + unsafe { + a.swap(Shared::null(), AcqRel, epoch::unprotected()).into_owned(); + } + } +} diff --git a/crossbeam-epoch/src/atomic.rs b/crossbeam-epoch/src/atomic.rs new file mode 100644 index 000000000..eb971ce5e --- /dev/null +++ b/crossbeam-epoch/src/atomic.rs @@ -0,0 +1,1124 @@ +use core::borrow::{Borrow, BorrowMut}; +use core::cmp; +use core::fmt; +use core::marker::PhantomData; +use core::mem; +use core::ptr; +use core::ops::{Deref, DerefMut}; +use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; +use core::sync::atomic::Ordering; +use alloc::boxed::Box; + +use guard::Guard; +use crossbeam_utils::AtomicConsume; + +/// Given ordering for the success case in a compare-exchange operation, returns the strongest +/// appropriate ordering for the failure case. +#[inline] +fn strongest_failure_ordering(ord: Ordering) -> Ordering { + use self::Ordering::*; + match ord { + Relaxed | Release => Relaxed, + Acquire | AcqRel => Acquire, + _ => SeqCst, + } +} + +/// The error returned on failed compare-and-set operation. 
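+///
+/// On failure, the caller gets back both the value observed in the atomic and the value it
+/// tried to store, so neither is lost (a sketch using only this module's API):
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+/// use std::sync::atomic::Ordering::SeqCst;
+///
+/// let a = Atomic::new(1);
+/// let guard = &epoch::pin();
+///
+/// let curr = a.load(SeqCst, guard);
+/// // Expecting a differently tagged pointer makes this compare-and-set fail.
+/// if let Err(e) = a.compare_and_set(curr.with_tag(1), Owned::new(2), SeqCst, guard) {
+///     assert_eq!(e.current, curr); // what is actually stored
+///     let _rejected = e.new;       // the `Owned` we failed to install
+/// }
+/// ```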
+pub struct CompareAndSetError<'g, T: 'g, P: Pointer<T>> {
+    /// The value in the atomic pointer at the time of the failed operation.
+    pub current: Shared<'g, T>,
+
+    /// The new value, which the operation failed to store.
+    pub new: P,
+}
+
+impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("CompareAndSetError")
+            .field("current", &self.current)
+            .field("new", &self.new)
+            .finish()
+    }
+}
+
+/// Memory orderings for compare-and-set operations.
+///
+/// A compare-and-set operation can have different memory orderings depending on whether it
+/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
+///
+/// The two ways of specifying orderings for compare-and-set are:
+///
+/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
+///    ordering is chosen.
+/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
+///    for the failure case.
+pub trait CompareAndSetOrdering {
+    /// The ordering of the operation when it succeeds.
+    fn success(&self) -> Ordering;
+
+    /// The ordering of the operation when it fails.
+    ///
+    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
+    /// the success ordering.
+    fn failure(&self) -> Ordering;
+}
+
+impl CompareAndSetOrdering for Ordering {
+    #[inline]
+    fn success(&self) -> Ordering {
+        *self
+    }
+
+    #[inline]
+    fn failure(&self) -> Ordering {
+        strongest_failure_ordering(*self)
+    }
+}
+
+impl CompareAndSetOrdering for (Ordering, Ordering) {
+    #[inline]
+    fn success(&self) -> Ordering {
+        self.0
+    }
+
+    #[inline]
+    fn failure(&self) -> Ordering {
+        self.1
+    }
+}
+
+/// Panics if the pointer is not properly aligned.
+#[inline]
+fn ensure_aligned<T>(raw: *const T) {
+    assert_eq!(raw as usize & low_bits::<T>(), 0, "unaligned pointer");
+}
+
+/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
+#[inline]
+fn low_bits<T>() -> usize {
+    (1 << mem::align_of::<T>().trailing_zeros()) - 1
+}
+
+/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
+///
+/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
+#[inline]
+fn data_with_tag<T>(data: usize, tag: usize) -> usize {
+    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
+}
+
+/// Decomposes a tagged pointer `data` into the pointer and the tag.
+#[inline]
+fn decompose_data<T>(data: usize) -> (*mut T, usize) {
+    let raw = (data & !low_bits::<T>()) as *mut T;
+    let tag = data & low_bits::<T>();
+    (raw, tag)
+}
+
+/// An atomic pointer that can be safely shared between threads.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address. More precisely, a tag should be less than `(1 <<
+/// mem::align_of::<T>().trailing_zeros())`.
+///
+/// Any method that loads the pointer must be passed a reference to a [`Guard`].
+///
+/// [`Guard`]: struct.Guard.html
+pub struct Atomic<T> {
+    data: AtomicUsize,
+    _marker: PhantomData<*mut T>,
+}
+
+unsafe impl<T: Send + Sync> Send for Atomic<T> {}
+unsafe impl<T: Send + Sync> Sync for Atomic<T> {}
+
+impl<T> Atomic<T> {
+    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
+    fn from_usize(data: usize) -> Self {
+        Self {
+            data: AtomicUsize::new(data),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns a new null atomic pointer.
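+    ///
+    /// Unlike `Atomic::new`, this does not allocate; the internal representation simply starts
+    /// out as zero.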
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::::null(); + /// ``` + #[cfg(not(feature = "nightly"))] + pub fn null() -> Atomic { + Self { + data: ATOMIC_USIZE_INIT, + _marker: PhantomData, + } + } + + /// Returns a new null atomic pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::::null(); + /// ``` + #[cfg(feature = "nightly")] + pub const fn null() -> Atomic { + Self { + data: ATOMIC_USIZE_INIT, + _marker: PhantomData, + } + } + + /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::new(1234); + /// ``` + pub fn new(value: T) -> Atomic { + Self::from(Owned::new(value)) + } + + /// Loads a `Shared` from the atomic pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// ``` + pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.load(ord)) } + } + + /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. + /// + /// This is similar to the "acquire" ordering, except that an ordering is + /// only guaranteed with operations that "depend on" the result of the load. + /// However consume loads are usually much faster than acquire loads on + /// architectures with a weak memory model since they don't require memory + /// fence instructions. + /// + /// The exact definition of "depend on" is a bit vague, but it works as you + /// would expect in practice since a lot of software, especially the Linux + /// kernel, rely on this behavior. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load_consume(guard); + /// ``` + pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.load_consume()) } + } + + /// Stores a `Shared` or `Owned` pointer into the atomic pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// a.store(Shared::null(), SeqCst); + /// a.store(Owned::new(1234), SeqCst); + /// ``` + pub fn store<'g, P: Pointer>(&self, new: P, ord: Ordering) { + self.data.store(new.into_usize(), ord); + } + + /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous + /// `Shared`. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. 
+ /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.swap(Shared::null(), SeqCst, guard); + /// ``` + pub fn swap<'g, P: Pointer>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) } + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// The return value is a result indicating whether the new pointer was written. On success the + /// pointer that was written is returned. On failure the actual current value and `new` are + /// returned. + /// + /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory + /// ordering of this operation. + /// + /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// + /// let guard = &epoch::pin(); + /// let mut curr = a.load(SeqCst, guard); + /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); + /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); + /// ``` + pub fn compare_and_set<'g, O, P>( + &self, + current: Shared, + new: P, + ord: O, + _: &'g Guard, + ) -> Result, CompareAndSetError<'g, T, P>> + where + O: CompareAndSetOrdering, + P: Pointer, + { + let new = new.into_usize(); + self.data + .compare_exchange(current.into_usize(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_usize(new) }) + .map_err(|current| unsafe { + CompareAndSetError { + current: Shared::from_usize(current), + new: P::from_usize(new), + } + }) + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison + /// succeeds, which can result in more efficient code on some platforms. The return value is a + /// result indicating whether the new pointer was written. On success the pointer that was + /// written is returned. On failure the actual current value and `new` are returned. + /// + /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory + /// ordering of this operation. 
+ /// + /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set + /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// + /// let mut new = Owned::new(5678); + /// let mut ptr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { + /// Ok(p) => { + /// ptr = p; + /// break; + /// } + /// Err(err) => { + /// ptr = err.current; + /// new = err.new; + /// } + /// } + /// } + /// + /// let mut curr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { + /// Ok(_) => break, + /// Err(err) => curr = err.current, + /// } + /// } + /// ``` + pub fn compare_and_set_weak<'g, O, P>( + &self, + current: Shared, + new: P, + ord: O, + _: &'g Guard, + ) -> Result, CompareAndSetError<'g, T, P>> + where + O: CompareAndSetOrdering, + P: Pointer, + { + let new = new.into_usize(); + self.data + .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_usize(new) }) + .map_err(|current| unsafe { + CompareAndSetError { + current: Shared::from_usize(current), + new: P::from_usize(new), + } + }) + } + + /// Bitwise "and" with the current tag. + /// + /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Shared::null().with_tag(3)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); + /// assert_eq!(a.load(SeqCst, guard).tag(), 2); + /// ``` + pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::(), ord)) } + } + + /// Bitwise "or" with the current tag. + /// + /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Shared::null().with_tag(1)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); + /// assert_eq!(a.load(SeqCst, guard).tag(), 3); + /// ``` + pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::(), ord)) } + } + + /// Bitwise "xor" with the current tag. + /// + /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. 
+ /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Shared::null().with_tag(1)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); + /// assert_eq!(a.load(SeqCst, guard).tag(), 2); + /// ``` + pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::(), ord)) } + } +} + +impl fmt::Debug for Atomic { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let data = self.data.load(Ordering::SeqCst); + let (raw, tag) = decompose_data::(data); + + f.debug_struct("Atomic") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl fmt::Pointer for Atomic { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let data = self.data.load(Ordering::SeqCst); + let (raw, _) = decompose_data::(data); + fmt::Pointer::fmt(&raw, f) + } +} + +impl Clone for Atomic { + /// Returns a copy of the atomic value. + /// + /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other + /// atomics or fences. + fn clone(&self) -> Self { + let data = self.data.load(Ordering::Relaxed); + Atomic::from_usize(data) + } +} + +impl Default for Atomic { + fn default() -> Self { + Atomic::null() + } +} + +impl From> for Atomic { + /// Returns a new atomic pointer pointing to `owned`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Owned}; + /// + /// let a = Atomic::::from(Owned::new(1234)); + /// ``` + fn from(owned: Owned) -> Self { + let data = owned.data; + mem::forget(owned); + Self::from_usize(data) + } +} + +impl From> for Atomic { + fn from(b: Box) -> Self { + Self::from(Owned::from(b)) + } +} + +impl From for Atomic { + fn from(t: T) -> Self { + Self::new(t) + } +} + +impl<'g, T> From> for Atomic { + /// Returns a new atomic pointer pointing to `ptr`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Shared}; + /// + /// let a = Atomic::::from(Shared::::null()); + /// ``` + fn from(ptr: Shared<'g, T>) -> Self { + Self::from_usize(ptr.data) + } +} + +impl From<*const T> for Atomic { + /// Returns a new atomic pointer pointing to `raw`. + /// + /// # Examples + /// + /// ``` + /// use std::ptr; + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::::from(ptr::null::()); + /// ``` + fn from(raw: *const T) -> Self { + Self::from_usize(raw as usize) + } +} + +/// A trait for either `Owned` or `Shared` pointers. +pub trait Pointer { + /// Returns the machine representation of the pointer. + fn into_usize(self) -> usize; + + /// Returns a new pointer pointing to the tagged pointer `data`. + unsafe fn from_usize(data: usize) -> Self; +} + +/// An owned heap-allocated object. +/// +/// This type is very similar to `Box`. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. +pub struct Owned { + data: usize, + _marker: PhantomData>, +} + +impl Pointer for Owned { + #[inline] + fn into_usize(self) -> usize { + let data = self.data; + mem::forget(self); + data + } + + /// Returns a new pointer pointing to the tagged pointer `data`. 
+ /// + /// # Panics + /// + /// Panics if the data is zero in debug mode. + #[inline] + unsafe fn from_usize(data: usize) -> Self { + debug_assert!(data != 0, "converting zero into `Owned`"); + Owned { + data: data, + _marker: PhantomData, + } + } +} + +impl Owned { + /// Allocates `value` on the heap and returns a new owned pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(1234); + /// ``` + pub fn new(value: T) -> Owned { + Self::from(Box::new(value)) + } + + /// Returns a new owned pointer pointing to `raw`. + /// + /// This function is unsafe because improper use may lead to memory problems. Argument `raw` + /// must be a valid pointer. Also, a double-free may occur if the function is called twice on + /// the same raw pointer. + /// + /// # Panics + /// + /// Panics if `raw` is not properly aligned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; + /// ``` + pub unsafe fn from_raw(raw: *mut T) -> Owned { + ensure_aligned(raw); + Self::from_usize(raw as usize) + } + + /// Converts the owned pointer into a [`Shared`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Owned}; + /// + /// let o = Owned::new(1234); + /// let guard = &epoch::pin(); + /// let p = o.into_shared(guard); + /// ``` + /// + /// [`Shared`]: struct.Shared.html + pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.into_usize()) } + } + + /// Converts the owned pointer into a `Box`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Owned}; + /// + /// let o = Owned::new(1234); + /// let b: Box = o.into_box(); + /// assert_eq!(*b, 1234); + /// ``` + pub fn into_box(self) -> Box { + let (raw, _) = decompose_data::(self.data); + mem::forget(self); + unsafe { Box::from_raw(raw) } + } + + /// Returns the tag stored within the pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// assert_eq!(Owned::new(1234).tag(), 0); + /// ``` + pub fn tag(&self) -> usize { + let (_, tag) = decompose_data::(self.data); + tag + } + + /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the + /// unused bits of the pointer to `T`. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(0u64); + /// assert_eq!(o.tag(), 0); + /// let o = o.with_tag(2); + /// assert_eq!(o.tag(), 2); + /// ``` + pub fn with_tag(self, tag: usize) -> Owned { + let data = self.into_usize(); + unsafe { Self::from_usize(data_with_tag::(data, tag)) } + } +} + +impl Drop for Owned { + fn drop(&mut self) { + let (raw, _) = decompose_data::(self.data); + unsafe { + drop(Box::from_raw(raw)); + } + } +} + +impl fmt::Debug for Owned { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (raw, tag) = decompose_data::(self.data); + + f.debug_struct("Owned") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl Clone for Owned { + fn clone(&self) -> Self { + Owned::new((**self).clone()).with_tag(self.tag()) + } +} + +impl Deref for Owned { + type Target = T; + + fn deref(&self) -> &T { + let (raw, _) = decompose_data::(self.data); + unsafe { &*raw } + } +} + +impl DerefMut for Owned { + fn deref_mut(&mut self) -> &mut T { + let (raw, _) = decompose_data::(self.data); + unsafe { &mut *raw } + } +} + +impl From for Owned { + fn from(t: T) -> Self { + Owned::new(t) + } +} + +impl From> for Owned { + /// Returns a new owned pointer pointing to `b`. + /// + /// # Panics + /// + /// Panics if the pointer (the `Box`) is not properly aligned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; + /// ``` + fn from(b: Box) -> Self { + unsafe { Self::from_raw(Box::into_raw(b)) } + } +} + +impl Borrow for Owned { + fn borrow(&self) -> &T { + &**self + } +} + +impl BorrowMut for Owned { + fn borrow_mut(&mut self) -> &mut T { + &mut **self + } +} + +impl AsRef for Owned { + fn as_ref(&self) -> &T { + &**self + } +} + +impl AsMut for Owned { + fn as_mut(&mut self) -> &mut T { + &mut **self + } +} + +/// A pointer to an object protected by the epoch GC. +/// +/// The pointer is valid for use only during the lifetime `'g`. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. +pub struct Shared<'g, T: 'g> { + data: usize, + _marker: PhantomData<(&'g (), *const T)>, +} + +impl<'g, T> Clone for Shared<'g, T> { + fn clone(&self) -> Self { + Shared { + data: self.data, + _marker: PhantomData, + } + } +} + +impl<'g, T> Copy for Shared<'g, T> {} + +impl<'g, T> Pointer for Shared<'g, T> { + #[inline] + fn into_usize(self) -> usize { + self.data + } + + #[inline] + unsafe fn from_usize(data: usize) -> Self { + Shared { + data: data, + _marker: PhantomData, + } + } +} + +impl<'g, T> Shared<'g, T> { + /// Returns a new null pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Shared; + /// + /// let p = Shared::::null(); + /// assert!(p.is_null()); + /// ``` + pub fn null() -> Shared<'g, T> { + Shared { + data: 0, + _marker: PhantomData, + } + } + + /// Returns `true` if the pointer is null. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::null(); + /// let guard = &epoch::pin(); + /// assert!(a.load(SeqCst, guard).is_null()); + /// a.store(Owned::new(1234), SeqCst); + /// assert!(!a.load(SeqCst, guard).is_null()); + /// ``` + pub fn is_null(&self) -> bool { + self.as_raw().is_null() + } + + /// Converts the pointer to a raw pointer (without the tag). 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let o = Owned::new(1234); + /// let raw = &*o as *const _; + /// let a = Atomic::from(o); + /// + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// assert_eq!(p.as_raw(), raw); + /// ``` + pub fn as_raw(&self) -> *const T { + let (raw, _) = decompose_data::(self.data); + raw + } + + /// Dereferences the pointer. + /// + /// Returns a reference to the pointee that is valid during the lifetime `'g`. + /// + /// # Safety + /// + /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. + /// + /// Another concern is the possiblity of data races due to lack of proper synchronization. + /// For example, consider the following scenario: + /// + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` + /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` + /// + /// The problem is that relaxed orderings don't synchronize initialization of the object with + /// the read from the second thread. This is a data race. A possible solution would be to use + /// `Release` and `Acquire` orderings. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.deref(), &1234); + /// } + /// ``` + pub unsafe fn deref(&self) -> &'g T { + &*self.as_raw() + } + + /// Converts the pointer to a reference. + /// + /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. + /// + /// # Safety + /// + /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. + /// + /// Another concern is the possiblity of data races due to lack of proper synchronization. + /// For example, consider the following scenario: + /// + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` + /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` + /// + /// The problem is that relaxed orderings don't synchronize initialization of the object with + /// the read from the second thread. This is a data race. A possible solution would be to use + /// `Release` and `Acquire` orderings. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.as_ref(), Some(&1234)); + /// } + /// ``` + pub unsafe fn as_ref(&self) -> Option<&'g T> { + self.as_raw().as_ref() + } + + /// Takes ownership of the pointee. + /// + /// # Panics + /// + /// Panics if this pointer is null, but only in debug mode. + /// + /// # Safety + /// + /// This method may be called only if the pointer is valid and nobody else is holding a + /// reference to the same object. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// unsafe { + /// let guard = &epoch::unprotected(); + /// let p = a.load(SeqCst, guard); + /// drop(p.into_owned()); + /// } + /// ``` + pub unsafe fn into_owned(self) -> Owned { + debug_assert!( + self.as_raw() != ptr::null(), + "converting a null `Shared` into `Owned`" + ); + Owned::from_usize(self.data) + } + + /// Returns the tag stored within the pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Owned::new(0u64).with_tag(2)); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// assert_eq!(p.tag(), 2); + /// ``` + pub fn tag(&self) -> usize { + let (_, tag) = decompose_data::(self.data); + tag + } + + /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the + /// unused bits of the pointer to `T`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(0u64); + /// let guard = &epoch::pin(); + /// let p1 = a.load(SeqCst, guard); + /// let p2 = p1.with_tag(2); + /// + /// assert_eq!(p1.tag(), 0); + /// assert_eq!(p2.tag(), 2); + /// assert_eq!(p1.as_raw(), p2.as_raw()); + /// ``` + pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { + unsafe { Self::from_usize(data_with_tag::(self.data, tag)) } + } +} + +impl<'g, T> From<*const T> for Shared<'g, T> { + /// Returns a new pointer pointing to `raw`. + /// + /// # Panics + /// + /// Panics if `raw` is not properly aligned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Shared; + /// + /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) }; + /// assert!(!p.is_null()); + /// ``` + fn from(raw: *const T) -> Self { + ensure_aligned(raw); + unsafe { Self::from_usize(raw as usize) } + } +} + +impl<'g, T> PartialEq> for Shared<'g, T> { + fn eq(&self, other: &Self) -> bool { + self.data == other.data + } +} + +impl<'g, T> Eq for Shared<'g, T> {} + +impl<'g, T> PartialOrd> for Shared<'g, T> { + fn partial_cmp(&self, other: &Self) -> Option { + self.data.partial_cmp(&other.data) + } +} + +impl<'g, T> Ord for Shared<'g, T> { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.data.cmp(&other.data) + } +} + +impl<'g, T> fmt::Debug for Shared<'g, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (raw, tag) = decompose_data::(self.data); + + f.debug_struct("Shared") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl<'g, T> fmt::Pointer for Shared<'g, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.as_raw(), f) + } +} + +impl<'g, T> Default for Shared<'g, T> { + fn default() -> Self { + Shared::null() + } +} + +#[cfg(test)] +mod tests { + use super::Shared; + + #[test] + fn valid_tag_i8() { + Shared::::null().with_tag(0); + } + + #[test] + fn valid_tag_i64() { + Shared::::null().with_tag(7); + } +} diff --git a/crossbeam-epoch/src/collector.rs b/crossbeam-epoch/src/collector.rs new file mode 100644 index 000000000..dceacbff3 --- /dev/null +++ b/crossbeam-epoch/src/collector.rs @@ -0,0 +1,428 @@ +/// Epoch-based garbage collector. 
+/// +/// # Examples +/// +/// ``` +/// use crossbeam_epoch::Collector; +/// +/// let collector = Collector::new(); +/// +/// let handle = collector.register(); +/// drop(collector); // `handle` still works after dropping `collector` +/// +/// handle.pin().flush(); +/// ``` + +use alloc::sync::Arc; +use core::fmt; + +use internal::{Global, Local}; +use guard::Guard; + +/// An epoch-based garbage collector. +pub struct Collector { + pub(crate) global: Arc, +} + +unsafe impl Send for Collector {} +unsafe impl Sync for Collector {} + +impl Collector { + /// Creates a new collector. + pub fn new() -> Self { + Collector { global: Arc::new(Global::new()) } + } + + /// Registers a new handle for the collector. + pub fn register(&self) -> LocalHandle { + Local::register(self) + } +} + +impl Clone for Collector { + /// Creates another reference to the same garbage collector. + fn clone(&self) -> Self { + Collector { global: self.global.clone() } + } +} + +impl fmt::Debug for Collector { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Collector").finish() + } +} + +impl PartialEq for Collector { + /// Checks if both handles point to the same collector. + fn eq(&self, rhs: &Collector) -> bool { + Arc::ptr_eq(&self.global, &rhs.global) + } +} +impl Eq for Collector {} + +/// A handle to a garbage collector. +pub struct LocalHandle { + pub(crate) local: *const Local, +} + +impl LocalHandle { + /// Pins the handle. + #[inline] + pub fn pin(&self) -> Guard { + unsafe { (*self.local).pin() } + } + + /// Returns `true` if the handle is pinned. + #[inline] + pub fn is_pinned(&self) -> bool { + unsafe { (*self.local).is_pinned() } + } + + /// Returns the `Collector` associated with this handle. + #[inline] + pub fn collector(&self) -> &Collector { + unsafe { (*self.local).collector() } + } +} + +impl Drop for LocalHandle { + #[inline] + fn drop(&mut self) { + unsafe { + Local::release_handle(&*self.local); + } + } +} + +impl fmt::Debug for LocalHandle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LocalHandle").finish() + } +} + +#[cfg(test)] +mod tests { + use std::mem; + use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + use std::sync::atomic::Ordering; + + use crossbeam_utils::thread; + + use {Collector, Owned}; + + const NUM_THREADS: usize = 8; + + #[test] + fn pin_reentrant() { + let collector = Collector::new(); + let handle = collector.register(); + drop(collector); + + assert!(!handle.is_pinned()); + { + let _guard = &handle.pin(); + assert!(handle.is_pinned()); + { + let _guard = &handle.pin(); + assert!(handle.is_pinned()); + } + assert!(handle.is_pinned()); + } + assert!(!handle.is_pinned()); + } + + #[test] + fn flush_local_bag() { + let collector = Collector::new(); + let handle = collector.register(); + drop(collector); + + for _ in 0..100 { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(7).into_shared(guard); + guard.defer_destroy(a); + + assert!(!(*(*guard.local).bag.get()).is_empty()); + + while !(*(*guard.local).bag.get()).is_empty() { + guard.flush(); + } + } + } + } + + #[test] + fn garbage_buffering() { + let collector = Collector::new(); + let handle = collector.register(); + drop(collector); + + let guard = &handle.pin(); + unsafe { + for _ in 0..10 { + let a = Owned::new(7).into_shared(guard); + guard.defer_destroy(a); + } + assert!(!(*(*guard.local).bag.get()).is_empty()); + } + } + + #[test] + fn pin_holds_advance() { + let collector = Collector::new(); + + thread::scope(|scope| { + for _ in 
0..NUM_THREADS { + scope.spawn(|| { + let handle = collector.register(); + for _ in 0..500_000 { + let guard = &handle.pin(); + + let before = collector.global.epoch.load(Ordering::Relaxed); + collector.global.collect(guard); + let after = collector.global.epoch.load(Ordering::Relaxed); + + assert!(after.wrapping_sub(before) <= 2); + } + }); + } + }) + } + + #[test] + fn incremental() { + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer_unchecked(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + guard.flush(); + } + + let mut last = 0; + + while last < COUNT { + let curr = DESTROYS.load(Ordering::Relaxed); + assert!(curr - last <= 1024); + last = curr; + + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert!(DESTROYS.load(Ordering::Relaxed) == 100_000); + } + + #[test] + fn buffering() { + const COUNT: usize = 10; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer_unchecked(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + } + + for _ in 0..100_000 { + collector.global.collect(&handle.pin()); + } + assert!(DESTROYS.load(Ordering::Relaxed) < COUNT); + + handle.pin().flush(); + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn count_drops() { + const COUNT: usize = 100_000; + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + + for _ in 0..COUNT { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer_destroy(a); + } + guard.flush(); + } + + while DROPS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn count_destroy() { + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer_unchecked(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + guard.flush(); + } + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn drop_array() { + const COUNT: usize = 700; + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + let handle = collector.register(); + + let mut guard = handle.pin(); + + let mut v = Vec::with_capacity(COUNT); + for i in 0..COUNT { + v.push(Elem(i as i32)); + } + + { + let a = 
Owned::new(v).into_shared(&guard); + unsafe { guard.defer_destroy(a); } + guard.flush(); + } + + while DROPS.load(Ordering::Relaxed) < COUNT { + guard.repin(); + collector.global.collect(&guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn destroy_array() { + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + + let mut v = Vec::with_capacity(COUNT); + for i in 0..COUNT { + v.push(i as i32); + } + + let ptr = v.as_mut_ptr() as usize; + let len = v.len(); + guard.defer_unchecked(move || { + drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len)); + DESTROYS.fetch_add(len, Ordering::Relaxed); + }); + guard.flush(); + + mem::forget(v); + } + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn stress() { + const THREADS: usize = 8; + const COUNT: usize = 100_000; + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + let handle = collector.register(); + for _ in 0..COUNT { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer_destroy(a); + } + } + }); + } + }); + + let handle = collector.register(); + while DROPS.load(Ordering::Relaxed) < COUNT * THREADS { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS); + } +} diff --git a/crossbeam-epoch/src/default.rs b/crossbeam-epoch/src/default.rs new file mode 100644 index 000000000..6ce053d59 --- /dev/null +++ b/crossbeam-epoch/src/default.rs @@ -0,0 +1,73 @@ +//! The default garbage collector. +//! +//! For each thread, a participant is lazily initialized on its first use, when the current thread +//! is registered in the default collector. If initialized, the thread's participant will get +//! destructed on thread exit, which in turn unregisters the thread. + +use collector::{Collector, LocalHandle}; +use guard::Guard; + +lazy_static! { + /// The global data for the default garbage collector. + static ref COLLECTOR: Collector = Collector::new(); +} + +thread_local! { + /// The per-thread participant for the default garbage collector. + static HANDLE: LocalHandle = COLLECTOR.register(); +} + +/// Pins the current thread. +#[inline] +pub fn pin() -> Guard { + with_handle(|handle| handle.pin()) +} + +/// Returns `true` if the current thread is pinned. +#[inline] +pub fn is_pinned() -> bool { + with_handle(|handle| handle.is_pinned()) +} + +/// Returns the default global collector. +pub fn default_collector() -> &'static Collector { + &COLLECTOR +} + +#[inline] +fn with_handle(mut f: F) -> R +where + F: FnMut(&LocalHandle) -> R, +{ + HANDLE.try_with(|h| f(h)).unwrap_or_else(|_| f(&COLLECTOR.register())) +} + +#[cfg(test)] +mod tests { + use crossbeam_utils::thread; + + #[test] + fn pin_while_exiting() { + struct Foo; + + impl Drop for Foo { + fn drop(&mut self) { + // Pin after `HANDLE` has been dropped. This must not panic. + super::pin(); + } + } + + thread_local! 
diff --git a/crossbeam-epoch/src/default.rs b/crossbeam-epoch/src/default.rs
new file mode 100644
index 000000000..6ce053d59
--- /dev/null
+++ b/crossbeam-epoch/src/default.rs
@@ -0,0 +1,73 @@
+//! The default garbage collector.
+//!
+//! For each thread, a participant is lazily initialized on its first use, when the current thread
+//! is registered in the default collector. If initialized, the thread's participant will get
+//! destructed on thread exit, which in turn unregisters the thread.
+
+use collector::{Collector, LocalHandle};
+use guard::Guard;
+
+lazy_static! {
+    /// The global data for the default garbage collector.
+    static ref COLLECTOR: Collector = Collector::new();
+}
+
+thread_local! {
+    /// The per-thread participant for the default garbage collector.
+    static HANDLE: LocalHandle = COLLECTOR.register();
+}
+
+/// Pins the current thread.
+#[inline]
+pub fn pin() -> Guard {
+    with_handle(|handle| handle.pin())
+}
+
+/// Returns `true` if the current thread is pinned.
+#[inline]
+pub fn is_pinned() -> bool {
+    with_handle(|handle| handle.is_pinned())
+}
+
+/// Returns the default global collector.
+pub fn default_collector() -> &'static Collector {
+    &COLLECTOR
+}
+
+#[inline]
+fn with_handle<F, R>(mut f: F) -> R
+where
+    F: FnMut(&LocalHandle) -> R,
+{
+    HANDLE.try_with(|h| f(h)).unwrap_or_else(|_| f(&COLLECTOR.register()))
+}
+
+#[cfg(test)]
+mod tests {
+    use crossbeam_utils::thread;
+
+    #[test]
+    fn pin_while_exiting() {
+        struct Foo;
+
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                // Pin after `HANDLE` has been dropped. This must not panic.
+                super::pin();
+            }
+        }
+
+        thread_local! {
+            static FOO: Foo = Foo;
+        }
+
+        thread::scope(|scope| {
+            scope.spawn(|| {
+                // Initialize `FOO` and then `HANDLE`.
+                FOO.with(|_| ());
+                super::pin();
+                // At thread exit, `HANDLE` gets dropped first and `FOO` second.
+            });
+        });
+    }
+}
diff --git a/crossbeam-epoch/src/deferred.rs b/crossbeam-epoch/src/deferred.rs
new file mode 100644
index 000000000..3063f9829
--- /dev/null
+++ b/crossbeam-epoch/src/deferred.rs
@@ -0,0 +1,134 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr;
+use alloc::boxed::Box;
+
+/// Number of words a piece of `Data` can hold.
+///
+/// Three words should be enough for the majority of cases. For example, you can fit inside it the
+/// function pointer together with a fat pointer representing an object that needs to be destroyed.
+const DATA_WORDS: usize = 3;
+
+/// Some space to keep a `FnOnce()` object on the stack.
+type Data = [usize; DATA_WORDS];
+
+/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap.
+///
+/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure.
+pub struct Deferred {
+    call: unsafe fn(*mut u8),
+    data: Data,
+    _marker: PhantomData<*mut ()>, // !Send + !Sync
+}
+
+impl fmt::Debug for Deferred {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "Deferred {{ ... }}")
+    }
+}
+
+impl Deferred {
+    /// Constructs a new `Deferred` from a `FnOnce()`.
+    pub fn new<F: FnOnce()>(f: F) -> Self {
+        let size = mem::size_of::<F>();
+        let align = mem::align_of::<F>();
+
+        unsafe {
+            if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() {
+                let mut data: Data = mem::uninitialized();
+                ptr::write(&mut data as *mut Data as *mut F, f);
+
+                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
+                    let f: F = ptr::read(raw as *mut F);
+                    f();
+                }
+
+                Deferred {
+                    call: call::<F>,
+                    data,
+                    _marker: PhantomData,
+                }
+            } else {
+                let b: Box<F> = Box::new(f);
+                let mut data: Data = mem::uninitialized();
+                ptr::write(&mut data as *mut Data as *mut Box<F>, b);
+
+                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
+                    let b: Box<F> = ptr::read(raw as *mut Box<F>);
+                    (*b)();
+                }
+
+                Deferred {
+                    call: call::<F>,
+                    data,
+                    _marker: PhantomData,
+                }
+            }
+        }
+    }
+
+    /// Calls the function.
+    #[inline]
+    pub fn call(mut self) {
+        let call = self.call;
+        unsafe { call(&mut self.data as *mut Data as *mut u8) };
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::cell::Cell;
+    use super::Deferred;
+
+    #[test]
+    fn on_stack() {
+        let fired = &Cell::new(false);
+        let a = [0usize; 1];
+
+        let d = Deferred::new(move || {
+            drop(a);
+            fired.set(true);
+        });
+
+        assert!(!fired.get());
+        d.call();
+        assert!(fired.get());
+    }
+
+    #[test]
+    fn on_heap() {
+        let fired = &Cell::new(false);
+        let a = [0usize; 10];
+
+        let d = Deferred::new(move || {
+            drop(a);
+            fired.set(true);
+        });
+
+        assert!(!fired.get());
+        d.call();
+        assert!(fired.get());
+    }
+
+    #[test]
+    fn string() {
+        let a = "hello".to_string();
+        let d = Deferred::new(move || assert_eq!(a, "hello"));
+        d.call();
+    }
+
+    #[test]
+    fn boxed_slice_i32() {
+        let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
+        let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7]));
+        d.call();
+    }
+
+    #[test]
+    fn long_slice_usize() {
+        let a: [usize; 5] = [2, 3, 5, 7, 11];
+        let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11]));
+        d.call();
+    }
+}
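The size/alignment check in `Deferred::new` is the crux of the file: closures whose captures fit in three words run inline, while larger ones cost a heap allocation. A freestanding sketch of that decision rule (not part of the patch):

```rust
use std::mem;

type Data = [usize; 3];

// Mirrors the branch taken inside `Deferred::new`.
fn fits_inline<F>() -> bool {
    mem::size_of::<F>() <= mem::size_of::<Data>()
        && mem::align_of::<F>() <= mem::align_of::<Data>()
}

fn check<F: FnOnce()>(_f: &F) -> bool {
    fits_inline::<F>()
}

fn main() {
    let small = [0usize; 1];
    let large = [0usize; 10];

    // A closure's size is roughly the sum of its captures.
    let c1 = move || drop(small);
    let c2 = move || drop(large);

    assert!(check(&c1));  // one word of captures: stored inline
    assert!(!check(&c2)); // ten words: would be boxed on the heap
}
```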
diff --git a/crossbeam-epoch/src/epoch.rs b/crossbeam-epoch/src/epoch.rs
new file mode 100644
index 000000000..51076bbaa
--- /dev/null
+++ b/crossbeam-epoch/src/epoch.rs
@@ -0,0 +1,106 @@
+//! The global epoch
+//!
+//! The global epoch is an integer counter whose last bit is unused and always zero. Every so
+//! often the global epoch is incremented, i.e. we say it "advances". A pinned participant may
+//! advance the global epoch only if all currently pinned participants have been pinned in the
+//! current epoch.
+//!
+//! If an object became garbage in some epoch, then we can be sure that after two advancements no
+//! participant will hold a reference to it. That is the crux of safe memory reclamation.
+
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+/// An epoch that can be marked as pinned or unpinned.
+///
+/// Internally, the epoch is represented as an integer that wraps around at some unspecified point
+/// and a flag that represents whether it is pinned or unpinned.
+#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
+pub struct Epoch {
+    /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
+    data: usize,
+}
+
+impl Epoch {
+    /// Returns the starting epoch in unpinned state.
+    #[inline]
+    pub fn starting() -> Self {
+        Self::default()
+    }
+
+    /// Returns the number of epochs `self` is ahead of `rhs`.
+    ///
+    /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX
+    /// / 2)`, so the returned distance will be in the same interval.
+    pub fn wrapping_sub(self, rhs: Self) -> isize {
+        // The result is the same as that of `(self.data & !1).wrapping_sub(rhs.data & !1) as
+        // isize >> 1`: leaving the LSB of `self.data` unmasked can only perturb the lowest bit of
+        // the difference, and the shift discards that bit anyway.
+        self.data.wrapping_sub(rhs.data & !1) as isize >> 1
+    }
+
+    /// Returns `true` if the epoch is marked as pinned.
+    #[inline]
+    pub fn is_pinned(self) -> bool {
+        (self.data & 1) == 1
+    }
+
+    /// Returns the same epoch, but marked as pinned.
+    #[inline]
+    pub fn pinned(self) -> Epoch {
+        Epoch { data: self.data | 1 }
+    }
+
+    /// Returns the same epoch, but marked as unpinned.
+    #[inline]
+    pub fn unpinned(self) -> Epoch {
+        Epoch { data: self.data & !1 }
+    }
+
+    /// Returns the successor epoch.
+    ///
+    /// The returned epoch will be marked as pinned only if the previous one was as well.
+    #[inline]
+    pub fn successor(self) -> Epoch {
+        Epoch { data: self.data.wrapping_add(2) }
+    }
+}
+
+/// An atomic value that holds an `Epoch`.
+#[derive(Default, Debug)]
+pub struct AtomicEpoch {
+    /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
+    /// using an `AtomicUsize`.
+    data: AtomicUsize,
+}
+
+impl AtomicEpoch {
+    /// Creates a new atomic epoch.
+    #[inline]
+    pub fn new(epoch: Epoch) -> Self {
+        let data = AtomicUsize::new(epoch.data);
+        AtomicEpoch { data }
+    }
+
+    /// Loads a value from the atomic epoch.
+    #[inline]
+    pub fn load(&self, ord: Ordering) -> Epoch {
+        Epoch { data: self.data.load(ord) }
+    }
+
+    /// Stores a value into the atomic epoch.
+    #[inline]
+    pub fn store(&self, epoch: Epoch, ord: Ordering) {
+        self.data.store(epoch.data, ord);
+    }
+
+    /// Stores a value into the atomic epoch if the current value is the same as `current`.
+    ///
+    /// The return value is always the previous value. If it is equal to `current`, then the value
+    /// is updated.
+    ///
+    /// The `Ordering` argument describes the memory ordering of this operation.
+    #[inline]
+    pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch {
+        let data = self.data.compare_and_swap(current.data, new.data, ord);
+        Epoch { data }
+    }
+}
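To make the representation concrete, here is the same bit arithmetic spelled out on plain `usize` values, a standalone sketch rather than part of the patch:

```rust
fn main() {
    let start: usize = 0;            // starting epoch, unpinned
    let pinned = start | 1;          // same epoch, marked pinned
    assert_eq!(pinned & 1, 1);       // is_pinned()
    assert_eq!(pinned & !1, start);  // unpinned() masks the flag off

    // successor() adds 2, leaving the flag bit untouched.
    let later = start.wrapping_add(2).wrapping_add(2); // two advancements

    // wrapping_sub(): compare only the counter bits, as a signed distance.
    let distance = (later.wrapping_sub(start & !1) as isize) >> 1;
    assert_eq!(distance, 2); // garbage sealed at `start` is now reclaimable
}
```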
diff --git a/crossbeam-epoch/src/guard.rs b/crossbeam-epoch/src/guard.rs
new file mode 100644
index 000000000..94ab5ea43
--- /dev/null
+++ b/crossbeam-epoch/src/guard.rs
@@ -0,0 +1,547 @@
+use core::fmt;
+use core::ptr;
+use core::mem;
+
+use atomic::Shared;
+use collector::Collector;
+use deferred::Deferred;
+use internal::Local;
+
+/// A guard that keeps the current thread pinned.
+///
+/// # Pinning
+///
+/// The current thread is pinned by calling [`pin`], which returns a new guard:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
+/// // This is not really necessary, but makes passing references to the guard a bit easier.
+/// let guard = &epoch::pin();
+/// ```
+///
+/// When a guard gets dropped, the current thread is automatically unpinned.
+///
+/// # Pointers on the stack
+///
+/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
+/// For example:
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+/// use std::sync::atomic::Ordering::SeqCst;
+///
+/// // Create a heap-allocated number.
+/// let a = Atomic::new(777);
+///
+/// // Pin the current thread.
+/// let guard = &epoch::pin();
+///
+/// // Load the heap-allocated object and create pointer `p` on the stack.
+/// let p = a.load(SeqCst, guard);
+///
+/// // Dereference the pointer and print the value:
+/// if let Some(num) = unsafe { p.as_ref() } {
+///     println!("The number is {}.", num);
+/// }
+/// ```
+///
+/// # Multiple guards
+///
+/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
+/// thread will actually be pinned only when the first guard is created and unpinned when the last
+/// one is dropped:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let guard1 = epoch::pin();
+/// let guard2 = epoch::pin();
+/// assert!(epoch::is_pinned());
+/// drop(guard1);
+/// assert!(epoch::is_pinned());
+/// drop(guard2);
+/// assert!(!epoch::is_pinned());
+/// ```
+///
+/// The same can be achieved by cloning guards:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let guard1 = epoch::pin();
+/// let guard2 = guard1.clone();
+/// ```
+///
+/// [`pin`]: fn.pin.html
+pub struct Guard {
+    pub(crate) local: *const Local,
+}
+
+impl Guard {
+    /// Stores a function so that it can be executed at some point after all currently pinned
+    /// threads get unpinned.
+    ///
+    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
+    /// becomes full, some functions are moved into the global cache. At the same time, some
+    /// functions from both local and global caches may get executed in order to incrementally
+    /// clean up the caches as they fill up.
+    ///
+    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
+    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
+    /// never run, but the epoch-based garbage collection will make an effort to execute it
+    /// reasonably soon.
+    ///
+    /// If this method is called from an [`unprotected`] guard, the function will simply be
+    /// executed immediately.
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn defer<F, R>(&self, f: F)
+    where
+        F: FnOnce() -> R,
+        F: Send + 'static,
+    {
+        unsafe {
+            self.defer_unchecked(f);
+        }
+    }
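A small usage sketch for `defer` with the default collector (not part of the patch; `pin` and `flush` are the APIs shown elsewhere in this file):

```rust
extern crate crossbeam_epoch;

use crossbeam_epoch as epoch;

fn main() {
    let guard = &epoch::pin();
    // The `Send + 'static` bound means the closure may only capture owned data.
    let message = String::from("deferred");
    guard.defer(move || println!("{}", message)); // runs after a grace period
    guard.flush(); // encourage timely execution
}
```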
+
+    /// Stores a function so that it can be executed at some point after all currently pinned
+    /// threads get unpinned.
+    ///
+    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
+    /// becomes full, some functions are moved into the global cache. At the same time, some
+    /// functions from both local and global caches may get executed in order to incrementally
+    /// clean up the caches as they fill up.
+    ///
+    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
+    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
+    /// never run, but the epoch-based garbage collection will make an effort to execute it
+    /// reasonably soon.
+    ///
+    /// If this method is called from an [`unprotected`] guard, the function will simply be
+    /// executed immediately.
+    ///
+    /// # Safety
+    ///
+    /// The given function must not hold any references onto the stack. It is highly recommended
+    /// that the passed function is **always** marked with `move` in order to prevent accidental
+    /// borrows.
+    ///
+    /// ```
+    /// use crossbeam_epoch as epoch;
+    ///
+    /// let guard = &epoch::pin();
+    /// let message = "Hello!";
+    /// unsafe {
+    ///     // ALWAYS use `move` when sending a closure into `defer_unchecked`.
+    ///     guard.defer_unchecked(move || {
+    ///         println!("{}", message);
+    ///     });
+    /// }
+    /// ```
+    ///
+    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
+    /// the closure must be `Send`.
+    ///
+    /// We intentionally didn't require `F: Send`, because Rust's type system usually cannot prove
+    /// `F: Send` for typical use cases. For example, consider the following code snippet, which
+    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
+    ///
+    /// ```ignore
+    /// let shared = Owned::new(7i32).into_shared(guard);
+    /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
+    /// ```
+    ///
+    /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
+    /// because it's called only after the grace period and `shared` is no longer shared with other
+    /// threads. But we don't expect the type system to prove this.
+    ///
+    /// # Examples
+    ///
+    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
+    /// deallocated. However, the current thread and other threads may be still holding references
+    /// on the stack to that same object. Therefore it cannot be deallocated before those references
+    /// get dropped. This method can defer deallocation until all those threads get unpinned and
+    /// consequently drop all their references on the stack.
+    ///
+    /// ```rust
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new("foo");
+    ///
+    /// // Now suppose that `a` is shared among multiple threads and concurrently
+    /// // accessed and modified...
+    ///
+    /// // Pin the current thread.
+    /// let guard = &epoch::pin();
+    ///
+    /// // Steal the object currently stored in `a` and swap it with another one.
+    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
+    ///
+    /// if !p.is_null() {
+    ///     // The object `p` is pointing to is now unreachable.
+    ///     // Defer its deallocation until all currently pinned threads get unpinned.
+    ///     unsafe {
+    ///         // ALWAYS use `move` when sending a closure into `defer_unchecked`.
+    ///         guard.defer_unchecked(move || {
+    ///             println!("{} is now being deallocated.", p.deref());
+    ///             // Now we have unique access to the object pointed to by `p` and can turn it
+    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
+    ///             drop(p.into_owned());
+    ///         });
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub unsafe fn defer_unchecked<F, R>(&self, f: F)
+    where
+        F: FnOnce() -> R,
+    {
+        if let Some(local) = self.local.as_ref() {
+            local.defer(Deferred::new(move || drop(f())), self);
+        }
+    }
+
+    /// Stores a destructor for an object so that it can be deallocated and dropped at some point
+    /// after all currently pinned threads get unpinned.
+    ///
+    /// This method first stores the destructor into the thread-local (or handle-local) cache. If
+    /// this cache becomes full, some destructors are moved into the global cache. At the same
+    /// time, some destructors from both local and global caches may get executed in order to
+    /// incrementally clean up the caches as they fill up.
+    ///
+    /// There is no guarantee when exactly the destructor will be executed. The only guarantee is
+    /// that it won't be executed until all currently pinned threads get unpinned. In theory, the
+    /// destructor might never run, but the epoch-based garbage collection will make an effort to
+    /// execute it reasonably soon.
+    ///
+    /// If this method is called from an [`unprotected`] guard, the destructor will simply be
+    /// executed immediately.
+    ///
+    /// # Safety
+    ///
+    /// The object must not be reachable by other threads anymore, otherwise it might be still in
+    /// use when the destructor runs.
+    ///
+    /// Apart from that, keep in mind that another thread may execute the destructor, so the object
+    /// must be sendable to other threads.
+    ///
+    /// We intentionally didn't require `T: Send`, because Rust's type system usually cannot prove
+    /// `T: Send` for typical use cases. For example, consider the following code snippet, which
+    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
+    ///
+    /// ```ignore
+    /// let shared = Owned::new(7i32).into_shared(guard);
+    /// guard.defer_destroy(shared); // `Shared` is not `Send`!
+    /// ```
+    ///
+    /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
+    /// it's called only after the grace period and `shared` is no longer shared with other
+    /// threads. But we don't expect the type system to prove this.
+    ///
+    /// # Examples
+    ///
+    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
+    /// deallocated. However, the current thread and other threads may be still holding references
+    /// on the stack to that same object. Therefore it cannot be deallocated before those references
+    /// get dropped. This method can defer deallocation until all those threads get unpinned and
+    /// consequently drop all their references on the stack.
+    ///
+    /// ```rust
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new("foo");
+    ///
+    /// // Now suppose that `a` is shared among multiple threads and concurrently
+    /// // accessed and modified...
+    ///
+    /// // Pin the current thread.
+    /// let guard = &epoch::pin();
+    ///
+    /// // Steal the object currently stored in `a` and swap it with another one.
+    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
+    ///
+    /// if !p.is_null() {
+    ///     // The object `p` is pointing to is now unreachable.
+    ///     // Defer its deallocation until all currently pinned threads get unpinned.
+    ///     unsafe {
+    ///         guard.defer_destroy(p);
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub unsafe fn defer_destroy<T>(&self, ptr: Shared<T>) {
+        self.defer_unchecked(move || ptr.into_owned());
+    }
+
+    /// Clears up the thread-local cache of deferred functions by executing them or moving into the
+    /// global cache.
+    ///
+    /// Call this method after deferring execution of a function if you want to get it executed as
+    /// soon as possible. Flushing will make sure it is residing in the global cache, so that
+    /// any thread has a chance of taking the function and executing it.
+    ///
+    /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch as epoch;
+    ///
+    /// let guard = &epoch::pin();
+    /// guard.defer(move || {
+    ///     println!("This better be printed as soon as possible!");
+    /// });
+    /// guard.flush();
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn flush(&self) {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            local.flush(self);
+        }
+    }
+
+    /// Unpins and then immediately re-pins the thread.
+    ///
+    /// This method is useful when you don't want to delay the advancement of the global epoch by
+    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
+    /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
+    /// is the only active guard for the current thread.
+    ///
+    /// If this method is called from an [`unprotected`] guard, then the call is just a no-op.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// let a = Atomic::new(777);
+    /// let mut guard = epoch::pin();
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// guard.repin();
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn repin(&mut self) {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            local.repin();
+        }
+    }
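A sketch of the access pattern `repin` is designed for (not part of the patch; the batch type and the 128-iteration cadence are illustrative only):

```rust
extern crate crossbeam_epoch;

use crossbeam_epoch as epoch;

fn process_batches(batches: &[Vec<u64>]) {
    let mut guard = epoch::pin();
    for (i, batch) in batches.iter().enumerate() {
        // ... load shared data with `&guard` and process `batch` ...
        let _ = batch;
        if i % 128 == 0 {
            // Dropping and re-taking the pin would also work; `repin` does it
            // in place and invalidates any `Shared` loaded earlier.
            guard.repin();
        }
    }
}

fn main() {
    process_batches(&[vec![1, 2, 3], vec![4, 5]]);
}
```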
+
+    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
+    ///
+    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
+    /// and don't need to maintain any guard-based reference across the call (the latter is enforced
+    /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
+    /// current thread.
+    ///
+    /// If this method is called from an [`unprotected`] guard, then the passed function is called
+    /// directly without unpinning the thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// let a = Atomic::new(777);
+    /// let mut guard = epoch::pin();
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn repin_after<F, R>(&mut self, f: F) -> R
+    where
+        F: FnOnce() -> R,
+    {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            // We need to acquire a handle here to ensure the Local doesn't
+            // disappear from under us.
+            local.acquire_handle();
+            local.unpin();
+        }
+
+        // Ensure the Guard is re-pinned even if the function panics
+        defer! {
+            if let Some(local) = unsafe { self.local.as_ref() } {
+                mem::forget(local.pin());
+                local.release_handle();
+            }
+        }
+
+        f()
+    }
+
+    /// Returns the `Collector` associated with this guard.
+    ///
+    /// This method is useful when you need to ensure that all guards used with
+    /// a data structure come from the same collector.
+    ///
+    /// If this method is called from an [`unprotected`] guard, then `None` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch as epoch;
+    ///
+    /// let mut guard1 = epoch::pin();
+    /// let mut guard2 = epoch::pin();
+    /// assert!(guard1.collector() == guard2.collector());
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn collector(&self) -> Option<&Collector> {
+        unsafe { self.local.as_ref().map(|local| local.collector()) }
+    }
+}
+
+impl Drop for Guard {
+    #[inline]
+    fn drop(&mut self) {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            local.unpin();
+        }
+    }
+}
+
+impl Clone for Guard {
+    #[inline]
+    fn clone(&self) -> Guard {
+        match unsafe { self.local.as_ref() } {
+            None => Guard { local: ptr::null() },
+            Some(local) => local.pin(),
+        }
+    }
+}
+
+impl fmt::Debug for Guard {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Guard").finish()
+    }
+}
+
+/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
+///
+/// This guard should be used in special occasions only. Note that it doesn't actually keep any
+/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
+///
+/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
+/// execute the function immediately.
+///
+/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
+///
+/// # Safety
+///
+/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
+/// [`Atomic`] is not being concurrently modified by other threads.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic};
+/// use std::sync::atomic::Ordering::Relaxed;
+///
+/// let a = Atomic::new(7);
+///
+/// unsafe {
+///     // Load `a` without pinning the current thread.
+///     a.load(Relaxed, epoch::unprotected());
+///
+///     // It's possible to create more dummy guards by calling `clone()`.
+///     let dummy = &epoch::unprotected().clone();
+///
+///     dummy.defer(move || {
+///         println!("This gets executed immediately.");
+///     });
+///
+///     // Dropping `dummy` doesn't affect the current thread - it's just a noop.
+/// }
+/// ```
+///
+/// The most common use of this function is when constructing or destructing a data structure.
+///
+/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
+/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
+///
+/// If we were to actually pin the current thread during destruction, that would just unnecessarily
+/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
+/// is very helpful.
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic};
+/// use std::mem::ManuallyDrop;
+/// use std::sync::atomic::Ordering::Relaxed;
+///
+/// struct Stack<T> {
+///     head: Atomic<Node<T>>,
+/// }
+///
+/// struct Node<T> {
+///     data: ManuallyDrop<T>,
+///     next: Atomic<Node<T>>,
+/// }
+///
+/// impl<T> Drop for Stack<T> {
+///     fn drop(&mut self) {
+///         unsafe {
+///             // Unprotected load.
+///             let mut node = self.head.load(Relaxed, epoch::unprotected());
+///
+///             while let Some(n) = node.as_ref() {
+///                 // Unprotected load.
+///                 let next = n.next.load(Relaxed, epoch::unprotected());
+///
+///                 // Take ownership of the node, then drop its data and deallocate it.
+///                 let mut o = node.into_owned();
+///                 ManuallyDrop::drop(&mut o.data);
+///                 drop(o);
+///
+///                 node = next;
+///             }
+///         }
+///     }
+/// }
+/// ```
+///
+/// [`Atomic`]: struct.Atomic.html
+/// [`defer`]: struct.Guard.html#method.defer
+#[inline]
+pub unsafe fn unprotected() -> &'static Guard {
+    // HACK(stjepang): An unprotected guard is just a `Guard` with its field `local` set to null.
+    // Since this function returns a `'static` reference to a `Guard`, we must return a reference
+    // to a global guard. However, it's not possible to create a `static` `Guard` because it does
+    // not implement `Sync`. To get around the problem, we create a static `usize` initialized to
+    // zero and then transmute it into a `Guard`. This is safe because `usize` and `Guard`
+    // (consisting of a single pointer) have the same representation in memory.
+    static UNPROTECTED: usize = 0;
+    &*(&UNPROTECTED as *const _ as *const Guard)
+}
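The destructor above is only half of a Treiber stack. For contrast, here is a sketch of the pinned path, a `push` written against the same `Stack`/`Node` layout (not part of the patch; it leaks remaining nodes unless paired with the `Drop` impl above):

```rust
extern crate crossbeam_epoch;

use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::mem::ManuallyDrop;
use std::sync::atomic::Ordering::{Acquire, Release};

struct Stack<T> {
    head: Atomic<Node<T>>,
}

struct Node<T> {
    data: ManuallyDrop<T>,
    next: Atomic<Node<T>>,
}

impl<T> Stack<T> {
    fn push(&self, t: T) {
        let guard = &epoch::pin();
        let mut node = Owned::new(Node {
            data: ManuallyDrop::new(t),
            next: Atomic::null(),
        });

        loop {
            let head = self.head.load(Acquire, guard);
            node.next.store(head, Release);
            match self.head.compare_and_set(head, node, Release, guard) {
                Ok(_) => return,
                // On failure we get the `Owned` node back and retry.
                Err(e) => node = e.new,
            }
        }
    }
}

fn main() {
    let s = Stack { head: Atomic::null() };
    s.push(1);
    s.push(2);
}
```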
diff --git a/crossbeam-epoch/src/internal.rs b/crossbeam-epoch/src/internal.rs
new file mode 100644
index 000000000..3ff26c274
--- /dev/null
+++ b/crossbeam-epoch/src/internal.rs
@@ -0,0 +1,543 @@
+//! The global data and participant for garbage collection.
+//!
+//! # Registration
+//!
+//! In order to track all participants in one place, we need some form of participant
+//! registration. When a participant is created, it is registered to a global lock-free
+//! singly-linked list of registries; and when a participant is leaving, it is unregistered from the
+//! list.
+//!
+//! # Pinning
+//!
+//! Every participant contains an integer that tells whether the participant is pinned and if so,
+//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that
+//! aids in periodic global epoch advancement.
+//!
+//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned.
+//! Guards are necessary for performing atomic operations, and for freeing/dropping locations.
+//!
+//! # Thread-local bag
+//!
+//! Objects that get unlinked from concurrent data structures must be stashed away until the global
+//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects
+//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current
+//! global epoch and pushed into the global queue of bags. We store objects in thread-local storage
+//! to amortize the synchronization cost of pushing garbage to the global queue.
+//!
+//! # Global queue
+//!
+//! Whenever a bag is pushed into the queue, the objects in some bags in the queue are collected and
+//! destroyed along the way. This design reduces contention on data structures. The global queue
+//! cannot be explicitly accessed: the only way to interact with it is by calling `defer()`, which
+//! adds an object to the thread-local bag, or `collect()`, which manually triggers garbage
+//! collection.
+//!
+//! Ideally, each instance of a concurrent data structure would have its own queue that gets fully
+//! destroyed as soon as the data structure gets dropped.
+
+use core::cell::{Cell, UnsafeCell};
+use core::mem::{self, ManuallyDrop};
+use core::num::Wrapping;
+use core::ptr;
+use core::sync::atomic;
+use core::sync::atomic::Ordering;
+use alloc::boxed::Box;
+
+use crossbeam_utils::CachePadded;
+use arrayvec::ArrayVec;
+
+use atomic::Owned;
+use collector::{LocalHandle, Collector};
+use epoch::{AtomicEpoch, Epoch};
+use guard::{unprotected, Guard};
+use deferred::Deferred;
+use sync::list::{List, Entry, IterError, IsElement};
+use sync::queue::Queue;
+
+/// Maximum number of objects a bag can contain.
+#[cfg(not(feature = "sanitize"))]
+const MAX_OBJECTS: usize = 64;
+#[cfg(feature = "sanitize")]
+const MAX_OBJECTS: usize = 4;
+
+/// A bag of deferred functions.
+#[derive(Default, Debug)]
+pub struct Bag {
+    /// Stashed objects.
+    deferreds: ArrayVec<[Deferred; MAX_OBJECTS]>,
+}
+
+/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions.
+unsafe impl Send for Bag {}
+
+impl Bag {
+    /// Returns a new, empty bag.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Returns `true` if the bag is empty.
+    pub fn is_empty(&self) -> bool {
+        self.deferreds.is_empty()
+    }
+
+    /// Attempts to insert a deferred function into the bag.
+    ///
+    /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is
+    /// full.
+    ///
+    /// # Safety
+    ///
+    /// It should be safe for another thread to execute the given function.
+    pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> {
+        self.deferreds.try_push(deferred).map_err(|e| e.element())
+    }
+
+    /// Seals the bag with the given epoch.
+    fn seal(self, epoch: Epoch) -> SealedBag {
+        SealedBag { epoch, bag: self }
+    }
+}
+
+impl Drop for Bag {
+    fn drop(&mut self) {
+        // Call all deferred functions.
+        for deferred in self.deferreds.drain(..) {
+            deferred.call();
+        }
+    }
+}
+
+/// A pair of an epoch and a bag.
+#[derive(Default, Debug)]
+struct SealedBag {
+    epoch: Epoch,
+    bag: Bag,
+}
+
+/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch.
+unsafe impl Sync for SealedBag {}
+
+impl SealedBag {
+    /// Checks if it is safe to drop the bag w.r.t. the given global epoch.
+    fn is_expired(&self, global_epoch: Epoch) -> bool {
+        // A pinned participant can witness at most one epoch advancement. Therefore, any bag that
+        // is within one epoch of the current one cannot be destroyed yet.
+        global_epoch.wrapping_sub(self.epoch) >= 2
+    }
+}
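A plain-integer sketch of that two-epoch rule (not part of the patch): a participant can lag at most one epoch behind the global epoch, so a bag sealed at epoch `e` is safe only once the global epoch reaches `e + 2`.

```rust
fn is_expired(global: isize, sealed_at: isize) -> bool {
    global - sealed_at >= 2
}

fn main() {
    let sealed_at = 5;
    assert!(!is_expired(5, sealed_at)); // same epoch: still referenced, maybe
    assert!(!is_expired(6, sealed_at)); // lagging participants may be pinned in 5
    assert!(is_expired(7, sealed_at));  // nobody pinned in 5 can remain
}
```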
+
+/// The global data for a garbage collector.
+pub struct Global {
+    /// The intrusive linked list of `Local`s.
+    locals: List<Local>,
+
+    /// The global queue of bags of deferred functions.
+    queue: Queue<SealedBag>,
+
+    /// The global epoch.
+    pub(crate) epoch: CachePadded<AtomicEpoch>,
+}
+
+impl Global {
+    /// Number of bags to destroy.
+    const COLLECT_STEPS: usize = 8;
+
+    /// Creates a new global data for garbage collection.
+    #[inline]
+    pub fn new() -> Self {
+        Self {
+            locals: List::new(),
+            queue: Queue::new(),
+            epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
+        }
+    }
+
+    /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
+    pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
+        let bag = mem::replace(bag, Bag::new());
+
+        atomic::fence(Ordering::SeqCst);
+
+        let epoch = self.epoch.load(Ordering::Relaxed);
+        self.queue.push(bag.seal(epoch), guard);
+    }
+
+    /// Collects several bags from the global queue and executes deferred functions in them.
+    ///
+    /// Note: This may itself produce garbage and in turn allocate new bags.
+    ///
+    /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold
+    /// path. In other words, we want the compiler to optimize branching for the case when
+    /// `collect()` is not called.
+    #[cold]
+    pub fn collect(&self, guard: &Guard) {
+        let global_epoch = self.try_advance(guard);
+
+        let steps = if cfg!(feature = "sanitize") {
+            usize::max_value()
+        } else {
+            Self::COLLECT_STEPS
+        };
+
+        for _ in 0..steps {
+            match self.queue.try_pop_if(
+                &|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch),
+                guard,
+            ) {
+                None => break,
+                Some(sealed_bag) => drop(sealed_bag),
+            }
+        }
+    }
+
+    /// Attempts to advance the global epoch.
+    ///
+    /// The global epoch can advance only if all currently pinned participants have been pinned in
+    /// the current epoch.
+    ///
+    /// Returns the current global epoch.
+    ///
+    /// `try_advance()` is annotated `#[cold]` because it is rarely called.
+    #[cold]
+    pub fn try_advance(&self, guard: &Guard) -> Epoch {
+        let global_epoch = self.epoch.load(Ordering::Relaxed);
+        atomic::fence(Ordering::SeqCst);
+
+        // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly
+        // easy to implement in a lock-free manner. However, traversal can be slow due to cache
+        // misses and data dependencies. We should experiment with other data structures as well.
+        for local in self.locals.iter(&guard) {
+            match local {
+                Err(IterError::Stalled) => {
+                    // A concurrent thread stalled this iteration. That thread might also try to
+                    // advance the epoch, in which case we leave the job to it. Otherwise, the
+                    // epoch will not be advanced.
+                    return global_epoch;
+                }
+                Ok(local) => {
+                    let local_epoch = local.epoch.load(Ordering::Relaxed);
+
+                    // If the participant was pinned in a different epoch, we cannot advance the
+                    // global epoch just yet.
+                    if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch {
+                        return global_epoch;
+                    }
+                }
+            }
+        }
+        atomic::fence(Ordering::Acquire);
+
+        // All pinned participants were pinned in the current global epoch.
+        // Now let's advance the global epoch...
+        //
+        // Note that if another thread already advanced it before us, this store will simply
+        // overwrite the global epoch with the same value. This is true because `try_advance` was
+        // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be
+        // advanced two steps ahead of it.
+        let new_epoch = global_epoch.successor();
+        self.epoch.store(new_epoch, Ordering::Release);
+        new_epoch
+    }
+}
+
+/// Participant for garbage collection.
+pub struct Local {
+    /// A node in the intrusive linked list of `Local`s.
+    entry: Entry,
+
+    /// The local epoch.
+    epoch: AtomicEpoch,
+
+    /// A reference to the global data.
+    ///
+    /// When all guards and handles get dropped, this reference is destroyed.
+    collector: UnsafeCell<ManuallyDrop<Collector>>,
+
+    /// The local bag of deferred functions.
+    pub(crate) bag: UnsafeCell<Bag>,
+
+    /// The number of guards keeping this participant pinned.
+    guard_count: Cell<usize>,
+
+    /// The number of active handles.
+    handle_count: Cell<usize>,
+
+    /// Total number of pinnings performed.
+    ///
+    /// This is just an auxiliary counter that sometimes kicks off collection.
+    pin_count: Cell<Wrapping<usize>>,
+}
+
+impl Local {
+    /// Number of pinnings after which a participant will execute some deferred functions from the
+    /// global queue.
+    const PINNINGS_BETWEEN_COLLECT: usize = 128;
+
+    /// Registers a new `Local` in the provided `Global`.
+    pub fn register(collector: &Collector) -> LocalHandle {
+        unsafe {
+            // Since we dereference no pointers in this block, it is safe to use `unprotected`.
+
+            let local = Owned::new(Local {
+                entry: Entry::default(),
+                epoch: AtomicEpoch::new(Epoch::starting()),
+                collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())),
+                bag: UnsafeCell::new(Bag::new()),
+                guard_count: Cell::new(0),
+                handle_count: Cell::new(1),
+                pin_count: Cell::new(Wrapping(0)),
+            }).into_shared(&unprotected());
+            collector.global.locals.insert(local, &unprotected());
+            LocalHandle { local: local.as_raw() }
+        }
+    }
+
+    /// Returns a reference to the `Global` in which this `Local` resides.
+    #[inline]
+    pub fn global(&self) -> &Global {
+        &self.collector().global
+    }
+
+    /// Returns a reference to the `Collector` in which this `Local` resides.
+    #[inline]
+    pub fn collector(&self) -> &Collector {
+        unsafe { &**self.collector.get() }
+    }
+
+    /// Returns `true` if the current participant is pinned.
+    #[inline]
+    pub fn is_pinned(&self) -> bool {
+        self.guard_count.get() > 0
+    }
+
+    /// Adds `deferred` to the thread-local bag.
+    ///
+    /// # Safety
+    ///
+    /// It should be safe for another thread to execute the given function.
+    pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
+        let bag = &mut *self.bag.get();
+
+        while let Err(d) = bag.try_push(deferred) {
+            self.global().push_bag(bag, guard);
+            deferred = d;
+        }
+    }
+
+    pub fn flush(&self, guard: &Guard) {
+        let bag = unsafe { &mut *self.bag.get() };
+
+        if !bag.is_empty() {
+            self.global().push_bag(bag, guard);
+        }
+
+        self.global().collect(guard);
+    }
+
+    /// Pins the `Local`.
+    #[inline]
+    pub fn pin(&self) -> Guard {
+        let guard = Guard { local: self };
+
+        let guard_count = self.guard_count.get();
+        self.guard_count.set(guard_count.checked_add(1).unwrap());
+
+        if guard_count == 0 {
+            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
+            let new_epoch = global_epoch.pinned();
+
+            // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
+            // The fence makes sure that any future loads from `Atomic`s will not happen before
+            // this store.
+            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+                // HACK(stjepang): On x86 architectures there are two different ways of executing
+                // a `SeqCst` fence.
+                //
+                // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
+                // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
+                //    instruction.
+                //
+                // Both instructions have the effect of a full barrier, but benchmarks have shown
+                // that the second one makes pinning faster in this particular case.
+                let current = Epoch::starting();
+                let previous = self.epoch.compare_and_swap(current, new_epoch, Ordering::SeqCst);
+                debug_assert_eq!(current, previous, "participant was expected to be unpinned");
+            } else {
+                self.epoch.store(new_epoch, Ordering::Relaxed);
+                atomic::fence(Ordering::SeqCst);
+            }
+
+            // Increment the pin counter.
+            let count = self.pin_count.get();
+            self.pin_count.set(count + Wrapping(1));
+
+            // After every `PINNINGS_BETWEEN_COLLECT` pinnings, try advancing the epoch and
+            // collecting some garbage.
+            if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 {
+                self.global().collect(&guard);
+            }
+        }
+
+        guard
+    }
+
+    /// Unpins the `Local`.
+    #[inline]
+    pub fn unpin(&self) {
+        let guard_count = self.guard_count.get();
+        self.guard_count.set(guard_count - 1);
+
+        if guard_count == 1 {
+            self.epoch.store(Epoch::starting(), Ordering::Release);
+
+            if self.handle_count.get() == 0 {
+                self.finalize();
+            }
+        }
+    }
+
+    /// Unpins and then pins the `Local`.
+    #[inline]
+    pub fn repin(&self) {
+        let guard_count = self.guard_count.get();
+
+        // Update the local epoch only if there's only one guard.
+        if guard_count == 1 {
+            let epoch = self.epoch.load(Ordering::Relaxed);
+            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
+
+            // Update the local epoch only if the global epoch is greater than the local epoch.
+            if epoch != global_epoch {
+                // We store the new epoch with `Release` because we need to ensure any memory
+                // accesses from the previous epoch do not leak into the new one.
+                self.epoch.store(global_epoch, Ordering::Release);
+
+                // However, we don't need a following `SeqCst` fence, because it is safe for memory
+                // accesses from the new epoch to be executed before updating the local epoch. At
+                // worst, other threads will see the new epoch late and delay GC slightly.
+            }
+        }
+    }
+
+    /// Increments the handle count.
+    #[inline]
+    pub fn acquire_handle(&self) {
+        let handle_count = self.handle_count.get();
+        debug_assert!(handle_count >= 1);
+        self.handle_count.set(handle_count + 1);
+    }
+
+    /// Decrements the handle count.
+    #[inline]
+    pub fn release_handle(&self) {
+        let guard_count = self.guard_count.get();
+        let handle_count = self.handle_count.get();
+        debug_assert!(handle_count >= 1);
+        self.handle_count.set(handle_count - 1);
+
+        if guard_count == 0 && handle_count == 1 {
+            self.finalize();
+        }
+    }
+
+    /// Removes the `Local` from the global linked list.
+    #[cold]
+    fn finalize(&self) {
+        debug_assert_eq!(self.guard_count.get(), 0);
+        debug_assert_eq!(self.handle_count.get(), 0);
+
+        // Temporarily increment handle count. This is required so that the following call to `pin`
+        // doesn't call `finalize` again.
+        self.handle_count.set(1);
+        unsafe {
+            // Pin and move the local bag into the global queue. It's important that `push_bag`
+            // doesn't defer destruction on any new garbage.
+            let guard = &self.pin();
+            self.global().push_bag(&mut *self.bag.get(), guard);
+        }
+        // Revert the handle count back to zero.
+        self.handle_count.set(0);
+
+        unsafe {
+            // Take the reference to the `Global` out of this `Local`. Since we're not protected
+            // by a guard at this time, it's crucial that the reference is read before marking the
+            // `Local` as deleted.
+            let collector: Collector = ptr::read(&*(*self.collector.get()));
+
+            // Mark this node in the linked list as deleted.
+            self.entry.delete(&unprotected());
+
+            // Finally, drop the reference to the global. Note that this might be the last reference
+            // to the `Global`. If so, the global data will be destroyed and all deferred functions
+            // in its queue will be executed.
+            drop(collector);
+        }
+    }
+}
+
+impl IsElement<Local> for Local {
+    fn entry_of(local: &Local) -> &Entry {
+        let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry;
+        unsafe { &*entry_ptr }
+    }
+
+    unsafe fn element_of(entry: &Entry) -> &Local {
+        // offset_of! macro uses unsafe, but it's unnecessary in this context.
+        #[allow(unused_unsafe)]
+        let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local;
+        &*local_ptr
+    }
+
+    unsafe fn finalize(entry: &Entry) {
+        let local = Self::element_of(entry);
+        drop(Box::from_raw(local as *const Local as *mut Local));
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+    use std::sync::atomic::Ordering;
+
+    use super::*;
+
+    #[test]
+    fn check_defer() {
+        static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
+        fn set() {
+            FLAG.store(42, Ordering::Relaxed);
+        }
+
+        let d = Deferred::new(set);
+        assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+        d.call();
+        assert_eq!(FLAG.load(Ordering::Relaxed), 42);
+    }
+
+    #[test]
+    fn check_bag() {
+        static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
+        fn incr() {
+            FLAG.fetch_add(1, Ordering::Relaxed);
+        }
+
+        let mut bag = Bag::new();
+        assert!(bag.is_empty());
+
+        for _ in 0..MAX_OBJECTS {
+            assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() });
+            assert!(!bag.is_empty());
+            assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+        }
+
+        let result = unsafe { bag.try_push(Deferred::new(incr)) };
+        assert!(result.is_err());
+        assert!(!bag.is_empty());
+        assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+
+        drop(bag);
+        assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS);
+    }
+}
diff --git a/crossbeam-epoch/src/lib.rs b/crossbeam-epoch/src/lib.rs
new file mode 100644
index 000000000..a83f58377
--- /dev/null
+++ b/crossbeam-epoch/src/lib.rs
@@ -0,0 +1,99 @@
+//! Epoch-based memory reclamation.
+//!
+//! An interesting problem concurrent collections deal with comes from the remove operation.
+//! Suppose that a thread removes an element from a lock-free map, while another thread is reading
+//! that same element at the same time. The first thread must wait until the second thread stops
+//! reading the element. Only then is it safe to destruct it.
+//!
+//! Programming languages that come with garbage collectors solve this problem trivially. The
+//! garbage collector will destruct the removed element when no thread can hold a reference to it
+//! anymore.
+//!
+//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
+//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
+//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
+//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
+//! can be referencing it anymore.
+//!
+//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
+//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
+//! users of concurrent collections don't have to worry much about.
+//!
+//! # Pointers
+//!
+//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
+//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
+//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
+//! read.
+//!
+//! # Pinning
+//!
+//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
+//! we declare that any object that gets removed from now on must not be destructed just
+//! yet. Garbage collection of newly removed objects is suspended until the participant gets
+//! unpinned.
+//!
+//! # Garbage
+//!
+//! Objects that get removed from concurrent collections must be stashed away until all currently
+//! pinned participants get unpinned. Such objects can be stored into a thread-local or global
+//! storage, where they are kept until the right time for their destruction comes.
+//!
+//! There is a global shared instance of the garbage queue. You can [`defer`] the execution of an
+//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data
+//! structures may defer the deallocation of an object.
+//!
+//! # APIs
+//!
+//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
+//! you want to create your own garbage collector, use the [`Collector`] API.
+//!
+//! [`Atomic`]: struct.Atomic.html
+//! [`Collector`]: struct.Collector.html
+//! [`Shared`]: struct.Shared.html
+//! [`pin`]: fn.pin.html
+//! [`defer`]: fn.defer.html
+
+#![cfg_attr(feature = "nightly", feature(const_fn))]
+#![cfg_attr(feature = "nightly", feature(alloc))]
+#![cfg_attr(not(test), no_std)]
+
+#![warn(missing_docs, missing_debug_implementations)]
+
+#[cfg(test)]
+extern crate core;
+#[cfg(all(not(test), feature = "use_std"))]
+#[macro_use]
+extern crate std;
+
+// Use liballoc on nightly to avoid a dependency on libstd
+#[cfg(feature = "nightly")]
+extern crate alloc;
+#[cfg(not(feature = "nightly"))]
+extern crate std as alloc;
+
+extern crate arrayvec;
+extern crate crossbeam_utils;
+#[cfg(feature = "use_std")]
+#[macro_use]
+extern crate lazy_static;
+#[macro_use]
+extern crate memoffset;
+#[macro_use]
+extern crate scopeguard;
+
+mod atomic;
+mod collector;
+#[cfg(feature = "use_std")]
+mod default;
+mod deferred;
+mod epoch;
+mod guard;
+mod internal;
+mod sync;
+
+pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared, Pointer};
+pub use self::guard::{unprotected, Guard};
+#[cfg(feature = "use_std")]
+pub use self::default::{default_collector, is_pinned, pin};
+pub use self::collector::{Collector, LocalHandle};
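Pulling the pieces together, here is a sketch of the crate-level flow the docs describe, combining `Atomic`, `pin`, and `defer_destroy` (not part of the patch):

```rust
extern crate crossbeam_epoch;

use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1234);

    let guard = &epoch::pin();

    // Replace the value; the old one becomes garbage.
    let old = a.swap(Owned::new(5678), SeqCst, guard);

    unsafe {
        // Readers pinned right now may still see `old`, so defer its
        // destruction instead of freeing it immediately.
        guard.defer_destroy(old);

        // Finally drop the current value when tearing the structure down.
        drop(a.into_owned());
    }
}
```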
diff --git a/crossbeam-epoch/src/sync/list.rs b/crossbeam-epoch/src/sync/list.rs
new file mode 100644
index 000000000..46f8143c5
--- /dev/null
+++ b/crossbeam-epoch/src/sync/list.rs
@@ -0,0 +1,473 @@
+//! Lock-free intrusive linked list.
+//!
+//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA
+//! 2002. http://dl.acm.org/citation.cfm?id=564870.564881
+
+use core::marker::PhantomData;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use {Atomic, Shared, Guard, unprotected};
+
+/// An entry in a linked list.
+///
+/// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different
+/// cache-line than thread-local data in terms of performance.
+#[derive(Debug)]
+pub struct Entry {
+    /// The next entry in the linked list.
+    /// If the tag is 1, this entry is marked as deleted.
+    next: Atomic<Entry>,
+}
+
+/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive
+/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance
+/// of `Entry`.
+///
+/// # Example
+///
+/// ```ignore
+/// struct A {
+///     entry: Entry,
+///     data: usize,
+/// }
+///
+/// impl IsElement<A> for A {
+///     fn entry_of(a: &A) -> &Entry {
+///         let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry;
+///         unsafe { &*entry_ptr }
+///     }
+///
+///     unsafe fn element_of(entry: &Entry) -> &A {
+///         let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const A;
+///         &*elem_ptr
+///     }
+///
+///     unsafe fn finalize(entry: &Entry) {
+///         let elem = Self::element_of(entry);
+///         drop(Box::from_raw(elem as *const A as *mut A));
+///     }
+/// }
+/// ```
+///
+/// This trait is implemented on a type separate from `T` (although it can be just `T`), because
+/// one type might be placeable into multiple lists, in which case it would require multiple
+/// implementations of `IsElement`. In such cases, each struct implementing `IsElement<T>`
+/// represents a distinct `Entry` in `T`.
+///
+/// For example, we can insert the following struct into two lists using `entry1` for one
+/// and `entry2` for the other:
+///
+/// ```ignore
+/// struct B {
+///     entry1: Entry,
+///     entry2: Entry,
+///     data: usize,
+/// }
+/// ```
+///
+pub trait IsElement<T> {
+    /// Returns a reference to this element's `Entry`.
+    fn entry_of(&T) -> &Entry;
+
+    /// Given a reference to an element's entry, returns that element.
+    ///
+    /// ```ignore
+    /// let elem = ListElement::new();
+    /// assert_eq!(elem.entry_of(),
+    ///            unsafe { ListElement::element_of(elem.entry_of()) } );
+    /// ```
+    ///
+    /// # Safety
+    ///
+    /// The caller has to guarantee that the `Entry` it
+    /// is called with was retrieved from an instance of the element type (`T`).
+    unsafe fn element_of(&Entry) -> &T;
+
+    /// Deallocates the whole element given its `Entry`. This is called when the list
+    /// is ready to actually free the element.
+    ///
+    /// # Safety
+    ///
+    /// The caller has to guarantee that the `Entry` it
+    /// is called with was retrieved from an instance of the element type (`T`).
+    unsafe fn finalize(&Entry);
+}
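A runnable counterpart to the `ignore`d example above, showing just the `entry_of`/`element_of` round-trip with `memoffset` (not part of the patch; `Entry` here is a stand-in struct, since the real `List` is crate-private):

```rust
#[macro_use]
extern crate memoffset;

struct Entry {
    _next: usize, // stand-in for the real intrusive link
}

struct A {
    entry: Entry,
    data: usize,
}

fn entry_of(a: &A) -> &Entry {
    let entry_ptr = (a as *const A as usize + offset_of!(A, entry)) as *const Entry;
    unsafe { &*entry_ptr }
}

unsafe fn element_of(entry: &Entry) -> &A {
    let elem_ptr = (entry as *const Entry as usize - offset_of!(A, entry)) as *const A;
    &*elem_ptr
}

fn main() {
    let a = A { entry: Entry { _next: 0 }, data: 7 };
    // Round-trip: element -> entry -> element.
    let e = entry_of(&a);
    let back = unsafe { element_of(e) };
    assert_eq!(back.data, 7);
}
```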
+
+/// A lock-free, intrusive linked list of type `T`.
+#[derive(Debug)]
+pub struct List<T, C: IsElement<T> = T> {
+    /// The head of the linked list.
+    head: Atomic<Entry>,
+
+    /// The phantom data for using `T` and `C`.
+    _marker: PhantomData<(T, C)>,
+}
+
+/// An iterator used for retrieving values from the list.
+pub struct Iter<'g, T: 'g, C: IsElement<T>> {
+    /// The guard that protects the iteration.
+    guard: &'g Guard,
+
+    /// Pointer from the predecessor to the current entry.
+    pred: &'g Atomic<Entry>,
+
+    /// The current entry.
+    curr: Shared<'g, Entry>,
+
+    /// The list head, needed for restarting iteration.
+    head: &'g Atomic<Entry>,
+
+    /// Logically, we store a borrow of an instance of `T` and
+    /// use the type information from `C`.
+    _marker: PhantomData<(&'g T, C)>,
+}
+
+/// An error that occurs during iteration over the list.
+#[derive(PartialEq, Debug)]
+pub enum IterError {
+    /// A concurrent thread modified the state of the list at the same place that this iterator
+    /// was inspecting. Subsequent iteration will restart from the beginning of the list.
+    Stalled,
+}
+
+impl Default for Entry {
+    /// Returns the empty entry.
+    fn default() -> Self {
+        Self { next: Atomic::null() }
+    }
+}
+
+impl Entry {
+    /// Marks this entry as deleted, deferring the actual deallocation to a later iteration.
+    ///
+    /// # Safety
+    ///
+    /// The entry should be a member of a linked list, and it should not have been deleted.
+    /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C`
+    /// is the associated helper for the linked list.
+    pub unsafe fn delete(&self, guard: &Guard) {
+        self.next.fetch_or(1, Release, guard);
+    }
+}
+
+impl<T, C: IsElement<T>> List<T, C> {
+    /// Returns a new, empty linked list.
+    pub fn new() -> Self {
+        Self {
+            head: Atomic::null(),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Inserts `entry` into the head of the list.
+    ///
+    /// # Safety
+    ///
+    /// You should guarantee that:
+    ///
+    /// - `container` is not null
+    /// - `container` is immovable, e.g. inside a `Box`
+    /// - the same `Entry` is not inserted more than once
+    /// - the inserted object will be removed before the list is dropped
+    pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
+        // Insert right after head, i.e. at the beginning of the list.
+        let to = &self.head;
+        // Get the intrusively stored Entry of the new element to insert.
+        let entry: &Entry = C::entry_of(container.deref());
+        // Make a Shared ptr to that Entry.
+        let entry_ptr = Shared::from(entry as *const _);
+        // Read the current successor of where we want to insert.
+        let mut next = to.load(Relaxed, guard);
+
+        loop {
+            // Set the Entry of the to-be-inserted element to point to the previous successor of
+            // `to`.
+            entry.next.store(next, Relaxed);
+            match to.compare_and_set_weak(next, entry_ptr, Release, guard) {
+                Ok(_) => break,
+                // We lost the race or weak CAS failed spuriously. Update the successor and try
+                // again.
+                Err(err) => next = err.current,
+            }
+        }
+    }
+
+    /// Returns an iterator over all objects.
+    ///
+    /// # Caveat
+    ///
+    /// Every object that is inserted at the moment this function is called and persists at least
+    /// until the end of iteration will be returned. Since this iterator traverses a lock-free
+    /// linked list that may be concurrently modified, some additional caveats apply:
+    ///
+    /// 1. If a new object is inserted during iteration, it may or may not be returned.
+    /// 2. If an object is deleted during iteration, it may or may not be returned.
+    /// 3. The iteration may be aborted when it loses a race with a concurrent thread. In this
+    ///    case, the winning thread will continue to iterate over the same list.
+    pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
+        Iter {
+            guard,
+            pred: &self.head,
+            curr: self.head.load(Acquire, guard),
+            head: &self.head,
+            _marker: PhantomData,
+        }
+    }
+}
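An illustrative consumer of `iter` (not part of the patch, and written as it might appear in this module's tests, since these types are crate-private): when `IterError::Stalled` is returned, the iterator has already reset itself to the head, so a caller that needs one consistent pass simply starts over.

```rust
fn count_elements<T, C: IsElement<T>>(list: &List<T, C>, guard: &Guard) -> usize {
    'restart: loop {
        let mut count = 0;
        for item in list.iter(guard) {
            match item {
                Ok(_) => count += 1,
                // A concurrent delete raced with us; our partial count may be
                // inconsistent, so begin a fresh pass.
                Err(IterError::Stalled) => continue 'restart,
            }
        }
        return count;
    }
}
```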
+
+impl<T, C: IsElement<T>> Drop for List<T, C> {
+    fn drop(&mut self) {
+        unsafe {
+            let guard = &unprotected();
+            let mut curr = self.head.load(Relaxed, guard);
+            while let Some(c) = curr.as_ref() {
+                let succ = c.next.load(Relaxed, guard);
+                // Verify that all elements have been removed from the list.
+                assert_eq!(succ.tag(), 1);
+
+                C::finalize(curr.deref());
+                curr = succ;
+            }
+        }
+    }
+}
+
+impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
+    type Item = Result<&'g T, IterError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        while let Some(c) = unsafe { self.curr.as_ref() } {
+            let succ = c.next.load(Acquire, self.guard);
+
+            if succ.tag() == 1 {
+                // This entry was removed. Try unlinking it from the list.
+                let succ = succ.with_tag(0);
+
+                // The tag should always be zero, because removing a node after a logically deleted
+                // node leaves the list in an invalid state.
+                debug_assert!(self.curr.tag() == 0);
+
+                match self.pred.compare_and_set(
+                    self.curr,
+                    succ,
+                    Acquire,
+                    self.guard,
+                ) {
+                    Ok(_) => {
+                        // We succeeded in unlinking this element from the list, so we have to
+                        // schedule deallocation. Deferred drop is okay, because `list.delete()`
+                        // can only be called if `T: 'static`.
+                        unsafe {
+                            let p = self.curr;
+                            self.guard.defer_unchecked(move || C::finalize(p.deref()));
+                        }
+
+                        // Move over the removed entry by only advancing `curr`, not `pred`.
+                        self.curr = succ;
+                        continue;
+                    }
+                    Err(_) => {
+                        // A concurrent thread modified the predecessor node. Since it might've
+                        // been deleted, we need to restart from `head`.
+                        self.pred = self.head;
+                        self.curr = self.head.load(Acquire, self.guard);
+
+                        return Some(Err(IterError::Stalled));
+                    }
+                }
+            }
+
+            // Move one step forward.
+            self.pred = &c.next;
+            self.curr = succ;
+
+            return Some(Ok(unsafe { C::element_of(c) }));
+        }
+
+        // We reached the end of the list.
+        None
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {Collector, Owned};
+    use crossbeam_utils::thread;
+    use std::sync::Barrier;
+    use super::*;
+
+    impl IsElement<Entry> for Entry {
+        fn entry_of(entry: &Entry) -> &Entry {
+            entry
+        }
+
+        unsafe fn element_of(entry: &Entry) -> &Entry {
+            entry
+        }
+
+        unsafe fn finalize(entry: &Entry) {
+            drop(Box::from_raw(entry as *const Entry as *mut Entry));
+        }
+    }
+
+    /// Checks whether the list retains inserted elements
+    /// and returns them in the correct order.
+    #[test]
+    fn insert() {
+        let collector = Collector::new();
+        let handle = collector.register();
+        let guard = handle.pin();
+
+        let l: List<Entry> = List::new();
+
+        let e1 = Owned::new(Entry::default()).into_shared(&guard);
+        let e2 = Owned::new(Entry::default()).into_shared(&guard);
+        let e3 = Owned::new(Entry::default()).into_shared(&guard);
+
+        unsafe {
+            l.insert(e1, &guard);
+            l.insert(e2, &guard);
+            l.insert(e3, &guard);
+        }
+
+        let mut iter = l.iter(&guard);
+        let maybe_e3 = iter.next();
+        assert!(maybe_e3.is_some());
+        assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
+        let maybe_e2 = iter.next();
+        assert!(maybe_e2.is_some());
+        assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw());
+        let maybe_e1 = iter.next();
+        assert!(maybe_e1.is_some());
+        assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
+        assert!(iter.next().is_none());
+
+        unsafe {
+            e1.as_ref().unwrap().delete(&guard);
+            e2.as_ref().unwrap().delete(&guard);
+            e3.as_ref().unwrap().delete(&guard);
+        }
+    }
+
+    /// Checks whether elements can be removed from the list and whether
+    /// the correct elements are removed.
+    #[test]
+    fn delete() {
+        let collector = Collector::new();
+        let handle = collector.register();
+        let guard = handle.pin();
+
+        let l: List<Entry> = List::new();
+
+        let e1 = Owned::new(Entry::default()).into_shared(&guard);
+        let e2 = Owned::new(Entry::default()).into_shared(&guard);
+        let e3 = Owned::new(Entry::default()).into_shared(&guard);
+        unsafe {
+            l.insert(e1, &guard);
+            l.insert(e2, &guard);
+            l.insert(e3, &guard);
+            e2.as_ref().unwrap().delete(&guard);
+        }
+
+        let mut iter = l.iter(&guard);
+        let maybe_e3 = iter.next();
+        assert!(maybe_e3.is_some());
+        assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
+        let maybe_e1 = iter.next();
+        assert!(maybe_e1.is_some());
+        assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
+        assert!(iter.next().is_none());
+
+        unsafe {
+            e1.as_ref().unwrap().delete(&guard);
+            e3.as_ref().unwrap().delete(&guard);
+        }
+
+        let mut iter = l.iter(&guard);
+        assert!(iter.next().is_none());
+    }
+
+    const THREADS: usize = 8;
+    const ITERS: usize = 512;
+    /// Contends the list on insert and delete operations to make sure they can run concurrently.
+    #[test]
+    fn insert_delete_multi() {
+        let collector = Collector::new();
+
+        let l: List<Entry> = List::new();
+        let b = Barrier::new(THREADS);
+
+        thread::scope(|s| for _ in 0..THREADS {
+            s.spawn(|| {
+                b.wait();
+
+                let handle = collector.register();
+                let guard: Guard = handle.pin();
+                let mut v = Vec::with_capacity(ITERS);
+
+                for _ in 0..ITERS {
+                    let e = Owned::new(Entry::default()).into_shared(&guard);
+                    v.push(e);
+                    unsafe {
+                        l.insert(e, &guard);
+                    }
+                }
+
+                for e in v {
+                    unsafe {
+                        e.as_ref().unwrap().delete(&guard);
+                    }
+                }
+            });
+        });
+
+        let handle = collector.register();
+        let guard = handle.pin();
+
+        let mut iter = l.iter(&guard);
+        assert!(iter.next().is_none());
+    }
+
+    /// Contends the list on iteration to make sure that it can be iterated over concurrently.
+    #[test]
+    fn iter_multi() {
+        let collector = Collector::new();
+
+        let l: List<Entry> = List::new();
+        let b = Barrier::new(THREADS);
+
+        thread::scope(|s| for _ in 0..THREADS {
+            s.spawn(|| {
+                b.wait();
+
+                let handle = collector.register();
+                let guard: Guard = handle.pin();
+                let mut v = Vec::with_capacity(ITERS);
+
+                for _ in 0..ITERS {
+                    let e = Owned::new(Entry::default()).into_shared(&guard);
+                    v.push(e);
+                    unsafe {
+                        l.insert(e, &guard);
+                    }
+                }
+
+                let mut iter = l.iter(&guard);
+                for _ in 0..ITERS {
+                    assert!(iter.next().is_some());
+                }
+
+                for e in v {
+                    unsafe {
+                        e.as_ref().unwrap().delete(&guard);
+                    }
+                }
+            });
+        });
+
+        let handle = collector.register();
+        let guard = handle.pin();
+
+        let mut iter = l.iter(&guard);
+        assert!(iter.next().is_none());
+    }
+}
diff --git a/crossbeam-epoch/src/sync/mod.rs b/crossbeam-epoch/src/sync/mod.rs
new file mode 100644
index 000000000..f8eb25960
--- /dev/null
+++ b/crossbeam-epoch/src/sync/mod.rs
@@ -0,0 +1,4 @@
+//! Synchronization primitives.
+
+pub mod list;
+pub mod queue;
diff --git a/crossbeam-epoch/src/sync/queue.rs b/crossbeam-epoch/src/sync/queue.rs
new file mode 100644
index 000000000..9e145d72d
--- /dev/null
+++ b/crossbeam-epoch/src/sync/queue.rs
@@ -0,0 +1,428 @@
+//! Michael-Scott lock-free queue.
+//!
+//! Usable with any number of producers and consumers.
+//!
+//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
+//! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106
+
+use core::mem::{self, ManuallyDrop};
+use core::ptr;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use crossbeam_utils::CachePadded;
+
+use {unprotected, Atomic, Guard, Owned, Shared};
+
+// The representation here is a singly-linked list, with a sentinel node at the front. In general
+// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or
+// all `Blocked` (requests for data from blocked threads).
+#[derive(Debug)]
+pub struct Queue<T> {
+    head: CachePadded<Atomic<Node<T>>>,
+    tail: CachePadded<Atomic<Node<T>>>,
+}
+
+#[derive(Debug)]
+struct Node<T> {
+    /// The slot in which a value of type `T` can be stored.
+    ///
+    /// The type of `data` is `ManuallyDrop<T>` because a `Node<T>` doesn't always contain a `T`.
+    /// For example, the sentinel node in a queue never contains a value: its slot is always
+    /// empty. Other nodes start their life with a push operation and contain a value until it
+    /// gets popped out. After that such empty nodes get added to the collector for destruction.
+    data: ManuallyDrop<T>,
+
+    next: Atomic<Node<T>>,
+}
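Before the implementation, a quick usage sketch: every queue operation takes an epoch `Guard`, so callers pin the current thread first. (This is a hedged example; `sync::queue::Queue` is crate-internal, and `epoch::pin()` here stands in for this crate's own `pin` function.)

```rust
let queue: Queue<i32> = Queue::new();
let guard = epoch::pin();

queue.push(1, &guard);
queue.push(2, &guard);

// Values come back out in FIFO order.
assert_eq!(queue.try_pop(&guard), Some(1));
assert_eq!(queue.try_pop(&guard), Some(2));
assert_eq!(queue.try_pop(&guard), None);
```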
+// Any particular `T` should never be accessed concurrently, so no need for `Sync`.
+unsafe impl<T: Send> Sync for Queue<T> {}
+unsafe impl<T: Send> Send for Queue<T> {}
+
+impl<T> Queue<T> {
+    /// Create a new, empty queue.
+    pub fn new() -> Queue<T> {
+        let q = Queue {
+            head: CachePadded::new(Atomic::null()),
+            tail: CachePadded::new(Atomic::null()),
+        };
+        let sentinel = Owned::new(Node {
+            data: unsafe { mem::uninitialized() },
+            next: Atomic::null(),
+        });
+        unsafe {
+            let guard = &unprotected();
+            let sentinel = sentinel.into_shared(guard);
+            q.head.store(sentinel, Relaxed);
+            q.tail.store(sentinel, Relaxed);
+            q
+        }
+    }
+
+    /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on
+    /// success. The queue's `tail` pointer may be updated.
+    #[inline(always)]
+    fn push_internal(&self, onto: Shared<Node<T>>, new: Shared<Node<T>>, guard: &Guard) -> bool {
+        // is `onto` the actual tail?
+        let o = unsafe { onto.deref() };
+        let next = o.next.load(Acquire, guard);
+        if unsafe { next.as_ref().is_some() } {
+            // if not, try to "help" by moving the tail pointer forward
+            let _ = self.tail.compare_and_set(onto, next, Release, guard);
+            false
+        } else {
+            // looks like the actual tail; attempt to link in `n`
+            let result = o.next
+                .compare_and_set(Shared::null(), new, Release, guard)
+                .is_ok();
+            if result {
+                // try to move the tail pointer forward
+                let _ = self.tail.compare_and_set(onto, new, Release, guard);
+            }
+            result
+        }
+    }
+
+    /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`.
+    pub fn push(&self, t: T, guard: &Guard) {
+        let new = Owned::new(Node {
+            data: ManuallyDrop::new(t),
+            next: Atomic::null(),
+        });
+        let new = Owned::into_shared(new, guard);
+
+        loop {
+            // We push onto the tail, so we'll start optimistically by looking there first.
+            let tail = self.tail.load(Acquire, guard);
+
+            // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed.
+            if self.push_internal(tail, new, guard) {
+                break;
+            }
+        }
+    }
+
+    /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop.
+    #[inline(always)]
+    fn pop_internal(&self, guard: &Guard) -> Result<Option<T>, ()> {
+        let head = self.head.load(Acquire, guard);
+        let h = unsafe { head.deref() };
+        let next = h.next.load(Acquire, guard);
+        match unsafe { next.as_ref() } {
+            Some(n) => unsafe {
+                self.head
+                    .compare_and_set(head, next, Release, guard)
+                    .map(|_| {
+                        guard.defer_destroy(head);
+                        Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
+                    })
+                    .map_err(|_| ())
+            },
+            None => Ok(None),
+        }
+    }
+
+    /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if
+    /// queue is empty or the data does not satisfy the condition; `Err(())` if lost race to pop.
+    #[inline(always)]
+    fn pop_if_internal<F>(&self, condition: F, guard: &Guard) -> Result<Option<T>, ()>
+    where
+        T: Sync,
+        F: Fn(&T) -> bool,
+    {
+        let head = self.head.load(Acquire, guard);
+        let h = unsafe { head.deref() };
+        let next = h.next.load(Acquire, guard);
+        match unsafe { next.as_ref() } {
+            Some(n) if condition(&n.data) => unsafe {
+                self.head
+                    .compare_and_set(head, next, Release, guard)
+                    .map(|_| {
+                        guard.defer_destroy(head);
+                        Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
+                    })
+                    .map_err(|_| ())
+            },
+            None | Some(_) => Ok(None),
+        }
+    }
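The `ManuallyDrop::into_inner(ptr::read(&n.data))` step above moves the payload out of a node that will be reclaimed later, without ever running `T`'s destructor twice. The same hand-off in isolation, as a standalone sketch using only the standard library:

```rust
use std::mem::ManuallyDrop;
use std::ptr;

let slot = ManuallyDrop::new(String::from("hello"));
// `ptr::read` copies the value out bitwise. The slot is now treated as
// logically empty, and dropping `slot` will not drop the inner `String`,
// so there is exactly one owner of the string: `value`.
let value: String = unsafe { ManuallyDrop::into_inner(ptr::read(&slot)) };
assert_eq!(value, "hello");
```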
+    /// Attempts to dequeue from the front.
+    ///
+    /// Returns `None` if the queue is observed to be empty.
+    pub fn try_pop(&self, guard: &Guard) -> Option<T> {
+        loop {
+            if let Ok(head) = self.pop_internal(guard) {
+                return head;
+            }
+        }
+    }
+
+    /// Attempts to dequeue from the front, if the item satisfies the given condition.
+    ///
+    /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the
+    /// given condition.
+    pub fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
+    where
+        T: Sync,
+        F: Fn(&T) -> bool,
+    {
+        loop {
+            if let Ok(head) = self.pop_if_internal(&condition, guard) {
+                return head;
+            }
+        }
+    }
+}
+
+impl<T> Drop for Queue<T> {
+    fn drop(&mut self) {
+        unsafe {
+            let guard = &unprotected();
+
+            while let Some(_) = self.try_pop(guard) {}
+
+            // Destroy the remaining sentinel node.
+            let sentinel = self.head.load(Relaxed, guard);
+            drop(sentinel.into_owned());
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crossbeam_utils::thread;
+    use pin;
+
+    struct Queue<T> {
+        queue: super::Queue<T>,
+    }
+
+    impl<T> Queue<T> {
+        pub fn new() -> Queue<T> {
+            Queue { queue: super::Queue::new() }
+        }
+
+        pub fn push(&self, t: T) {
+            let guard = &pin();
+            self.queue.push(t, guard);
+        }
+
+        pub fn is_empty(&self) -> bool {
+            let guard = &pin();
+            let head = self.queue.head.load(Acquire, guard);
+            let h = unsafe { head.deref() };
+            h.next.load(Acquire, guard).is_null()
+        }
+
+        pub fn try_pop(&self) -> Option<T> {
+            let guard = &pin();
+            self.queue.try_pop(guard)
+        }
+
+        pub fn pop(&self) -> T {
+            loop {
+                match self.try_pop() {
+                    None => continue,
+                    Some(t) => return t,
+                }
+            }
+        }
+    }
+
+    const CONC_COUNT: i64 = 1000000;
+
+    #[test]
+    fn push_try_pop_1() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        q.push(37);
+        assert!(!q.is_empty());
+        assert_eq!(q.try_pop(), Some(37));
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_try_pop_2() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        q.push(37);
+        q.push(48);
+        assert_eq!(q.try_pop(), Some(37));
+        assert!(!q.is_empty());
+        assert_eq!(q.try_pop(), Some(48));
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_try_pop_many_seq() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        for i in 0..200 {
+            q.push(i)
+        }
+        assert!(!q.is_empty());
+        for i in 0..200 {
+            assert_eq!(q.try_pop(), Some(i));
+        }
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_pop_1() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        q.push(37);
+        assert!(!q.is_empty());
+        assert_eq!(q.pop(), 37);
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_pop_2() {
+        let q: Queue<i64> = Queue::new();
+        q.push(37);
+        q.push(48);
+        assert_eq!(q.pop(), 37);
+        assert_eq!(q.pop(), 48);
+    }
+
+    #[test]
+    fn push_pop_many_seq() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        for i in 0..200 {
+            q.push(i)
+        }
+        assert!(!q.is_empty());
+        for i in 0..200 {
+            assert_eq!(q.pop(), i);
+        }
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_try_pop_many_spsc() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+
+        thread::scope(|scope| {
+            scope.spawn(|| {
+                let mut next = 0;
+
+                while next < CONC_COUNT {
+                    if let Some(elem) = q.try_pop() {
+                        assert_eq!(elem, next);
+                        next += 1;
+                    }
+                }
+            });
+
+            for i in 0..CONC_COUNT {
+                q.push(i)
+            }
+        });
+    }
+
+    #[test]
+    fn push_try_pop_many_spmc() {
+        fn recv(_t: i32, q: &Queue<i64>) {
+            let mut cur = -1;
+            for _i in 0..CONC_COUNT {
+                if let Some(elem) = q.try_pop() {
+                    assert!(elem > cur);
+                    cur = elem;
+
+                    if cur == CONC_COUNT - 1 {
+                        break;
+                    }
+                }
+            }
+        }
+
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        let qr = &q;
+        thread::scope(|scope| {
+            for i in 0..3 {
scope.spawn(move || recv(i, qr)); + } + + scope.spawn(|| for i in 0..CONC_COUNT { + q.push(i); + }) + }); + } + + #[test] + fn push_try_pop_many_mpmc() { + enum LR { + Left(i64), + Right(i64), + } + + let q: Queue = Queue::new(); + assert!(q.is_empty()); + + thread::scope(|scope| for _t in 0..2 { + scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT { + q.push(LR::Left(i)) + }); + scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT { + q.push(LR::Right(i)) + }); + scope.spawn(|| { + let mut vl = vec![]; + let mut vr = vec![]; + for _i in 0..CONC_COUNT { + match q.try_pop() { + Some(LR::Left(x)) => vl.push(x), + Some(LR::Right(x)) => vr.push(x), + _ => {} + } + } + + let mut vl2 = vl.clone(); + let mut vr2 = vr.clone(); + vl2.sort(); + vr2.sort(); + + assert_eq!(vl, vl2); + assert_eq!(vr, vr2); + }); + }); + } + + #[test] + fn push_pop_many_spsc() { + let q: Queue = Queue::new(); + + thread::scope(|scope| { + scope.spawn(|| { + let mut next = 0; + while next < CONC_COUNT { + assert_eq!(q.pop(), next); + next += 1; + } + }); + + for i in 0..CONC_COUNT { + q.push(i) + } + }); + assert!(q.is_empty()); + } + + #[test] + fn is_empty_dont_pop() { + let q: Queue = Queue::new(); + q.push(20); + q.push(20); + assert!(!q.is_empty()); + assert!(!q.is_empty()); + assert!(q.try_pop().is_some()); + } +} diff --git a/crossbeam-skiplist/CHANGELOG.md b/crossbeam-skiplist/CHANGELOG.md new file mode 100644 index 000000000..7319cd7ad --- /dev/null +++ b/crossbeam-skiplist/CHANGELOG.md @@ -0,0 +1,3 @@ +# Version 0.1.0 + +- Initial implementation. diff --git a/crossbeam-skiplist/Cargo.toml b/crossbeam-skiplist/Cargo.toml new file mode 100644 index 000000000..4322586db --- /dev/null +++ b/crossbeam-skiplist/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "crossbeam-skiplist" +# When publishing a new version: +# - Update CHANGELOG.md +# - Update README.md +# - Create "crossbeam-skiplist-X.Y.Z" git tag +version = "0.0.0" +authors = ["The Crossbeam Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/crossbeam-rs/crossbeam-skiplist" +homepage = "https://github.com/crossbeam-rs/crossbeam-skiplist" +documentation = "https://docs.rs/crossbeam-skiplist" +description = "A concurrent skip list" +keywords = ["map", "set", "skiplist", "lock-free"] +categories = ["algorithms", "concurrency", "data-structures"] + +[features] +default = ["use_std"] +use_std = ["crossbeam-epoch/use_std"] +nightly = ["crossbeam-epoch/nightly"] + +[dependencies] +crossbeam-epoch = {version = "0.4.1", default-features = false} +crossbeam-utils = {version = "0.3.0", default-features = false} +scopeguard = {version = "0.3.0", default-features = false} + +[dev-dependencies] +crossbeam = "0.3.2" +rand = "0.5.3" diff --git a/crossbeam-skiplist/LICENSE-APACHE b/crossbeam-skiplist/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/crossbeam-skiplist/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/crossbeam-skiplist/LICENSE-MIT b/crossbeam-skiplist/LICENSE-MIT new file mode 100644 index 000000000..25597d583 --- /dev/null +++ b/crossbeam-skiplist/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/crossbeam-skiplist/README.md b/crossbeam-skiplist/README.md new file mode 100644 index 000000000..9214e7fbf --- /dev/null +++ b/crossbeam-skiplist/README.md @@ -0,0 +1,35 @@ +# Concurrent skip list + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( +https://travis-ci.org/crossbeam-rs/crossbeam) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam-skiplist) +[![Cargo](https://img.shields.io/crates/v/crossbeam-skiplist.svg)]( +https://crates.io/crates/crossbeam-skiplist) +[![Documentation](https://docs.rs/crossbeam-skiplist/badge.svg)]( +https://docs.rs/crossbeam-skiplist) + +This crate is still a work in progress. + + + +## License + +Licensed under the terms of MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. 
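Since the README is terse, here is a hedged sketch of the map-style API this crate exposes; the `insert`/`get`/`remove` calls mirror the ones exercised in `benches/skipmap.rs` below, though the exact entry types may still change while the crate is a work in progress:

```rust
extern crate crossbeam_skiplist;

use crossbeam_skiplist::SkipMap;

fn main() {
    // All operations take `&self`, so the map can be shared between threads.
    let map = SkipMap::new();
    map.insert(1, "one");
    map.insert(2, "two");
    assert!(map.get(&1).is_some());
    assert!(map.remove(&2).is_some());
    assert!(map.get(&2).is_none());
}
```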
diff --git a/crossbeam-skiplist/benches/btree.rs b/crossbeam-skiplist/benches/btree.rs new file mode 100644 index 000000000..078392fd7 --- /dev/null +++ b/crossbeam-skiplist/benches/btree.rs @@ -0,0 +1,94 @@ +#![feature(test)] + +extern crate crossbeam_skiplist; +extern crate test; + +use test::{black_box, Bencher}; + +use std::collections::BTreeMap as Map; + +#[bench] +fn insert(b: &mut Bencher) { + b.iter(|| { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + }); +} + +#[bench] +fn iter(b: &mut Bencher) { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + for x in map.iter() { + black_box(x); + } + }); +} + +#[bench] +fn rev_iter(b: &mut Bencher) { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + for x in map.iter().rev() { + black_box(x); + } + }); +} + +#[bench] +fn lookup(b: &mut Bencher) { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + let mut num = 0 as u64; + + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.get(&num)); + } + }); +} + +#[bench] +fn insert_remove(b: &mut Bencher) { + b.iter(|| { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.remove(&num).unwrap()); + } + }); +} diff --git a/crossbeam-skiplist/benches/hash.rs b/crossbeam-skiplist/benches/hash.rs new file mode 100644 index 000000000..a472e627a --- /dev/null +++ b/crossbeam-skiplist/benches/hash.rs @@ -0,0 +1,77 @@ +#![feature(test)] + +extern crate crossbeam_skiplist; +extern crate test; + +use test::{black_box, Bencher}; + +use std::collections::HashMap as Map; + +#[bench] +fn insert(b: &mut Bencher) { + b.iter(|| { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + }); +} + +#[bench] +fn iter(b: &mut Bencher) { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + for x in map.iter() { + black_box(x); + } + }); +} + +#[bench] +fn lookup(b: &mut Bencher) { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + let mut num = 0 as u64; + + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.get(&num)); + } + }); +} + +#[bench] +fn insert_remove(b: &mut Bencher) { + b.iter(|| { + let mut map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.remove(&num).unwrap()); + } + }); +} diff --git a/crossbeam-skiplist/benches/skiplist.rs b/crossbeam-skiplist/benches/skiplist.rs new file mode 100644 index 000000000..cb9b7f77d --- /dev/null 
+++ b/crossbeam-skiplist/benches/skiplist.rs @@ -0,0 +1,101 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_skiplist; +extern crate test; + +use test::{black_box, Bencher}; + +use crossbeam_skiplist::SkipList; + +#[bench] +fn insert(b: &mut Bencher) { + let guard = &epoch::pin(); + + b.iter(|| { + let map = SkipList::new(epoch::default_collector().clone()); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num, guard); + } + }); +} + +#[bench] +fn iter(b: &mut Bencher) { + let guard = &epoch::pin(); + let map = SkipList::new(epoch::default_collector().clone()); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num, guard).release(guard); + } + + b.iter(|| { + for x in map.iter(guard) { + black_box(x.key()); + } + }); +} + +#[bench] +fn rev_iter(b: &mut Bencher) { + let guard = &epoch::pin(); + let map = SkipList::new(epoch::default_collector().clone()); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num, guard).release(guard); + } + + b.iter(|| { + for x in map.iter(guard).rev() { + black_box(x.key()); + } + }); +} + +#[bench] +fn lookup(b: &mut Bencher) { + let guard = &epoch::pin(); + let map = SkipList::new(epoch::default_collector().clone()); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num, guard).release(guard); + } + + b.iter(|| { + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.get(&num, guard)); + } + }); +} + +#[bench] +fn insert_remove(b: &mut Bencher) { + let guard = &epoch::pin(); + + b.iter(|| { + let map = SkipList::new(epoch::default_collector().clone()); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num, guard).release(guard); + } + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.remove(&num, guard).unwrap().release(guard)); + } + }); +} diff --git a/crossbeam-skiplist/benches/skipmap.rs b/crossbeam-skiplist/benches/skipmap.rs new file mode 100644 index 000000000..a02c17446 --- /dev/null +++ b/crossbeam-skiplist/benches/skipmap.rs @@ -0,0 +1,94 @@ +#![feature(test)] + +extern crate crossbeam_skiplist; +extern crate test; + +use test::{black_box, Bencher}; + +use crossbeam_skiplist::SkipMap as Map; + +#[bench] +fn insert(b: &mut Bencher) { + b.iter(|| { + let map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + }); +} + +#[bench] +fn iter(b: &mut Bencher) { + let map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + for x in map.iter() { + black_box(x); + } + }); +} + +#[bench] +fn rev_iter(b: &mut Bencher) { + let map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + for x in map.iter().rev() { + black_box(x); + } + }); +} + +#[bench] +fn lookup(b: &mut Bencher) { + let map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + b.iter(|| { + let mut num = 0 as u64; + + for _ 
in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.get(&num)); + } + }); +} + +#[bench] +fn insert_remove(b: &mut Bencher) { + b.iter(|| { + let map = Map::new(); + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + map.insert(num, !num); + } + + let mut num = 0 as u64; + for _ in 0..1_000 { + num = num.wrapping_mul(17).wrapping_add(255); + black_box(map.remove(&num).unwrap()); + } + }); +} diff --git a/crossbeam-skiplist/ci/script.sh b/crossbeam-skiplist/ci/script.sh new file mode 100755 index 000000000..febc4548f --- /dev/null +++ b/crossbeam-skiplist/ci/script.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +check_min_version() { + local rustc="`rustc -V | cut -d' ' -f2 | cut -d- -f1`" + if [[ "$rustc" != "`echo -e "$rustc\n$1" | sort -V | tail -n1`" ]]; then + echo "Unsupported Rust version: $1 < $rustc" + exit 0 + fi +} +check_min_version 1.26.0 + +set -ex + +export RUSTFLAGS="-D warnings" + +cargo build --no-default-features +cargo test + +if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then + cargo test --features nightly +fi diff --git a/crossbeam-skiplist/examples/simple.rs b/crossbeam-skiplist/examples/simple.rs new file mode 100644 index 000000000..31095b0b0 --- /dev/null +++ b/crossbeam-skiplist/examples/simple.rs @@ -0,0 +1,27 @@ +// extern crate crossbeam_skiplist; + +// use std::time::Instant; + +fn main() { + // let map = crossbeam_skiplist::SkipMap::new(); + // // let mut map = std::collections::BTreeMap::new(); + // // let mut map = std::collections::HashMap::new(); + // + // let now = Instant::now(); + // + // let mut num = 0 as u64; + // for _ in 0..1_000_000 { + // num = num.wrapping_mul(17).wrapping_add(255); + // map.insert(num, !num); + // } + // + // let dur = Instant::now() - now; + // println!("insert: {} sec", dur.as_secs() as f64 + dur.subsec_nanos() as f64 * 1e-9); + // + // let now = Instant::now(); + // + // for _ in map.iter() {} + // + // let dur = Instant::now() - now; + // println!("iterate: {} sec", dur.as_secs() as f64 + dur.subsec_nanos() as f64 * 1e-9); +} diff --git a/crossbeam-skiplist/src/base.rs b/crossbeam-skiplist/src/base.rs new file mode 100644 index 000000000..d5c5c67e0 --- /dev/null +++ b/crossbeam-skiplist/src/base.rs @@ -0,0 +1,2683 @@ +use alloc::vec::Vec; +use core::borrow::Borrow; +use core::cmp; +use core::fmt; +use core::mem; +use core::ops::{Deref, Index}; +use core::ptr; +use core::sync::atomic::{fence, AtomicUsize, Ordering}; + +use epoch::{self, Atomic, Collector, Guard, Shared}; +use scopeguard; +use utils::cache_padded::CachePadded; +use Bound; + +/// Number of bits needed to store height. +const HEIGHT_BITS: usize = 5; + +/// Maximum height of a skip list tower. +const MAX_HEIGHT: usize = 1 << HEIGHT_BITS; + +/// The bits of `refs_and_height` that keep the height. +const HEIGHT_MASK: usize = (1 << HEIGHT_BITS) - 1; + +/// The tower of atomic pointers. +/// +/// The actual size of the tower will vary depending on the height that a node +/// was allocated with. +#[repr(C)] +struct Tower { + pointers: [Atomic>; 0], +} + +impl Index for Tower { + type Output = Atomic>; + fn index(&self, index: usize) -> &Atomic> { + // This implementation is actually unsafe since we don't check if the + // index is in-bounds. But this is fine since this is only used internally. + unsafe { self.pointers.get_unchecked(index) } + } +} + +/// Tower at the head of a skip list. +/// +/// This is located in the `SkipList` struct itself and holds a full height +/// tower. 
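+// (`Tower` above declares a zero-length `pointers` array: `Node::alloc` further
+// below over-allocates space for `height` pointers right past the end of each
+// node, and `Tower`'s `Index` impl reaches into that space with `get_unchecked`.
+// `Head`, by contrast, is statically sized and always holds `MAX_HEIGHT` slots.)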
+#[repr(C)] +struct Head { + pointers: [Atomic>; MAX_HEIGHT], +} + +impl Head { + /// Initializes a `Head`. + #[inline] + fn new() -> Head { + // Initializing arrays in rust is a pain... + Head { + pointers: Default::default(), + } + } +} + +impl Deref for Head { + type Target = Tower; + fn deref(&self) -> &Tower { + unsafe { &*(self as *const _ as *const Tower) } + } +} + +/// A skip list node. +/// +/// This struct is marked with `repr(C)` so that the specific order of fields is enforced. +/// It is important that the tower is the last field since it is dynamically sized. The key, +/// reference count, and height are kept close to the tower to improve cache locality during +/// skip list traversal. +#[repr(C)] +struct Node { + /// The value. + value: V, + + /// The key. + key: K, + + /// Keeps the reference count and the height of its tower. + /// + /// The reference count is equal to the number of `Entry`s pointing to this node, plus the + /// number of levels in which this node is installed. + refs_and_height: AtomicUsize, + + /// The tower of atomic pointers. + tower: Tower, +} + +impl Node { + /// Allocates a node. + /// + /// The returned node will start with reference count of `ref_count` and the tower will be initialized + /// with null pointers. However, the key and the value will be left uninitialized, and that is + /// why this function is unsafe. + unsafe fn alloc(height: usize, ref_count: usize) -> *mut Self { + // TODO(stjepang): Use the new alloc API instead of this hack once it becomes stable. + let cap = Self::size_in_u64s(height); + let mut v = Vec::::with_capacity(cap); + let ptr = v.as_mut_ptr() as *mut Self; + mem::forget(v); + + ptr::write( + &mut (*ptr).refs_and_height, + AtomicUsize::new((height - 1) | ref_count << HEIGHT_BITS), + ); + ptr::write_bytes((*ptr).tower.pointers.as_mut_ptr(), 0, height); + ptr + } + + /// Deallocates a node. + /// + /// This function will not run any destructors. + unsafe fn dealloc(ptr: *mut Self) { + let height = (*ptr).height(); + let cap = Self::size_in_u64s(height); + drop(Vec::from_raw_parts(ptr as *mut u64, 0, cap)); + } + + /// Returns the size of a node with tower of given `height` measured in `u64`s. + fn size_in_u64s(height: usize) -> usize { + assert!(1 <= height && height <= MAX_HEIGHT); + assert!(mem::align_of::() <= mem::align_of::()); + + let size_base = mem::size_of::(); + let size_ptr = mem::size_of::>(); + + let size_u64 = mem::size_of::(); + let size_self = size_base + size_ptr * height; + + (size_self + size_u64 - 1) / size_u64 + } + + /// Returns the height of this node's tower. + #[inline] + fn height(&self) -> usize { + (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1 + } + + /// Marks all pointers in the tower and returns `true` if the level 0 was not marked. + fn mark_tower(&self) -> bool { + let height = self.height(); + + for level in (0..height).rev() { + let tag = unsafe { + // We're loading the pointer only for the tag, so it's okay to use + // `epoch::unprotected()` in this situation. + // TODO(Amanieu): can we use release ordering here? + self.tower[level] + .fetch_or(1, Ordering::SeqCst, epoch::unprotected()) + .tag() + }; + + // If the level 0 pointer was already marked, somebody else removed the node. + if level == 0 && tag == 1 { + return false; + } + } + + // We marked the level 0 pointer, therefore we removed the node. + true + } + + /// Returns `true` if the node is removed. 
+ #[inline] + fn is_removed(&self) -> bool { + let tag = unsafe { + // We're loading the pointer only for the tag, so it's okay to use + // `epoch::unprotected()` in this situation. + self.tower[0] + .load(Ordering::Relaxed, epoch::unprotected()) + .tag() + }; + tag == 1 + } + + /// Attempts to increment the reference count of a node and returns `true` on success. + /// + /// The reference count can be incremented only if it is non-zero. + /// + /// # Panics + /// + /// Panics if the reference count overflows. + #[inline] + unsafe fn try_increment(&self) -> bool { + let mut refs_and_height = self.refs_and_height.load(Ordering::Relaxed); + + loop { + // If the reference count is zero, then the node has already been + // queued for deletion. Incrementing it again could lead to a + // double-free. + if refs_and_height & !HEIGHT_MASK == 0 { + return false; + } + + // If all bits in the reference count are ones, we're about to overflow it. + let new_refs_and_height = refs_and_height + .checked_add(1 << HEIGHT_BITS) + .expect("SkipList reference count overflow"); + + // Try incrementing the count. + match self.refs_and_height.compare_exchange_weak( + refs_and_height, + new_refs_and_height, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(current) => refs_and_height = current, + } + } + } + + /// Decrements the reference count of a node, destroying it if the count becomes zero. + #[inline] + unsafe fn decrement(&self, guard: &Guard) { + if self.refs_and_height + .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) >> HEIGHT_BITS == 1 + { + fence(Ordering::Acquire); + guard.defer(move || Self::finalize(self)); + } + } + + /// Drops the key and value of a node, then deallocates it. + #[cold] + unsafe fn finalize(ptr: *const Self) { + let ptr = ptr as *mut Self; + + // Call destructors: drop the key and the value. + ptr::drop_in_place(&mut (*ptr).key); + ptr::drop_in_place(&mut (*ptr).value); + + // Finally, deallocate the memory occupied by the node. + Node::dealloc(ptr); + } +} + +/// A search result. +/// +/// The result indicates whether the key was found, as well as what were the adjacent nodes to the +/// key on each level of the skip list. +struct Position<'a, K: 'a, V: 'a> { + /// Reference to a node with the given key, if found. + /// + /// If this is `Some` then it will point to the same node as `right[0]`. + found: Option<&'a Node>, + + /// Adjacent nodes with smaller keys (predecessors). + left: [&'a Tower; MAX_HEIGHT], + + /// Adjacent nodes with equal or greater keys (successors). + right: [Shared<'a, Node>; MAX_HEIGHT], +} + +/// Frequently modified data associated with a skip list. +struct HotData { + /// The seed for random height generation. + seed: AtomicUsize, + + /// The number of entries in the skip list. + len: AtomicUsize, + + /// Highest tower currently in use. This value is used as a hint for where + /// to start lookups and never decreases. + max_height: AtomicUsize, +} + +/// A lock-free skip list. +// TODO(stjepang): Embed a custom `epoch::Collector` inside `SkipList`. Instead of adding +// garbage to the default global collector, we should add it to a local collector tied to the +// particular skip list instance. +// +// Since global collector might destroy garbage arbitrarily late in the future, some skip list +// methods have `K: 'static` and `V: 'static` bounds. 
But a local collector embedded in the skip +// list would destroy all remaining garbage when the skip list is dropped, so in that case we'd be +// able to remove those bounds on types `K` and `V`. +// +// As a further future optimization, if `!mem::needs_drop::() && !mem::needs_drop::()` +// (neither key nor the value have destructors), there's no point in creating a new local +// collector, so we should simply use the global one. +pub struct SkipList { + /// The head of the skip list (just a dummy node, not a real entry). + head: Head, + + /// The `Collector` associated with this skip list. + collector: Collector, + + /// Hot data associated with the skip list, stored in a dedicated cache line. + hot_data: CachePadded, +} + +unsafe impl Send for SkipList {} +unsafe impl Sync for SkipList {} + +impl SkipList { + /// Returns a new, empty skip list. + pub fn new(collector: Collector) -> SkipList { + SkipList { + head: Head::new(), + collector, + hot_data: CachePadded::new(HotData { + seed: AtomicUsize::new(1), + len: AtomicUsize::new(0), + max_height: AtomicUsize::new(1), + }), + } + } + + /// Returns `true` if the skip list is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of entries in the skip list. + /// + /// If the skip list is being concurrently modified, consider the returned number just an + /// approximation without any guarantees. + pub fn len(&self) -> usize { + let len = self.hot_data.len.load(Ordering::Relaxed); + + // Due to the relaxed memory ordering, the length counter may sometimes + // underflow and produce a very large value. We treat such values as 0. + if len > isize::max_value() as usize { + 0 + } else { + len + } + } + + /// Ensures that all `Guard`s used with the skip list come from the same + /// `Collector`. + fn check_guard(&self, guard: &Guard) { + if let Some(c) = guard.collector() { + assert!(c == &self.collector); + } + } +} + +impl SkipList +where + K: Ord, +{ + /// Returns the entry with the smallest key. + pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option> { + self.check_guard(guard); + let n = self.next_node(&self.head, Bound::Unbounded, guard)?; + Some(Entry { + parent: self, + node: n, + guard, + }) + } + + /// Returns the entry with the largest key. + pub fn back<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option> { + self.check_guard(guard); + let n = self.search_bound(Bound::Unbounded, true, guard)?; + Some(Entry { + parent: self, + node: n, + guard, + }) + } + + /// Returns `true` if the map contains a value for the specified key. + pub fn contains_key(&self, key: &Q, guard: &Guard) -> bool + where + K: Borrow, + Q: Ord + ?Sized, + { + self.get(key, guard).is_some() + } + + /// Returns an entry with the specified `key`. + pub fn get<'a: 'g, 'g, Q>(&'a self, key: &Q, guard: &'g Guard) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.check_guard(guard); + let n = self.search_bound(Bound::Included(key), false, guard)?; + if n.key.borrow() != key { + return None; + } + Some(Entry { + parent: self, + node: n, + guard, + }) + } + + /// Returns an `Entry` pointing to the lowest element whose key is above + /// the given bound. If no such element is found then `None` is + /// returned. 
+ pub fn lower_bound<'a: 'g, 'g, Q>( + &'a self, + bound: Bound<&Q>, + guard: &'g Guard, + ) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.check_guard(guard); + let n = self.search_bound(bound, false, guard)?; + Some(Entry { + parent: self, + node: n, + guard, + }) + } + + /// Returns an `Entry` pointing to the highest element whose key is below + /// the given bound. If no such element is found then `None` is + /// returned. + pub fn upper_bound<'a: 'g, 'g, Q>( + &'a self, + bound: Bound<&Q>, + guard: &'g Guard, + ) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.check_guard(guard); + let n = self.search_bound(bound, true, guard)?; + Some(Entry { + parent: self, + node: n, + guard, + }) + } + + /// Finds an entry with the specified key, or inserts a new `key`-`value` pair if none exist. + pub fn get_or_insert(&self, key: K, value: V, guard: &Guard) -> RefEntry { + self.insert_internal(key, value, false, guard) + } + + /// Returns an iterator over all entries in the skip list. + pub fn iter<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Iter<'a, 'g, K, V> { + self.check_guard(guard); + Iter { + parent: self, + head: None, + tail: None, + guard, + } + } + + /// Returns an iterator over all entries in the skip list. + pub fn ref_iter(&self) -> RefIter { + RefIter { + parent: self, + head: None, + tail: None, + } + } + + /// Returns an iterator over a subset of entries in the skip list. + pub fn range<'a: 'g, 'g, 'k, Min, Max>( + &'a self, + lower_bound: Bound<&'k Min>, + upper_bound: Bound<&'k Max>, + guard: &'g Guard, + ) -> Range<'a, 'g, 'k, Min, Max, K, V> + where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, + { + self.check_guard(guard); + Range { + parent: self, + head: None, + tail: None, + lower_bound, + upper_bound, + guard, + } + } + + /// Returns an iterator over a subset of entries in the skip list. + pub fn ref_range<'a, 'k, Min, Max>( + &'a self, + lower_bound: Bound<&'k Min>, + upper_bound: Bound<&'k Max>, + ) -> RefRange<'a, 'k, Min, Max, K, V> + where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, + { + RefRange { + parent: self, + head: None, + tail: None, + lower_bound, + upper_bound, + } + } + + /// Generates a random height and returns it. + fn random_height(&self) -> usize { + // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. + // + // This particular set of operations generates 32-bit integers. See: + // https://en.wikipedia.org/wiki/Xorshift#Example_implementation + let mut num = self.hot_data.seed.load(Ordering::Relaxed); + num ^= num << 13; + num ^= num >> 17; + num ^= num << 5; + self.hot_data.seed.store(num, Ordering::Relaxed); + + let mut height = cmp::min(MAX_HEIGHT, num.trailing_zeros() as usize + 1); + unsafe { + // Keep decreasing the height while it's much larger than all towers currently in the + // skip list. + // + // Note that we're loading the pointer only to check whether it is null, so it's okay + // to use `epoch::unprotected()` in this situation. 
+ while height >= 4 + && self.head[height - 2] + .load(Ordering::Relaxed, epoch::unprotected()) + .is_null() + { + height -= 1; + } + } + + // Track the max height to speed up lookups + let mut max_height = self.hot_data.max_height.load(Ordering::Relaxed); + while height > max_height { + match self.hot_data.max_height.compare_exchange_weak( + max_height, + height, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(h) => max_height = h, + } + } + height + } + + /// If we encounter a deleted node while searching, help with the deletion + /// by attempting to unlink the node from the list. + /// + /// If the unlinking is successful then this function returns the next node + /// with which the search should continue on the current level. + #[cold] + unsafe fn help_unlink<'a>( + &'a self, + pred: &'a Atomic>, + curr: &'a Node, + succ: Shared<'a, Node>, + guard: &'a Guard, + ) -> Option>> { + // If `succ` is marked, that means `curr` is removed. Let's try + // unlinking it from the skip list at this level. + match pred.compare_and_set( + Shared::from(curr as *const _), + succ.with_tag(0), + Ordering::Release, + guard, + ) { + Ok(_) => { + curr.decrement(guard); + Some(succ.with_tag(0)) + } + Err(_) => None, + } + } + + /// Returns the successor of a node. + /// + /// This will keep searching until a non-deleted node is found. If a deleted + /// node is reached then a search is performed using the given key. + fn next_node<'a>( + &'a self, + pred: &'a Tower, + lower_bound: Bound<&K>, + guard: &'a Guard, + ) -> Option<&'a Node> { + unsafe { + // Load the level 0 successor of the current node. + let mut curr = pred[0].load_consume(guard); + + // If `curr` is marked, that means `pred` is removed and we have to use + // a key search. + if curr.tag() == 1 { + return self.search_bound(lower_bound, false, guard); + } + + while let Some(c) = curr.as_ref() { + let succ = c.tower[0].load_consume(guard); + + if succ.tag() == 1 { + if let Some(c) = self.help_unlink(&pred[0], c, succ, guard) { + // On success, continue searching through the current level. + curr = c; + continue; + } else { + // On failure, we cannot do anything reasonable to continue + // searching from the current position. Restart the search. + return self.search_bound(lower_bound, false, guard); + } + } + + return Some(c); + } + + None + } + } + + /// Searches for first/last node that is greater/less/equal to a key in the skip list. + /// + /// If `upper_bound == true`: the last node less than (or equal to) the key. + /// + /// If `upper_bound == false`: the first node greater than (or equal to) the key. + /// + /// This is unsafe because the returned nodes are bound to the lifetime of + /// the `SkipList`, not the `Guard`. + fn search_bound<'a, Q>( + &'a self, + bound: Bound<&Q>, + upper_bound: bool, + guard: &'a Guard, + ) -> Option<&'a Node> + where + K: Borrow, + Q: Ord + ?Sized, + { + unsafe { + 'search: loop { + // The current level we're at. + let mut level = self.hot_data.max_height.load(Ordering::Relaxed); + + // Fast loop to skip empty tower levels. + while level >= 1 + && self.head[level - 1] + .load(Ordering::Relaxed, guard) + .is_null() + { + level -= 1; + } + + // The current best node + let mut result = None; + + // The predecessor node + let mut pred = &*self.head; + + while level >= 1 { + level -= 1; + + // Two adjacent nodes at the current level. + let mut curr = pred[level].load_consume(guard); + + // If `curr` is marked, that means `pred` is removed and we have to restart the + // search. 
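+                    // (A tag of 1 is set by `Node::mark_tower` during removal, so a
+                    // marked pointer loaded out of a tower means its owning node is
+                    // logically deleted and must not be trusted as a predecessor.)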
+ if curr.tag() == 1 { + continue 'search; + } + + // Iterate through the current level until we reach a node with a key greater + // than or equal to `key`. + while let Some(c) = curr.as_ref() { + let succ = c.tower[level].load_consume(guard); + + if succ.tag() == 1 { + if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { + // On success, continue searching through the current level. + curr = c; + continue; + } else { + // On failure, we cannot do anything reasonable to continue + // searching from the current position. Restart the search. + continue 'search; + } + } + + // If `curr` contains a key that is greater than (or equal) to `key`, we're + // done with this level. + // + // The condition determines whether we should stop the search. For the upper + // bound, we return the last node before the condition became true. For the + // lower bound, we return the first node after the condition became true. + if upper_bound { + if !below_upper_bound(&bound, c.key.borrow()) { + break; + } + result = Some(c); + } else { + if above_lower_bound(&bound, c.key.borrow()) { + result = Some(c); + break; + } + } + + // Move one step forward. + pred = &c.tower; + curr = succ; + } + } + + return result; + } + } + } + + /// Searches for a key in the skip list and returns a list of all adjacent nodes. + fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> + where + K: Borrow, + Q: Ord + ?Sized, + { + unsafe { + 'search: loop { + // The result of this search. + let mut result = Position { + found: None, + left: [&*self.head; MAX_HEIGHT], + right: [Shared::null(); MAX_HEIGHT], + }; + + // The current level we're at. + let mut level = self.hot_data.max_height.load(Ordering::Relaxed); + + // Fast loop to skip empty tower levels. + while level >= 1 + && self.head[level - 1] + .load(Ordering::Relaxed, guard) + .is_null() + { + level -= 1; + } + + // The predecessor node + let mut pred = &*self.head; + + while level >= 1 { + level -= 1; + + // Two adjacent nodes at the current level. + let mut curr = pred[level].load_consume(guard); + + // If `curr` is marked, that means `pred` is removed and we have to restart the + // search. + if curr.tag() == 1 { + continue 'search; + } + + // Iterate through the current level until we reach a node with a key greater + // than or equal to `key`. + while let Some(c) = curr.as_ref() { + let succ = c.tower[level].load_consume(guard); + + if succ.tag() == 1 { + if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { + // On success, continue searching through the current level. + curr = c; + continue; + } else { + // On failure, we cannot do anything reasonable to continue + // searching from the current position. Restart the search. + continue 'search; + } + } + + // If `curr` contains a key that is greater than or equal to `key`, we're + // done with this level. + match c.key.borrow().cmp(key) { + cmp::Ordering::Greater => break, + cmp::Ordering::Equal => { + result.found = Some(c); + break; + } + cmp::Ordering::Less => {} + } + + // Move one step forward. + pred = &c.tower; + curr = succ; + } + + // Store the position at the current level into the result. + result.left[level] = pred; + result.right[level] = curr; + } + + return result; + } + } + } + + /// Inserts an entry with the specified `key` and `value`. + /// + /// If `replace` is `true`, then any existing entry with this key will first be removed. 
+ fn insert_internal(&self, key: K, value: V, replace: bool, guard: &Guard) -> RefEntry { + self.check_guard(guard); + + unsafe { + // Rebind the guard to the lifetime of self. This is a bit of a + // hack but it allows us to return references that are not bound to + // the lifetime of the guard. + let guard = &*(guard as *const _); + + let mut search; + loop { + // First try searching for the key. + // Note that the `Ord` implementation for `K` may panic during the search. + search = self.search_position(&key, guard); + + let r = match search.found { + Some(r) => r, + None => break, + }; + + if replace { + // If a node with the key was found and we should replace it, mark its tower + // and then repeat the search. + if r.mark_tower() { + self.hot_data.len.fetch_sub(1, Ordering::Relaxed); + } + } else { + // If a node with the key was found and we're not going to replace it, let's + // try returning it as an entry. + if let Some(e) = RefEntry::try_acquire(self, r) { + return e; + } + + // If we couldn't increment the reference count, that means someone has just + // now removed the node. + break; + } + } + + // Create a new node. + let height = self.random_height(); + let (node, n) = { + // The reference count is initially two to account for: + // 1. The entry that will be returned. + // 2. The link at the level 0 of the tower. + let n = Node::::alloc(height, 2); + + // Write the key and the value into the node. + ptr::write(&mut (*n).key, key); + ptr::write(&mut (*n).value, value); + + (Shared::>::from(n as *const _), &*n) + }; + + // Optimistically increment `len`. + self.hot_data.len.fetch_add(1, Ordering::Relaxed); + + loop { + // Set the lowest successor of `n` to `search.right[0]`. + n.tower[0].store(search.right[0], Ordering::Relaxed); + + // Try installing the new node into the skip list (at level 0). + // TODO(Amanieu): can we use release ordering here? + if search.left[0][0] + .compare_and_set(search.right[0], node, Ordering::SeqCst, guard) + .is_ok() + { + break; + } + + // We failed. Let's search for the key and try again. + { + // Create a guard that destroys the new node in case search panics. + let sg = scopeguard::guard((), |_| { + Node::finalize(node.as_raw()); + }); + search = self.search_position(&n.key, guard); + mem::forget(sg); + } + + if let Some(r) = search.found { + if replace { + // If a node with the key was found and we should replace it, mark its + // tower and then repeat the search. + if r.mark_tower() { + self.hot_data.len.fetch_sub(1, Ordering::Relaxed); + } + } else { + // If a node with the key was found and we're not going to replace it, + // let's try returning it as an entry. + if let Some(e) = RefEntry::try_acquire(self, r) { + // Destroy the new node. + Node::finalize(node.as_raw()); + self.hot_data.len.fetch_sub(1, Ordering::Relaxed); + + return e; + } + + // If we couldn't increment the reference count, that means someone has + // just now removed the node. + } + } + } + + // The new node was successfully installed. Let's create an entry associated with it. + let entry = RefEntry { + parent: self, + node: n, + }; + + // Build the rest of the tower above level 0. + 'build: for level in 1..height { + loop { + // Obtain the predecessor and successor at the current level. + let pred = search.left[level]; + let succ = search.right[level]; + + // Load the current value of the pointer in the tower at this level. + // TODO(Amanieu): can we use relaxed ordering here? 
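+                    // (This load also observes any mark that a concurrent `remove` may
+                    // have already placed on the freshly inserted node; the tag check
+                    // just below abandons tower construction in that case.)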
+ let next = n.tower[level].load(Ordering::SeqCst, guard); + + // If the current pointer is marked, that means another thread is already + // removing the node we've just inserted. In that case, let's just stop + // building the tower. + if next.tag() == 1 { + break 'build; + } + + // When searching for `key` and traversing the skip list from the highest level + // to the lowest, it is possible to observe a node with an equal key at higher + // levels and then find it missing at the lower levels if it gets removed + // during traversal. Even worse, it is possible to observe completely different + // nodes with the exact same key at different levels. + // + // Linking the new node to a dead successor with an equal key could create + // subtle corner cases that would require special care. It's much easier to + // simply prohibit linking two nodes with equal keys. + // + // If the successor has the same key as the new node, that means it is marked + // as removed and should be unlinked from the skip list. In that case, let's + // repeat the search to make sure it gets unlinked and try again. + // + // If this comparison or the following search panics, we simply stop building + // the tower without breaking any invariants. Note that building higher levels + // is completely optional. Only the lowest level really matters, and all the + // higher levels are there just to make searching faster. + if succ.as_ref().map(|s| &s.key) == Some(&n.key) { + search = self.search_position(&n.key, guard); + continue; + } + + // Change the pointer at the current level from `next` to `succ`. If this CAS + // operation fails, that means another thread has marked the pointer and we + // should stop building the tower. + // TODO(Amanieu): can we use release ordering here? + if n.tower[level] + .compare_and_set(next, succ, Ordering::SeqCst, guard) + .is_err() + { + break 'build; + } + + // Increment the reference count. The current value will always be at least 1 + // because we are holding `entry`. + n.refs_and_height + .fetch_add(1 << HEIGHT_BITS, Ordering::Relaxed); + + // Try installing the new node at the current level. + // TODO(Amanieu): can we use release ordering here? + if pred[level] + .compare_and_set(succ, node, Ordering::SeqCst, guard) + .is_ok() + { + // Success! Continue on the next level. + break; + } + + // Installation failed. Decrement the reference count. + (*n).refs_and_height + .fetch_sub(1 << HEIGHT_BITS, Ordering::Relaxed); + + // We don't have the most up-to-date search results. Repeat the search. + // + // If this search panics, we simply stop building the tower without breaking + // any invariants. Note that building higher levels is completely optional. + // Only the lowest level really matters, and all the higher levels are there + // just to make searching faster. + search = self.search_position(&n.key, guard); + } + } + + // If any pointer in the tower is marked, that means our node is in the process of + // removal or already removed. It is possible that another thread (either partially or + // completely) removed the new node while we were building the tower, and just after + // that we installed the new node at one of the higher levels. In order to undo that + // installation, we must repeat the search, which will unlink the new node at that + // level. + // TODO(Amanieu): can we use relaxed ordering here? + if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { + self.search_bound(Bound::Included(&n.key), false, guard); + } + + // Finally, return the new entry. 
+            entry
+        }
+    }
+}
+
+impl<K, V> SkipList<K, V>
+where
+    K: Ord + Send + 'static,
+    V: Send + 'static,
+{
+    /// Inserts a `key`-`value` pair into the skip list and returns the new entry.
+    ///
+    /// If there is an existing entry with this key, it will be removed before inserting the new
+    /// one.
+    pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<K, V> {
+        self.insert_internal(key, value, true, guard)
+    }
+
+    /// Removes an entry with the specified `key` from the map and returns it.
+    pub fn remove<Q>(&self, key: &Q, guard: &Guard) -> Option<RefEntry<K, V>>
+    where
+        K: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.check_guard(guard);
+
+        unsafe {
+            // Rebind the guard to the lifetime of self. This is a bit of a
+            // hack but it allows us to return references that are not bound to
+            // the lifetime of the guard.
+            let guard = &*(guard as *const _);
+
+            loop {
+                // Try searching for the key.
+                let search = self.search_position(key, guard);
+
+                let n = search.found?;
+
+                // First try incrementing the reference count because we have to return the node as
+                // an entry. If this fails, repeat the search.
+                let entry = match RefEntry::try_acquire(self, n) {
+                    Some(e) => e,
+                    None => continue,
+                };
+
+                // Try removing the node by marking its tower.
+                if n.mark_tower() {
+                    // Success! Decrement `len`.
+                    self.hot_data.len.fetch_sub(1, Ordering::Relaxed);
+
+                    // Unlink the node at each level of the skip list. We could do this by simply
+                    // repeating the search, but it's usually faster to unlink it manually using
+                    // the `left` and `right` lists.
+                    for level in (0..n.height()).rev() {
+                        // TODO(Amanieu): can we use relaxed ordering here?
+                        let succ = n.tower[level].load(Ordering::SeqCst, guard).with_tag(0);
+
+                        // Try linking the predecessor and successor at this level.
+                        // TODO(Amanieu): can we use release ordering here?
+                        if search.left[level][level]
+                            .compare_and_set(
+                                Shared::from(n as *const _),
+                                succ,
+                                Ordering::SeqCst,
+                                guard,
+                            )
+                            .is_ok()
+                        {
+                            // Success! Decrement the reference count.
+                            n.decrement(guard);
+                        } else {
+                            // Failed! Just repeat the search to completely unlink the node.
+                            self.search_bound(Bound::Included(key), false, guard);
+                            break;
+                        }
+                    }
+
+                    return Some(entry);
+                }
+            }
+        }
+    }
+
+    /// Removes an entry from the front of the skip list.
+    pub fn pop_front(&self, guard: &Guard) -> Option<RefEntry<K, V>> {
+        self.check_guard(guard);
+        loop {
+            let e = self.front(guard)?;
+            if let Some(e) = e.pin() {
+                if e.remove(guard) {
+                    return Some(e);
+                }
+            }
+        }
+    }
+
+    /// Removes an entry from the back of the skip list.
+    pub fn pop_back(&self, guard: &Guard) -> Option<RefEntry<K, V>> {
+        self.check_guard(guard);
+        loop {
+            let e = self.back(guard)?;
+            if let Some(e) = e.pin() {
+                if e.remove(guard) {
+                    return Some(e);
+                }
+            }
+        }
+    }
+
+    /// Iterates over the map and removes every entry.
+    pub fn clear(&self, guard: &mut Guard) {
+        self.check_guard(guard);
+
+        /// Number of steps after which we repin the current thread and unlink removed nodes.
+        const BATCH_SIZE: usize = 100;
+
+        loop {
+            {
+                // Search for the first entry in order to unlink all the preceding entries
+                // we have removed.
+                //
+                // By unlinking nodes in batches we make sure that the final search doesn't
+                // unlink all nodes at once, which could keep the current thread pinned for a
+                // long time.
+                let mut entry = self.lower_bound(Bound::Unbounded, guard);
+
+                for _ in 0..BATCH_SIZE {
+                    // Stop if we have reached the end of the list.
+                    let e = match entry {
+                        None => return,
+                        Some(e) => e,
+                    };
+
+                    // Before removing the current entry, first obtain the following one.
+                    let next = e.next();
+
+                    // Try removing the current entry.
+                    if e.node.mark_tower() {
+                        // Success! Decrement `len`.
+                        self.hot_data.len.fetch_sub(1, Ordering::Relaxed);
+                    }
+
+                    entry = next;
+                }
+            }
+
+            // Repin the current thread because we don't want to keep it pinned in the same
+            // epoch for too long.
+            guard.repin();
+        }
+    }
+}
+
+impl<K, V> Drop for SkipList<K, V> {
+    fn drop(&mut self) {
+        unsafe {
+            let mut node = self.head[0]
+                .load(Ordering::Relaxed, epoch::unprotected())
+                .as_ref();
+
+            // Iterate through the whole skip list and destroy every node.
+            while let Some(n) = node {
+                // Unprotected loads are okay because this function is the only one currently using
+                // the skip list.
+                let next = n.tower[0]
+                    .load(Ordering::Relaxed, epoch::unprotected())
+                    .as_ref();
+
+                // Deallocate every node.
+                Node::finalize(n);
+
+                node = next;
+            }
+        }
+    }
+}
+
+impl<K, V> IntoIterator for SkipList<K, V> {
+    type Item = (K, V);
+    type IntoIter = IntoIter<K, V>;
+
+    fn into_iter(self) -> IntoIter<K, V> {
+        unsafe {
+            // Load the front node.
+            //
+            // Unprotected loads are okay because this function is the only one currently using
+            // the skip list.
+            let front = self.head[0]
+                .load(Ordering::Relaxed, epoch::unprotected())
+                .as_raw();
+
+            // Clear the skip list by setting all pointers in head to null.
+            for level in 0..MAX_HEIGHT {
+                self.head[level].store(Shared::null(), Ordering::Relaxed);
+            }
+
+            IntoIter {
+                node: front as *mut Node<K, V>,
+            }
+        }
+    }
+}
+
+/// An entry in a skip list, protected by a `Guard`.
+///
+/// The lifetimes of the key and value are the same as that of the `Guard`
+/// used when creating the `Entry` (`'g`). This lifetime is also constrained to
+/// not outlive the `SkipList`.
+pub struct Entry<'a: 'g, 'g, K: 'a, V: 'a> {
+    parent: &'a SkipList<K, V>,
+    node: &'g Node<K, V>,
+    guard: &'g Guard,
+}
+
+impl<'a: 'g, 'g, K: 'a, V: 'a> Entry<'a, 'g, K, V> {
+    /// Returns `true` if the entry is removed from the skip list.
+    pub fn is_removed(&self) -> bool {
+        self.node.is_removed()
+    }
+
+    /// Returns a reference to the key.
+    pub fn key(&self) -> &'g K {
+        &self.node.key
+    }
+
+    /// Returns a reference to the value.
+    pub fn value(&self) -> &'g V {
+        &self.node.value
+    }
+
+    /// Returns a reference to the parent `SkipList`.
+    pub fn skiplist(&self) -> &'a SkipList<K, V> {
+        self.parent
+    }
+
+    /// Attempts to pin the entry with a reference count, ensuring that it
+    /// remains accessible even after the `Guard` is dropped.
+    ///
+    /// This method may return `None` if the reference count is already 0 and
+    /// the node has been queued for deletion.
+    pub fn pin(&self) -> Option<RefEntry<'a, K, V>> {
+        unsafe { RefEntry::try_acquire(self.parent, self.node) }
+    }
+}
+
+impl<'a: 'g, 'g, K, V> Entry<'a, 'g, K, V>
+where
+    K: Ord + Send + 'static,
+    V: Send + 'static,
+{
+    /// Removes the entry from the skip list.
+    ///
+    /// Returns `true` if this call removed the entry and `false` if it was already removed.
+    pub fn remove(&self) -> bool {
+        // Try marking the tower.
+        if self.node.mark_tower() {
+            // Success - the entry is removed. Now decrement `len`.
+            self.parent.hot_data.len.fetch_sub(1, Ordering::Relaxed);
+
+            // Search for the key to unlink the node from the skip list.
+ self.parent + .search_bound(Bound::Included(&self.node.key), false, self.guard); + + true + } else { + false + } + } +} + +impl<'a: 'g, 'g, K, V> Clone for Entry<'a, 'g, K, V> { + fn clone(&self) -> Entry<'a, 'g, K, V> { + Entry { + parent: self.parent, + node: self.node, + guard: self.guard, + } + } +} + +impl<'a, 'g, K, V> fmt::Debug for Entry<'a, 'g, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Entry") + .field(self.key()) + .field(self.value()) + .finish() + } +} + +impl<'a: 'g, 'g, K, V> Entry<'a, 'g, K, V> +where + K: Ord, +{ + /// Moves to the next entry in the skip list. + pub fn move_next(&mut self) -> bool { + match self.next() { + None => false, + Some(n) => { + *self = n; + true + } + } + } + + /// Returns the next entry in the skip list. + pub fn next(&self) -> Option> { + let n = self.parent.next_node( + &self.node.tower, + Bound::Excluded(&self.node.key), + self.guard, + )?; + Some(Entry { + parent: self.parent, + node: n, + guard: self.guard, + }) + } + + /// Moves to the previous entry in the skip list. + pub fn move_prev(&mut self) -> bool { + match self.prev() { + None => false, + Some(n) => { + *self = n; + true + } + } + } + + /// Returns the previous entry in the skip list. + pub fn prev(&self) -> Option> { + let n = self.parent + .search_bound(Bound::Excluded(&self.node.key), true, self.guard)?; + Some(Entry { + parent: self.parent, + node: n, + guard: self.guard, + }) + } +} + +/// A reference-counted entry in a skip list. +/// +/// You *must* call `release` to free this type, otherwise the node will be +/// leaked. This is because releasing the entry requires a `Guard`. +pub struct RefEntry<'a, K: 'a, V: 'a> { + parent: &'a SkipList, + node: &'a Node, +} + +impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> { + /// Returns `true` if the entry is removed from the skip list. + pub fn is_removed(&self) -> bool { + self.node.is_removed() + } + + /// Returns a reference to the key. + pub fn key(&self) -> &K { + &self.node.key + } + + /// Returns a reference to the value. + pub fn value(&self) -> &V { + &self.node.value + } + + /// Returns a reference to the parent `SkipList` + pub fn skiplist(&self) -> &'a SkipList { + self.parent + } + + /// Tries to create a new `RefEntry` by incrementing the reference count of + /// a node. + unsafe fn try_acquire( + parent: &'a SkipList, + node: &Node, + ) -> Option> { + if node.try_increment() { + Some(RefEntry { + parent, + + // We re-bind the lifetime of the node here to that of the skip + // list since we now hold a reference to it. + node: &*(node as *const _), + }) + } else { + None + } + } +} + +impl<'a, K, V> RefEntry<'a, K, V> +where + K: Ord + Send + 'static, + V: Send + 'static, +{ + /// Removes the entry from the skip list. + /// + /// Returns `true` if this call removed the entry and `false` if it was already removed. + pub fn remove(&self, guard: &Guard) -> bool { + self.parent.check_guard(guard); + + // Try marking the tower. + if self.node.mark_tower() { + // Success - the entry is removed. Now decrement `len`. + self.parent.hot_data.len.fetch_sub(1, Ordering::Relaxed); + + // Search for the key to unlink the node from the skip list. + self.parent + .search_bound(Bound::Included(&self.node.key), false, guard); + + true + } else { + false + } + } +} + +impl<'a, K, V> Clone for RefEntry<'a, K, V> { + fn clone(&self) -> RefEntry<'a, K, V> { + unsafe { + // Incrementing will always succeed since we're already holding a reference to the node. 
+ Node::try_increment(self.node); + } + RefEntry { + parent: self.parent, + node: self.node, + } + } +} + +impl<'a, K, V> fmt::Debug for RefEntry<'a, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("RefEntry") + .field(self.key()) + .field(self.value()) + .finish() + } +} + +impl<'a, K, V> RefEntry<'a, K, V> +where + K: Ord, +{ + /// Moves to the next entry in the skip list. + pub fn move_next(&mut self, guard: &Guard) -> bool { + match self.next(guard) { + None => false, + Some(e) => { + mem::replace(self, e).release(guard); + true + } + } + } + + /// Returns the next entry in the skip list. + pub fn next(&self, guard: &Guard) -> Option> { + self.parent.check_guard(guard); + unsafe { + let mut n = self.node; + loop { + n = self.parent + .next_node(&n.tower, Bound::Excluded(&n.key), guard)?; + if let Some(e) = RefEntry::try_acquire(self.parent, n) { + return Some(e); + } + } + } + } + /// Moves to the previous entry in the skip list. + pub fn move_prev(&mut self, guard: &Guard) -> bool { + match self.prev(guard) { + None => false, + Some(e) => { + mem::replace(self, e).release(guard); + true + } + } + } + + /// Returns the previous entry in the skip list. + pub fn prev(&self, guard: &Guard) -> Option> { + self.parent.check_guard(guard); + unsafe { + let mut n = self.node; + loop { + n = self.parent + .search_bound(Bound::Excluded(&n.key), true, guard)?; + if let Some(e) = RefEntry::try_acquire(self.parent, n) { + return Some(e); + } + } + } + } + + /// Releases the reference on the entry. + pub fn release(self, guard: &Guard) { + self.parent.check_guard(guard); + unsafe { self.node.decrement(guard) } + } +} + +/// An iterator over the entries of a `SkipList`. +pub struct Iter<'a: 'g, 'g, K: 'a, V: 'a> { + parent: &'a SkipList, + head: Option<&'g Node>, + tail: Option<&'g Node>, + guard: &'g Guard, +} + +impl<'a: 'g, 'g, K: 'a, V: 'a> Iterator for Iter<'a, 'g, K, V> +where + K: Ord, +{ + type Item = Entry<'a, 'g, K, V>; + + fn next(&mut self) -> Option> { + self.head = match self.head { + Some(n) => self.parent + .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), + None => self.parent + .next_node(&self.parent.head, Bound::Unbounded, self.guard), + }; + if let (Some(h), Some(t)) = (self.head, self.tail) { + if h.key >= t.key { + self.head = None; + self.tail = None; + } + } + self.head.map(|n| Entry { + parent: self.parent, + node: n, + guard: self.guard, + }) + } +} + +impl<'a: 'g, 'g, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, 'g, K, V> +where + K: Ord, +{ + fn next_back(&mut self) -> Option> { + self.tail = match self.tail { + Some(n) => self.parent + .search_bound(Bound::Excluded(&n.key), true, self.guard), + None => self.parent.search_bound(Bound::Unbounded, true, self.guard), + }; + if let (Some(h), Some(t)) = (self.head, self.tail) { + if h.key >= t.key { + self.head = None; + self.tail = None; + } + } + self.tail.map(|n| Entry { + parent: self.parent, + node: n, + guard: self.guard, + }) + } +} + +/// An iterator over reference-counted entries of a `SkipList`. 
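+///
+/// Unlike `Iter`, this iterator does not hold a `Guard`; a guard is passed to
+/// every call instead. A minimal pumping sketch, assuming a `RefIter` obtained
+/// from `ref_iter` (as used by the `map` module below):
+///
+/// ```ignore
+/// let mut it = list.ref_iter();
+/// let guard = &epoch::pin();
+/// while let Some(e) = it.next(guard) {
+///     println!("{:?} -> {:?}", e.key(), e.value());
+///     e.release(guard);
+/// }
+/// ```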
+pub struct RefIter<'a, K: 'a, V: 'a> { + parent: &'a SkipList, + head: Option>, + tail: Option>, +} + +impl<'a, K: 'a, V: 'a> RefIter<'a, K, V> +where + K: Ord, +{ + pub fn next(&mut self, guard: &Guard) -> Option> { + self.parent.check_guard(guard); + self.head = match self.head { + Some(ref e) => e.next(guard), + None => try_pin_loop(|| self.parent.front(guard)), + }; + let mut finished = false; + if let (&Some(ref h), &Some(ref t)) = (&self.head, &self.tail) { + if h.key() >= t.key() { + finished = true; + } + } + if finished { + self.head = None; + self.tail = None; + } + self.head.clone() + } + + pub fn next_back(&mut self, guard: &Guard) -> Option> { + self.parent.check_guard(guard); + self.tail = match self.tail { + Some(ref e) => e.prev(guard), + None => try_pin_loop(|| self.parent.back(guard)), + }; + let mut finished = false; + if let (&Some(ref h), &Some(ref t)) = (&self.head, &self.tail) { + if h.key() >= t.key() { + finished = true; + } + } + if finished { + self.head = None; + self.tail = None; + } + self.tail.clone() + } +} + +/// An iterator over a subset of entries of a `SkipList`. +pub struct Range<'a: 'g, 'g, 'k, Min, Max, K: 'a, V: 'a> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + parent: &'a SkipList, + lower_bound: Bound<&'k Min>, + upper_bound: Bound<&'k Max>, + head: Option<&'g Node>, + tail: Option<&'g Node>, + guard: &'g Guard, +} + +impl<'a: 'g, 'g, 'k, Min, Max, K: 'a, V: 'a> Iterator for Range<'a, 'g, 'k, Min, Max, K, V> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + type Item = Entry<'a, 'g, K, V>; + + fn next(&mut self) -> Option> { + self.head = match self.head { + Some(n) => self.parent + .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), + None => self.parent + .search_bound(self.lower_bound, false, self.guard), + }; + if let Some(h) = self.head { + let bound = match self.tail { + Some(t) => Bound::Excluded(t.key.borrow()), + None => self.upper_bound, + }; + if !below_upper_bound(&bound, h.key.borrow()) { + self.head = None; + self.tail = None; + } + } + self.head.map(|n| Entry { + parent: self.parent, + node: n, + guard: self.guard, + }) + } +} + +impl<'a: 'g, 'g, 'k, Min, Max, K: 'a, V: 'a> DoubleEndedIterator + for Range<'a, 'g, 'k, Min, Max, K, V> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + fn next_back(&mut self) -> Option> { + self.tail = match self.tail { + Some(n) => self.parent + .search_bound(Bound::Excluded(&n.key), true, self.guard), + None => self.parent.search_bound(self.upper_bound, true, self.guard), + }; + if let Some(t) = self.tail { + let bound = match self.head { + Some(h) => Bound::Excluded(h.key.borrow()), + None => self.lower_bound, + }; + if !above_lower_bound(&bound, t.key.borrow()) { + self.head = None; + self.tail = None; + } + } + self.tail.map(|n| Entry { + parent: self.parent, + node: n, + guard: self.guard, + }) + } +} + +/// An iterator over reference-counted subset of entries of a `SkipList`. 
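+///
+/// Like `RefIter`, a guard is supplied per call rather than stored. A sketch,
+/// assuming a `RefRange` obtained from `ref_range` (as used by the `map`
+/// module below) with illustrative integer bounds:
+///
+/// ```ignore
+/// let mut r = list.ref_range(Bound::Included(&10), Bound::Excluded(&20));
+/// let guard = &epoch::pin();
+/// while let Some(e) = r.next(guard) {
+///     // Yields entries with keys in [10, 20), front to back.
+///     e.release(guard);
+/// }
+/// ```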
+pub struct RefRange<'a, 'k, Min, Max, K: 'a, V: 'a>
+where
+    K: Ord + Borrow<Min> + Borrow<Max>,
+    Min: Ord + ?Sized + 'k,
+    Max: Ord + ?Sized + 'k,
+{
+    parent: &'a SkipList<K, V>,
+    lower_bound: Bound<&'k Min>,
+    upper_bound: Bound<&'k Max>,
+    head: Option<RefEntry<'a, K, V>>,
+    tail: Option<RefEntry<'a, K, V>>,
+}
+
+impl<'a, 'k, Min, Max, K: 'a, V: 'a> RefRange<'a, 'k, Min, Max, K, V>
+where
+    K: Ord + Borrow<Min> + Borrow<Max>,
+    Min: Ord + ?Sized + 'k,
+    Max: Ord + ?Sized + 'k,
+{
+    pub fn next(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> {
+        self.parent.check_guard(guard);
+        self.head = match self.head {
+            Some(ref e) => e.next(guard),
+            None => try_pin_loop(|| self.parent.lower_bound(self.lower_bound, guard)),
+        };
+        let mut finished = false;
+        if let Some(ref h) = self.head {
+            let bound = match self.tail {
+                Some(ref t) => Bound::Excluded(t.key().borrow()),
+                None => self.upper_bound,
+            };
+            if !below_upper_bound(&bound, h.key().borrow()) {
+                finished = true;
+            }
+        }
+        if finished {
+            self.head = None;
+            self.tail = None;
+        }
+        self.head.clone()
+    }
+
+    pub fn next_back(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> {
+        self.parent.check_guard(guard);
+        self.tail = match self.tail {
+            Some(ref e) => e.prev(guard),
+            None => try_pin_loop(|| self.parent.upper_bound(self.upper_bound, guard)),
+        };
+        let mut finished = false;
+        if let Some(ref t) = self.tail {
+            let bound = match self.head {
+                Some(ref h) => Bound::Excluded(h.key().borrow()),
+                None => self.lower_bound,
+            };
+            if !above_lower_bound(&bound, t.key().borrow()) {
+                finished = true;
+            }
+        }
+        if finished {
+            self.head = None;
+            self.tail = None;
+        }
+        self.tail.clone()
+    }
+}
+
+/// An owning iterator over the entries of a `SkipList`.
+pub struct IntoIter<K, V> {
+    /// The current node.
+    ///
+    /// All preceding nodes have already been destroyed.
+    node: *mut Node<K, V>,
+}
+
+impl<K, V> Drop for IntoIter<K, V> {
+    fn drop(&mut self) {
+        // Iterate through the whole chain and destroy every node.
+        while !self.node.is_null() {
+            unsafe {
+                // Unprotected loads are okay because this function is the only one currently using
+                // the skip list.
+                let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected());
+
+                // We can safely do this without deferring because references to
+                // keys & values that we give out never outlive the SkipList.
+                Node::finalize(self.node);
+
+                self.node = next.as_raw() as *mut Node<K, V>;
+            }
+        }
+    }
+}
+
+impl<K, V> Iterator for IntoIter<K, V> {
+    type Item = (K, V);
+
+    fn next(&mut self) -> Option<(K, V)> {
+        loop {
+            // Have we reached the end of the skip list?
+            if self.node.is_null() {
+                return None;
+            }
+
+            unsafe {
+                // Take the key and value out of the node.
+                let key = ptr::read(&(*self.node).key);
+                let value = ptr::read(&(*self.node).value);
+
+                // Get the next node in the skip list.
+                //
+                // Unprotected loads are okay because this function is the only one currently using
+                // the skip list.
+                let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected());
+
+                // Deallocate the current node and move to the next one.
+                Node::dealloc(self.node);
+                self.node = next.as_raw() as *mut Node<K, V>;
+
+                // The current node may be marked. If it is, it's been removed from the skip list
+                // and we should just skip it.
+                if next.tag() == 0 {
+                    return Some((key, value));
+                }
+            }
+        }
+    }
+}
+
+/// Helper function to retry an operation until pinning succeeds or `None` is
+/// returned.
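+///
+/// Pinning fails only when the entry's reference count has already dropped to
+/// zero, i.e. the node is queued for destruction, so the operation is simply
+/// retried against the current state of the list:
+///
+/// ```ignore
+/// // Retries until a live entry is acquired or the list is seen to be empty.
+/// let first = try_pin_loop(|| list.front(guard));
+/// ```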
+pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option<RefEntry<'a, K, V>>
+where
+    F: FnMut() -> Option<Entry<'a, 'g, K, V>>,
+{
+    loop {
+        if let Some(e) = f()?.pin() {
+            return Some(e);
+        }
+    }
+}
+
+/// Helper function to check if a value is above a lower bound
+fn above_lower_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool {
+    match *bound {
+        Bound::Unbounded => true,
+        Bound::Included(key) => other >= key,
+        Bound::Excluded(key) => other > key,
+    }
+}
+
+/// Helper function to check if a value is below an upper bound
+fn below_upper_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool {
+    match *bound {
+        Bound::Unbounded => true,
+        Bound::Included(key) => other <= key,
+        Bound::Excluded(key) => other < key,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use epoch;
+    use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+    use Bound;
+
+    use super::SkipList;
+
+    #[test]
+    fn new() {
+        SkipList::<i32, i32>::new(epoch::default_collector().clone());
+        SkipList::<String, Box<i32>>::new(epoch::default_collector().clone());
+    }
+
+    #[test]
+    fn is_empty() {
+        let guard = &epoch::pin();
+        let s = SkipList::new(epoch::default_collector().clone());
+        assert!(s.is_empty());
+
+        s.insert(1, 10, guard);
+        assert!(!s.is_empty());
+        s.insert(2, 20, guard);
+        s.insert(3, 30, guard);
+        assert!(!s.is_empty());
+
+        s.remove(&2, guard);
+        assert!(!s.is_empty());
+
+        s.remove(&1, guard);
+        assert!(!s.is_empty());
+
+        s.remove(&3, guard);
+        assert!(s.is_empty());
+    }
+
+    #[test]
+    fn insert() {
+        let guard = &epoch::pin();
+        let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+        let not_present = [1, 3, 6, 9, 10];
+        let s = SkipList::new(epoch::default_collector().clone());
+
+        for &x in &insert {
+            s.insert(x, x * 10, guard);
+            assert_eq!(*s.get(&x, guard).unwrap().value(), x * 10);
+        }
+
+        for &x in &not_present {
+            assert!(s.get(&x, guard).is_none());
+        }
+    }
+
+    #[test]
+    fn remove() {
+        let guard = &epoch::pin();
+        let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+        let not_present = [1, 3, 6, 9, 10];
+        let remove = [2, 12, 8];
+        let remaining = [0, 4, 5, 7, 11];
+
+        let s = SkipList::new(epoch::default_collector().clone());
+
+        for &x in &insert {
+            s.insert(x, x * 10, guard);
+        }
+        for x in &not_present {
+            assert!(s.remove(x, guard).is_none());
+        }
+        for x in &remove {
+            assert!(s.remove(x, guard).is_some());
+        }
+
+        let mut v = vec![];
+        let mut e = s.front(guard).unwrap();
+        loop {
+            v.push(*e.key());
+            if !e.move_next() {
+                break;
+            }
+        }
+
+        assert_eq!(v, remaining);
+        for x in &insert {
+            s.remove(x, guard);
+        }
+        assert!(s.is_empty());
+    }
+
+    #[test]
+    fn entry() {
+        let guard = &epoch::pin();
+        let s = SkipList::new(epoch::default_collector().clone());
+
+        assert!(s.front(guard).is_none());
+        assert!(s.back(guard).is_none());
+
+        for &x in &[4, 2, 12, 8, 7, 11, 5] {
+            s.insert(x, x * 10, guard);
+        }
+
+        let mut e = s.front(guard).unwrap();
+        assert_eq!(*e.key(), 2);
+        assert!(!e.move_prev());
+        assert!(e.move_next());
+        assert_eq!(*e.key(), 4);
+
+        e = s.back(guard).unwrap();
+        assert_eq!(*e.key(), 12);
+        assert!(!e.move_next());
+        assert!(e.move_prev());
+        assert_eq!(*e.key(), 11);
+    }
+
+    #[test]
+    fn entry_remove() {
+        let guard = &epoch::pin();
+        let s = SkipList::new(epoch::default_collector().clone());
+
+        for &x in &[4, 2, 12, 8, 7, 11, 5] {
+            s.insert(x, x * 10, guard);
+        }
+
+        let mut e = s.get(&7, guard).unwrap();
+        assert!(!e.is_removed());
+        assert!(e.remove());
+        assert!(e.is_removed());
+
+        e.move_prev();
+        e.move_next();
+        assert_ne!(*e.key(), 7);
+
+        for e in s.iter(guard) {
+            assert!(!s.is_empty());
+            assert_ne!(s.len(), 0);
+            e.remove();
+        }
+
assert!(s.is_empty()); + assert_eq!(s.len(), 0); + } + + #[test] + fn entry_reposition() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(x, x * 10, guard); + } + + let mut e = s.get(&7, guard).unwrap(); + assert!(!e.is_removed()); + assert!(e.remove()); + assert!(e.is_removed()); + + s.insert(7, 700, guard); + e.move_prev(); + e.move_next(); + assert_eq!(*e.key(), 7); + } + + #[test] + fn len() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + assert_eq!(s.len(), 0); + + for (i, &x) in [4, 2, 12, 8, 7, 11, 5].iter().enumerate() { + s.insert(x, x * 1, guard); + assert_eq!(s.len(), i + 1); + } + + s.insert(5, 0, guard); + assert_eq!(s.len(), 7); + s.insert(5, 0, guard); + assert_eq!(s.len(), 7); + + s.remove(&6, guard); + assert_eq!(s.len(), 7); + s.remove(&5, guard); + assert_eq!(s.len(), 6); + s.remove(&12, guard); + assert_eq!(s.len(), 5); + } + + #[test] + fn insert_and_remove() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + let keys = || s.iter(guard).map(|e| *e.key()).collect::>(); + + s.insert(3, 0, guard); + s.insert(5, 0, guard); + s.insert(1, 0, guard); + s.insert(4, 0, guard); + s.insert(2, 0, guard); + assert_eq!(keys(), [1, 2, 3, 4, 5]); + + assert!(s.remove(&4, guard).is_some()); + assert_eq!(keys(), [1, 2, 3, 5]); + assert!(s.remove(&3, guard).is_some()); + assert_eq!(keys(), [1, 2, 5]); + assert!(s.remove(&1, guard).is_some()); + assert_eq!(keys(), [2, 5]); + + assert!(s.remove(&1, guard).is_none()); + assert_eq!(keys(), [2, 5]); + assert!(s.remove(&3, guard).is_none()); + assert_eq!(keys(), [2, 5]); + + assert!(s.remove(&2, guard).is_some()); + assert_eq!(keys(), [5]); + assert!(s.remove(&5, guard).is_some()); + assert_eq!(keys(), []); + + s.insert(3, 0, guard); + assert_eq!(keys(), [3]); + s.insert(1, 0, guard); + assert_eq!(keys(), [1, 3]); + s.insert(3, 0, guard); + assert_eq!(keys(), [1, 3]); + s.insert(5, 0, guard); + assert_eq!(keys(), [1, 3, 5]); + + assert!(s.remove(&3, guard).is_some()); + assert_eq!(keys(), [1, 5]); + assert!(s.remove(&1, guard).is_some()); + assert_eq!(keys(), [5]); + assert!(s.remove(&3, guard).is_none()); + assert_eq!(keys(), [5]); + assert!(s.remove(&5, guard).is_some()); + assert_eq!(keys(), []); + } + + #[test] + fn get() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + s.insert(30, 3, guard); + s.insert(50, 5, guard); + s.insert(10, 1, guard); + s.insert(40, 4, guard); + s.insert(20, 2, guard); + + assert_eq!(*s.get(&10, guard).unwrap().value(), 1); + assert_eq!(*s.get(&20, guard).unwrap().value(), 2); + assert_eq!(*s.get(&30, guard).unwrap().value(), 3); + assert_eq!(*s.get(&40, guard).unwrap().value(), 4); + assert_eq!(*s.get(&50, guard).unwrap().value(), 5); + + assert!(s.get(&7, guard).is_none()); + assert!(s.get(&27, guard).is_none()); + assert!(s.get(&31, guard).is_none()); + assert!(s.get(&97, guard).is_none()); + } + + #[test] + fn lower_bound() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + s.insert(30, 3, guard); + s.insert(50, 5, guard); + s.insert(10, 1, guard); + s.insert(40, 4, guard); + s.insert(20, 2, guard); + + assert_eq!(*s.lower_bound(Bound::Unbounded, guard).unwrap().value(), 1); + + assert_eq!( + *s.lower_bound(Bound::Included(&10), guard).unwrap().value(), + 1 + ); + assert_eq!( + *s.lower_bound(Bound::Included(&20), guard).unwrap().value(), + 2 
+ ); + assert_eq!( + *s.lower_bound(Bound::Included(&30), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.lower_bound(Bound::Included(&40), guard).unwrap().value(), + 4 + ); + assert_eq!( + *s.lower_bound(Bound::Included(&50), guard).unwrap().value(), + 5 + ); + + assert_eq!( + *s.lower_bound(Bound::Included(&7), guard).unwrap().value(), + 1 + ); + assert_eq!( + *s.lower_bound(Bound::Included(&27), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.lower_bound(Bound::Included(&31), guard).unwrap().value(), + 4 + ); + assert!(s.lower_bound(Bound::Included(&97), guard).is_none()); + + assert_eq!( + *s.lower_bound(Bound::Excluded(&10), guard).unwrap().value(), + 2 + ); + assert_eq!( + *s.lower_bound(Bound::Excluded(&20), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.lower_bound(Bound::Excluded(&30), guard).unwrap().value(), + 4 + ); + assert_eq!( + *s.lower_bound(Bound::Excluded(&40), guard).unwrap().value(), + 5 + ); + assert!(s.lower_bound(Bound::Excluded(&50), guard).is_none()); + + assert_eq!( + *s.lower_bound(Bound::Excluded(&7), guard).unwrap().value(), + 1 + ); + assert_eq!( + *s.lower_bound(Bound::Excluded(&27), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.lower_bound(Bound::Excluded(&31), guard).unwrap().value(), + 4 + ); + assert!(s.lower_bound(Bound::Excluded(&97), guard).is_none()); + } + + #[test] + fn upper_bound() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + s.insert(30, 3, guard); + s.insert(50, 5, guard); + s.insert(10, 1, guard); + s.insert(40, 4, guard); + s.insert(20, 2, guard); + + assert_eq!(*s.upper_bound(Bound::Unbounded, guard).unwrap().value(), 5); + + assert_eq!( + *s.upper_bound(Bound::Included(&10), guard).unwrap().value(), + 1 + ); + assert_eq!( + *s.upper_bound(Bound::Included(&20), guard).unwrap().value(), + 2 + ); + assert_eq!( + *s.upper_bound(Bound::Included(&30), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.upper_bound(Bound::Included(&40), guard).unwrap().value(), + 4 + ); + assert_eq!( + *s.upper_bound(Bound::Included(&50), guard).unwrap().value(), + 5 + ); + + assert!(s.upper_bound(Bound::Included(&7), guard).is_none()); + assert_eq!( + *s.upper_bound(Bound::Included(&27), guard).unwrap().value(), + 2 + ); + assert_eq!( + *s.upper_bound(Bound::Included(&31), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.upper_bound(Bound::Included(&97), guard).unwrap().value(), + 5 + ); + + assert!(s.upper_bound(Bound::Excluded(&10), guard).is_none()); + assert_eq!( + *s.upper_bound(Bound::Excluded(&20), guard).unwrap().value(), + 1 + ); + assert_eq!( + *s.upper_bound(Bound::Excluded(&30), guard).unwrap().value(), + 2 + ); + assert_eq!( + *s.upper_bound(Bound::Excluded(&40), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.upper_bound(Bound::Excluded(&50), guard).unwrap().value(), + 4 + ); + + assert!(s.upper_bound(Bound::Excluded(&7), guard).is_none()); + assert_eq!( + *s.upper_bound(Bound::Excluded(&27), guard).unwrap().value(), + 2 + ); + assert_eq!( + *s.upper_bound(Bound::Excluded(&31), guard).unwrap().value(), + 3 + ); + assert_eq!( + *s.upper_bound(Bound::Excluded(&97), guard).unwrap().value(), + 5 + ); + } + + #[test] + fn get_or_insert() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + s.insert(3, 3, guard); + s.insert(5, 5, guard); + s.insert(1, 1, guard); + s.insert(4, 4, guard); + s.insert(2, 2, guard); + + assert_eq!(*s.get(&4, guard).unwrap().value(), 4); + assert_eq!(*s.insert(4, 40, guard).value(), 40); + 
assert_eq!(*s.get(&4, guard).unwrap().value(), 40); + + assert_eq!(*s.get_or_insert(4, 400, guard).value(), 40); + assert_eq!(*s.get(&4, guard).unwrap().value(), 40); + assert_eq!(*s.get_or_insert(6, 600, guard).value(), 600); + } + + #[test] + fn get_next_prev() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + s.insert(3, 3, guard); + s.insert(5, 5, guard); + s.insert(1, 1, guard); + s.insert(4, 4, guard); + s.insert(2, 2, guard); + + let mut e = s.get(&3, guard).unwrap(); + assert_eq!(*e.next().unwrap().value(), 4); + assert_eq!(*e.prev().unwrap().value(), 2); + assert_eq!(*e.value(), 3); + + e.move_prev(); + assert_eq!(*e.next().unwrap().value(), 3); + assert_eq!(*e.prev().unwrap().value(), 1); + assert_eq!(*e.value(), 2); + + e.move_prev(); + assert_eq!(*e.next().unwrap().value(), 2); + assert!(e.prev().is_none()); + assert_eq!(*e.value(), 1); + + e.move_next(); + e.move_next(); + e.move_next(); + e.move_next(); + assert!(e.next().is_none()); + assert_eq!(*e.prev().unwrap().value(), 4); + assert_eq!(*e.value(), 5); + } + + #[test] + fn front_and_back() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + assert!(s.front(guard).is_none()); + assert!(s.back(guard).is_none()); + + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(x, x * 10, guard); + } + + assert_eq!(*s.front(guard).unwrap().key(), 2); + assert_eq!(*s.back(guard).unwrap().key(), 12); + } + + #[test] + fn iter() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(x, x * 10, guard); + } + + assert_eq!( + s.iter(guard).map(|e| *e.key()).collect::>(), + &[2, 4, 5, 7, 8, 11, 12] + ); + + let mut it = s.iter(guard); + s.remove(&2, guard); + assert_eq!(*it.next().unwrap().key(), 4); + s.remove(&7, guard); + assert_eq!(*it.next().unwrap().key(), 5); + s.remove(&5, guard); + assert_eq!(*it.next().unwrap().key(), 8); + s.remove(&12, guard); + assert_eq!(*it.next().unwrap().key(), 11); + assert!(it.next().is_none()); + } + + #[test] + fn iter_range() { + use Bound::*; + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + let v = (0..10).map(|x| x * 10).collect::>(); + for &x in v.iter() { + s.insert(x, x, guard); + } + + assert_eq!( + s.iter(guard).map(|x| *x.value()).collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.iter(guard).rev().map(|x| *x.value()).collect::>(), + vec![90, 80, 70, 60, 50, 40, 30, 20, 10, 0] + ); + assert_eq!( + s.range(Unbounded, Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + + assert_eq!( + s.range(Included(&0), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Excluded(&0), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Included(&25), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Excluded(&25), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Included(&70), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![70, 80, 90] + ); + assert_eq!( + s.range(Excluded(&70), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![80, 90] + ); + assert_eq!( + s.range(Included(&100), 
Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&100), Unbounded, guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Unbounded, Included(&90), guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Unbounded, Excluded(&90), guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80] + ); + assert_eq!( + s.range(Unbounded, Included(&25), guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20] + ); + assert_eq!( + s.range(Unbounded, Excluded(&25), guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20] + ); + assert_eq!( + s.range(Unbounded, Included(&70), guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70] + ); + assert_eq!( + s.range(Unbounded, Excluded(&70), guard) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60] + ); + assert_eq!( + s.range(Unbounded, Included(&-1), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Unbounded, Excluded(&-1), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Included(&25), Included(&80), guard) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80] + ); + assert_eq!( + s.range(Included(&25), Excluded(&80), guard) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70] + ); + assert_eq!( + s.range(Excluded(&25), Included(&80), guard) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80] + ); + assert_eq!( + s.range(Excluded(&25), Excluded(&80), guard) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70] + ); + + assert_eq!( + s.range(Included(&25), Included(&25), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Included(&25), Excluded(&25), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&25), Included(&25), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&25), Excluded(&25), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Included(&50), Included(&50), guard) + .map(|x| *x.value()) + .collect::>(), + vec![50] + ); + assert_eq!( + s.range(Included(&50), Excluded(&50), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&50), Included(&50), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&50), Excluded(&50), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Included(&100), Included(&-2), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Included(&100), Excluded(&-2), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&100), Included(&-2), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&100), Excluded(&-2), guard) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + } + + #[test] + fn into_iter() { + let guard = &epoch::pin(); + let s = SkipList::new(epoch::default_collector().clone()); + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(x, x * 10, guard); + } + + assert_eq!( + s.into_iter().collect::>(), + &[ + (2, 20), + (4, 40), + (5, 50), + (7, 70), + (8, 80), + (11, 110), + (12, 120), + ] + ); + } + + #[test] + fn clear() { + let guard = &mut epoch::pin(); + 
let s = SkipList::new(epoch::default_collector().clone()); + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(x, x * 10, guard); + } + + assert!(!s.is_empty()); + assert_ne!(s.len(), 0); + s.clear(guard); + assert!(s.is_empty()); + assert_eq!(s.len(), 0); + } + + #[test] + fn drops() { + static KEYS: AtomicUsize = ATOMIC_USIZE_INIT; + static VALUES: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = epoch::Collector::new(); + let handle = collector.register(); + { + let guard = &handle.pin(); + + #[derive(Eq, PartialEq, Ord, PartialOrd)] + struct Key(i32); + + impl Drop for Key { + fn drop(&mut self) { + KEYS.fetch_add(1, Ordering::SeqCst); + } + } + + struct Value; + + impl Drop for Value { + fn drop(&mut self) { + VALUES.fetch_add(1, Ordering::SeqCst); + } + } + + let s = SkipList::new(collector.clone()); + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(Key(x), Value, guard).release(guard); + } + assert_eq!(KEYS.load(Ordering::SeqCst), 0); + assert_eq!(VALUES.load(Ordering::SeqCst), 0); + + let key7 = Key(7); + s.remove(&key7, guard).unwrap().release(guard); + assert_eq!(KEYS.load(Ordering::SeqCst), 0); + assert_eq!(VALUES.load(Ordering::SeqCst), 0); + + drop(s); + } + + handle.pin().flush(); + handle.pin().flush(); + assert_eq!(KEYS.load(Ordering::SeqCst), 8); + assert_eq!(VALUES.load(Ordering::SeqCst), 7); + } +} diff --git a/crossbeam-skiplist/src/lib.rs b/crossbeam-skiplist/src/lib.rs new file mode 100644 index 000000000..662d7dcb1 --- /dev/null +++ b/crossbeam-skiplist/src/lib.rs @@ -0,0 +1,45 @@ +#![cfg_attr(feature = "nightly", feature(alloc))] +#![cfg_attr(not(test), no_std)] + +#[cfg(test)] +extern crate core; +#[cfg(all(not(test), feature = "use_std"))] +#[macro_use] +extern crate std; + +// Use liballoc on nightly to avoid a dependency on libstd +#[cfg(feature = "nightly")] +extern crate alloc; +#[cfg(not(feature = "nightly"))] +mod alloc { + // Tweak the module layout to match the one in liballoc + extern crate std; + pub use self::std::vec; +} + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate scopeguard; + +pub mod base; +#[cfg(feature = "use_std")] +pub mod map; +#[cfg(feature = "use_std")] +pub mod set; + +pub use base::SkipList; +#[cfg(feature = "use_std")] +pub use map::SkipMap; +#[cfg(feature = "use_std")] +pub use set::SkipSet; + +/// An endpoint of a range of keys. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +pub enum Bound { + /// An inclusive bound. + Included(T), + /// An exclusive bound. + Excluded(T), + /// An infinite endpoint. Indicates that there is no bound in this direction. + Unbounded, +} diff --git a/crossbeam-skiplist/src/map.rs b/crossbeam-skiplist/src/map.rs new file mode 100644 index 000000000..cb502e2dd --- /dev/null +++ b/crossbeam-skiplist/src/map.rs @@ -0,0 +1,679 @@ +use std::borrow::Borrow; +use std::fmt; +use std::iter::FromIterator; + +use base::{self, try_pin_loop}; +use epoch; +use Bound; + +/// A map based on a lock-free skip list. +pub struct SkipMap { + inner: base::SkipList, +} + +impl SkipMap { + /// Returns a new, empty map. + pub fn new() -> SkipMap { + SkipMap { + inner: base::SkipList::new(epoch::default_collector().clone()), + } + } + + /// Returns `true` if the map is empty. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Returns the number of entries in the map. + /// + /// If the map is being concurrently modified, consider the returned number just an + /// approximation without any guarantees. 
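+    ///
+    /// A quick sketch; without concurrent writers the count is exact:
+    ///
+    /// ```ignore
+    /// let m = SkipMap::new();
+    /// m.insert("a", 1);
+    /// m.insert("b", 2);
+    /// assert_eq!(m.len(), 2);
+    /// ```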
+ pub fn len(&self) -> usize { + self.inner.len() + } +} + +impl SkipMap +where + K: Ord, +{ + /// Returns the entry with the smallest key. + pub fn front(&self) -> Option> { + let guard = &epoch::pin(); + try_pin_loop(|| self.inner.front(guard)).map(Entry::new) + } + + /// Returns the entry with the largest key. + pub fn back(&self) -> Option> { + let guard = &epoch::pin(); + try_pin_loop(|| self.inner.back(guard)).map(Entry::new) + } + + /// Returns `true` if the map contains a value for the specified key. + pub fn contains_key(&self, key: &Q) -> bool + where + K: Borrow, + Q: Ord + ?Sized, + { + let guard = &epoch::pin(); + self.inner.contains_key(key, guard) + } + + /// Returns an entry with the specified `key`. + pub fn get(&self, key: &Q) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + let guard = &epoch::pin(); + try_pin_loop(|| self.inner.get(key, guard)).map(Entry::new) + } + + /// Returns an `Entry` pointing to the lowest element whose key is above + /// the given bound. If no such element is found then `None` is + /// returned. + pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + let guard = &epoch::pin(); + try_pin_loop(|| self.inner.lower_bound(bound, guard)).map(Entry::new) + } + + /// Returns an `Entry` pointing to the highest element whose key is below + /// the given bound. If no such element is found then `None` is + /// returned. + pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + let guard = &epoch::pin(); + try_pin_loop(|| self.inner.upper_bound(bound, guard)).map(Entry::new) + } + + /// Finds an entry with the specified key, or inserts a new `key`-`value` pair if none exist. + pub fn get_or_insert(&self, key: K, value: V) -> Entry { + let guard = &epoch::pin(); + Entry::new(self.inner.get_or_insert(key, value, guard)) + } + + /// Returns an iterator over all entries in the map. + pub fn iter(&self) -> Iter { + Iter { + inner: self.inner.ref_iter(), + } + } + + /// Returns an iterator over a subset of entries in the skip list. + pub fn range<'a, 'k, Min, Max>( + &'a self, + lower_bound: Bound<&'k Min>, + upper_bound: Bound<&'k Max>, + ) -> Range<'a, 'k, Min, Max, K, V> + where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, + { + Range { + inner: self.inner.ref_range(lower_bound, upper_bound), + } + } +} + +impl SkipMap +where + K: Ord + Send + 'static, + V: Send + 'static, +{ + /// Inserts a `key`-`value` pair into the map and returns the new entry. + /// + /// If there is an existing entry with this key, it will be removed before inserting the new + /// one. + pub fn insert(&self, key: K, value: V) -> Entry { + let guard = &epoch::pin(); + Entry::new(self.inner.insert(key, value, guard)) + } + + /// Removes an entry with the specified `key` from the map and returns it. + pub fn remove(&self, key: &Q) -> Option> + where + K: Borrow, + Q: Ord + ?Sized, + { + let guard = &epoch::pin(); + self.inner.remove(key, guard).map(Entry::new) + } + + /// Removes an entry from the front of the map. + pub fn pop_front(&self) -> Option> { + let guard = &epoch::pin(); + self.inner.pop_front(guard).map(Entry::new) + } + + /// Removes an entry from the back of the map. + pub fn pop_back(&self) -> Option> { + let guard = &epoch::pin(); + self.inner.pop_back(guard).map(Entry::new) + } + + /// Iterates over the map and removes every entry. 
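+    ///
+    /// Removal happens in batches, so concurrent readers may still observe
+    /// some entries while `clear` is in progress:
+    ///
+    /// ```ignore
+    /// let m = SkipMap::new();
+    /// m.insert(1, "one");
+    /// m.insert(2, "two");
+    /// m.clear();
+    /// assert!(m.is_empty());
+    /// ```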
+ pub fn clear(&self) { + let guard = &mut epoch::pin(); + self.inner.clear(guard); + } +} + +impl Default for SkipMap { + fn default() -> SkipMap { + SkipMap::new() + } +} + +impl fmt::Debug for SkipMap +where + K: Ord + fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut m = f.debug_map(); + for e in self.iter() { + m.entry(e.key(), e.value()); + } + m.finish() + } +} + +impl IntoIterator for SkipMap { + type Item = (K, V); + type IntoIter = IntoIter; + + fn into_iter(self) -> IntoIter { + IntoIter { + inner: self.inner.into_iter(), + } + } +} + +impl<'a, K, V> IntoIterator for &'a SkipMap +where + K: Ord, +{ + type Item = Entry<'a, K, V>; + type IntoIter = Iter<'a, K, V>; + + fn into_iter(self) -> Iter<'a, K, V> { + self.iter() + } +} + +impl FromIterator<(K, V)> for SkipMap +where + K: Ord, +{ + fn from_iter(iter: I) -> SkipMap + where + I: IntoIterator, + { + let s = SkipMap::new(); + for (k, v) in iter { + s.get_or_insert(k, v); + } + s + } +} + +/// A reference-counted entry in a map. +pub struct Entry<'a, K: 'a, V: 'a> { + inner: base::RefEntry<'a, K, V>, +} + +impl<'a, K, V> Entry<'a, K, V> { + fn new(inner: base::RefEntry<'a, K, V>) -> Entry<'a, K, V> { + Entry { inner } + } + + /// Returns a reference to the key. + pub fn key(&self) -> &K { + self.inner.key() + } + + /// Returns a reference to the value. + pub fn value(&self) -> &V { + self.inner.value() + } + + /// Returns `true` if the entry is removed from the map. + pub fn is_removed(&self) -> bool { + self.inner.is_removed() + } +} + +impl<'a, K, V> Entry<'a, K, V> +where + K: Ord, +{ + /// Moves to the next entry in the map. + pub fn move_next(&mut self) -> bool { + let guard = &epoch::pin(); + self.inner.move_next(guard) + } + + /// Moves to the previous entry in the map. + pub fn move_prev(&mut self) -> bool { + let guard = &epoch::pin(); + self.inner.move_prev(guard) + } + + /// Returns the next entry in the map. + pub fn next(&self) -> Option> { + let guard = &epoch::pin(); + self.inner.next(guard).map(Entry::new) + } + + /// Returns the previous entry in the map. + pub fn prev(&self) -> Option> { + let guard = &epoch::pin(); + self.inner.prev(guard).map(Entry::new) + } +} + +impl<'a, K, V> Entry<'a, K, V> +where + K: Ord + Send + 'static, + V: Send + 'static, +{ + /// Removes the entry from the map. + /// + /// Returns `true` if this call removed the entry and `false` if it was already removed. + pub fn remove(&self) -> bool { + let guard = &epoch::pin(); + self.inner.remove(guard) + } +} + +impl<'a, K, V> Clone for Entry<'a, K, V> { + fn clone(&self) -> Entry<'a, K, V> { + Entry { + inner: self.inner.clone(), + } + } +} + +impl<'a, K, V> fmt::Debug for Entry<'a, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Entry") + .field(self.key()) + .field(self.value()) + .finish() + } +} + +/// An owning iterator over the entries of a `SkipMap`. +pub struct IntoIter { + inner: base::IntoIter, +} + +impl Iterator for IntoIter { + type Item = (K, V); + + fn next(&mut self) -> Option<(K, V)> { + self.inner.next() + } +} + +impl fmt::Debug for IntoIter +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "IntoIter {{ ... }}") + } +} + +/// An iterator over the entries of a `SkipMap`. 
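+///
+/// Each step pins the current thread internally, so no guard needs to be
+/// managed by the caller:
+///
+/// ```ignore
+/// let m = SkipMap::new();
+/// m.insert(2, 20);
+/// m.insert(1, 10);
+/// let keys: Vec<_> = m.iter().map(|e| *e.key()).collect();
+/// assert_eq!(keys, [1, 2]);
+/// ```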
+pub struct Iter<'a, K: 'a, V: 'a> { + inner: base::RefIter<'a, K, V>, +} + +impl<'a, K, V> Iterator for Iter<'a, K, V> +where + K: Ord, +{ + type Item = Entry<'a, K, V>; + + fn next(&mut self) -> Option> { + let guard = &epoch::pin(); + self.inner.next(guard).map(Entry::new) + } +} + +impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> +where + K: Ord, +{ + fn next_back(&mut self) -> Option> { + let guard = &epoch::pin(); + self.inner.next_back(guard).map(Entry::new) + } +} + +impl<'a, K, V> fmt::Debug for Iter<'a, K, V> +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Iter {{ ... }}") + } +} + +/// An iterator over the entries of a `SkipMap`. +pub struct Range<'a, 'k, Min, Max, K: 'a, V: 'a> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + inner: base::RefRange<'a, 'k, Min, Max, K, V>, +} + +impl<'a, 'k, Min, Max, K, V> Iterator for Range<'a, 'k, Min, Max, K, V> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + type Item = Entry<'a, K, V>; + + fn next(&mut self) -> Option> { + let guard = &epoch::pin(); + self.inner.next(guard).map(Entry::new) + } +} + +impl<'a, 'k, Min, Max, K, V> DoubleEndedIterator for Range<'a, 'k, Min, Max, K, V> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + fn next_back(&mut self) -> Option> { + let guard = &epoch::pin(); + self.inner.next_back(guard).map(Entry::new) + } +} + +impl<'a, 'k, Min, Max, K, V> fmt::Debug for Range<'a, 'k, Min, Max, K, V> +where + K: Ord + Borrow + Borrow, + Min: Ord + ?Sized + 'k, + Max: Ord + ?Sized + 'k, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Range {{ ... }}") + } +} + +#[cfg(test)] +mod tests { + use super::SkipMap; + + #[test] + fn smoke() { + let m = SkipMap::new(); + m.insert(1, 10); + m.insert(5, 50); + m.insert(7, 70); + } + + #[test] + fn iter() { + let s = SkipMap::new(); + for &x in &[4, 2, 12, 8, 7, 11, 5] { + s.insert(x, x * 10); + } + + assert_eq!( + s.iter().map(|e| *e.key()).collect::>(), + &[2, 4, 5, 7, 8, 11, 12] + ); + + let mut it = s.iter(); + s.remove(&2); + assert_eq!(*it.next().unwrap().key(), 4); + s.remove(&7); + assert_eq!(*it.next().unwrap().key(), 5); + s.remove(&5); + assert_eq!(*it.next().unwrap().key(), 8); + s.remove(&12); + assert_eq!(*it.next().unwrap().key(), 11); + assert!(it.next().is_none()); + } + + #[test] + fn iter_range() { + use Bound::*; + let s = SkipMap::new(); + let v = (0..10).map(|x| x * 10).collect::>(); + for &x in v.iter() { + s.insert(x, x); + } + + assert_eq!( + s.iter().map(|x| *x.value()).collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.iter().rev().map(|x| *x.value()).collect::>(), + vec![90, 80, 70, 60, 50, 40, 30, 20, 10, 0] + ); + assert_eq!( + s.range(Unbounded, Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + + assert_eq!( + s.range(Included(&0), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Excluded(&0), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Included(&25), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Excluded(&25), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Included(&70), Unbounded) + .map(|x| 
*x.value()) + .collect::>(), + vec![70, 80, 90] + ); + assert_eq!( + s.range(Excluded(&70), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![80, 90] + ); + assert_eq!( + s.range(Included(&100), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&100), Unbounded) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Unbounded, Included(&90)) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90] + ); + assert_eq!( + s.range(Unbounded, Excluded(&90)) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70, 80] + ); + assert_eq!( + s.range(Unbounded, Included(&25)) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20] + ); + assert_eq!( + s.range(Unbounded, Excluded(&25)) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20] + ); + assert_eq!( + s.range(Unbounded, Included(&70)) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60, 70] + ); + assert_eq!( + s.range(Unbounded, Excluded(&70)) + .map(|x| *x.value()) + .collect::>(), + vec![0, 10, 20, 30, 40, 50, 60] + ); + assert_eq!( + s.range(Unbounded, Included(&-1)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Unbounded, Excluded(&-1)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Included(&25), Included(&80)) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80] + ); + assert_eq!( + s.range(Included(&25), Excluded(&80)) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70] + ); + assert_eq!( + s.range(Excluded(&25), Included(&80)) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70, 80] + ); + assert_eq!( + s.range(Excluded(&25), Excluded(&80)) + .map(|x| *x.value()) + .collect::>(), + vec![30, 40, 50, 60, 70] + ); + + assert_eq!( + s.range(Included(&25), Included(&25)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Included(&25), Excluded(&25)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&25), Included(&25)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&25), Excluded(&25)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Included(&50), Included(&50)) + .map(|x| *x.value()) + .collect::>(), + vec![50] + ); + assert_eq!( + s.range(Included(&50), Excluded(&50)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&50), Included(&50)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&50), Excluded(&50)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + + assert_eq!( + s.range(Included(&100), Included(&-2)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Included(&100), Excluded(&-2)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&100), Included(&-2)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + assert_eq!( + s.range(Excluded(&100), Excluded(&-2)) + .map(|x| *x.value()) + .collect::>(), + vec![] + ); + } + + // TODO(stjepang): Write more tests. +} diff --git a/crossbeam-skiplist/src/set.rs b/crossbeam-skiplist/src/set.rs new file mode 100644 index 000000000..cefee918c --- /dev/null +++ b/crossbeam-skiplist/src/set.rs @@ -0,0 +1,397 @@ +use std::borrow::Borrow; +use std::fmt; +use std::iter::FromIterator; + +use map; +use Bound; + +/// A set based on a lock-free skip list. 
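+///
+/// This is a thin wrapper around a `SkipMap` with `()` values. A basic usage
+/// sketch:
+///
+/// ```ignore
+/// let s = SkipSet::new();
+/// s.insert("hello");
+/// s.insert("world");
+/// assert!(s.contains("hello"));
+/// s.remove("hello");
+/// assert!(!s.contains("hello"));
+/// ```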
+pub struct SkipSet<T> {
+    inner: map::SkipMap<T, ()>,
+}
+
+impl<T> SkipSet<T> {
+    /// Returns a new, empty set.
+    pub fn new() -> SkipSet<T> {
+        SkipSet {
+            inner: map::SkipMap::new(),
+        }
+    }
+
+    /// Returns `true` if the set is empty.
+    pub fn is_empty(&self) -> bool {
+        self.inner.is_empty()
+    }
+
+    /// Returns the number of entries in the set.
+    ///
+    /// If the set is being concurrently modified, consider the returned number just an
+    /// approximation without any guarantees.
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<T> SkipSet<T>
+where
+    T: Ord,
+{
+    /// Returns the entry with the smallest key.
+    pub fn front(&self) -> Option<Entry<T>> {
+        self.inner.front().map(Entry::new)
+    }
+
+    /// Returns the entry with the largest key.
+    pub fn back(&self) -> Option<Entry<T>> {
+        self.inner.back().map(Entry::new)
+    }
+
+    /// Returns `true` if the set contains the specified key.
+    pub fn contains<Q>(&self, key: &Q) -> bool
+    where
+        T: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.inner.contains_key(key)
+    }
+
+    /// Returns an entry with the specified `key`.
+    pub fn get<Q>(&self, key: &Q) -> Option<Entry<T>>
+    where
+        T: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.inner.get(key).map(Entry::new)
+    }
+
+    /// Returns an `Entry` pointing to the lowest element whose key is above
+    /// the given bound. If no such element is found then `None` is
+    /// returned.
+    pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option<Entry<'a, T>>
+    where
+        T: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.inner.lower_bound(bound).map(Entry::new)
+    }
+
+    /// Returns an `Entry` pointing to the highest element whose key is below
+    /// the given bound. If no such element is found then `None` is
+    /// returned.
+    pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option<Entry<'a, T>>
+    where
+        T: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.inner.upper_bound(bound).map(Entry::new)
+    }
+
+    /// Finds an entry with the specified key, or inserts a new `key` if none exists.
+    pub fn get_or_insert(&self, key: T) -> Entry<T> {
+        Entry::new(self.inner.get_or_insert(key, ()))
+    }
+
+    /// Returns an iterator over all entries in the set.
+    pub fn iter(&self) -> Iter<T> {
+        Iter {
+            inner: self.inner.iter(),
+        }
+    }
+
+    /// Returns an iterator over a subset of entries in the skip list.
+    pub fn range<'a, 'k, Min, Max>(
+        &'a self,
+        lower_bound: Bound<&'k Min>,
+        upper_bound: Bound<&'k Max>,
+    ) -> Range<'a, 'k, Min, Max, T>
+    where
+        T: Ord + Borrow<Min> + Borrow<Max>,
+        Min: Ord + ?Sized + 'k,
+        Max: Ord + ?Sized + 'k,
+    {
+        Range {
+            inner: self.inner.range(lower_bound, upper_bound),
+        }
+    }
+}
+
+impl<T> SkipSet<T>
+where
+    T: Ord + Send + 'static,
+{
+    /// Inserts a `key` into the set and returns the new entry.
+    ///
+    /// If there is an existing entry with this key, it will be removed before inserting the new
+    /// one.
+    pub fn insert(&self, key: T) -> Entry<T> {
+        Entry::new(self.inner.insert(key, ()))
+    }
+
+    /// Removes an entry with the specified key from the set and returns it.
+    pub fn remove<Q>(&self, key: &Q) -> Option<Entry<T>>
+    where
+        T: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.inner.remove(key).map(Entry::new)
+    }
+
+    /// Removes an entry from the front of the set.
+    pub fn pop_front(&self) -> Option<Entry<T>> {
+        self.inner.pop_front().map(Entry::new)
+    }
+
+    /// Removes an entry from the back of the set.
+    pub fn pop_back(&self) -> Option<Entry<T>> {
+        self.inner.pop_back().map(Entry::new)
+    }
+
+    /// Iterates over the set and removes every entry.
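+    ///
+    /// A short sketch of the intended use (illustrative, not from the
+    /// original patch); note that because clearing proceeds entry by entry,
+    /// entries inserted concurrently while `clear` is running may survive it:
+    ///
+    /// ```
+    /// use crossbeam_skiplist::SkipSet;
+    ///
+    /// let s = SkipSet::new();
+    /// s.insert(1);
+    /// s.insert(2);
+    /// s.clear();
+    /// assert!(s.is_empty());
+    /// ```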
+    pub fn clear(&self) {
+        self.inner.clear();
+    }
+}
+
+impl<T> Default for SkipSet<T> {
+    fn default() -> SkipSet<T> {
+        SkipSet::new()
+    }
+}
+
+impl<T> fmt::Debug for SkipSet<T>
+where
+    T: Ord + fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut m = f.debug_set();
+        for e in self.iter() {
+            m.entry(e.value());
+        }
+        m.finish()
+    }
+}
+
+impl<T> IntoIterator for SkipSet<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+
+    fn into_iter(self) -> IntoIter<T> {
+        IntoIter {
+            inner: self.inner.into_iter(),
+        }
+    }
+}
+
+impl<'a, T> IntoIterator for &'a SkipSet<T>
+where
+    T: Ord,
+{
+    type Item = Entry<'a, T>;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Iter<'a, T> {
+        self.iter()
+    }
+}
+
+impl<T> FromIterator<T> for SkipSet<T>
+where
+    T: Ord,
+{
+    fn from_iter<I>(iter: I) -> SkipSet<T>
+    where
+        I: IntoIterator<Item = T>,
+    {
+        let s = SkipSet::new();
+        for t in iter {
+            s.get_or_insert(t);
+        }
+        s
+    }
+}
+
+/// An entry in a `SkipSet`.
+pub struct Entry<'a, T: 'a> {
+    inner: map::Entry<'a, T, ()>,
+}
+
+impl<'a, T> Entry<'a, T> {
+    fn new(inner: map::Entry<'a, T, ()>) -> Entry<'a, T> {
+        Entry { inner }
+    }
+
+    /// Returns a reference to the value.
+    pub fn value(&self) -> &T {
+        self.inner.key()
+    }
+
+    /// Returns `true` if the entry is removed from the set.
+    pub fn is_removed(&self) -> bool {
+        self.inner.is_removed()
+    }
+}
+
+impl<'a, T> Entry<'a, T>
+where
+    T: Ord,
+{
+    /// Moves to the next entry in the set, returning `true` on success.
+    pub fn move_next(&mut self) -> bool {
+        self.inner.move_next()
+    }
+
+    /// Moves to the previous entry in the set, returning `true` on success.
+    pub fn move_prev(&mut self) -> bool {
+        self.inner.move_prev()
+    }
+
+    /// Returns the next entry in the set.
+    pub fn next(&self) -> Option<Entry<'a, T>> {
+        self.inner.next().map(Entry::new)
+    }
+
+    /// Returns the previous entry in the set.
+    pub fn prev(&self) -> Option<Entry<'a, T>> {
+        self.inner.prev().map(Entry::new)
+    }
+}
+
+impl<'a, T> Entry<'a, T>
+where
+    T: Ord + Send + 'static,
+{
+    /// Removes the entry from the set.
+    ///
+    /// Returns `true` if this call removed the entry and `false` if it was already removed.
+    pub fn remove(&self) -> bool {
+        self.inner.remove()
+    }
+}
+
+impl<'a, T> Clone for Entry<'a, T> {
+    fn clone(&self) -> Entry<'a, T> {
+        Entry {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+impl<'a, T> fmt::Debug for Entry<'a, T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple("Entry")
+            .field(&self.value())
+            .finish()
+    }
+}
+
+/// An owning iterator over the entries of a `SkipSet`.
+pub struct IntoIter<T> {
+    inner: map::IntoIter<T, ()>,
+}
+
+impl<T> Iterator for IntoIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.inner.next().map(|(k, ())| k)
+    }
+}
+
+impl<T> fmt::Debug for IntoIter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "IntoIter {{ ... }}")
+    }
+}
+
+/// An iterator over the entries of a `SkipSet`.
+pub struct Iter<'a, T: 'a> {
+    inner: map::Iter<'a, T, ()>,
+}
+
+impl<'a, T> Iterator for Iter<'a, T>
+where
+    T: Ord,
+{
+    type Item = Entry<'a, T>;
+
+    fn next(&mut self) -> Option<Entry<'a, T>> {
+        self.inner.next().map(Entry::new)
+    }
+}
+
+impl<'a, T> DoubleEndedIterator for Iter<'a, T>
+where
+    T: Ord,
+{
+    fn next_back(&mut self) -> Option<Entry<'a, T>> {
+        self.inner.next_back().map(Entry::new)
+    }
+}
+
+impl<'a, T> fmt::Debug for Iter<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Iter {{ ... }}")
+    }
+}
+
+/// An iterator over a range of entries of a `SkipSet`.
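+///
+/// Returned by `SkipSet::range`. A hedged usage sketch (illustrative only;
+/// `Bound` is taken from `std::collections` here, which is what the crate-root
+/// `Bound` imported above resolves to at the time of this patch):
+///
+/// ```
+/// use std::collections::Bound;
+///
+/// use crossbeam_skiplist::SkipSet;
+///
+/// let s = SkipSet::new();
+/// for x in 0..10 {
+///     s.insert(x);
+/// }
+///
+/// let v: Vec<_> = s
+///     .range(Bound::Included(&3), Bound::Excluded(&7))
+///     .map(|e| *e.value())
+///     .collect();
+/// assert_eq!(v, [3, 4, 5, 6]);
+/// ```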
+pub struct Range<'a, 'k, Min, Max, T: 'a>
+where
+    T: Ord + Borrow<Min> + Borrow<Max>,
+    Min: Ord + ?Sized + 'k,
+    Max: Ord + ?Sized + 'k,
+{
+    inner: map::Range<'a, 'k, Min, Max, T, ()>,
+}
+
+impl<'a, 'k, Min, Max, T> Iterator for Range<'a, 'k, Min, Max, T>
+where
+    T: Ord + Borrow<Min> + Borrow<Max>,
+    Min: Ord + ?Sized + 'k,
+    Max: Ord + ?Sized + 'k,
+{
+    type Item = Entry<'a, T>;
+
+    fn next(&mut self) -> Option<Entry<'a, T>> {
+        self.inner.next().map(Entry::new)
+    }
+}
+
+impl<'a, 'k, Min, Max, T> DoubleEndedIterator for Range<'a, 'k, Min, Max, T>
+where
+    T: Ord + Borrow<Min> + Borrow<Max>,
+    Min: Ord + ?Sized + 'k,
+    Max: Ord + ?Sized + 'k,
+{
+    fn next_back(&mut self) -> Option<Entry<'a, T>> {
+        self.inner.next_back().map(Entry::new)
+    }
+}
+
+impl<'a, 'k, Min, Max, T> fmt::Debug for Range<'a, 'k, Min, Max, T>
+where
+    T: Ord + Borrow<Min> + Borrow<Max>,
+    Min: Ord + ?Sized + 'k,
+    Max: Ord + ?Sized + 'k,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Range {{ ... }}")
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::SkipSet;
+
+    #[test]
+    fn smoke() {
+        let m = SkipSet::new();
+        m.insert(1);
+        m.insert(5);
+        m.insert(7);
+    }
+
+    // TODO(stjepang): Write more tests.
+}
diff --git a/crossbeam-utils/README.md b/crossbeam-utils/README.md
index 6b62576ad..011d3523b 100644
--- a/crossbeam-utils/README.md
+++ b/crossbeam-utils/README.md
@@ -2,13 +2,10 @@
 
 [![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)](
 https://travis-ci.org/crossbeam-rs/crossbeam)
-
 [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
 https://github.com/crossbeam-rs/crossbeam-utils/tree/master/src)
-
 [![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)](
 https://crates.io/crates/crossbeam-utils)
-
 [![Documentation](https://docs.rs/crossbeam-utils/badge.svg)](
 https://docs.rs/crossbeam-utils)
 
diff --git a/crossbeam-utils/ci/script.sh b/crossbeam-utils/ci/script.sh
index 053f3f93e..febc4548f 100755
--- a/crossbeam-utils/ci/script.sh
+++ b/crossbeam-utils/ci/script.sh
@@ -1,11 +1,21 @@
 #!/bin/bash
 
+check_min_version() {
+    local rustc="`rustc -V | cut -d' ' -f2 | cut -d- -f1`"
+    if [[ "$rustc" != "`echo -e "$rustc\n$1" | sort -V | tail -n1`" ]]; then
+        echo "Unsupported Rust version: $rustc < $1"
+        exit 0
+    fi
+}
+check_min_version 1.26.0
+
 set -ex
 
-cargo build
+export RUSTFLAGS="-D warnings"
+
 cargo build --no-default-features
 cargo test
 
-if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
+if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
     cargo test --features nightly
 fi
diff --git a/src/bin/bench.rs b/src/bin/bench.rs
deleted file mode 100644
index 646fce953..000000000
--- a/src/bin/bench.rs
+++ /dev/null
@@ -1,170 +0,0 @@
-extern crate crossbeam;
-
-use std::collections::VecDeque;
-use std::sync::Mutex;
-use std::sync::mpsc::channel;
-use std::time::Duration;
-
-use crossbeam::scope;
-use crossbeam::queue::{MsQueue, SegQueue};
-
-use extra_impls::mpsc_queue::Queue as MpscQueue;
-
-mod extra_impls;
-
-const COUNT: u64 = 10000000;
-const THREADS: u64 = 2;
-
-fn time<F: FnOnce()>(f: F) -> Duration {
-    let start = ::std::time::Instant::now();
-    f();
-    start.elapsed()
-}
-
-fn nanos(d: Duration) -> f64 {
-    d.as_secs() as f64 * 1000000000f64 + (d.subsec_nanos() as f64)
-}
-
-trait Queue<T> {
-    fn push(&self, T);
-    fn try_pop(&self) -> Option<T>;
-}
-
-impl<T> Queue<T> for MsQueue<T> {
-    fn push(&self, t: T) {
-        self.push(t)
-    }
-    fn try_pop(&self) -> Option<T> {
-        self.try_pop()
-    }
-}
-
-impl<T> Queue<T> for SegQueue<T> {
-    fn push(&self, t: T) {
-        self.push(t)
-    }
-    fn try_pop(&self) -> Option<T> {
-        self.try_pop()
-    }
-}
-
-impl<T> Queue<T> for MpscQueue<T> {
-    fn push(&self, t: T) {
-        self.push(t)
-    }
-    fn try_pop(&self) -> Option<T> {
-        use extra_impls::mpsc_queue::*;
-
-        loop {
-            match self.pop() {
-                Data(t) => return Some(t),
-                Empty => return None,
-                Inconsistent => (),
-            }
-        }
-    }
-}
-
-impl<T> Queue<T> for Mutex<VecDeque<T>> {
-    fn push(&self, t: T) {
-        self.lock().unwrap().push_back(t)
-    }
-    fn try_pop(&self) -> Option<T> {
-        self.lock().unwrap().pop_front()
-    }
-}
-
-fn bench_queue_mpsc<Q: Queue<u64> + Sync>(q: Q) -> f64 {
-    let d = time(|| {
-        scope(|scope| {
-            for _i in 0..THREADS {
-                let qr = &q;
-                scope.spawn(move || {
-                    for x in 0..COUNT {
-                        let _ = qr.push(x);
-                    }
-                });
-            }
-
-            let mut count = 0;
-            while count < COUNT * THREADS {
-                if q.try_pop().is_some() {
-                    count += 1;
-                }
-            }
-        });
-    });
-
-    nanos(d) / ((COUNT * THREADS) as f64)
-}
-
-fn bench_queue_mpmc<Q: Queue<bool> + Sync>(q: Q) -> f64 {
-    use std::sync::atomic::AtomicUsize;
-    use std::sync::atomic::Ordering::Relaxed;
-
-    let prod_count = AtomicUsize::new(0);
-
-    let d = time(|| {
-        scope(|scope| {
-            for _i in 0..THREADS {
-                let qr = &q;
-                let pcr = &prod_count;
-                scope.spawn(move || {
-                    for _x in 0..COUNT {
-                        qr.push(true);
-                    }
-                    if pcr.fetch_add(1, Relaxed) == (THREADS as usize) - 1 {
-                        for _x in 0..THREADS {
-                            qr.push(false)
-                        }
-                    }
-                });
-                scope.spawn(move || loop {
-                    if let Some(false) = qr.try_pop() {
-                        break;
-                    }
-                });
-            }
-        });
-    });
-
-    nanos(d) / ((COUNT * THREADS) as f64)
-}
-
-fn bench_chan_mpsc() -> f64 {
-    let (tx, rx) = channel();
-
-    let d = time(|| {
-        scope(|scope| {
-            for _i in 0..THREADS {
-                let my_tx = tx.clone();
-
-                scope.spawn(move || {
-                    for x in 0..COUNT {
-                        let _ = my_tx.send(x);
-                    }
-                });
-            }
-
-            for _i in 0..COUNT * THREADS {
-                let _ = rx.recv().unwrap();
-            }
-        });
-    });
-
-    nanos(d) / ((COUNT * THREADS) as f64)
-}
-
-fn main() {
-    println!("MSQ mpsc: {}", bench_queue_mpsc(MsQueue::new()));
-    println!("chan mpsc: {}", bench_chan_mpsc());
-    println!("mpsc mpsc: {}", bench_queue_mpsc(MpscQueue::new()));
-    println!("Seg mpsc: {}", bench_queue_mpsc(SegQueue::new()));
-
-    println!("MSQ mpmc: {}", bench_queue_mpmc(MsQueue::new()));
-    println!("Seg mpmc: {}", bench_queue_mpmc(SegQueue::new()));
-
-    // println!("queue_mpsc: {}", bench_queue_mpsc());
-    // println!("queue_mpmc: {}", bench_queue_mpmc());
-    // println!("mutex_mpmc: {}", bench_mutex_mpmc());
-}
diff --git a/src/bin/extra_impls/mod.rs b/src/bin/extra_impls/mod.rs
deleted file mode 100644
index 485946a9f..000000000
--- a/src/bin/extra_impls/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod mpsc_queue;
diff --git a/src/bin/extra_impls/mpsc_queue.rs b/src/bin/extra_impls/mpsc_queue.rs
deleted file mode 100644
index c204fc869..000000000
--- a/src/bin/extra_impls/mpsc_queue.rs
+++ /dev/null
@@ -1,159 +0,0 @@
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright notice,
- *       this list of conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-//! A mostly lock-free multi-producer, single consumer queue.
-//!
-//! This module contains an implementation of a concurrent MPSC queue. This
-//! queue can be used to share data between threads, and is also used as the
-//! building block of channels in rust.
-//!
-//! Note that the current implementation of this queue has a caveat on the `pop`
-//! method; see the method documentation for more information. Due to this
-//! caveat, this queue may not be appropriate for all use-cases.
-
-// http://www.1024cores.net/home/lock-free-algorithms
-// /queues/non-intrusive-mpsc-node-based-queue
-
-pub use self::PopResult::*;
-
-use std::fmt;
-use std::ptr;
-use std::cell::UnsafeCell;
-
-use std::sync::atomic::{AtomicPtr, Ordering};
-
-/// A result of the `pop` function.
-#[derive(Debug)]
-pub enum PopResult<T> {
-    /// Some data has been popped
-    Data(T),
-    /// The queue is empty
-    Empty,
-    /// The queue is in an inconsistent state. Popping data should succeed, but
-    /// some pushers have yet to make enough progress in order to allow a pop to
-    /// succeed. It is recommended that a pop() occur "in the near future" in
-    /// order to see if the sender has made progress or not
-    Inconsistent,
-}
-
-#[derive(Debug)]
-struct Node<T> {
-    next: AtomicPtr<Node<T>>,
-    value: Option<T>,
-}
-
-/// The multi-producer single-consumer structure. This is not cloneable, but it
-/// may be safely shared so long as it is guaranteed that there is only one
-/// popper at a time (many pushers are allowed).
-pub struct Queue<T> {
-    head: AtomicPtr<Node<T>>,
-    tail: UnsafeCell<*mut Node<T>>,
-}
-
-impl<T> fmt::Debug for Queue<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Queue {{ ... }}")
-    }
-}
-
-unsafe impl<T: Send> Send for Queue<T> {}
-unsafe impl<T: Send> Sync for Queue<T> {}
-
-impl<T> Node<T> {
-    unsafe fn new(v: Option<T>) -> *mut Node<T> {
-        Box::into_raw(Box::new(Node {
-            next: AtomicPtr::new(ptr::null_mut()),
-            value: v,
-        }))
-    }
-}
-
-impl<T> Queue<T> {
-    /// Creates a new queue that is safe to share among multiple producers and
-    /// one consumer.
-    pub fn new() -> Queue<T> {
-        let stub = unsafe { Node::new(None) };
-        Queue {
-            head: AtomicPtr::new(stub),
-            tail: UnsafeCell::new(stub),
-        }
-    }
-
-    /// Pushes a new value onto this queue.
-    pub fn push(&self, t: T) {
-        unsafe {
-            let n = Node::new(Some(t));
-            let prev = self.head.swap(n, Ordering::AcqRel);
-            (*prev).next.store(n, Ordering::Release);
-        }
-    }
-
-    /// Pops some data from this queue.
-    ///
-    /// Note that the current implementation means that this function cannot
-    /// return `Option<T>`. It is possible for this queue to be in an
-    /// inconsistent state where many pushes have succeeded and completely
-    /// finished, but pops cannot return `Some(t)`. This inconsistent state
-    /// happens when a pusher is pre-empted at an inopportune moment.
-    ///
-    /// This inconsistent state means that this queue does indeed have data, but
-    /// it does not currently have access to it at this time.
-    pub fn pop(&self) -> PopResult<T> {
-        unsafe {
-            let tail = *self.tail.get();
-            let next = (*tail).next.load(Ordering::Acquire);
-
-            if !next.is_null() {
-                *self.tail.get() = next;
-                assert!((*tail).value.is_none());
-                assert!((*next).value.is_some());
-                let ret = (*next).value.take().unwrap();
-                let _ = Box::from_raw(tail);
-                return Data(ret);
-            }
-
-            if self.head.load(Ordering::Acquire) == tail {
-                Empty
-            } else {
-                Inconsistent
-            }
-        }
-    }
-}
-
-impl<T> Drop for Queue<T> {
-    fn drop(&mut self) {
-        unsafe {
-            let mut cur = *self.tail.get();
-            while !cur.is_null() {
-                let next = (*cur).next.load(Ordering::Relaxed);
-                let _ = Box::from_raw(cur);
-                cur = next;
-            }
-        }
-    }
-}
diff --git a/src/bin/stress-msq.rs b/src/bin/stress-msq.rs
deleted file mode 100644
index 3b890cd01..000000000
--- a/src/bin/stress-msq.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-extern crate crossbeam;
-
-use crossbeam::scope;
-use crossbeam::queue::MsQueue;
-
-use std::sync::Arc;
-
-const DUP: usize = 4;
-const THREADS: u32 = 2;
-const COUNT: u64 = 100000;
-
-fn main() {
-    scope(|s| {
-        for _i in 0..DUP {
-            let q = Arc::new(MsQueue::new());
-            let qs = q.clone();
-
-            s.spawn(move || {
-                for i in 1..COUNT {
-                    qs.push(i)
-                }
-            });
-
-            for _i in 0..THREADS {
-                let qr = q.clone();
-                s.spawn(move || {
-                    let mut cur: u64 = 0;
-                    for _j in 0..COUNT {
-                        if let Some(new) = qr.try_pop() {
-                            assert!(new > cur);
-                            cur = new;
-                        }
-                    }
-                });
-            }
-        }
-    });
-}