From 86ffabe2af69f2440be26d153fd692689c9947fb Mon Sep 17 00:00:00 2001
From: sharnoff <29154784+sharnoff@users.noreply.github.com>
Date: Mon, 5 Dec 2022 06:14:05 +0000
Subject: [PATCH 001/101] docs: add note about current-thread + Handle::block_on (#5264)

There's already an existing warning about this combo in the documentation for `Handle::block_on`. This commit adds a summarized version in `Runtime::handle`.
---
 tokio/src/runtime/runtime.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs
index 3d4fd67884d..9ede0a7b0b5 100644
--- a/tokio/src/runtime/runtime.rs
+++ b/tokio/src/runtime/runtime.rs
@@ -138,6 +138,9 @@ impl Runtime {
     /// The returned handle can be used to spawn tasks that run on this runtime, and can
     /// be cloned to allow moving the `Handle` to other threads.
     ///
+    /// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
+    /// Refer to the documentation of [`Handle::block_on`] for more.
+    ///
     /// # Examples
     ///
     /// ```

From a1316cd792596baa079144bf4672f59e99556531 Mon Sep 17 00:00:00 2001
From: Jiahao XU
Date: Mon, 5 Dec 2022 20:11:44 +1100
Subject: [PATCH 002/101] io: impl `std::io::BufRead` on `SyncIoBridge` (#5265)

Signed-off-by: Jiahao XU
---
 tokio-util/src/io/sync_bridge.rs | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/tokio-util/src/io/sync_bridge.rs b/tokio-util/src/io/sync_bridge.rs
index 5682258c3b3..f87bfbb9c4e 100644
--- a/tokio-util/src/io/sync_bridge.rs
+++ b/tokio-util/src/io/sync_bridge.rs
@@ -1,5 +1,7 @@
-use std::io::{Read, Write};
-use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use std::io::{BufRead, Read, Write};
+use tokio::io::{
+    AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt,
+};
 
 /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or
 /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`].
@@ -9,6 +11,28 @@ pub struct SyncIoBridge<T> {
     rt: tokio::runtime::Handle,
 }
 
+impl<T: AsyncBufRead + Unpin> BufRead for SyncIoBridge<T> {
+    fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
+        let src = &mut self.src;
+        self.rt.block_on(AsyncBufReadExt::fill_buf(src))
+    }
+
+    fn consume(&mut self, amt: usize) {
+        let src = &mut self.src;
+        AsyncBufReadExt::consume(src, amt)
+    }
+
+    fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> std::io::Result<usize> {
+        let src = &mut self.src;
+        self.rt
+            .block_on(AsyncBufReadExt::read_until(src, byte, buf))
+    }
+    fn read_line(&mut self, buf: &mut String) -> std::io::Result<usize> {
+        let src = &mut self.src;
+        self.rt.block_on(AsyncBufReadExt::read_line(src, buf))
+    }
+}
+
 impl<T: AsyncRead + Unpin> Read for SyncIoBridge<T> {
     fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
         let src = &mut self.src;

From 644cb8207df09c19543cf9b096a43a66f8df9a0f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tymoteusz=20Wi=C5=9Bniewski?=
Date: Mon, 5 Dec 2022 23:42:49 +0100
Subject: [PATCH 003/101] rt: fix `*_closed` false positives (#5231)

Readiness futures inconsistently return the current readiness of an I/O resource if it is immediately available, or all readiness relevant for the given `Interest`, if a future needs to wait. In particular, it always returns `read_closed` for `Interest::READABLE` and `write_closed` for `Interest::WRITABLE`, which often is not true. Tokio should not tolerate false positives for `*_closed` events because they are considered final states and are not cleared internally.
In the case of an `io_resource.ready(Interest::READABLE | Interest::WRITABLE)` call, this behavior may also lead to false positives of other events. ## Solution Follow the same strategy as `poll_ready` and return the current resource's readiness. Closes: #5098 --- tokio/src/net/tcp/split.rs | 12 +++++++ tokio/src/net/tcp/split_owned.rs | 12 +++++++ tokio/src/net/tcp/stream.rs | 6 ++++ tokio/src/net/udp.rs | 4 ++- tokio/src/net/unix/datagram/socket.rs | 4 ++- tokio/src/net/unix/split.rs | 6 ++++ tokio/src/net/unix/split_owned.rs | 12 +++++++ tokio/src/net/unix/stream.rs | 6 ++++ tokio/src/net/windows/named_pipe.rs | 12 +++++++ tokio/src/runtime/io/scheduled_io.rs | 16 +++++++-- tokio/tests/tcp_stream.rs | 49 ++++++++++++++++++++++++--- 11 files changed, 130 insertions(+), 9 deletions(-) diff --git a/tokio/src/net/tcp/split.rs b/tokio/src/net/tcp/split.rs index 2ea08b3dc32..343d4fde7d5 100644 --- a/tokio/src/net/tcp/split.rs +++ b/tokio/src/net/tcp/split.rs @@ -145,6 +145,12 @@ impl ReadHalf<'_> { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// This function is equivalent to [`TcpStream::ready`]. /// /// # Cancel safety @@ -273,6 +279,12 @@ impl WriteHalf<'_> { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// This function is equivalent to [`TcpStream::ready`]. /// /// # Cancel safety diff --git a/tokio/src/net/tcp/split_owned.rs b/tokio/src/net/tcp/split_owned.rs index e2cfdefe1a3..b2730e8fb0f 100644 --- a/tokio/src/net/tcp/split_owned.rs +++ b/tokio/src/net/tcp/split_owned.rs @@ -200,6 +200,12 @@ impl OwnedReadHalf { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// This function is equivalent to [`TcpStream::ready`]. /// /// # Cancel safety @@ -355,6 +361,12 @@ impl OwnedWriteHalf { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// This function is equivalent to [`TcpStream::ready`]. 
/// /// # Cancel safety diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index 0b8529546c6..b7dd3377b75 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -377,6 +377,12 @@ impl TcpStream { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index 922a977a929..af343f20090 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -357,7 +357,9 @@ impl UdpSocket { /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. /// /// # Cancel safety /// diff --git a/tokio/src/net/unix/datagram/socket.rs b/tokio/src/net/unix/datagram/socket.rs index 0f5dca421cf..5e1453e380d 100644 --- a/tokio/src/net/unix/datagram/socket.rs +++ b/tokio/src/net/unix/datagram/socket.rs @@ -104,7 +104,9 @@ impl UnixDatagram { /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. /// /// # Cancel safety /// diff --git a/tokio/src/net/unix/split.rs b/tokio/src/net/unix/split.rs index f9664d53254..816a2578b5f 100644 --- a/tokio/src/net/unix/split.rs +++ b/tokio/src/net/unix/split.rs @@ -182,6 +182,12 @@ impl WriteHalf<'_> { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method diff --git a/tokio/src/net/unix/split_owned.rs b/tokio/src/net/unix/split_owned.rs index 3dce2a86aa8..da41ced83c2 100644 --- a/tokio/src/net/unix/split_owned.rs +++ b/tokio/src/net/unix/split_owned.rs @@ -114,6 +114,12 @@ impl OwnedReadHalf { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. 
The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method @@ -265,6 +271,12 @@ impl OwnedWriteHalf { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method diff --git a/tokio/src/net/unix/stream.rs b/tokio/src/net/unix/stream.rs index e9acbb68240..2d278986c97 100644 --- a/tokio/src/net/unix/stream.rs +++ b/tokio/src/net/unix/stream.rs @@ -66,6 +66,12 @@ impl UnixStream { /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// + /// The function may complete without the socket being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 3f7a5a4fe9d..692c69ded46 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -238,6 +238,12 @@ impl NamedPipeServer { /// can be used to concurrently read / write to the same pipe on a single /// task without splitting the pipe. /// + /// The function may complete without the pipe being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Examples /// /// Concurrently read and write to the pipe on the same task without @@ -989,6 +995,12 @@ impl NamedPipeClient { /// can be used to concurrently read / write to the same pipe on a single /// task without splitting the pipe. /// + /// The function may complete without the pipe being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// /// # Examples /// /// Concurrently read and write to the pipe on the same task without diff --git a/tokio/src/runtime/io/scheduled_io.rs b/tokio/src/runtime/io/scheduled_io.rs index af42ba8a31e..1709091032b 100644 --- a/tokio/src/runtime/io/scheduled_io.rs +++ b/tokio/src/runtime/io/scheduled_io.rs @@ -510,14 +510,24 @@ cfg_io_readiness! 
{ drop(waiters); } State::Done => { - let tick = TICK.unpack(scheduled_io.readiness.load(Acquire)) as u8; - // Safety: State::Done means it is no longer shared let w = unsafe { &mut *waiter.get() }; + let curr = scheduled_io.readiness.load(Acquire); + + // The returned tick might be newer than the event + // which notified our waker. This is ok because the future + // still didn't return `Poll::Ready`. + let tick = TICK.unpack(curr) as u8; + + // The readiness state could have been cleared in the meantime, + // but we allow the returned ready set to be empty. + let curr_ready = Ready::from_usize(READINESS.unpack(curr)); + let ready = curr_ready.intersection(w.interest); + return Poll::Ready(ReadyEvent { tick, - ready: Ready::from_interest(w.interest), + ready, }); } } diff --git a/tokio/tests/tcp_stream.rs b/tokio/tests/tcp_stream.rs index 453023fc51d..31fe3baa296 100644 --- a/tokio/tests/tcp_stream.rs +++ b/tokio/tests/tcp_stream.rs @@ -254,30 +254,34 @@ async fn create_pair() -> (TcpStream, TcpStream) { (client, server) } -fn read_until_pending(stream: &mut TcpStream) { +fn read_until_pending(stream: &mut TcpStream) -> usize { let mut buf = vec![0u8; 1024 * 1024]; + let mut total = 0; loop { match stream.try_read(&mut buf) { - Ok(_) => (), + Ok(n) => total += n, Err(err) => { assert_eq!(err.kind(), io::ErrorKind::WouldBlock); break; } } } + total } -fn write_until_pending(stream: &mut TcpStream) { +fn write_until_pending(stream: &mut TcpStream) -> usize { let buf = vec![0u8; 1024 * 1024]; + let mut total = 0; loop { match stream.try_write(&buf) { - Ok(_) => (), + Ok(n) => total += n, Err(err) => { assert_eq!(err.kind(), io::ErrorKind::WouldBlock); break; } } } + total } #[tokio::test] @@ -357,3 +361,40 @@ async fn try_read_buf() { } } } + +// read_closed is a best effort event, so test only for no false positives. +#[tokio::test] +async fn read_closed() { + let (client, mut server) = create_pair().await; + + let mut ready_fut = task::spawn(client.ready(Interest::READABLE)); + assert_pending!(ready_fut.poll()); + + assert_ok!(server.write_all(b"ping").await); + + let ready_event = assert_ok!(ready_fut.await); + + assert!(!ready_event.is_read_closed()); +} + +// write_closed is a best effort event, so test only for no false positives. +#[tokio::test] +async fn write_closed() { + let (mut client, mut server) = create_pair().await; + + // Fill the write buffer. + let write_size = write_until_pending(&mut client); + let mut ready_fut = task::spawn(client.ready(Interest::WRITABLE)); + assert_pending!(ready_fut.poll()); + + // Drain the socket to make client writable. 
+ let mut read_size = 0; + while read_size < write_size { + server.readable().await.unwrap(); + read_size += read_until_pending(&mut server); + } + + let ready_event = assert_ok!(ready_fut.await); + + assert!(!ready_event.is_write_closed()); +} From 3ce5a2681c734e134c2aa6d6cf91b8d2631bd82b Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Mon, 5 Dec 2022 15:22:43 -0800 Subject: [PATCH 004/101] chore: prepare Tokio v1.23 release (#5270) ### Fixed - net: fix Windows named pipe connect ([#5208]) - io: support vectored writes for `ChildStdin` ([#5216]) - io: fix `async fn ready()` false positive for OS-specific events ([#5231]) ### Changed - runtime: `yield_now` defers task until after driver poll ([#5223]) - runtime: reduce amount of codegen needed per spawned task ([#5213]) - windows: replace `winapi` dependency with `windows-sys` ([#5204]) [#5208]: https://github.com/tokio-rs/tokio/pull/5208 [#5216]: https://github.com/tokio-rs/tokio/pull/5216 [#5213]: https://github.com/tokio-rs/tokio/pull/5213 [#5204]: https://github.com/tokio-rs/tokio/pull/5204 [#5223]: https://github.com/tokio-rs/tokio/pull/5223 [#5231]: https://github.com/tokio-rs/tokio/pull/5231 --- README.md | 2 +- tokio/CHANGELOG.md | 20 ++++++++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 1cb160d30dc..3e51cf546b7 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.22.0", features = ["full"] } +tokio = { version = "1.23.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 670289d0913..f930dfcdb05 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,23 @@ +# 1.23.0 (December 5, 2022) + +### Fixed + + - net: fix Windows named pipe connect ([#5208]) + - io: support vectored writes for `ChildStdin` ([#5216]) + - io: fix `async fn ready()` false positive for OS-specific events ([#5231]) + + ### Changed + - runtime: `yield_now` defers task until after driver poll ([#5223]) + - runtime: reduce amount of codegen needed per spawned task ([#5213]) + - windows: replace `winapi` dependency with `windows-sys` ([#5204]) + + [#5208]: https://github.com/tokio-rs/tokio/pull/5208 + [#5216]: https://github.com/tokio-rs/tokio/pull/5216 + [#5213]: https://github.com/tokio-rs/tokio/pull/5213 + [#5204]: https://github.com/tokio-rs/tokio/pull/5204 + [#5223]: https://github.com/tokio-rs/tokio/pull/5223 + [#5231]: https://github.com/tokio-rs/tokio/pull/5231 + # 1.22.0 (November 17, 2022) ### Added diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index efa6ddc95ef..78cba811156 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.22.0" +version = "1.23.0" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 1cb160d30dc..3e51cf546b7 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.22.0", features = ["full"] } +tokio = { version = "1.23.0", features = ["full"] } ``` Then, on your main.rs: From c4ed16d1b4b299c4a537d7cc4efb078d5fe868a3 Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Tue, 6 Dec 2022 02:03:09 -0700 Subject: [PATCH 005/101] ci: future-proof for FreeBSD 12 (#5260) Raise the mio-aio dev dependency, which transitively brings in Nix, to ensure that the tests will continue to compile if libc switches from a FreeBSD 11 ABI to a FreeBSD 12 one. --- tokio/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 78cba811156..6a8ad1775ce 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -157,7 +157,7 @@ rand = "0.8.0" wasm-bindgen-test = "0.3.0" [target.'cfg(target_os = "freebsd")'.dev-dependencies] -mio-aio = { version = "0.6.0", features = ["tokio"] } +mio-aio = { version = "0.7.0", features = ["tokio"] } [target.'cfg(loom)'.dev-dependencies] loom = { version = "0.5.2", features = ["futures", "checkpoint"] } From 07da5e73eef109c3064b22f484ff930702c1c1a0 Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Tue, 6 Dec 2022 02:03:51 -0700 Subject: [PATCH 006/101] ci: update CI environment to FreeBSD 12.4 (#5272) 12.3 will soon be EoL --- .cirrus.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cirrus.yml b/.cirrus.yml index 7d7c3adfd32..bdc44e0c93c 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,5 +1,5 @@ freebsd_instance: - image: freebsd-12-3-release-amd64 + image: freebsd-12-4-release-amd64 env: RUST_STABLE: stable RUST_NIGHTLY: nightly-2022-10-25 From 22cff80048c62ed0fa20065888667d00d5aedd14 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 6 Dec 2022 19:56:13 -0800 Subject: [PATCH 007/101] chore: update CI's clippy version to 1.65 (#5276) --- .github/workflows/ci.yml | 2 +- tokio-macros/src/select.rs | 4 ++-- tokio-stream/src/stream_ext/then.rs | 2 +- tokio-util/src/sync/poll_semaphore.rs | 2 +- tokio-util/tests/spawn_pinned.rs | 8 ++++---- tokio-util/tests/time_delay_queue.rs | 2 +- tokio/src/io/util/read.rs | 2 +- tokio/src/net/tcp/split_owned.rs | 4 ++-- tokio/src/net/unix/split_owned.rs | 4 ++-- tokio/src/runtime/scheduler/multi_thread/queue.rs | 2 +- tokio/src/runtime/task/harness.rs | 2 +- tokio/src/time/sleep.rs | 2 +- tokio/src/util/linked_list.rs | 2 +- tokio/tests/buffered.rs | 4 ++-- tokio/tests/io_driver.rs | 2 +- tokio/tests/macros_join.rs | 2 +- tokio/tests/macros_select.rs | 2 +- tokio/tests/macros_try_join.rs | 2 +- tokio/tests/rt_common.rs | 2 +- tokio/tests/tcp_peek.rs | 2 +- 20 files changed, 27 insertions(+), 27 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a109e9f0023..d73eb595976 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ env: # Change to specific Rust release to pin rust_stable: stable rust_nightly: nightly-2022-11-03 - rust_clippy: 1.60.0 + rust_clippy: 1.65.0 # When updating this, also update: # - README.md # - tokio/README.md diff --git a/tokio-macros/src/select.rs b/tokio-macros/src/select.rs index 23e280a1056..8c5ae306e67 100644 --- a/tokio-macros/src/select.rs +++ 
b/tokio-macros/src/select.rs @@ -100,10 +100,10 @@ fn clean_pattern(pat: &mut syn::Pat) { } syn::Pat::Reference(reference) => { reference.mutability = None; - clean_pattern(&mut *reference.pat); + clean_pattern(&mut reference.pat); } syn::Pat::Type(type_pat) => { - clean_pattern(&mut *type_pat.pat); + clean_pattern(&mut type_pat.pat); } _ => {} } diff --git a/tokio-stream/src/stream_ext/then.rs b/tokio-stream/src/stream_ext/then.rs index 7f6b5a2394f..cc7caa721e0 100644 --- a/tokio-stream/src/stream_ext/then.rs +++ b/tokio-stream/src/stream_ext/then.rs @@ -72,7 +72,7 @@ where } fn size_hint(&self) -> (usize, Option) { - let future_len = if self.future.is_some() { 1 } else { 0 }; + let future_len = usize::from(self.future.is_some()); let (lower, upper) = self.stream.size_hint(); let lower = lower.saturating_add(future_len); diff --git a/tokio-util/src/sync/poll_semaphore.rs b/tokio-util/src/sync/poll_semaphore.rs index 85f75a40896..6b44574a161 100644 --- a/tokio-util/src/sync/poll_semaphore.rs +++ b/tokio-util/src/sync/poll_semaphore.rs @@ -166,6 +166,6 @@ impl fmt::Debug for PollSemaphore { impl AsRef for PollSemaphore { fn as_ref(&self) -> &Semaphore { - &*self.semaphore + &self.semaphore } } diff --git a/tokio-util/tests/spawn_pinned.rs b/tokio-util/tests/spawn_pinned.rs index b620cce048c..9ea8cd27830 100644 --- a/tokio-util/tests/spawn_pinned.rs +++ b/tokio-util/tests/spawn_pinned.rs @@ -82,8 +82,8 @@ async fn task_panic_propagates() { assert!(result.is_err()); let error = result.unwrap_err(); assert!(error.is_panic()); - let panic_str: &str = *error.into_panic().downcast().unwrap(); - assert_eq!(panic_str, "Test panic"); + let panic_str = error.into_panic().downcast::<&'static str>().unwrap(); + assert_eq!(*panic_str, "Test panic"); // Trying again with a "safe" task still works let join_handle = pool.spawn_pinned(|| async { "test" }); @@ -108,8 +108,8 @@ async fn callback_panic_does_not_kill_worker() { assert!(result.is_err()); let error = result.unwrap_err(); assert!(error.is_panic()); - let panic_str: &str = *error.into_panic().downcast().unwrap(); - assert_eq!(panic_str, "Test panic"); + let panic_str = error.into_panic().downcast::<&'static str>().unwrap(); + assert_eq!(*panic_str, "Test panic"); // Trying again with a "safe" callback works let join_handle = pool.spawn_pinned(|| async { "test" }); diff --git a/tokio-util/tests/time_delay_queue.rs b/tokio-util/tests/time_delay_queue.rs index 0fcdbf4a073..9ceae34365c 100644 --- a/tokio-util/tests/time_delay_queue.rs +++ b/tokio-util/tests/time_delay_queue.rs @@ -1,4 +1,4 @@ -#![allow(clippy::blacklisted_name)] +#![allow(clippy::disallowed_names)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] diff --git a/tokio/src/io/util/read.rs b/tokio/src/io/util/read.rs index edc9d5a9e6e..a1f9c8a0505 100644 --- a/tokio/src/io/util/read.rs +++ b/tokio/src/io/util/read.rs @@ -48,7 +48,7 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let me = self.project(); - let mut buf = ReadBuf::new(*me.buf); + let mut buf = ReadBuf::new(me.buf); ready!(Pin::new(me.reader).poll_read(cx, &mut buf))?; Poll::Ready(Ok(buf.filled().len())) } diff --git a/tokio/src/net/tcp/split_owned.rs b/tokio/src/net/tcp/split_owned.rs index b2730e8fb0f..53fc5f06fd7 100644 --- a/tokio/src/net/tcp/split_owned.rs +++ b/tokio/src/net/tcp/split_owned.rs @@ -490,12 +490,12 @@ impl AsyncWrite for OwnedWriteHalf { impl AsRef for OwnedReadHalf { fn as_ref(&self) -> &TcpStream { - &*self.inner + &self.inner } } impl AsRef for OwnedWriteHalf { fn as_ref(&self) 
-> &TcpStream { - &*self.inner + &self.inner } } diff --git a/tokio/src/net/unix/split_owned.rs b/tokio/src/net/unix/split_owned.rs index da41ced83c2..2cb561d4bd0 100644 --- a/tokio/src/net/unix/split_owned.rs +++ b/tokio/src/net/unix/split_owned.rs @@ -398,12 +398,12 @@ impl AsyncWrite for OwnedWriteHalf { impl AsRef for OwnedReadHalf { fn as_ref(&self) -> &UnixStream { - &*self.inner + &self.inner } } impl AsRef for OwnedWriteHalf { fn as_ref(&self) -> &UnixStream { - &*self.inner + &self.inner } } diff --git a/tokio/src/runtime/scheduler/multi_thread/queue.rs b/tokio/src/runtime/scheduler/multi_thread/queue.rs index 59b448d26b8..958c32716f4 100644 --- a/tokio/src/runtime/scheduler/multi_thread/queue.rs +++ b/tokio/src/runtime/scheduler/multi_thread/queue.rs @@ -263,7 +263,7 @@ impl Local { // safety: The CAS above ensures that no consumer will look at these // values again, and we are the only producer. let batch_iter = BatchTaskIter { - buffer: &*self.inner.buffer, + buffer: &self.inner.buffer, head: head as UnsignedLong, i: 0, }; diff --git a/tokio/src/runtime/task/harness.rs b/tokio/src/runtime/task/harness.rs index c0792979844..a9589375218 100644 --- a/tokio/src/runtime/task/harness.rs +++ b/tokio/src/runtime/task/harness.rs @@ -194,7 +194,7 @@ where TransitionToRunning::Success => { let header_ptr = self.header_ptr(); let waker_ref = waker_ref::(&header_ptr); - let cx = Context::from_waker(&*waker_ref); + let cx = Context::from_waker(&waker_ref); let res = poll_future(self.core(), cx); if res == Poll::Ready(()) { diff --git a/tokio/src/time/sleep.rs b/tokio/src/time/sleep.rs index d974e1ab282..0a012e25015 100644 --- a/tokio/src/time/sleep.rs +++ b/tokio/src/time/sleep.rs @@ -357,7 +357,7 @@ impl Sleep { fn reset_inner(self: Pin<&mut Self>, deadline: Instant) { let mut me = self.project(); me.entry.as_mut().reset(deadline); - (*me.inner).deadline = deadline; + (me.inner).deadline = deadline; #[cfg(all(tokio_unstable, feature = "tracing"))] { diff --git a/tokio/src/util/linked_list.rs b/tokio/src/util/linked_list.rs index 9698f727f4f..b46bd6d4d9e 100644 --- a/tokio/src/util/linked_list.rs +++ b/tokio/src/util/linked_list.rs @@ -126,7 +126,7 @@ impl LinkedList { pub(crate) fn push_front(&mut self, val: L::Handle) { // The value should not be dropped, it is being inserted into the list let val = ManuallyDrop::new(val); - let ptr = L::as_raw(&*val); + let ptr = L::as_raw(&val); assert_ne!(self.head, Some(ptr)); unsafe { L::pointers(ptr).as_mut().set_next(self.head); diff --git a/tokio/tests/buffered.rs b/tokio/tests/buffered.rs index 19afebd392a..4251c3fcc0b 100644 --- a/tokio/tests/buffered.rs +++ b/tokio/tests/buffered.rs @@ -18,10 +18,10 @@ async fn echo_server() { let msg = "foo bar baz"; let t = thread::spawn(move || { - let mut s = assert_ok!(TcpStream::connect(&addr)); + let mut s = assert_ok!(TcpStream::connect(addr)); let t2 = thread::spawn(move || { - let mut s = assert_ok!(TcpStream::connect(&addr)); + let mut s = assert_ok!(TcpStream::connect(addr)); let mut b = vec![0; msg.len() * N]; assert_ok!(s.read_exact(&mut b)); b diff --git a/tokio/tests/io_driver.rs b/tokio/tests/io_driver.rs index 2ca56301de0..97018e0f967 100644 --- a/tokio/tests/io_driver.rs +++ b/tokio/tests/io_driver.rs @@ -80,7 +80,7 @@ fn test_drop_on_notify() { drop(task); // Establish a connection to the acceptor - let _s = TcpStream::connect(&addr).unwrap(); + let _s = TcpStream::connect(addr).unwrap(); // Force the reactor to turn rt.block_on(async {}); diff --git a/tokio/tests/macros_join.rs 
b/tokio/tests/macros_join.rs index 16e7c43102f..a87c6a6f86e 100644 --- a/tokio/tests/macros_join.rs +++ b/tokio/tests/macros_join.rs @@ -1,5 +1,5 @@ #![cfg(feature = "macros")] -#![allow(clippy::blacklisted_name)] +#![allow(clippy::disallowed_names)] use std::sync::Arc; #[cfg(tokio_wasm_not_wasi)] diff --git a/tokio/tests/macros_select.rs b/tokio/tests/macros_select.rs index 60f3738c991..26d6fec874b 100644 --- a/tokio/tests/macros_select.rs +++ b/tokio/tests/macros_select.rs @@ -1,5 +1,5 @@ #![cfg(feature = "macros")] -#![allow(clippy::blacklisted_name)] +#![allow(clippy::disallowed_names)] #[cfg(tokio_wasm_not_wasi)] use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test; diff --git a/tokio/tests/macros_try_join.rs b/tokio/tests/macros_try_join.rs index 209516bb998..6c432221df1 100644 --- a/tokio/tests/macros_try_join.rs +++ b/tokio/tests/macros_try_join.rs @@ -1,5 +1,5 @@ #![cfg(feature = "macros")] -#![allow(clippy::blacklisted_name)] +#![allow(clippy::disallowed_names)] use std::sync::Arc; diff --git a/tokio/tests/rt_common.rs b/tokio/tests/rt_common.rs index ef0c2a222dc..22d821fa1ad 100644 --- a/tokio/tests/rt_common.rs +++ b/tokio/tests/rt_common.rs @@ -661,7 +661,7 @@ rt_test! { loop { // Don't use Tokio's `yield_now()` to avoid special defer // logic. - let _: () = futures::future::poll_fn(|cx| { + futures::future::poll_fn::<(), _>(|cx| { cx.waker().wake_by_ref(); std::task::Poll::Pending }).await; diff --git a/tokio/tests/tcp_peek.rs b/tokio/tests/tcp_peek.rs index 03813c2e46c..b7120232ca4 100644 --- a/tokio/tests/tcp_peek.rs +++ b/tokio/tests/tcp_peek.rs @@ -15,7 +15,7 @@ async fn peek() { let addr = listener.local_addr().unwrap(); let t = thread::spawn(move || assert_ok!(listener.accept()).0); - let left = net::TcpStream::connect(&addr).unwrap(); + let left = net::TcpStream::connect(addr).unwrap(); let mut right = t.join().unwrap(); let _ = right.write(&[1, 2, 3, 4]).unwrap(); From 36039d0bb94d1accf8ae5569f6c50ca5a0c661ef Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Wed, 7 Dec 2022 10:15:03 -0800 Subject: [PATCH 008/101] rt: allow configuring I/O events capacity (#5186) Adds a method `Builder::max_io_events_per_tick()` to the runtime builder. This can be used to configure the capacity of events that may be processed per OS poll. --- tokio/src/process/unix/orphan.rs | 2 +- tokio/src/runtime/builder.rs | 22 ++++++++++++++++++++++ tokio/src/runtime/driver.rs | 9 +++++---- tokio/src/runtime/io/mod.rs | 4 ++-- 4 files changed, 30 insertions(+), 7 deletions(-) diff --git a/tokio/src/process/unix/orphan.rs b/tokio/src/process/unix/orphan.rs index 66572ef7c41..340719603be 100644 --- a/tokio/src/process/unix/orphan.rs +++ b/tokio/src/process/unix/orphan.rs @@ -294,7 +294,7 @@ pub(crate) mod test { #[cfg_attr(miri, ignore)] // Miri does not support epoll. 
#[test] fn does_not_register_signal_if_queue_empty() { - let (io_driver, io_handle) = IoDriver::new().unwrap(); + let (io_driver, io_handle) = IoDriver::new(1024).unwrap(); let signal_driver = SignalDriver::new(io_driver, &io_handle).unwrap(); let handle = signal_driver.handle(); diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index d49a4da39ae..64cf403aaf7 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -44,6 +44,7 @@ pub struct Builder { /// Whether or not to enable the I/O driver enable_io: bool, + nevents: usize, /// Whether or not to enable the time driver enable_time: bool, @@ -228,6 +229,7 @@ impl Builder { // I/O defaults to "off" enable_io: false, + nevents: 1024, // Time defaults to "off" enable_time: false, @@ -647,6 +649,7 @@ impl Builder { enable_io: self.enable_io, enable_time: self.enable_time, start_paused: self.start_paused, + nevents: self.nevents, } } @@ -938,6 +941,25 @@ cfg_io_driver! { self.enable_io = true; self } + + /// Enables the I/O driver and configures the max number of events to be + /// processed per tick. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime; + /// + /// let rt = runtime::Builder::new_current_thread() + /// .enable_io() + /// .max_io_events_per_tick(1024) + /// .build() + /// .unwrap(); + /// ``` + pub fn max_io_events_per_tick(&mut self, capacity: usize) -> &mut Self { + self.nevents = capacity; + self + } } } diff --git a/tokio/src/runtime/driver.rs b/tokio/src/runtime/driver.rs index 8f9c5122b85..4fb6b8783f4 100644 --- a/tokio/src/runtime/driver.rs +++ b/tokio/src/runtime/driver.rs @@ -36,11 +36,12 @@ pub(crate) struct Cfg { pub(crate) enable_time: bool, pub(crate) enable_pause_time: bool, pub(crate) start_paused: bool, + pub(crate) nevents: usize, } impl Driver { pub(crate) fn new(cfg: Cfg) -> io::Result<(Self, Handle)> { - let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io)?; + let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io, cfg.nevents)?; let clock = create_clock(cfg.enable_pause_time, cfg.start_paused); @@ -135,12 +136,12 @@ cfg_io_driver! { Disabled(UnparkThread), } - fn create_io_stack(enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> { + fn create_io_stack(enabled: bool, nevents: usize) -> io::Result<(IoStack, IoHandle, SignalHandle)> { #[cfg(loom)] assert!(!enabled); let ret = if enabled { - let (io_driver, io_handle) = crate::runtime::io::Driver::new()?; + let (io_driver, io_handle) = crate::runtime::io::Driver::new(nevents)?; let (signal_driver, signal_handle) = create_signal_driver(io_driver, &io_handle)?; let process_driver = create_process_driver(signal_driver); @@ -201,7 +202,7 @@ cfg_not_io_driver! { #[derive(Debug)] pub(crate) struct IoStack(ParkThread); - fn create_io_stack(_enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> { + fn create_io_stack(_enabled: bool, _nevents: usize) -> io::Result<(IoStack, IoHandle, SignalHandle)> { let park_thread = ParkThread::new(); let unpark_thread = park_thread.unpark(); Ok((IoStack(park_thread), unpark_thread, Default::default())) diff --git a/tokio/src/runtime/io/mod.rs b/tokio/src/runtime/io/mod.rs index 02039f2a49c..1ddf920f3bb 100644 --- a/tokio/src/runtime/io/mod.rs +++ b/tokio/src/runtime/io/mod.rs @@ -104,7 +104,7 @@ fn _assert_kinds() { impl Driver { /// Creates a new event loop, returning any error that happened during the /// creation. 
- pub(crate) fn new() -> io::Result<(Driver, Handle)> { + pub(crate) fn new(nevents: usize) -> io::Result<(Driver, Handle)> { let poll = mio::Poll::new()?; #[cfg(not(tokio_wasi))] let waker = mio::Waker::new(poll.registry(), TOKEN_WAKEUP)?; @@ -116,7 +116,7 @@ impl Driver { let driver = Driver { tick: 0, signal_ready: false, - events: mio::Events::with_capacity(1024), + events: mio::Events::with_capacity(nevents), poll, resources: slab, }; From c693ccd210c7a318957b0701f4a9632b2d545d6a Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Thu, 8 Dec 2022 09:13:22 -0800 Subject: [PATCH 009/101] ci: test no const mutex new (#5257) This adds CI coverage for a couple of code paths that are not currently hit in CI: * no `const fn Mutex::new` * no `AtomicU64` This is done by adding some new CFG flags used only for tests in order to force those code paths. --- .github/workflows/ci.yml | 30 ++- tokio/tests/_require_full.rs | 8 +- tokio/tests/support/panic.rs | 8 +- tokio/tests/sync_once_cell.rs | 355 ++++++++++++++++++---------------- tokio/tests/task_join_set.rs | 95 ++++----- 5 files changed, 267 insertions(+), 229 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d73eb595976..38f65080442 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -268,26 +268,42 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - target: - - i686-unknown-linux-gnu - - arm-unknown-linux-gnueabihf - - armv7-unknown-linux-gnueabihf - - aarch64-unknown-linux-gnu + include: + - target: i686-unknown-linux-gnu + - target: arm-unknown-linux-gnueabihf + - target: armv7-unknown-linux-gnueabihf + - target: aarch64-unknown-linux-gnu + + # Run a platform without AtomicU64 and no const Mutex::new + - target: arm-unknown-linux-gnueabihf + rustflags: --cfg tokio_no_const_mutex_new steps: - uses: actions/checkout@v3 - - name: Install Rust ${{ env.rust_stable }} + - name: Install Rust stable uses: actions-rs/toolchain@v1 with: toolchain: ${{ env.rust_stable }} target: ${{ matrix.target }} override: true + # First run with all features (including parking_lot) - uses: actions-rs/cargo@v1 with: use-cross: true command: test args: -p tokio --all-features --target ${{ matrix.target }} --tests env: - RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_ipv6 + RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_ipv6 ${{ matrix.rustflags }} + # Now run without parking_lot + - name: Remove `parking_lot` from `full` feature + run: sed -i '0,/parking_lot/{/parking_lot/d;}' tokio/Cargo.toml + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + # The `tokio_no_parking_lot` cfg is here to ensure the `sed` above does not silently break. + args: -p tokio --features full,test-util --target ${{ matrix.target }} --tests + env: + RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_ipv6 --cfg tokio_no_parking_lot ${{ matrix.rustflags }} # See https://github.com/tokio-rs/tokio/issues/5187 no-atomic-u64: diff --git a/tokio/tests/_require_full.rs b/tokio/tests/_require_full.rs index a339374fd86..4b9698afedd 100644 --- a/tokio/tests/_require_full.rs +++ b/tokio/tests/_require_full.rs @@ -1,2 +1,8 @@ -#![cfg(not(any(feature = "full", tokio_wasm)))] +#[cfg(not(any(feature = "full", tokio_wasm)))] compile_error!("run main Tokio tests with `--features full`"); + +// CI sets `--cfg tokio_no_parking_lot` when trying to run tests with +// `parking_lot` disabled. This check prevents "silent failure" if `parking_lot` +// accidentally gets enabled. 
+#[cfg(all(tokio_no_parking_lot, feature = "parking_lot"))] +compile_error!("parking_lot feature enabled when it should not be"); diff --git a/tokio/tests/support/panic.rs b/tokio/tests/support/panic.rs index 7f60c76f00a..df2f59d30ff 100644 --- a/tokio/tests/support/panic.rs +++ b/tokio/tests/support/panic.rs @@ -1,9 +1,8 @@ -use parking_lot::{const_mutex, Mutex}; use std::panic; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; pub fn test_panic(func: Func) -> Option { - static PANIC_MUTEX: Mutex<()> = const_mutex(()); + static PANIC_MUTEX: Mutex<()> = Mutex::new(()); { let _guard = PANIC_MUTEX.lock(); @@ -16,6 +15,7 @@ pub fn test_panic(func: Func) -> Option(func: Func) -> Option u32 { - 5 -} - -async fn func2() -> u32 { - time::sleep(Duration::from_millis(1)).await; - 10 -} - -async fn func_err() -> Result { - Err(()) -} - -async fn func_ok() -> Result { - Ok(10) -} - -async fn func_panic() -> u32 { - time::sleep(Duration::from_millis(1)).await; - panic!(); -} - -async fn sleep_and_set() -> u32 { - // Simulate sleep by pausing time and waiting for another thread to - // resume clock when calling `set`, then finding the cell being initialized - // by this call - time::sleep(Duration::from_millis(2)).await; - 5 -} - -async fn advance_time_and_set(cell: &'static OnceCell, v: u32) -> Result<(), SetError> { - time::advance(Duration::from_millis(1)).await; - cell.set(v) -} - -#[test] -fn get_or_init() { - let rt = runtime::Builder::new_current_thread() - .enable_time() - .start_paused(true) - .build() - .unwrap(); - - static ONCE: OnceCell = OnceCell::const_new(); - - rt.block_on(async { - let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await }); - let handle2 = rt.spawn(async { ONCE.get_or_init(func2).await }); - - time::advance(Duration::from_millis(1)).await; - time::resume(); - - let result1 = handle1.await.unwrap(); - let result2 = handle2.await.unwrap(); - - assert_eq!(*result1, 5); - assert_eq!(*result2, 5); - }); -} - -#[test] -fn get_or_init_panic() { - let rt = runtime::Builder::new_current_thread() - .enable_time() - .build() - .unwrap(); - - static ONCE: OnceCell = OnceCell::const_new(); - - rt.block_on(async { - time::pause(); - - let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await }); - let handle2 = rt.spawn(async { ONCE.get_or_init(func_panic).await }); - - time::advance(Duration::from_millis(1)).await; - - let result1 = handle1.await.unwrap(); - let result2 = handle2.await.unwrap(); - - assert_eq!(*result1, 5); - assert_eq!(*result2, 5); - }); -} - -#[test] -fn set_and_get() { - let rt = runtime::Builder::new_current_thread() - .enable_time() - .build() - .unwrap(); - - static ONCE: OnceCell = OnceCell::const_new(); - - rt.block_on(async { - let _ = rt.spawn(async { ONCE.set(5) }).await; - let value = ONCE.get().unwrap(); - assert_eq!(*value, 5); - }); -} - -#[test] -fn get_uninit() { - static ONCE: OnceCell = OnceCell::const_new(); - let uninit = ONCE.get(); - assert!(uninit.is_none()); -} - -#[test] -fn set_twice() { - static ONCE: OnceCell = OnceCell::const_new(); - - let first = ONCE.set(5); - assert_eq!(first, Ok(())); - let second = ONCE.set(6); - assert!(second.err().unwrap().is_already_init_err()); -} - -#[test] -fn set_while_initializing() { - let rt = runtime::Builder::new_current_thread() - .enable_time() - .build() - .unwrap(); - - static ONCE: OnceCell = OnceCell::const_new(); - - rt.block_on(async { - time::pause(); - - let handle1 = rt.spawn(async { ONCE.get_or_init(sleep_and_set).await }); - let handle2 = rt.spawn(async { 
advance_time_and_set(&ONCE, 10).await }); - - time::advance(Duration::from_millis(2)).await; - - let result1 = handle1.await.unwrap(); - let result2 = handle2.await.unwrap(); - - assert_eq!(*result1, 5); - assert!(result2.err().unwrap().is_initializing_err()); - }); -} - -#[test] -fn get_or_try_init() { - let rt = runtime::Builder::new_current_thread() - .enable_time() - .start_paused(true) - .build() - .unwrap(); - - static ONCE: OnceCell = OnceCell::const_new(); - - rt.block_on(async { - let handle1 = rt.spawn(async { ONCE.get_or_try_init(func_err).await }); - let handle2 = rt.spawn(async { ONCE.get_or_try_init(func_ok).await }); - - time::advance(Duration::from_millis(1)).await; - time::resume(); - - let result1 = handle1.await.unwrap(); - assert!(result1.is_err()); - - let result2 = handle2.await.unwrap(); - assert_eq!(*result2.unwrap(), 10); - }); -} +use tokio::sync::OnceCell; #[test] fn drop_cell() { @@ -272,3 +101,185 @@ fn from() { let cell = OnceCell::from(2); assert_eq!(*cell.get().unwrap(), 2); } + +#[cfg(feature = "parking_lot")] +mod parking_lot { + use super::*; + + use tokio::runtime; + use tokio::sync::SetError; + use tokio::time; + + use std::time::Duration; + + async fn func1() -> u32 { + 5 + } + + async fn func2() -> u32 { + time::sleep(Duration::from_millis(1)).await; + 10 + } + + async fn func_err() -> Result { + Err(()) + } + + async fn func_ok() -> Result { + Ok(10) + } + + async fn func_panic() -> u32 { + time::sleep(Duration::from_millis(1)).await; + panic!(); + } + + async fn sleep_and_set() -> u32 { + // Simulate sleep by pausing time and waiting for another thread to + // resume clock when calling `set`, then finding the cell being initialized + // by this call + time::sleep(Duration::from_millis(2)).await; + 5 + } + + async fn advance_time_and_set( + cell: &'static OnceCell, + v: u32, + ) -> Result<(), SetError> { + time::advance(Duration::from_millis(1)).await; + cell.set(v) + } + + #[test] + fn get_or_init() { + let rt = runtime::Builder::new_current_thread() + .enable_time() + .start_paused(true) + .build() + .unwrap(); + + static ONCE: OnceCell = OnceCell::const_new(); + + rt.block_on(async { + let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await }); + let handle2 = rt.spawn(async { ONCE.get_or_init(func2).await }); + + time::advance(Duration::from_millis(1)).await; + time::resume(); + + let result1 = handle1.await.unwrap(); + let result2 = handle2.await.unwrap(); + + assert_eq!(*result1, 5); + assert_eq!(*result2, 5); + }); + } + + #[test] + fn get_or_init_panic() { + let rt = runtime::Builder::new_current_thread() + .enable_time() + .build() + .unwrap(); + + static ONCE: OnceCell = OnceCell::const_new(); + + rt.block_on(async { + time::pause(); + + let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await }); + let handle2 = rt.spawn(async { ONCE.get_or_init(func_panic).await }); + + time::advance(Duration::from_millis(1)).await; + + let result1 = handle1.await.unwrap(); + let result2 = handle2.await.unwrap(); + + assert_eq!(*result1, 5); + assert_eq!(*result2, 5); + }); + } + + #[test] + fn set_and_get() { + let rt = runtime::Builder::new_current_thread() + .enable_time() + .build() + .unwrap(); + + static ONCE: OnceCell = OnceCell::const_new(); + + rt.block_on(async { + let _ = rt.spawn(async { ONCE.set(5) }).await; + let value = ONCE.get().unwrap(); + assert_eq!(*value, 5); + }); + } + + #[test] + fn get_uninit() { + static ONCE: OnceCell = OnceCell::const_new(); + let uninit = ONCE.get(); + assert!(uninit.is_none()); + } + + #[test] + fn 
set_twice() { + static ONCE: OnceCell = OnceCell::const_new(); + + let first = ONCE.set(5); + assert_eq!(first, Ok(())); + let second = ONCE.set(6); + assert!(second.err().unwrap().is_already_init_err()); + } + + #[test] + fn set_while_initializing() { + let rt = runtime::Builder::new_current_thread() + .enable_time() + .build() + .unwrap(); + + static ONCE: OnceCell = OnceCell::const_new(); + + rt.block_on(async { + time::pause(); + + let handle1 = rt.spawn(async { ONCE.get_or_init(sleep_and_set).await }); + let handle2 = rt.spawn(async { advance_time_and_set(&ONCE, 10).await }); + + time::advance(Duration::from_millis(2)).await; + + let result1 = handle1.await.unwrap(); + let result2 = handle2.await.unwrap(); + + assert_eq!(*result1, 5); + assert!(result2.err().unwrap().is_initializing_err()); + }); + } + + #[test] + fn get_or_try_init() { + let rt = runtime::Builder::new_current_thread() + .enable_time() + .start_paused(true) + .build() + .unwrap(); + + static ONCE: OnceCell = OnceCell::const_new(); + + rt.block_on(async { + let handle1 = rt.spawn(async { ONCE.get_or_try_init(func_err).await }); + let handle2 = rt.spawn(async { ONCE.get_or_try_init(func_ok).await }); + + time::advance(Duration::from_millis(1)).await; + time::resume(); + + let result1 = handle1.await.unwrap(); + assert!(result1.is_err()); + + let result2 = handle2.await.unwrap(); + assert_eq!(*result2.unwrap(), 10); + }); + } +} diff --git a/tokio/tests/task_join_set.rs b/tokio/tests/task_join_set.rs index 20d4927212a..b1b6cf9665f 100644 --- a/tokio/tests/task_join_set.rs +++ b/tokio/tests/task_join_set.rs @@ -5,8 +5,6 @@ use tokio::sync::oneshot; use tokio::task::JoinSet; use tokio::time::Duration; -use futures::future::FutureExt; - fn rt() -> tokio::runtime::Runtime { tokio::runtime::Builder::new_current_thread() .build() @@ -156,49 +154,6 @@ fn runtime_gone() { .is_cancelled()); } -// This ensures that `join_next` works correctly when the coop budget is -// exhausted. -#[tokio::test(flavor = "current_thread")] -async fn join_set_coop() { - // Large enough to trigger coop. - const TASK_NUM: u32 = 1000; - - static SEM: tokio::sync::Semaphore = tokio::sync::Semaphore::const_new(0); - - let mut set = JoinSet::new(); - - for _ in 0..TASK_NUM { - set.spawn(async { - SEM.add_permits(1); - }); - } - - // Wait for all tasks to complete. - // - // Since this is a `current_thread` runtime, there's no race condition - // between the last permit being added and the task completing. - let _ = SEM.acquire_many(TASK_NUM).await.unwrap(); - - let mut count = 0; - let mut coop_count = 0; - loop { - match set.join_next().now_or_never() { - Some(Some(Ok(()))) => {} - Some(Some(Err(err))) => panic!("failed: {}", err), - None => { - coop_count += 1; - tokio::task::yield_now().await; - continue; - } - Some(None) => break, - } - - count += 1; - } - assert!(coop_count >= 1); - assert_eq!(count, TASK_NUM); -} - #[tokio::test(start_paused = true)] async fn abort_all() { let mut set: JoinSet<()> = JoinSet::new(); @@ -228,3 +183,53 @@ async fn abort_all() { assert_eq!(count, 10); assert_eq!(set.len(), 0); } + +#[cfg(feature = "parking_lot")] +mod parking_lot { + use super::*; + + use futures::future::FutureExt; + + // This ensures that `join_next` works correctly when the coop budget is + // exhausted. + #[tokio::test(flavor = "current_thread")] + async fn join_set_coop() { + // Large enough to trigger coop. 
+ const TASK_NUM: u32 = 1000; + + static SEM: tokio::sync::Semaphore = tokio::sync::Semaphore::const_new(0); + + let mut set = JoinSet::new(); + + for _ in 0..TASK_NUM { + set.spawn(async { + SEM.add_permits(1); + }); + } + + // Wait for all tasks to complete. + // + // Since this is a `current_thread` runtime, there's no race condition + // between the last permit being added and the task completing. + let _ = SEM.acquire_many(TASK_NUM).await.unwrap(); + + let mut count = 0; + let mut coop_count = 0; + loop { + match set.join_next().now_or_never() { + Some(Some(Ok(()))) => {} + Some(Some(Err(err))) => panic!("failed: {}", err), + None => { + coop_count += 1; + tokio::task::yield_now().await; + continue; + } + Some(None) => break, + } + + count += 1; + } + assert!(coop_count >= 1); + assert_eq!(count, TASK_NUM); + } +} From ae69d11d1f9f17c536f35369ab597cebb4bd0159 Mon Sep 17 00:00:00 2001 From: Matt Fellenz Date: Fri, 9 Dec 2022 03:12:42 -0800 Subject: [PATCH 010/101] util: remove `Encoder` bound on `FramedParts` constructor (#5280) --- tokio-util/src/codec/framed.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tokio-util/src/codec/framed.rs b/tokio-util/src/codec/framed.rs index 8a344f90db2..043b4384193 100644 --- a/tokio-util/src/codec/framed.rs +++ b/tokio-util/src/codec/framed.rs @@ -368,10 +368,7 @@ pub struct FramedParts { impl FramedParts { /// Create a new, default, `FramedParts` - pub fn new(io: T, codec: U) -> FramedParts - where - U: Encoder, - { + pub fn new(io: T, codec: U) -> FramedParts { FramedParts { io, codec, From 39766220f4adc969ef7d026e04d752f5dfe55fb9 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Sat, 10 Dec 2022 13:49:16 -0800 Subject: [PATCH 011/101] rt: implement task::Id using `StaticAtomicU64` (#5282) This patch simplifies the implementation of `task::Id` by moving conditional compilation into the `AtomicU64` definition. To handle platforms that do not include `const fn Mutex::new()`, `StaticAtomicU64` is defined as always having a `const fn new()`. `StaticAtomicU64` is implemented with `OnceCell` when needed. --- tokio/src/loom/mocked.rs | 7 +++ tokio/src/loom/std/atomic_u64.rs | 9 ++-- tokio/src/loom/std/atomic_u64_as_mutex.rs | 18 ++++--- tokio/src/loom/std/atomic_u64_native.rs | 4 ++ .../loom/std/atomic_u64_static_const_new.rs | 12 +++++ .../loom/std/atomic_u64_static_once_cell.rs | 36 +++++++++++++ tokio/src/loom/std/mod.rs | 2 +- tokio/src/runtime/task/mod.rs | 51 ++----------------- tokio/src/util/once_cell.rs | 4 +- 9 files changed, 83 insertions(+), 60 deletions(-) create mode 100644 tokio/src/loom/std/atomic_u64_native.rs create mode 100644 tokio/src/loom/std/atomic_u64_static_const_new.rs create mode 100644 tokio/src/loom/std/atomic_u64_static_once_cell.rs diff --git a/tokio/src/loom/mocked.rs b/tokio/src/loom/mocked.rs index 1c4a32dd863..56dc1a06344 100644 --- a/tokio/src/loom/mocked.rs +++ b/tokio/src/loom/mocked.rs @@ -25,6 +25,13 @@ pub(crate) mod sync { } } pub(crate) use loom::sync::*; + + pub(crate) mod atomic { + pub(crate) use loom::sync::atomic::*; + + // TODO: implement a loom version + pub(crate) type StaticAtomicU64 = std::sync::atomic::AtomicU64; + } } pub(crate) mod rand { diff --git a/tokio/src/loom/std/atomic_u64.rs b/tokio/src/loom/std/atomic_u64.rs index 5d1d8a89f8b..ce391be3e11 100644 --- a/tokio/src/loom/std/atomic_u64.rs +++ b/tokio/src/loom/std/atomic_u64.rs @@ -7,12 +7,13 @@ // `#[cfg(target_has_atomic = "64")]`. 
 // Refs: https://github.com/rust-lang/rust/tree/master/src/librustc_target
 
 cfg_has_atomic_u64! {
-    pub(crate) use std::sync::atomic::AtomicU64;
+    #[path = "atomic_u64_native.rs"]
+    mod imp;
 }
 
 cfg_not_has_atomic_u64! {
     #[path = "atomic_u64_as_mutex.rs"]
-    mod atomic_u64_as_mutex;
-
-    pub(crate) use atomic_u64_as_mutex::AtomicU64;
+    mod imp;
 }
+
+pub(crate) use imp::{AtomicU64, StaticAtomicU64};
diff --git a/tokio/src/loom/std/atomic_u64_as_mutex.rs b/tokio/src/loom/std/atomic_u64_as_mutex.rs
index 84ddff06fb8..9b3b6fac68c 100644
--- a/tokio/src/loom/std/atomic_u64_as_mutex.rs
+++ b/tokio/src/loom/std/atomic_u64_as_mutex.rs
@@ -1,18 +1,24 @@
 use crate::loom::sync::Mutex;
 use std::sync::atomic::Ordering;
 
+cfg_has_const_mutex_new! {
+    #[path = "atomic_u64_static_const_new.rs"]
+    mod static_macro;
+}
+
+cfg_not_has_const_mutex_new! {
+    #[path = "atomic_u64_static_once_cell.rs"]
+    mod static_macro;
+}
+
+pub(crate) use static_macro::StaticAtomicU64;
+
 #[derive(Debug)]
 pub(crate) struct AtomicU64 {
     inner: Mutex<u64>,
 }
 
 impl AtomicU64 {
-    pub(crate) fn new(val: u64) -> Self {
-        Self {
-            inner: Mutex::new(val),
-        }
-    }
-
     pub(crate) fn load(&self, _: Ordering) -> u64 {
         *self.inner.lock()
     }
diff --git a/tokio/src/loom/std/atomic_u64_native.rs b/tokio/src/loom/std/atomic_u64_native.rs
new file mode 100644
index 00000000000..08adb28629a
--- /dev/null
+++ b/tokio/src/loom/std/atomic_u64_native.rs
@@ -0,0 +1,4 @@
+pub(crate) use std::sync::atomic::{AtomicU64, Ordering};
+
+/// Alias `AtomicU64` to `StaticAtomicU64`
+pub(crate) type StaticAtomicU64 = AtomicU64;
diff --git a/tokio/src/loom/std/atomic_u64_static_const_new.rs b/tokio/src/loom/std/atomic_u64_static_const_new.rs
new file mode 100644
index 00000000000..a4215342b68
--- /dev/null
+++ b/tokio/src/loom/std/atomic_u64_static_const_new.rs
@@ -0,0 +1,12 @@
+use super::AtomicU64;
+use crate::loom::sync::Mutex;
+
+pub(crate) type StaticAtomicU64 = AtomicU64;
+
+impl AtomicU64 {
+    pub(crate) const fn new(val: u64) -> Self {
+        Self {
+            inner: Mutex::const_new(val),
+        }
+    }
+}
diff --git a/tokio/src/loom/std/atomic_u64_static_once_cell.rs b/tokio/src/loom/std/atomic_u64_static_once_cell.rs
new file mode 100644
index 00000000000..14b3a54937b
--- /dev/null
+++ b/tokio/src/loom/std/atomic_u64_static_once_cell.rs
@@ -0,0 +1,36 @@
+use super::AtomicU64;
+use crate::loom::sync::{atomic::Ordering, Mutex};
+use crate::util::once_cell::OnceCell;
+
+pub(crate) struct StaticAtomicU64 {
+    init: u64,
+    cell: OnceCell<Mutex<u64>>,
+}
+
+impl AtomicU64 {
+    pub(crate) fn new(val: u64) -> Self {
+        Self {
+            inner: Mutex::new(val),
+        }
+    }
+}
+
+impl StaticAtomicU64 {
+    pub(crate) const fn new(val: u64) -> StaticAtomicU64 {
+        StaticAtomicU64 {
+            init: val,
+            cell: OnceCell::new(),
+        }
+    }
+
+    pub(crate) fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
+        let mut lock = self.inner().lock();
+        let prev = *lock;
+        *lock = prev + val;
+        prev
+    }
+
+    fn inner(&self) -> &Mutex<u64> {
+        self.cell.get(|| Mutex::new(self.init))
+    }
+}
diff --git a/tokio/src/loom/std/mod.rs b/tokio/src/loom/std/mod.rs
index 1fc0032a87d..f0fcd46d23d 100644
--- a/tokio/src/loom/std/mod.rs
+++ b/tokio/src/loom/std/mod.rs
@@ -71,7 +71,7 @@ pub(crate) mod sync {
     pub(crate) mod atomic {
         pub(crate) use crate::loom::std::atomic_u16::AtomicU16;
         pub(crate) use crate::loom::std::atomic_u32::AtomicU32;
-        pub(crate) use crate::loom::std::atomic_u64::AtomicU64;
+        pub(crate) use crate::loom::std::atomic_u64::{AtomicU64, StaticAtomicU64};
         pub(crate) use
std::sync::atomic::{fence, AtomicBool, AtomicPtr, AtomicU8, Ordering}; diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index fea6e0faada..9bb1044ce92 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -562,55 +562,12 @@ impl fmt::Display for Id { } impl Id { - // When 64-bit atomics are available, use a static `AtomicU64` counter to - // generate task IDs. - // - // Note(eliza): we _could_ just use `crate::loom::AtomicU64`, which switches - // between an atomic and mutex-based implementation here, rather than having - // two separate functions for targets with and without 64-bit atomics. - // However, because we can't use the mutex-based implementation in a static - // initializer directly, the 32-bit impl also has to use a `OnceCell`, and I - // thought it was nicer to avoid the `OnceCell` overhead on 64-bit - // platforms... - cfg_has_atomic_u64! { - pub(crate) fn next() -> Self { - use std::sync::atomic::{AtomicU64, Ordering::Relaxed}; - static NEXT_ID: AtomicU64 = AtomicU64::new(1); - Self(NEXT_ID.fetch_add(1, Relaxed)) - } - } - - cfg_not_has_atomic_u64! { - cfg_has_const_mutex_new! { - pub(crate) fn next() -> Self { - use crate::loom::sync::Mutex; - static NEXT_ID: Mutex = Mutex::const_new(1); - - let mut lock = NEXT_ID.lock(); - let id = *lock; - *lock += 1; - Self(id) - } - } + pub(crate) fn next() -> Self { + use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64}; - cfg_not_has_const_mutex_new! { - pub(crate) fn next() -> Self { - use crate::util::once_cell::OnceCell; - use crate::loom::sync::Mutex; + static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(1); - fn init_next_id() -> Mutex { - Mutex::new(1) - } - - static NEXT_ID: OnceCell> = OnceCell::new(); - - let next_id = NEXT_ID.get(init_next_id); - let mut lock = next_id.lock(); - let id = *lock; - *lock += 1; - Self(id) - } - } + Self(NEXT_ID.fetch_add(1, Relaxed)) } pub(crate) fn as_u64(&self) -> u64 { diff --git a/tokio/src/util/once_cell.rs b/tokio/src/util/once_cell.rs index 138d2a74626..1925f0a7606 100644 --- a/tokio/src/util/once_cell.rs +++ b/tokio/src/util/once_cell.rs @@ -25,7 +25,7 @@ impl OnceCell { /// If the `init` closure panics, then the `OnceCell` is poisoned and all /// future calls to `get` will panic. 
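A minimal standalone sketch (not part of the patch; the function names are invented for illustration) of why the `OnceCell::get` signature change in the hunk just below matters: a plain `fn() -> T` pointer cannot capture its environment, so `StaticAtomicU64::inner` above could not pass `|| Mutex::new(self.init)` through it, while `impl FnOnce() -> T` accepts capturing closures.

fn run_fn_ptr<T>(init: fn() -> T) -> T {
    init()
}

fn run_closure<T>(init: impl FnOnce() -> T) -> T {
    init()
}

fn demo(init_value: u64) -> u64 {
    // A non-capturing closure coerces to a `fn() -> u64` pointer, so this is fine.
    let _constant = run_fn_ptr(|| 1);

    // A closure capturing `init_value` does not coerce to a function pointer;
    // it needs the `impl FnOnce() -> T` form.
    run_closure(move || init_value)
}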
#[inline] - pub(crate) fn get(&self, init: fn() -> T) -> &T { + pub(crate) fn get(&self, init: impl FnOnce() -> T) -> &T { if !self.once.is_completed() { self.do_init(init); } @@ -41,7 +41,7 @@ impl OnceCell { } #[cold] - fn do_init(&self, init: fn() -> T) { + fn do_init(&self, init: impl FnOnce() -> T) { let value_ptr = self.value.get() as *mut T; self.once.call_once(|| { From 81b50e946fe2f6b30de5e61356ab7cae560956a9 Mon Sep 17 00:00:00 2001 From: John Nunley Date: Thu, 15 Dec 2022 13:48:00 -0800 Subject: [PATCH 012/101] sync: decrease stack usage in mpsc channel (#5294) --- tokio/src/sync/mpsc/block.rs | 143 +++++++++++++++++++++++++---------- tokio/src/sync/mpsc/list.rs | 2 +- 2 files changed, 104 insertions(+), 41 deletions(-) diff --git a/tokio/src/sync/mpsc/block.rs b/tokio/src/sync/mpsc/block.rs index 58f4a9f6cc3..39c3e1be2d9 100644 --- a/tokio/src/sync/mpsc/block.rs +++ b/tokio/src/sync/mpsc/block.rs @@ -1,6 +1,7 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize}; +use std::alloc::Layout; use std::mem::MaybeUninit; use std::ops; use std::ptr::{self, NonNull}; @@ -10,6 +11,17 @@ use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release}; /// /// Each block in the list can hold up to `BLOCK_CAP` messages. pub(crate) struct Block { + /// The header fields. + header: BlockHeader, + + /// Array containing values pushed into the block. Values are stored in a + /// continuous array in order to improve cache line behavior when reading. + /// The values must be manually dropped. + values: Values, +} + +/// Extra fields for a `Block`. +struct BlockHeader { /// The start index of this block. /// /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`. @@ -24,11 +36,6 @@ pub(crate) struct Block { /// The observed `tail_position` value *after* the block has been passed by /// `block_tail`. observed_tail_position: UnsafeCell, - - /// Array containing values pushed into the block. Values are stored in a - /// continuous array in order to improve cache line behavior when reading. - /// The values must be manually dropped. - values: Values, } pub(crate) enum Read { @@ -36,6 +43,7 @@ pub(crate) enum Read { Closed, } +#[repr(transparent)] struct Values([UnsafeCell>; BLOCK_CAP]); use super::BLOCK_CAP; @@ -71,28 +79,56 @@ pub(crate) fn offset(slot_index: usize) -> usize { SLOT_MASK & slot_index } +generate_addr_of_methods! { + impl Block { + unsafe fn addr_of_header(self: NonNull) -> NonNull> { + &self.header + } + + unsafe fn addr_of_values(self: NonNull) -> NonNull> { + &self.values + } + } +} + impl Block { - pub(crate) fn new(start_index: usize) -> Block { - Block { - // The absolute index in the channel of the first slot in the block. - start_index, + pub(crate) fn new(start_index: usize) -> Box> { + unsafe { + // Allocate the block on the heap. + // SAFETY: The size of the Block is non-zero, since it is at least the size of the header. + let block = std::alloc::alloc(Layout::new::>()) as *mut Block; + let block = match NonNull::new(block) { + Some(block) => block, + None => std::alloc::handle_alloc_error(Layout::new::>()), + }; + + // Write the header to the block. + Block::addr_of_header(block).as_ptr().write(BlockHeader { + // The absolute index in the channel of the first slot in the block. + start_index, - // Pointer to the next block in the linked list. - next: AtomicPtr::new(ptr::null_mut()), + // Pointer to the next block in the linked list. 
+ next: AtomicPtr::new(ptr::null_mut()), - ready_slots: AtomicUsize::new(0), + ready_slots: AtomicUsize::new(0), - observed_tail_position: UnsafeCell::new(0), + observed_tail_position: UnsafeCell::new(0), + }); - // Value storage - values: unsafe { Values::uninitialized() }, + // Initialize the values array. + Values::initialize(Block::addr_of_values(block)); + + // Convert the pointer to a `Box`. + // Safety: The raw pointer was allocated using the global allocator, and with + // the layout for a `Block`, so it's valid to convert it to box. + Box::from_raw(block.as_ptr()) } } /// Returns `true` if the block matches the given index. pub(crate) fn is_at_index(&self, index: usize) -> bool { debug_assert!(offset(index) == 0); - self.start_index == index + self.header.start_index == index } /// Returns the number of blocks between `self` and the block at the @@ -101,7 +137,7 @@ impl Block { /// `start_index` must represent a block *after* `self`. pub(crate) fn distance(&self, other_index: usize) -> usize { debug_assert!(offset(other_index) == 0); - other_index.wrapping_sub(self.start_index) / BLOCK_CAP + other_index.wrapping_sub(self.header.start_index) / BLOCK_CAP } /// Reads the value at the given offset. @@ -116,7 +152,7 @@ impl Block { pub(crate) unsafe fn read(&self, slot_index: usize) -> Option> { let offset = offset(slot_index); - let ready_bits = self.ready_slots.load(Acquire); + let ready_bits = self.header.ready_slots.load(Acquire); if !is_ready(ready_bits, offset) { if is_tx_closed(ready_bits) { @@ -156,7 +192,7 @@ impl Block { /// Signal to the receiver that the sender half of the list is closed. pub(crate) unsafe fn tx_close(&self) { - self.ready_slots.fetch_or(TX_CLOSED, Release); + self.header.ready_slots.fetch_or(TX_CLOSED, Release); } /// Resets the block to a blank state. This enables reusing blocks in the @@ -169,9 +205,9 @@ impl Block { /// * All slots are empty. /// * The caller holds a unique pointer to the block. pub(crate) unsafe fn reclaim(&mut self) { - self.start_index = 0; - self.next = AtomicPtr::new(ptr::null_mut()); - self.ready_slots = AtomicUsize::new(0); + self.header.start_index = 0; + self.header.next = AtomicPtr::new(ptr::null_mut()); + self.header.ready_slots = AtomicUsize::new(0); } /// Releases the block to the rx half for freeing. @@ -187,19 +223,20 @@ impl Block { pub(crate) unsafe fn tx_release(&self, tail_position: usize) { // Track the observed tail_position. Any sender targeting a greater // tail_position is guaranteed to not access this block. - self.observed_tail_position + self.header + .observed_tail_position .with_mut(|ptr| *ptr = tail_position); // Set the released bit, signalling to the receiver that it is safe to // free the block's memory as soon as all slots **prior** to // `observed_tail_position` have been filled. - self.ready_slots.fetch_or(RELEASED, Release); + self.header.ready_slots.fetch_or(RELEASED, Release); } /// Mark a slot as ready fn set_ready(&self, slot: usize) { let mask = 1 << slot; - self.ready_slots.fetch_or(mask, Release); + self.header.ready_slots.fetch_or(mask, Release); } /// Returns `true` when all slots have their `ready` bits set. @@ -214,25 +251,31 @@ impl Block { /// single atomic cell. However, this could have negative impact on cache /// behavior as there would be many more mutations to a single slot. 
pub(crate) fn is_final(&self) -> bool { - self.ready_slots.load(Acquire) & READY_MASK == READY_MASK + self.header.ready_slots.load(Acquire) & READY_MASK == READY_MASK } /// Returns the `observed_tail_position` value, if set pub(crate) fn observed_tail_position(&self) -> Option { - if 0 == RELEASED & self.ready_slots.load(Acquire) { + if 0 == RELEASED & self.header.ready_slots.load(Acquire) { None } else { - Some(self.observed_tail_position.with(|ptr| unsafe { *ptr })) + Some( + self.header + .observed_tail_position + .with(|ptr| unsafe { *ptr }), + ) } } /// Loads the next block pub(crate) fn load_next(&self, ordering: Ordering) -> Option>> { - let ret = NonNull::new(self.next.load(ordering)); + let ret = NonNull::new(self.header.next.load(ordering)); debug_assert!(unsafe { - ret.map(|block| block.as_ref().start_index == self.start_index.wrapping_add(BLOCK_CAP)) - .unwrap_or(true) + ret.map(|block| { + block.as_ref().header.start_index == self.header.start_index.wrapping_add(BLOCK_CAP) + }) + .unwrap_or(true) }); ret @@ -260,9 +303,10 @@ impl Block { success: Ordering, failure: Ordering, ) -> Result<(), NonNull>> { - block.as_mut().start_index = self.start_index.wrapping_add(BLOCK_CAP); + block.as_mut().header.start_index = self.header.start_index.wrapping_add(BLOCK_CAP); let next_ptr = self + .header .next .compare_exchange(ptr::null_mut(), block.as_ptr(), success, failure) .unwrap_or_else(|x| x); @@ -291,7 +335,7 @@ impl Block { // Create the new block. It is assumed that the block will become the // next one after `&self`. If this turns out to not be the case, // `start_index` is updated accordingly. - let new_block = Box::new(Block::new(self.start_index + BLOCK_CAP)); + let new_block = Block::new(self.header.start_index + BLOCK_CAP); let mut new_block = unsafe { NonNull::new_unchecked(Box::into_raw(new_block)) }; @@ -308,7 +352,8 @@ impl Block { // `Release` ensures that the newly allocated block is available to // other threads acquiring the next pointer. let next = NonNull::new( - self.next + self.header + .next .compare_exchange(ptr::null_mut(), new_block.as_ptr(), AcqRel, Acquire) .unwrap_or_else(|x| x), ); @@ -360,19 +405,20 @@ fn is_tx_closed(bits: usize) -> bool { } impl Values { - unsafe fn uninitialized() -> Values { - let mut vals = MaybeUninit::uninit(); - + /// Initialize a `Values` struct from a pointer. + /// + /// # Safety + /// + /// The raw pointer must be valid for writing a `Values`. + unsafe fn initialize(_value: NonNull>) { // When fuzzing, `UnsafeCell` needs to be initialized. if_loom! { - let p = vals.as_mut_ptr() as *mut UnsafeCell>; + let p = _value.as_ptr() as *mut UnsafeCell>; for i in 0..BLOCK_CAP { p.add(i) .write(UnsafeCell::new(MaybeUninit::uninit())); } } - - Values(vals.assume_init()) } } @@ -383,3 +429,20 @@ impl ops::Index for Values { self.0.index(index) } } + +#[cfg(all(test, not(loom)))] +#[test] +fn assert_no_stack_overflow() { + // https://github.com/tokio-rs/tokio/issues/5293 + + struct Foo { + _a: [u8; 2_000_000], + } + + assert_eq!( + Layout::new::>>(), + Layout::new::>() + ); + + let _block = Block::::new(0); +} diff --git a/tokio/src/sync/mpsc/list.rs b/tokio/src/sync/mpsc/list.rs index e4eeb454118..10b29575bdb 100644 --- a/tokio/src/sync/mpsc/list.rs +++ b/tokio/src/sync/mpsc/list.rs @@ -44,7 +44,7 @@ pub(crate) enum TryPopResult { pub(crate) fn channel() -> (Tx, Rx) { // Create the initial block shared between the tx and rx halves. 
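A minimal standalone sketch (not part of the patch; `Huge` and `new_huge` are invented names) of the allocation pattern `Block::new` adopts in the block.rs changes above: the value is allocated and initialized directly on the heap through `std::alloc`, so no multi-megabyte temporary is ever constructed on the stack, which is also why `channel()` below can drop its own `Box::new` around the call.

use std::alloc::{alloc, handle_alloc_error, Layout};
use std::ptr::NonNull;

struct Huge {
    payload: [u8; 2_000_000],
}

fn new_huge() -> Box<Huge> {
    let layout = Layout::new::<Huge>();
    unsafe {
        // Allocate uninitialized heap memory sized and aligned for `Huge`.
        let ptr = match NonNull::new(alloc(layout) as *mut Huge) {
            Some(ptr) => ptr,
            None => handle_alloc_error(layout),
        };
        // Initialize in place; `Box::new(Huge { .. })` would first build the
        // 2 MB value on the stack before moving it to the heap.
        ptr.as_ptr().write_bytes(0, 1);
        // SAFETY: allocated by the global allocator with `Huge`'s layout and
        // fully initialized above.
        Box::from_raw(ptr.as_ptr())
    }
}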
- let initial_block = Box::new(Block::new(0)); + let initial_block = Block::new(0); let initial_block_ptr = Box::into_raw(initial_block); let tx = Tx { From 42db755ac19fe0c194da59215408063607a78faf Mon Sep 17 00:00:00 2001 From: John Nunley Date: Fri, 16 Dec 2022 10:53:35 -0800 Subject: [PATCH 013/101] tokio: improve detection of whether a target supports AtomicU64 (#5284) --- tokio/build.rs | 52 +++++++++++++++++++++++++++++++++++++++++ tokio/src/macros/cfg.rs | 30 ++++++++++++------------ 2 files changed, 67 insertions(+), 15 deletions(-) diff --git a/tokio/build.rs b/tokio/build.rs index 93b05092bf9..503c0242fc8 100644 --- a/tokio/build.rs +++ b/tokio/build.rs @@ -24,10 +24,25 @@ const CONST_MUTEX_NEW_PROBE: &str = r#" } "#; +const TARGET_HAS_ATOMIC_PROBE: &str = r#" +{ + #[cfg(target_has_atomic = "ptr")] + let _ = (); +} +"#; + +const TARGET_ATOMIC_U64_PROBE: &str = r#" +{ + use std::sync::atomic::AtomicU64 as _; +} +"#; + fn main() { let mut enable_const_thread_local = false; let mut enable_addr_of = false; + let mut enable_target_has_atomic = false; let mut enable_const_mutex_new = false; + let mut target_needs_atomic_u64_fallback = false; match AutoCfg::new() { Ok(ac) => { @@ -66,6 +81,27 @@ fn main() { } } + // The `target_has_atomic` cfg was stabilized in 1.60. + if ac.probe_rustc_version(1, 61) { + enable_target_has_atomic = true; + } else if ac.probe_rustc_version(1, 60) { + // This compiler claims to be 1.60, but there are some nightly + // compilers that claim to be 1.60 without supporting the + // feature. Explicitly probe to check if code using them + // compiles. + // + // The oldest nightly that supports the feature is 2022-02-11. + if ac.probe_expression(TARGET_HAS_ATOMIC_PROBE) { + enable_target_has_atomic = true; + } + } + + // If we can't tell using `target_has_atomic`, tell if the target + // has `AtomicU64` by trying to use it. + if !enable_target_has_atomic && !ac.probe_expression(TARGET_ATOMIC_U64_PROBE) { + target_needs_atomic_u64_fallback = true; + } + // The `Mutex::new` method was made const in 1.63. if ac.probe_rustc_version(1, 64) { enable_const_mutex_new = true; @@ -109,6 +145,14 @@ fn main() { autocfg::emit("tokio_no_addr_of") } + if !enable_target_has_atomic { + // To disable this feature on compilers that support it, you can + // explicitly pass this flag with the following environment variable: + // + // RUSTFLAGS="--cfg tokio_no_target_has_atomic" + autocfg::emit("tokio_no_target_has_atomic") + } + if !enable_const_mutex_new { // To disable this feature on compilers that support it, you can // explicitly pass this flag with the following environment variable: @@ -117,6 +161,14 @@ fn main() { autocfg::emit("tokio_no_const_mutex_new") } + if target_needs_atomic_u64_fallback { + // To disable this feature on compilers that support it, you can + // explicitly pass this flag with the following environment variable: + // + // RUSTFLAGS="--cfg tokio_no_atomic_u64" + autocfg::emit("tokio_no_atomic_u64") + } + let target = ::std::env::var("TARGET").unwrap_or_default(); // We emit cfgs instead of using `target_family = "wasm"` that requires Rust 1.54. diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 2eea344b516..1c66d24147a 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -461,14 +461,14 @@ macro_rules! cfg_not_coop { macro_rules! 
cfg_has_atomic_u64 { ($($item:item)*) => { $( - #[cfg(not(any( - target_arch = "arm", - target_arch = "mips", - target_arch = "powerpc", - target_arch = "riscv32", - tokio_wasm, - tokio_no_atomic_u64, - )))] + #[cfg_attr( + not(tokio_no_target_has_atomic), + cfg(all(target_has_atomic = "64", not(tokio_no_atomic_u64)) + ))] + #[cfg_attr( + tokio_no_target_has_atomic, + cfg(not(tokio_no_atomic_u64)) + )] $item )* } @@ -477,14 +477,14 @@ macro_rules! cfg_has_atomic_u64 { macro_rules! cfg_not_has_atomic_u64 { ($($item:item)*) => { $( - #[cfg(any( - target_arch = "arm", - target_arch = "mips", - target_arch = "powerpc", - target_arch = "riscv32", - tokio_wasm, - tokio_no_atomic_u64, + #[cfg_attr( + not(tokio_no_target_has_atomic), + cfg(any(not(target_has_atomic = "64"), tokio_no_atomic_u64) ))] + #[cfg_attr( + tokio_no_target_has_atomic, + cfg(tokio_no_atomic_u64) + )] $item )* } From e14ca72e68fbfa04f12408ed916bf5f857dfa232 Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Sat, 17 Dec 2022 06:02:18 -0600 Subject: [PATCH 014/101] test-util: don't auto-advance time when a `spawn_blocking` task is running (#5115) --- tokio/src/runtime/blocking/mod.rs | 2 - tokio/src/runtime/blocking/pool.rs | 9 +-- tokio/src/runtime/blocking/schedule.rs | 49 ++++++++++++-- tokio/src/runtime/tests/loom_blocking.rs | 21 ++++++ tokio/src/runtime/tests/loom_queue.rs | 2 +- tokio/src/runtime/tests/mod.rs | 20 +++++- tokio/src/runtime/tests/task.rs | 2 +- tokio/src/runtime/time/mod.rs | 2 +- tokio/src/time/clock.rs | 19 +++++- tokio/tests/task_blocking.rs | 83 +++++++++++++++++++++++- 10 files changed, 190 insertions(+), 19 deletions(-) diff --git a/tokio/src/runtime/blocking/mod.rs b/tokio/src/runtime/blocking/mod.rs index 88bdcfd6421..c42924be77d 100644 --- a/tokio/src/runtime/blocking/mod.rs +++ b/tokio/src/runtime/blocking/mod.rs @@ -17,8 +17,6 @@ cfg_trace! 
{ mod schedule; mod shutdown; mod task; -#[cfg(all(test, not(tokio_wasm)))] -pub(crate) use schedule::NoopSchedule; pub(crate) use task::BlockingTask; use crate::runtime::Builder; diff --git a/tokio/src/runtime/blocking/pool.rs b/tokio/src/runtime/blocking/pool.rs index 9c536141996..e9f6b66e0fc 100644 --- a/tokio/src/runtime/blocking/pool.rs +++ b/tokio/src/runtime/blocking/pool.rs @@ -2,7 +2,7 @@ use crate::loom::sync::{Arc, Condvar, Mutex}; use crate::loom::thread; -use crate::runtime::blocking::schedule::NoopSchedule; +use crate::runtime::blocking::schedule::BlockingSchedule; use crate::runtime::blocking::{shutdown, BlockingTask}; use crate::runtime::builder::ThreadNameFn; use crate::runtime::task::{self, JoinHandle}; @@ -120,7 +120,7 @@ struct Shared { } pub(crate) struct Task { - task: task::UnownedTask, + task: task::UnownedTask, mandatory: Mandatory, } @@ -151,7 +151,7 @@ impl From for io::Error { } impl Task { - pub(crate) fn new(task: task::UnownedTask, mandatory: Mandatory) -> Task { + pub(crate) fn new(task: task::UnownedTask, mandatory: Mandatory) -> Task { Task { task, mandatory } } @@ -379,7 +379,8 @@ impl Spawner { #[cfg(not(all(tokio_unstable, feature = "tracing")))] let _ = name; - let (task, handle) = task::unowned(fut, NoopSchedule, id); + let (task, handle) = task::unowned(fut, BlockingSchedule::new(rt), id); + let spawned = self.spawn_task(Task::new(task, is_mandatory), rt); (handle, spawned) } diff --git a/tokio/src/runtime/blocking/schedule.rs b/tokio/src/runtime/blocking/schedule.rs index 54252241d94..edf775be8be 100644 --- a/tokio/src/runtime/blocking/schedule.rs +++ b/tokio/src/runtime/blocking/schedule.rs @@ -1,15 +1,52 @@ +#[cfg(feature = "test-util")] +use crate::runtime::scheduler; use crate::runtime::task::{self, Task}; +use crate::runtime::Handle; -/// `task::Schedule` implementation that does nothing. This is unique to the -/// blocking scheduler as tasks scheduled are not really futures but blocking -/// operations. +/// `task::Schedule` implementation that does nothing (except some bookkeeping +/// in test-util builds). This is unique to the blocking scheduler as tasks +/// scheduled are not really futures but blocking operations. /// /// We avoid storing the task by forgetting it in `bind` and re-materializing it -/// in `release. -pub(crate) struct NoopSchedule; +/// in `release`. 
+pub(crate) struct BlockingSchedule { + #[cfg(feature = "test-util")] + handle: Handle, +} + +impl BlockingSchedule { + #[cfg_attr(not(feature = "test-util"), allow(unused_variables))] + pub(crate) fn new(handle: &Handle) -> Self { + #[cfg(feature = "test-util")] + { + match &handle.inner { + scheduler::Handle::CurrentThread(handle) => { + handle.driver.clock.inhibit_auto_advance(); + } + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + scheduler::Handle::MultiThread(_) => {} + } + } + BlockingSchedule { + #[cfg(feature = "test-util")] + handle: handle.clone(), + } + } +} -impl task::Schedule for NoopSchedule { +impl task::Schedule for BlockingSchedule { fn release(&self, _task: &Task) -> Option> { + #[cfg(feature = "test-util")] + { + match &self.handle.inner { + scheduler::Handle::CurrentThread(handle) => { + handle.driver.clock.allow_auto_advance(); + handle.driver.unpark(); + } + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + scheduler::Handle::MultiThread(_) => {} + } + } None } diff --git a/tokio/src/runtime/tests/loom_blocking.rs b/tokio/src/runtime/tests/loom_blocking.rs index 89de85e4362..5c4aeae39c5 100644 --- a/tokio/src/runtime/tests/loom_blocking.rs +++ b/tokio/src/runtime/tests/loom_blocking.rs @@ -73,6 +73,27 @@ fn spawn_mandatory_blocking_should_run_even_when_shutting_down_from_other_thread }); } +#[test] +fn spawn_blocking_when_paused() { + use std::time::Duration; + loom::model(|| { + let rt = crate::runtime::Builder::new_current_thread() + .enable_time() + .start_paused(true) + .build() + .unwrap(); + let handle = rt.handle(); + let _enter = handle.enter(); + let a = crate::task::spawn_blocking(|| {}); + let b = crate::task::spawn_blocking(|| {}); + rt.block_on(crate::time::timeout(Duration::from_millis(1), async move { + a.await.expect("blocking task should finish"); + b.await.expect("blocking task should finish"); + })) + .expect("timeout should not trigger"); + }); +} + fn mk_runtime(num_threads: usize) -> Runtime { runtime::Builder::new_multi_thread() .worker_threads(num_threads) diff --git a/tokio/src/runtime/tests/loom_queue.rs b/tokio/src/runtime/tests/loom_queue.rs index 8d4e1d384e2..fc93bf3e4a2 100644 --- a/tokio/src/runtime/tests/loom_queue.rs +++ b/tokio/src/runtime/tests/loom_queue.rs @@ -1,6 +1,6 @@ -use crate::runtime::blocking::NoopSchedule; use crate::runtime::scheduler::multi_thread::queue; use crate::runtime::task::Inject; +use crate::runtime::tests::NoopSchedule; use crate::runtime::MetricsBatch; use loom::thread; diff --git a/tokio/src/runtime/tests/mod.rs b/tokio/src/runtime/tests/mod.rs index 1c67dfefb32..4e7c2453f25 100644 --- a/tokio/src/runtime/tests/mod.rs +++ b/tokio/src/runtime/tests/mod.rs @@ -2,11 +2,29 @@ // other code when running loom tests. #![cfg_attr(loom, warn(dead_code, unreachable_pub))] +use self::noop_scheduler::NoopSchedule; use self::unowned_wrapper::unowned; +mod noop_scheduler { + use crate::runtime::task::{self, Task}; + + /// `task::Schedule` implementation that does nothing, for testing. 
+ pub(crate) struct NoopSchedule; + + impl task::Schedule for NoopSchedule { + fn release(&self, _task: &Task) -> Option> { + None + } + + fn schedule(&self, _task: task::Notified) { + unreachable!(); + } + } +} + mod unowned_wrapper { - use crate::runtime::blocking::NoopSchedule; use crate::runtime::task::{Id, JoinHandle, Notified}; + use crate::runtime::tests::NoopSchedule; #[cfg(all(tokio_unstable, feature = "tracing"))] pub(crate) fn unowned(task: T) -> (Notified, JoinHandle) diff --git a/tokio/src/runtime/tests/task.rs b/tokio/src/runtime/tests/task.rs index 173e5b0b23f..a79c0f50d15 100644 --- a/tokio/src/runtime/tests/task.rs +++ b/tokio/src/runtime/tests/task.rs @@ -1,5 +1,5 @@ -use crate::runtime::blocking::NoopSchedule; use crate::runtime::task::{self, unowned, Id, JoinHandle, OwnedTasks, Schedule, Task}; +use crate::runtime::tests::NoopSchedule; use crate::util::TryLock; use std::collections::VecDeque; diff --git a/tokio/src/runtime/time/mod.rs b/tokio/src/runtime/time/mod.rs index 240f8f16e6d..f81cab8cc35 100644 --- a/tokio/src/runtime/time/mod.rs +++ b/tokio/src/runtime/time/mod.rs @@ -222,7 +222,7 @@ impl Driver { let handle = rt_handle.time(); let clock = &handle.time_source.clock; - if clock.is_paused() { + if clock.can_auto_advance() { self.park.park_timeout(rt_handle, Duration::from_secs(0)); // If the time driver was woken, then the park completed diff --git a/tokio/src/time/clock.rs b/tokio/src/time/clock.rs index 0343c4f4cf0..cd11a67527f 100644 --- a/tokio/src/time/clock.rs +++ b/tokio/src/time/clock.rs @@ -65,6 +65,9 @@ cfg_test_util! { /// Instant at which the clock was last unfrozen. unfrozen: Option, + + /// Number of `inhibit_auto_advance` calls still in effect. + auto_advance_inhibit_count: usize, } /// Pauses time. @@ -187,6 +190,7 @@ cfg_test_util! { enable_pausing, base: now, unfrozen: Some(now), + auto_advance_inhibit_count: 0, })), }; @@ -212,9 +216,20 @@ cfg_test_util! { inner.unfrozen = None; } - pub(crate) fn is_paused(&self) -> bool { + /// Temporarily stop auto-advancing the clock (see `tokio::time::pause`). + pub(crate) fn inhibit_auto_advance(&self) { + let mut inner = self.inner.lock(); + inner.auto_advance_inhibit_count += 1; + } + + pub(crate) fn allow_auto_advance(&self) { + let mut inner = self.inner.lock(); + inner.auto_advance_inhibit_count -= 1; + } + + pub(crate) fn can_auto_advance(&self) -> bool { let inner = self.inner.lock(); - inner.unfrozen.is_none() + inner.unfrozen.is_none() && inner.auto_advance_inhibit_count == 0 } #[track_caller] diff --git a/tokio/tests/task_blocking.rs b/tokio/tests/task_blocking.rs index e5879332d0e..2999758ff36 100644 --- a/tokio/tests/task_blocking.rs +++ b/tokio/tests/task_blocking.rs @@ -1,7 +1,7 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads -use tokio::{runtime, task}; +use tokio::{runtime, task, time}; use tokio_test::assert_ok; use std::thread; @@ -226,3 +226,84 @@ fn coop_disabled_in_block_in_place_in_block_on() { done_rx.recv().unwrap().unwrap(); } + +#[cfg(feature = "test-util")] +#[tokio::test(start_paused = true)] +async fn blocking_when_paused() { + // Do not auto-advance time when we have started a blocking task that has + // not yet finished. 
+ time::timeout( + Duration::from_secs(3), + task::spawn_blocking(|| thread::sleep(Duration::from_millis(1))), + ) + .await + .expect("timeout should not trigger") + .expect("blocking task should finish"); + + // Really: Do not auto-advance time, even if the timeout is short and the + // blocking task runs for longer than that. It doesn't matter: Tokio time + // is paused; system time is not. + time::timeout( + Duration::from_millis(1), + task::spawn_blocking(|| thread::sleep(Duration::from_millis(50))), + ) + .await + .expect("timeout should not trigger") + .expect("blocking task should finish"); +} + +#[cfg(feature = "test-util")] +#[tokio::test(start_paused = true)] +async fn blocking_task_wakes_paused_runtime() { + let t0 = std::time::Instant::now(); + time::timeout( + Duration::from_secs(15), + task::spawn_blocking(|| thread::sleep(Duration::from_millis(1))), + ) + .await + .expect("timeout should not trigger") + .expect("blocking task should finish"); + assert!( + t0.elapsed() < Duration::from_secs(10), + "completing a spawn_blocking should wake the scheduler if it's parked while time is paused" + ); +} + +#[cfg(feature = "test-util")] +#[tokio::test(start_paused = true)] +async fn unawaited_blocking_task_wakes_paused_runtime() { + let t0 = std::time::Instant::now(); + + // When this task finishes, time should auto-advance, even though the + // JoinHandle has not been awaited yet. + let a = task::spawn_blocking(|| { + thread::sleep(Duration::from_millis(1)); + }); + + crate::time::sleep(Duration::from_secs(15)).await; + a.await.expect("blocking task should finish"); + assert!( + t0.elapsed() < Duration::from_secs(10), + "completing a spawn_blocking should wake the scheduler if it's parked while time is paused" + ); +} + +#[cfg(feature = "test-util")] +#[tokio::test(start_paused = true)] +async fn panicking_blocking_task_wakes_paused_runtime() { + let t0 = std::time::Instant::now(); + let result = time::timeout( + Duration::from_secs(15), + task::spawn_blocking(|| { + thread::sleep(Duration::from_millis(1)); + panic!("blocking task panicked"); + }), + ) + .await + .expect("timeout should not trigger"); + assert!(result.is_err(), "blocking task should have panicked"); + assert!( + t0.elapsed() < Duration::from_secs(10), + "completing a spawn_blocking should wake the scheduler if it's parked while time is paused" + ); +} From 6b3727d5804bbe08f7e5230949ee809732cf1010 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 17 Dec 2022 19:06:30 +0100 Subject: [PATCH 015/101] metrics: make `num_idle_blocking_threads` test less flaky (#5302) --- tokio/tests/rt_metrics.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index 2a9f998f082..4b98d234c41 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -31,6 +31,19 @@ fn num_idle_blocking_threads() { rt.block_on(async { time::sleep(Duration::from_millis(5)).await; }); + + // We need to wait until the blocking thread has become idle. Usually 5ms is + // enough for this to happen, but not always. When it isn't enough, sleep + // for another second. We don't always wait for a whole second since we want + // the test suite to finish quickly. + // + // Note that the timeout for idle threads to be killed is 10 seconds. 
+ if 0 == rt.metrics().num_idle_blocking_threads() { + rt.block_on(async { + time::sleep(Duration::from_secs(1)).await; + }); + } + assert_eq!(1, rt.metrics().num_idle_blocking_threads()); } From d9e0f6611351be6ec02fa95bfcd9fef710714eaf Mon Sep 17 00:00:00 2001 From: Abutalib Aghayev Date: Sat, 17 Dec 2022 16:34:12 -0500 Subject: [PATCH 016/101] task: rename `State::has_join_waker` to `State::is_join_waker_set` (#5248) --- tokio/src/runtime/task/harness.rs | 47 ++++++++++++++----------------- tokio/src/runtime/task/state.rs | 8 +++--- 2 files changed, 25 insertions(+), 30 deletions(-) diff --git a/tokio/src/runtime/task/harness.rs b/tokio/src/runtime/task/harness.rs index a9589375218..8e3c3d14fa0 100644 --- a/tokio/src/runtime/task/harness.rs +++ b/tokio/src/runtime/task/harness.rs @@ -315,9 +315,10 @@ where // this task. It is our responsibility to drop the // output. self.core().drop_future_or_output(); - } else if snapshot.has_join_waker() { - // Notify the join handle. The previous transition obtains the - // lock on the waker cell. + } else if snapshot.is_join_waker_set() { + // Notify the waker. Reading the waker field is safe per rule 4 + // in task/mod.rs, since the JOIN_WAKER bit is set and the call + // to transition_to_complete() above set the COMPLETE bit. self.trailer().wake_join(); } })); @@ -367,36 +368,30 @@ fn can_read_output(header: &Header, trailer: &Trailer, waker: &Waker) -> bool { debug_assert!(snapshot.is_join_interested()); if !snapshot.is_complete() { - // The waker must be stored in the task struct. - let res = if snapshot.has_join_waker() { - // There already is a waker stored in the struct. If it matches - // the provided waker, then there is no further work to do. - // Otherwise, the waker must be swapped. - let will_wake = unsafe { - // Safety: when `JOIN_INTEREST` is set, only `JOIN_HANDLE` - // may mutate the `waker` field. - trailer.will_wake(waker) - }; - - if will_wake { - // The task is not complete **and** the waker is up to date, - // there is nothing further that needs to be done. + // If the task is not complete, try storing the provided waker in the + // task's waker field. + + let res = if snapshot.is_join_waker_set() { + // If JOIN_WAKER is set, then JoinHandle has previously stored a + // waker in the waker field per step (iii) of rule 5 in task/mod.rs. + + // Optimization: if the stored waker and the provided waker wake the + // same task, then return without touching the waker field. (Reading + // the waker field below is safe per rule 3 in task/mod.rs.) + if unsafe { trailer.will_wake(waker) } { return false; } - // Unset the `JOIN_WAKER` to gain mutable access to the `waker` - // field then update the field with the new join worker. - // - // This requires two atomic operations, unsetting the bit and - // then resetting it. If the task transitions to complete - // concurrently to either one of those operations, then setting - // the join waker fails and we proceed to reading the task - // output. + // Otherwise swap the stored waker with the provided waker by + // following the rule 5 in task/mod.rs. header .state .unset_waker() .and_then(|snapshot| set_join_waker(header, trailer, waker.clone(), snapshot)) } else { + // If JOIN_WAKER is unset, then JoinHandle has mutable access to the + // waker field per rule 2 in task/mod.rs; therefore, skip step (i) + // of rule 5 and try to store the provided waker in the waker field. 
set_join_waker(header, trailer, waker.clone(), snapshot) }; @@ -417,7 +412,7 @@ fn set_join_waker( snapshot: Snapshot, ) -> Result { assert!(snapshot.is_join_interested()); - assert!(!snapshot.has_join_waker()); + assert!(!snapshot.is_join_waker_set()); // Safety: Only the `JoinHandle` may set the `waker` field. When // `JOIN_INTEREST` is **not** set, nothing else will touch the field. diff --git a/tokio/src/runtime/task/state.rs b/tokio/src/runtime/task/state.rs index c2d5b28eac2..77283125e61 100644 --- a/tokio/src/runtime/task/state.rs +++ b/tokio/src/runtime/task/state.rs @@ -378,7 +378,7 @@ impl State { pub(super) fn set_join_waker(&self) -> UpdateResult { self.fetch_update(|curr| { assert!(curr.is_join_interested()); - assert!(!curr.has_join_waker()); + assert!(!curr.is_join_waker_set()); if curr.is_complete() { return None; @@ -398,7 +398,7 @@ impl State { pub(super) fn unset_waker(&self) -> UpdateResult { self.fetch_update(|curr| { assert!(curr.is_join_interested()); - assert!(curr.has_join_waker()); + assert!(curr.is_join_waker_set()); if curr.is_complete() { return None; @@ -546,7 +546,7 @@ impl Snapshot { self.0 &= !JOIN_INTEREST } - pub(super) fn has_join_waker(self) -> bool { + pub(super) fn is_join_waker_set(self) -> bool { self.0 & JOIN_WAKER == JOIN_WAKER } @@ -588,7 +588,7 @@ impl fmt::Debug for Snapshot { .field("is_notified", &self.is_notified()) .field("is_cancelled", &self.is_cancelled()) .field("is_join_interested", &self.is_join_interested()) - .field("has_join_waker", &self.has_join_waker()) + .field("is_join_waker_set", &self.is_join_waker_set()) .field("ref_count", &self.ref_count()) .finish() } From b9ae7e6659f31508fbe81e29508984d0c6f2d0b4 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 18 Dec 2022 09:42:59 +0100 Subject: [PATCH 017/101] signal: remove redundant Pin around globals (#5303) --- tokio/src/signal/registry.rs | 5 ++--- tokio/src/signal/unix.rs | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/tokio/src/signal/registry.rs b/tokio/src/signal/registry.rs index e1b3d108767..48e98c832fa 100644 --- a/tokio/src/signal/registry.rs +++ b/tokio/src/signal/registry.rs @@ -5,7 +5,6 @@ use crate::sync::watch; use crate::util::once_cell::OnceCell; use std::ops; -use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; pub(crate) type EventId = usize; @@ -162,14 +161,14 @@ where } } -pub(crate) fn globals() -> Pin<&'static Globals> +pub(crate) fn globals() -> &'static Globals where OsExtraData: 'static + Send + Sync + Init, OsStorage: 'static + Send + Sync + Init, { static GLOBALS: OnceCell = OnceCell::new(); - Pin::new(GLOBALS.get(globals_init)) + GLOBALS.get(globals_init) } #[cfg(all(test, not(loom)))] diff --git a/tokio/src/signal/unix.rs b/tokio/src/signal/unix.rs index 0e1329ed54f..e5345fdfccc 100644 --- a/tokio/src/signal/unix.rs +++ b/tokio/src/signal/unix.rs @@ -14,7 +14,6 @@ use crate::sync::watch; use mio::net::UnixStream; use std::io::{self, Error, ErrorKind, Write}; -use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Once; use std::task::{Context, Poll}; @@ -240,7 +239,7 @@ impl Default for SignalInfo { /// 2. Wake up the driver by writing a byte to a pipe /// /// Those two operations should both be async-signal safe. 
-fn action(globals: Pin<&'static Globals>, signal: libc::c_int) { +fn action(globals: &'static Globals, signal: libc::c_int) { globals.record_event(signal as EventId); // Send a wakeup, ignore any errors (anything reasonably possible is From 682e93df93292a47e7d6d04bd9e4922c5bbae5b4 Mon Sep 17 00:00:00 2001 From: Pure White Date: Wed, 21 Dec 2022 22:43:35 +0800 Subject: [PATCH 018/101] rt: read environment variable for worker thread count (#4250) --- tokio/src/lib.rs | 15 +++++++++------ tokio/src/loom/std/mod.rs | 22 +++++++++++++++++++++- tokio/src/runtime/builder.rs | 4 ++++ 3 files changed, 34 insertions(+), 7 deletions(-) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index e745fe9995f..05767d017bc 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -174,12 +174,15 @@ //! swapping the currently running task on each thread. However, this kind of //! swapping can only happen at `.await` points, so code that spends a long time //! without reaching an `.await` will prevent other tasks from running. To -//! combat this, Tokio provides two kinds of threads: Core threads and blocking -//! threads. The core threads are where all asynchronous code runs, and Tokio -//! will by default spawn one for each CPU core. The blocking threads are -//! spawned on demand, can be used to run blocking code that would otherwise -//! block other tasks from running and are kept alive when not used for a certain -//! amount of time which can be configured with [`thread_keep_alive`]. +//! combat this, Tokio provides two kinds of threads: Core threads and blocking threads. +//! +//! The core threads are where all asynchronous code runs, and Tokio will by default +//! spawn one for each CPU core. You can use the environment variable `TOKIO_WORKER_THREADS` +//! to override the default value. +//! +//! The blocking threads are spawned on demand, can be used to run blocking code +//! that would otherwise block other tasks from running and are kept alive when +//! not used for a certain amount of time which can be configured with [`thread_keep_alive`]. //! Since it is not possible for Tokio to swap out blocking tasks, like it //! can do with asynchronous code, the upper limit on the number of blocking //! threads is very large. These limits can be configured on the [`Builder`]. diff --git a/tokio/src/loom/std/mod.rs b/tokio/src/loom/std/mod.rs index f0fcd46d23d..6bd1ad93dcf 100644 --- a/tokio/src/loom/std/mod.rs +++ b/tokio/src/loom/std/mod.rs @@ -81,7 +81,27 @@ pub(crate) mod sync { pub(crate) mod sys { #[cfg(feature = "rt-multi-thread")] pub(crate) fn num_cpus() -> usize { - usize::max(1, num_cpus::get()) + const ENV_WORKER_THREADS: &str = "TOKIO_WORKER_THREADS"; + + match std::env::var(ENV_WORKER_THREADS) { + Ok(s) => { + let n = s.parse().unwrap_or_else(|e| { + panic!( + "\"{}\" must be usize, error: {}, value: {}", + ENV_WORKER_THREADS, e, s + ) + }); + assert!(n > 0, "\"{}\" cannot be set to 0", ENV_WORKER_THREADS); + n + } + Err(std::env::VarError::NotPresent) => usize::max(1, num_cpus::get()), + Err(std::env::VarError::NotUnicode(e)) => { + panic!( + "\"{}\" must be valid unicode, error: {:?}", + ENV_WORKER_THREADS, e + ) + } + } } #[cfg(not(feature = "rt-multi-thread"))] diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 64cf403aaf7..ea0df2e3b4c 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -182,6 +182,7 @@ cfg_unstable! 
{ pub(crate) type ThreadNameFn = std::sync::Arc String + Send + Sync + 'static>; +#[derive(Clone, Copy)] pub(crate) enum Kind { CurrentThread, #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] @@ -237,6 +238,7 @@ impl Builder { // The clock starts not-paused start_paused: false, + // Read from environment variable first in multi-threaded mode. // Default to lazy auto-detection (one thread per CPU core) worker_threads: None, @@ -304,6 +306,8 @@ impl Builder { /// This can be any number above 0 though it is advised to keep this value /// on the smaller side. /// + /// This will override the value read from environment variable `TOKIO_WORKER_THREADS`. + /// /// # Default /// /// The default value is the number of cores available to the system. From 519afd4458967eaf7009b0d0fc19cfb36de6f617 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 28 Dec 2022 00:06:56 +0900 Subject: [PATCH 019/101] ci: remove uses of unmaintained actions-rs actions (#5316) - Use dtolnay/rust-toolchain instead of actions-rs/toolchain - Use cargo/cross directly instead of actions-rs/cargo - Use rustsec/audit-check instead of actions-rs/audit-check --- .github/workflows/audit.yml | 7 +-- .github/workflows/ci.yml | 87 +++++++++++-------------------- .github/workflows/loom.yml | 3 +- .github/workflows/pr-audit.yml | 13 ++--- .github/workflows/stress-test.yml | 3 +- 5 files changed, 38 insertions(+), 75 deletions(-) diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index fa0e8d72943..cb124aabdb6 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -15,15 +15,16 @@ permissions: jobs: security-audit: permissions: - checks: write # for actions-rs/audit-check to create check + checks: write # for rustsec/audit-check to create check contents: read # for actions/checkout to fetch code - issues: write # for actions-rs/audit-check to create issues + issues: write # for rustsec/audit-check to create issues runs-on: ubuntu-latest if: "!contains(github.event.head_commit.message, 'ci skip')" steps: - uses: actions/checkout@v3 - name: Audit Check - uses: actions-rs/audit-check@v1 + # https://github.com/rustsec/audit-check/issues/2 + uses: rustsec/audit-check@master with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 38f65080442..cd20b491ccb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -73,10 +73,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - name: Install Rust run: rustup update stable - uses: Swatinem/rust-cache@v1 @@ -123,10 +122,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: Enable parking_lot send_guard feature # Inserts the line "plsend = ["parking_lot/send_guard"]" right after [features] @@ -140,10 +138,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: Install Valgrind @@ -179,10 +176,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: 
dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 # Run `tokio` with "unstable" cfg flag. - name: test tokio full --cfg unstable @@ -200,11 +196,10 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} components: miri - override: true - uses: Swatinem/rust-cache@v1 - name: miri # Many of tests in tokio/tests and doctests use #[tokio::test] or @@ -224,10 +219,9 @@ jobs: # Required to resolve symbols in sanitizer output run: sudo apt-get install -y llvm - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} - override: true - uses: Swatinem/rust-cache@v1 - name: asan run: cargo test --workspace --all-features --target x86_64-unknown-linux-gnu --tests -- --test-threads 1 @@ -250,16 +244,13 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} target: ${{ matrix.target }} - override: true - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: check - args: --workspace --all-features --target ${{ matrix.target }} + - name: Install cross + uses: taiki-e/install-action@cross + - run: cross check --workspace --all-features --target ${{ matrix.target }} env: RUSTFLAGS: --cfg tokio_unstable -Dwarnings @@ -280,28 +271,21 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust stable - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} target: ${{ matrix.target }} - override: true + - name: Install cross + uses: taiki-e/install-action@cross # First run with all features (including parking_lot) - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: -p tokio --all-features --target ${{ matrix.target }} --tests + - run: cross test -p tokio --all-features --target ${{ matrix.target }} --tests env: RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_ipv6 ${{ matrix.rustflags }} # Now run without parking_lot - name: Remove `parking_lot` from `full` feature run: sed -i '0,/parking_lot/{/parking_lot/d;}' tokio/Cargo.toml - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - # The `tokio_no_parking_lot` cfg is here to ensure the `sed` above does not silently break. - args: -p tokio --features full,test-util --target ${{ matrix.target }} --tests + # The `tokio_no_parking_lot` cfg is here to ensure the `sed` above does not silently break. 
+ - run: cross test -p tokio --features full,test-util --target ${{ matrix.target }} --tests env: RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_ipv6 --cfg tokio_no_parking_lot ${{ matrix.rustflags }} @@ -312,11 +296,10 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} components: rust-src - override: true # Install linker and libraries for i686-unknown-linux-gnu - uses: taiki-e/setup-cross-toolchain-action@v1 with: @@ -331,11 +314,10 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} target: ${{ matrix.target }} - override: true - uses: Swatinem/rust-cache@v1 - name: Install cargo-hack uses: taiki-e/install-action@cargo-hack @@ -353,10 +335,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_min }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_min }} - override: true - uses: Swatinem/rust-cache@v1 # First compile just the main tokio crate with minrust and newest version # of all dependencies, then pin once_cell and compile the rest of the @@ -378,10 +359,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} - override: true - uses: Swatinem/rust-cache@v1 - name: Install cargo-hack uses: taiki-e/install-action@cargo-hack @@ -410,10 +390,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true components: rustfmt - uses: Swatinem/rust-cache@v1 # Check fmt @@ -431,10 +410,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_clippy }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_clippy }} - override: true components: clippy - uses: Swatinem/rust-cache@v1 # Run clippy @@ -447,10 +425,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} - override: true - uses: Swatinem/rust-cache@v1 - name: "doc --lib --all-features" run: cargo doc --lib --no-deps --all-features --document-private-items @@ -464,10 +441,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: build --cfg loom run: cargo test --no-run --lib --features full @@ -499,10 +475,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: Test hyper run: | @@ -527,11 +502,10 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_nightly }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} target: x86_64-fortanix-unknown-sgx - override: true - uses: 
Swatinem/rust-cache@v1 # NOTE: Currently the only test we can run is to build tokio with rt and sync features. - name: build tokio @@ -544,10 +518,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: Install wasm-pack run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh @@ -561,10 +534,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 # Install dependencies @@ -614,12 +586,11 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust nightly-2022-07-25 - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: # `check-external-types` requires a specific Rust nightly version. See # the README for details: https://github.com/awslabs/cargo-check-external-types toolchain: nightly-2022-07-25 - override: true - uses: Swatinem/rust-cache@v1 - name: check-external-types run: | diff --git a/.github/workflows/loom.yml b/.github/workflows/loom.yml index bd92a1e45a4..51dc3285471 100644 --- a/.github/workflows/loom.yml +++ b/.github/workflows/loom.yml @@ -34,10 +34,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: loom ${{ matrix.scope }} run: cargo test --lib --release --features full -- --nocapture $SCOPE diff --git a/.github/workflows/pr-audit.yml b/.github/workflows/pr-audit.yml index 408bc49edf1..d6f798292fe 100644 --- a/.github/workflows/pr-audit.yml +++ b/.github/workflows/pr-audit.yml @@ -19,17 +19,10 @@ jobs: - uses: actions/checkout@v3 - name: Install cargo-audit - uses: actions-rs/cargo@v1 - with: - command: install - args: cargo-audit + run: cargo install cargo-audit - name: Generate lockfile - uses: actions-rs/cargo@v1 - with: - command: generate-lockfile + run: cargo generate-lockfile - name: Audit dependencies - uses: actions-rs/cargo@v1 - with: - command: audit + run: cargo audit diff --git a/.github/workflows/stress-test.yml b/.github/workflows/stress-test.yml index 9b93fdb67a5..9aac706b883 100644 --- a/.github/workflows/stress-test.yml +++ b/.github/workflows/stress-test.yml @@ -25,10 +25,9 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust ${{ env.rust_stable }} - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - override: true - uses: Swatinem/rust-cache@v1 - name: Install Valgrind uses: taiki-e/install-action@valgrind From 8d58dc85b5fbd7509fb4b44874043565e69f9dee Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 27 Dec 2022 16:24:50 +0100 Subject: [PATCH 020/101] sync: document that there is no spsc and spmc channel (#5306) --- tokio/src/sync/broadcast.rs | 3 +++ tokio/src/sync/mod.rs | 8 ++++++++ tokio/src/sync/mpsc/mod.rs | 6 +++++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 452f3b1102b..ede990b046e 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -18,6 +18,9 @@ //! returned [`Receiver`] will receive values sent **after** the call to //! `subscribe`. //! 
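A small usage sketch (illustration only, not taken from these patches) of the single-producer multi-consumer pattern that the documentation added below describes: one `broadcast::Sender`, several `Receiver`s, and each receiver sees every value sent after it subscribed.

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    // One producer, two consumers.
    let (tx, mut rx1) = broadcast::channel(16);
    let mut rx2 = tx.subscribe();

    tx.send(10).unwrap();
    tx.send(20).unwrap();

    // Both receivers observe both values, in order.
    assert_eq!(rx1.recv().await.unwrap(), 10);
    assert_eq!(rx2.recv().await.unwrap(), 10);
    assert_eq!(rx1.recv().await.unwrap(), 20);
    assert_eq!(rx2.recv().await.unwrap(), 20);
}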
+//! This channel is also suitable for the single-producer multi-consumer +//! use-case, where a single sender broadcasts values to many receivers. +//! //! ## Lagging //! //! As sent messages must be retained until **all** [`Receiver`] handles receive diff --git a/tokio/src/sync/mod.rs b/tokio/src/sync/mod.rs index 457e6ab2946..8fba196e381 100644 --- a/tokio/src/sync/mod.rs +++ b/tokio/src/sync/mod.rs @@ -94,6 +94,10 @@ //! producers to a single consumer. This channel is often used to send work to a //! task or to receive the result of many computations. //! +//! This is also the channel you should use if you want to send many messages +//! from a single producer to a single consumer. There is no dedicated spsc +//! channel. +//! //! **Example:** using an mpsc to incrementally stream the results of a series //! of computations. //! @@ -244,6 +248,10 @@ //! This channel tends to be used less often than `oneshot` and `mpsc` but still //! has its use cases. //! +//! This is also the channel you should use if you want to broadcast values from +//! a single producer to many consumers. There is no dedicated spmc broadcast +//! channel. +//! //! Basic usage //! //! ``` diff --git a/tokio/src/sync/mpsc/mod.rs b/tokio/src/sync/mpsc/mod.rs index fff309127ad..33889fad766 100644 --- a/tokio/src/sync/mpsc/mod.rs +++ b/tokio/src/sync/mpsc/mod.rs @@ -21,6 +21,9 @@ //! when additional capacity is available. In other words, the channel provides //! backpressure. //! +//! This channel is also suitable for the single-producer single-consumer +//! use-case. (Unless you only need to send one message, in which case you +//! should use the [oneshot] channel.) //! //! # Disconnection //! @@ -62,7 +65,7 @@ //! in mind, but they can also be generalized to other kinds of channels. In //! general, any channel method that isn't marked async can be called anywhere, //! including outside of the runtime. For example, sending a message on a -//! oneshot channel from outside the runtime is perfectly fine. +//! [oneshot] channel from outside the runtime is perfectly fine. //! //! # Multiple runtimes //! @@ -82,6 +85,7 @@ //! [blocking-recv]: crate::sync::mpsc::Receiver::blocking_recv() //! [`UnboundedSender`]: crate::sync::mpsc::UnboundedSender //! [`UnboundedReceiver`]: crate::sync::mpsc::UnboundedReceiver +//! [oneshot]: crate::sync::oneshot //! [`Handle::block_on`]: crate::runtime::Handle::block_on() //! [std-unbounded]: std::sync::mpsc::channel //! [crossbeam-unbounded]: https://docs.rs/crossbeam/*/crossbeam/channel/fn.unbounded.html From 353e5cabb8740ff90f928e447a54e70f66fff5be Mon Sep 17 00:00:00 2001 From: Lencerf Date: Tue, 27 Dec 2022 07:50:12 -0800 Subject: [PATCH 021/101] process: fix typo in `process::imp::Pipe` comment (#5314) Signed-off-by: Changyuan Lyu --- tokio/src/process/unix/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/process/unix/mod.rs b/tokio/src/process/unix/mod.rs index 0345083dc09..78c792cc765 100644 --- a/tokio/src/process/unix/mod.rs +++ b/tokio/src/process/unix/mod.rs @@ -156,7 +156,7 @@ impl Future for Child { #[derive(Debug)] pub(crate) struct Pipe { - // Actually a pipe and not a File. However, we are reusing `File` to get + // Actually a pipe is not a File. However, we are reusing `File` to get // close on drop. This is a similar trick as `mio`. 
fd: File, } From b75dba690442bafd12b5ceba4c09cab14c5c80f1 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Wed, 28 Dec 2022 02:20:12 +0900 Subject: [PATCH 022/101] ci: update Swatinem/rust-cache action to v2 (#5320) --- .github/workflows/ci.yml | 36 +++++++++++++++---------------- .github/workflows/loom.yml | 2 +- .github/workflows/stress-test.yml | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd20b491ccb..3be5ef92aea 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -78,7 +78,7 @@ jobs: toolchain: ${{ env.rust_stable }} - name: Install Rust run: rustup update stable - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Install cargo-hack uses: taiki-e/install-action@cargo-hack @@ -125,7 +125,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Enable parking_lot send_guard feature # Inserts the line "plsend = ["parking_lot/send_guard"]" right after [features] run: sed -i '/\[features\]/a plsend = ["parking_lot/send_guard"]' tokio/Cargo.toml @@ -141,7 +141,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Install Valgrind uses: taiki-e/install-action@valgrind @@ -179,7 +179,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 # Run `tokio` with "unstable" cfg flag. - name: test tokio full --cfg unstable run: cargo test --all-features @@ -200,7 +200,7 @@ jobs: with: toolchain: ${{ env.rust_nightly }} components: miri - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: miri # Many of tests in tokio/tests and doctests use #[tokio::test] or # #[tokio::main] that calls epoll_create1 that Miri does not support. @@ -222,7 +222,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: asan run: cargo test --workspace --all-features --target x86_64-unknown-linux-gnu --tests -- --test-threads 1 env: @@ -318,7 +318,7 @@ jobs: with: toolchain: ${{ env.rust_nightly }} target: ${{ matrix.target }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Install cargo-hack uses: taiki-e/install-action@cargo-hack - name: check --feature-powerset @@ -338,7 +338,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_min }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 # First compile just the main tokio crate with minrust and newest version # of all dependencies, then pin once_cell and compile the rest of the # crates with the pinned once_cell version. 
@@ -362,7 +362,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Install cargo-hack uses: taiki-e/install-action@cargo-hack - name: "check --all-features -Z minimal-versions" @@ -394,7 +394,7 @@ jobs: with: toolchain: ${{ env.rust_stable }} components: rustfmt - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 # Check fmt - name: "rustfmt --check" # Workaround for rust-lang/cargo#7732 @@ -414,7 +414,7 @@ jobs: with: toolchain: ${{ env.rust_clippy }} components: clippy - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 # Run clippy - name: "clippy --all" run: cargo clippy --all --tests --all-features @@ -428,7 +428,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_nightly }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: "doc --lib --all-features" run: cargo doc --lib --no-deps --all-features --document-private-items env: @@ -444,7 +444,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: build --cfg loom run: cargo test --no-run --lib --features full working-directory: tokio @@ -478,7 +478,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Test hyper run: | set -x @@ -506,7 +506,7 @@ jobs: with: toolchain: ${{ env.rust_nightly }} target: x86_64-fortanix-unknown-sgx - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 # NOTE: Currently the only test we can run is to build tokio with rt and sync features. - name: build tokio run: cargo build --target x86_64-fortanix-unknown-sgx --features rt,sync @@ -521,7 +521,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Install wasm-pack run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - name: test tokio @@ -537,7 +537,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 # Install dependencies - name: Install cargo-hack @@ -591,7 +591,7 @@ jobs: # `check-external-types` requires a specific Rust nightly version. 
See # the README for details: https://github.com/awslabs/cargo-check-external-types toolchain: nightly-2022-07-25 - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: check-external-types run: | set -x diff --git a/.github/workflows/loom.yml b/.github/workflows/loom.yml index 51dc3285471..0c18e0c3aa8 100644 --- a/.github/workflows/loom.yml +++ b/.github/workflows/loom.yml @@ -37,7 +37,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: loom ${{ matrix.scope }} run: cargo test --lib --release --features full -- --nocapture $SCOPE working-directory: tokio diff --git a/.github/workflows/stress-test.yml b/.github/workflows/stress-test.yml index 9aac706b883..f33cc181b3d 100644 --- a/.github/workflows/stress-test.yml +++ b/.github/workflows/stress-test.yml @@ -28,7 +28,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.rust_stable }} - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Install Valgrind uses: taiki-e/install-action@valgrind From 9af2f5ee5931a79684d4afd8897e54aafa39c156 Mon Sep 17 00:00:00 2001 From: Hyeonu Park Date: Wed, 28 Dec 2022 03:40:41 +0900 Subject: [PATCH 023/101] io: optimize shutdown check on I/O operations (#5300) The global flag remains and is used to prevent duplicated shutdowns and new io operations after shutdown. On shutdown, the driver flips the shutdown flag of every pending io operations and wake them to fail with a shutdown error. Fixes: #5227 --- tokio/docs/reactor-refactor.md | 4 +-- tokio/src/runtime/io/mod.rs | 12 +++----- tokio/src/runtime/io/registration.rs | 26 +++++++---------- tokio/src/runtime/io/scheduled_io.rs | 43 ++++++++++++++++------------ 4 files changed, 40 insertions(+), 45 deletions(-) diff --git a/tokio/docs/reactor-refactor.md b/tokio/docs/reactor-refactor.md index 3005afc0168..77e64f4dfd7 100644 --- a/tokio/docs/reactor-refactor.md +++ b/tokio/docs/reactor-refactor.md @@ -188,12 +188,12 @@ readiness, the driver's tick is packed into the atomic `usize`. The `ScheduledIo` readiness `AtomicUsize` is structured as: ``` -| reserved | generation | driver tick | readiness | +| shutdown | generation | driver tick | readiness | |----------+------------+--------------+-----------| | 1 bit | 7 bits + 8 bits + 16 bits | ``` -The `reserved` and `generation` components exist today. +The `shutdown` and `generation` components exist today. The `readiness()` function returns a `ReadyEvent` value. This value includes the `tick` component read with the resource's readiness value. When diff --git a/tokio/src/runtime/io/mod.rs b/tokio/src/runtime/io/mod.rs index 1ddf920f3bb..2e578b6ee6b 100644 --- a/tokio/src/runtime/io/mod.rs +++ b/tokio/src/runtime/io/mod.rs @@ -60,6 +60,7 @@ pub(crate) struct Handle { pub(crate) struct ReadyEvent { tick: u8, pub(crate) ready: Ready, + is_shutdown: bool, } struct IoDispatcher { @@ -147,9 +148,8 @@ impl Driver { if handle.shutdown() { self.resources.for_each(|io| { - // If a task is waiting on the I/O resource, notify it. The task - // will then attempt to use the I/O resource and fail due to the - // driver being shutdown. And shutdown will clear all wakers. + // If a task is waiting on the I/O resource, notify it that the + // runtime is being shutdown. And shutdown will clear all wakers. 
io.shutdown(); }); } @@ -282,16 +282,12 @@ impl Handle { true } - fn is_shutdown(&self) -> bool { - return self.io_dispatch.read().unwrap().is_shutdown; - } - fn allocate(&self) -> io::Result<(slab::Address, slab::Ref)> { let io = self.io_dispatch.read().unwrap(); if io.is_shutdown { return Err(io::Error::new( io::ErrorKind::Other, - "failed to find event loop", + crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR, )); } io.allocator.allocate().ok_or_else(|| { diff --git a/tokio/src/runtime/io/registration.rs b/tokio/src/runtime/io/registration.rs index 7b95f7f0409..140b9240ae4 100644 --- a/tokio/src/runtime/io/registration.rs +++ b/tokio/src/runtime/io/registration.rs @@ -148,7 +148,7 @@ impl Registration { let coop = ready!(crate::runtime::coop::poll_proceed(cx)); let ev = ready!(self.shared.poll_readiness(cx, direction)); - if self.handle().is_shutdown() { + if ev.is_shutdown { return Poll::Ready(Err(gone())); } @@ -217,28 +217,22 @@ impl Drop for Registration { } fn gone() -> io::Error { - io::Error::new(io::ErrorKind::Other, "IO driver has terminated") + io::Error::new( + io::ErrorKind::Other, + crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR, + ) } cfg_io_readiness! { impl Registration { pub(crate) async fn readiness(&self, interest: Interest) -> io::Result { - use std::future::Future; - use std::pin::Pin; + let ev = self.shared.readiness(interest).await; - let fut = self.shared.readiness(interest); - pin!(fut); - - crate::future::poll_fn(|cx| { - if self.handle().is_shutdown() { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::Other, - crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR - ))); - } + if ev.is_shutdown { + return Err(gone()) + } - Pin::new(&mut fut).poll(cx).map(Ok) - }).await + Ok(ev) } pub(crate) async fn async_io(&self, interest: Interest, mut f: impl FnMut() -> io::Result) -> io::Result { diff --git a/tokio/src/runtime/io/scheduled_io.rs b/tokio/src/runtime/io/scheduled_io.rs index 1709091032b..197a4e0e211 100644 --- a/tokio/src/runtime/io/scheduled_io.rs +++ b/tokio/src/runtime/io/scheduled_io.rs @@ -46,9 +46,6 @@ struct Waiters { /// Waker used for AsyncWrite. writer: Option, - - /// True if this ScheduledIo has been killed due to IO driver shutdown. - is_shutdown: bool, } cfg_io_readiness! { @@ -95,7 +92,7 @@ cfg_io_readiness! { // The `ScheduledIo::readiness` (`AtomicUsize`) is packed full of goodness. // -// | reserved | generation | driver tick | readiness | +// | shutdown | generation | driver tick | readiness | // |----------+------------+--------------+-----------| // | 1 bit | 7 bits + 8 bits + 16 bits | @@ -105,6 +102,8 @@ const TICK: bit::Pack = READINESS.then(8); const GENERATION: bit::Pack = TICK.then(7); +const SHUTDOWN: bit::Pack = GENERATION.then(1); + #[test] fn test_generations_assert_same() { assert_eq!(super::GENERATION, GENERATION); @@ -138,9 +137,11 @@ impl ScheduledIo { } /// Invoked when the IO driver is shut down; forces this ScheduledIo into a - /// permanently ready state. + /// permanently shutdown state. pub(super) fn shutdown(&self) { - self.wake0(Ready::ALL, true) + let mask = SHUTDOWN.pack(1, 0); + self.readiness.fetch_or(mask, AcqRel); + self.wake(Ready::ALL); } /// Sets the readiness on this `ScheduledIo` by invoking the given closure on @@ -219,16 +220,10 @@ impl ScheduledIo { /// than 32 wakers to notify, if the stack array fills up, the lock is /// released, the array is cleared, and the iteration continues. 
pub(super) fn wake(&self, ready: Ready) { - self.wake0(ready, false); - } - - fn wake0(&self, ready: Ready, shutdown: bool) { let mut wakers = WakeList::new(); let mut waiters = self.waiters.lock(); - waiters.is_shutdown |= shutdown; - // check for AsyncRead slot if ready.is_readable() { if let Some(waker) = waiters.reader.take() { @@ -283,6 +278,7 @@ impl ScheduledIo { ReadyEvent { tick: TICK.unpack(curr) as u8, ready: interest.mask() & Ready::from_usize(READINESS.unpack(curr)), + is_shutdown: SHUTDOWN.unpack(curr) != 0, } } @@ -299,8 +295,9 @@ impl ScheduledIo { let curr = self.readiness.load(Acquire); let ready = direction.mask() & Ready::from_usize(READINESS.unpack(curr)); + let is_shutdown = SHUTDOWN.unpack(curr) != 0; - if ready.is_empty() { + if ready.is_empty() && !is_shutdown { // Update the task info let mut waiters = self.waiters.lock(); let slot = match direction { @@ -325,10 +322,12 @@ impl ScheduledIo { // taking the waiters lock let curr = self.readiness.load(Acquire); let ready = direction.mask() & Ready::from_usize(READINESS.unpack(curr)); - if waiters.is_shutdown { + let is_shutdown = SHUTDOWN.unpack(curr) != 0; + if is_shutdown { Poll::Ready(ReadyEvent { tick: TICK.unpack(curr) as u8, ready: direction.mask(), + is_shutdown, }) } else if ready.is_empty() { Poll::Pending @@ -336,12 +335,14 @@ impl ScheduledIo { Poll::Ready(ReadyEvent { tick: TICK.unpack(curr) as u8, ready, + is_shutdown, }) } } else { Poll::Ready(ReadyEvent { tick: TICK.unpack(curr) as u8, ready, + is_shutdown, }) } } @@ -433,16 +434,17 @@ cfg_io_readiness! { // Optimistically check existing readiness let curr = scheduled_io.readiness.load(SeqCst); let ready = Ready::from_usize(READINESS.unpack(curr)); + let is_shutdown = SHUTDOWN.unpack(curr) != 0; // Safety: `waiter.interest` never changes let interest = unsafe { (*waiter.get()).interest }; let ready = ready.intersection(interest); - if !ready.is_empty() { + if !ready.is_empty() || is_shutdown { // Currently ready! let tick = TICK.unpack(curr) as u8; *state = State::Done; - return Poll::Ready(ReadyEvent { tick, ready }); + return Poll::Ready(ReadyEvent { tick, ready, is_shutdown }); } // Wasn't ready, take the lock (and check again while locked). @@ -450,18 +452,19 @@ cfg_io_readiness! { let curr = scheduled_io.readiness.load(SeqCst); let mut ready = Ready::from_usize(READINESS.unpack(curr)); + let is_shutdown = SHUTDOWN.unpack(curr) != 0; - if waiters.is_shutdown { + if is_shutdown { ready = Ready::ALL; } let ready = ready.intersection(interest); - if !ready.is_empty() { + if !ready.is_empty() || is_shutdown { // Currently ready! let tick = TICK.unpack(curr) as u8; *state = State::Done; - return Poll::Ready(ReadyEvent { tick, ready }); + return Poll::Ready(ReadyEvent { tick, ready, is_shutdown }); } // Not ready even after locked, insert into list... @@ -514,6 +517,7 @@ cfg_io_readiness! { let w = unsafe { &mut *waiter.get() }; let curr = scheduled_io.readiness.load(Acquire); + let is_shutdown = SHUTDOWN.unpack(curr) != 0; // The returned tick might be newer than the event // which notified our waker. This is ok because the future @@ -528,6 +532,7 @@ cfg_io_readiness! 
{ return Poll::Ready(ReadyEvent { tick, ready, + is_shutdown, }); } } From 4a4f80ca700f65c018aebec7284b8364aa917bfd Mon Sep 17 00:00:00 2001 From: icedrocket <114203630+icedrocket@users.noreply.github.com> Date: Wed, 28 Dec 2022 20:06:46 +0900 Subject: [PATCH 024/101] fs: use chunks in `fs::read_dir` (#5309) --- tokio/src/fs/read_dir.rs | 91 ++++++++++++++++++++++++++++++++-------- 1 file changed, 73 insertions(+), 18 deletions(-) diff --git a/tokio/src/fs/read_dir.rs b/tokio/src/fs/read_dir.rs index 10ad150d70c..9471e8ce809 100644 --- a/tokio/src/fs/read_dir.rs +++ b/tokio/src/fs/read_dir.rs @@ -1,5 +1,6 @@ use crate::fs::asyncify; +use std::collections::VecDeque; use std::ffi::OsString; use std::fs::{FileType, Metadata}; use std::future::Future; @@ -19,6 +20,8 @@ use crate::blocking::spawn_blocking; #[cfg(not(test))] use crate::blocking::JoinHandle; +const CHUNK_SIZE: usize = 32; + /// Returns a stream over the entries within a directory. /// /// This is an async version of [`std::fs::read_dir`](std::fs::read_dir) @@ -29,9 +32,14 @@ use crate::blocking::JoinHandle; /// [`spawn_blocking`]: crate::task::spawn_blocking pub async fn read_dir(path: impl AsRef) -> io::Result { let path = path.as_ref().to_owned(); - let std = asyncify(|| std::fs::read_dir(path)).await?; + asyncify(|| -> io::Result { + let mut std = std::fs::read_dir(path)?; + let mut buf = VecDeque::with_capacity(CHUNK_SIZE); + ReadDir::next_chunk(&mut buf, &mut std); - Ok(ReadDir(State::Idle(Some(std)))) + Ok(ReadDir(State::Idle(Some((buf, std))))) + }) + .await } /// Reads the entries in a directory. @@ -58,8 +66,8 @@ pub struct ReadDir(State); #[derive(Debug)] enum State { - Idle(Option), - Pending(JoinHandle<(Option>, std::fs::ReadDir)>), + Idle(Option<(VecDeque>, std::fs::ReadDir)>), + Pending(JoinHandle<(VecDeque>, std::fs::ReadDir)>), } impl ReadDir { @@ -94,29 +102,57 @@ impl ReadDir { pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll>> { loop { match self.0 { - State::Idle(ref mut std) => { - let mut std = std.take().unwrap(); + State::Idle(ref mut data) => { + let (buf, _) = data.as_mut().unwrap(); + + if let Some(ent) = buf.pop_front() { + return Poll::Ready(ent.map(Some)); + }; + + let (mut buf, mut std) = data.take().unwrap(); self.0 = State::Pending(spawn_blocking(move || { - let ret = std.next(); - (ret, std) + ReadDir::next_chunk(&mut buf, &mut std); + (buf, std) })); } State::Pending(ref mut rx) => { - let (ret, std) = ready!(Pin::new(rx).poll(cx))?; - self.0 = State::Idle(Some(std)); + let (mut buf, std) = ready!(Pin::new(rx).poll(cx))?; - let ret = match ret { - Some(Ok(std)) => Ok(Some(DirEntry(Arc::new(std)))), + let ret = match buf.pop_front() { + Some(Ok(x)) => Ok(Some(x)), Some(Err(e)) => Err(e), None => Ok(None), }; + self.0 = State::Idle(Some((buf, std))); + return Poll::Ready(ret); } } } } + + fn next_chunk(buf: &mut VecDeque>, std: &mut std::fs::ReadDir) { + for ret in std.by_ref().take(CHUNK_SIZE) { + let success = ret.is_ok(); + + buf.push_back(ret.map(|std| DirEntry { + #[cfg(not(any( + target_os = "solaris", + target_os = "illumos", + target_os = "haiku", + target_os = "vxworks" + )))] + file_type: std.file_type().ok(), + std: Arc::new(std), + })); + + if !success { + break; + } + } + } } feature! { @@ -160,7 +196,16 @@ feature! { /// filesystem. Each entry can be inspected via methods to learn about the full /// path or possibly other metadata through per-platform extension traits. 
#[derive(Debug)] -pub struct DirEntry(Arc); +pub struct DirEntry { + #[cfg(not(any( + target_os = "solaris", + target_os = "illumos", + target_os = "haiku", + target_os = "vxworks" + )))] + file_type: Option, + std: Arc, +} impl DirEntry { /// Returns the full path to the file that this entry represents. @@ -193,7 +238,7 @@ impl DirEntry { /// /// The exact text, of course, depends on what files you have in `.`. pub fn path(&self) -> PathBuf { - self.0.path() + self.std.path() } /// Returns the bare file name of this directory entry without any other @@ -214,7 +259,7 @@ impl DirEntry { /// # } /// ``` pub fn file_name(&self) -> OsString { - self.0.file_name() + self.std.file_name() } /// Returns the metadata for the file that this entry points at. @@ -248,7 +293,7 @@ impl DirEntry { /// # } /// ``` pub async fn metadata(&self) -> io::Result { - let std = self.0.clone(); + let std = self.std.clone(); asyncify(move || std.metadata()).await } @@ -283,13 +328,23 @@ impl DirEntry { /// # } /// ``` pub async fn file_type(&self) -> io::Result { - let std = self.0.clone(); + #[cfg(not(any( + target_os = "solaris", + target_os = "illumos", + target_os = "haiku", + target_os = "vxworks" + )))] + if let Some(file_type) = self.file_type { + return Ok(file_type); + } + + let std = self.std.clone(); asyncify(move || std.file_type()).await } /// Returns a reference to the underlying `std::fs::DirEntry`. #[cfg(unix)] pub(super) fn as_inner(&self) -> &std::fs::DirEntry { - &self.0 + &self.std } } From ef0224246b9473b3500df7b4704a4f3680d43975 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Thu, 29 Dec 2022 11:46:08 +0900 Subject: [PATCH 025/101] tests: fix SB violation in LeakedBuffers (#5322) --- tokio/tests/support/leaked_buffers.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tokio/tests/support/leaked_buffers.rs b/tokio/tests/support/leaked_buffers.rs index 3ee8a18967b..a6079fb707a 100644 --- a/tokio/tests/support/leaked_buffers.rs +++ b/tokio/tests/support/leaked_buffers.rs @@ -18,9 +18,9 @@ impl LeakedBuffers { } } pub unsafe fn create<'a>(&mut self, size: usize) -> &'a mut [u8] { - let mut new_mem = vec![0u8; size].into_boxed_slice(); - let slice = std::slice::from_raw_parts_mut(new_mem.as_mut_ptr(), new_mem.len()); + let new_mem = vec![0u8; size].into_boxed_slice(); self.leaked_vecs.push(new_mem); - slice + let new_mem = self.leaked_vecs.last_mut().unwrap(); + std::slice::from_raw_parts_mut(new_mem.as_mut_ptr(), new_mem.len()) } } From 98d484e29c363b1b0b8b605ddb7d742367a8fb5c Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 30 Dec 2022 19:03:44 +0900 Subject: [PATCH 026/101] ci: update cargo-check-external-types to 0.1.6 (#5325) --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3be5ef92aea..eda3e7ea0c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -585,16 +585,16 @@ jobs: - ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Install Rust nightly-2022-07-25 + - name: Install Rust nightly-2022-11-16 uses: dtolnay/rust-toolchain@master with: # `check-external-types` requires a specific Rust nightly version. 
See # the README for details: https://github.com/awslabs/cargo-check-external-types - toolchain: nightly-2022-07-25 + toolchain: nightly-2022-11-16 - uses: Swatinem/rust-cache@v2 - name: check-external-types run: | set -x - cargo install cargo-check-external-types --locked --version 0.1.5 + cargo install cargo-check-external-types --locked --version 0.1.6 cargo check-external-types --all-features --config external-types.toml working-directory: tokio From 048049f8883f1bc73ee5d3fa3c94e61cf41d1e38 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Fri, 30 Dec 2022 10:49:45 -0800 Subject: [PATCH 027/101] rt: move task::Id into its own file (#5327) This is a minor internal cleanup. --- .github/workflows/ci.yml | 4 ++ tokio/src/runtime/task/id.rs | 87 ++++++++++++++++++++++++++++++++ tokio/src/runtime/task/mod.rs | 93 ++--------------------------------- 3 files changed, 95 insertions(+), 89 deletions(-) create mode 100644 tokio/src/runtime/task/id.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eda3e7ea0c8..88fa10f5fca 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -348,10 +348,14 @@ jobs: # release. - name: "check -p tokio --all-features" run: cargo check -p tokio --all-features + env: + RUSTFLAGS: "" # remove -Dwarnings - name: "pin once_cell version" run: cargo update -p once_cell --precise 1.14.0 - name: "check --workspace --all-features" run: cargo check --workspace --all-features + env: + RUSTFLAGS: "" # remove -Dwarnings minimal-versions: name: minimal-versions diff --git a/tokio/src/runtime/task/id.rs b/tokio/src/runtime/task/id.rs new file mode 100644 index 00000000000..2b0d95c0243 --- /dev/null +++ b/tokio/src/runtime/task/id.rs @@ -0,0 +1,87 @@ +use crate::runtime::context; + +use std::fmt; + +/// An opaque ID that uniquely identifies a task relative to all other currently +/// running tasks. +/// +/// # Notes +/// +/// - Task IDs are unique relative to other *currently running* tasks. When a +/// task completes, the same ID may be used for another task. +/// - Task IDs are *not* sequential, and do not indicate the order in which +/// tasks are spawned, what runtime a task is spawned on, or any other data. +/// - The task ID of the currently running task can be obtained from inside the +/// task via the [`task::try_id()`](crate::task::try_id()) and +/// [`task::id()`](crate::task::id()) functions and from outside the task via +/// the [`JoinHandle::id()`](crate::task::JoinHandle::id()) function. +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// +/// [unstable]: crate#unstable-features +#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))] +#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] +#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)] +pub struct Id(u64); + +/// Returns the [`Id`] of the currently running task. +/// +/// # Panics +/// +/// This function panics if called from outside a task. Please note that calls +/// to `block_on` do not have task IDs, so the method will panic if called from +/// within a call to `block_on`. For a version of this function that doesn't +/// panic, see [`task::try_id()`](crate::runtime::task::try_id()). +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. 
+/// +/// [task ID]: crate::task::Id +/// [unstable]: crate#unstable-features +#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] +#[track_caller] +pub fn id() -> Id { + context::current_task_id().expect("Can't get a task id when not inside a task") +} + +/// Returns the [`Id`] of the currently running task, or `None` if called outside +/// of a task. +/// +/// This function is similar to [`task::id()`](crate::runtime::task::id()), except +/// that it returns `None` rather than panicking if called outside of a task +/// context. +/// +/// **Note**: This is an [unstable API][unstable]. The public API of this type +/// may break in 1.x releases. See [the documentation on unstable +/// features][unstable] for details. +/// +/// [task ID]: crate::task::Id +/// [unstable]: crate#unstable-features +#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] +#[track_caller] +pub fn try_id() -> Option { + context::current_task_id() +} + +impl fmt::Display for Id { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl Id { + pub(crate) fn next() -> Self { + use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64}; + + static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(1); + + Self(NEXT_ID.fetch_add(1, Relaxed)) + } + + pub(crate) fn as_u64(&self) -> u64 { + self.0 + } +} diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 9bb1044ce92..55131ac5665 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -168,19 +168,20 @@ // unstable. This should be removed once `JoinSet` is stabilized. #![cfg_attr(not(tokio_unstable), allow(dead_code))] -use crate::runtime::context; - mod core; use self::core::Cell; use self::core::Header; mod error; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 pub use self::error::JoinError; mod harness; use self::harness::Harness; +mod id; +#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] +pub use id::{id, try_id, Id}; + cfg_rt_multi_thread! { mod inject; pub(super) use self::inject::Inject; @@ -191,10 +192,8 @@ mod abort; mod join; #[cfg(feature = "rt")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 pub use self::abort::AbortHandle; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 pub use self::join::JoinHandle; mod list; @@ -215,70 +214,6 @@ use std::marker::PhantomData; use std::ptr::NonNull; use std::{fmt, mem}; -/// An opaque ID that uniquely identifies a task relative to all other currently -/// running tasks. -/// -/// # Notes -/// -/// - Task IDs are unique relative to other *currently running* tasks. When a -/// task completes, the same ID may be used for another task. -/// - Task IDs are *not* sequential, and do not indicate the order in which -/// tasks are spawned, what runtime a task is spawned on, or any other data. -/// - The task ID of the currently running task can be obtained from inside the -/// task via the [`task::try_id()`](crate::task::try_id()) and -/// [`task::id()`](crate::task::id()) functions and from outside the task via -/// the [`JoinHandle::id()`](crate::task::JoinHandle::id()) function. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. 
-/// -/// [unstable]: crate#unstable-features -#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))] -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)] -pub struct Id(u64); - -/// Returns the [`Id`] of the currently running task. -/// -/// # Panics -/// -/// This function panics if called from outside a task. Please note that calls -/// to `block_on` do not have task IDs, so the method will panic if called from -/// within a call to `block_on`. For a version of this function that doesn't -/// panic, see [`task::try_id()`](crate::runtime::task::try_id()). -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [task ID]: crate::task::Id -/// [unstable]: crate#unstable-features -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[track_caller] -pub fn id() -> Id { - context::current_task_id().expect("Can't get a task id when not inside a task") -} - -/// Returns the [`Id`] of the currently running task, or `None` if called outside -/// of a task. -/// -/// This function is similar to [`task::id()`](crate::runtime::task::id()), except -/// that it returns `None` rather than panicking if called outside of a task -/// context. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [task ID]: crate::task::Id -/// [unstable]: crate#unstable-features -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[track_caller] -pub fn try_id() -> Option { - context::current_task_id() -} - /// An owned handle to the task, tracked by ref count. #[repr(transparent)] pub(crate) struct Task { @@ -554,23 +489,3 @@ unsafe impl linked_list::Link for Task { self::core::Trailer::addr_of_owned(Header::get_trailer(target)) } } - -impl fmt::Display for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl Id { - pub(crate) fn next() -> Self { - use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64}; - - static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(1); - - Self(NEXT_ID.fetch_add(1, Relaxed)) - } - - pub(crate) fn as_u64(&self) -> u64 { - self.0 - } -} From c6552c5680fa14105547cfbbc26b26d67197b64e Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Fri, 30 Dec 2022 15:17:35 -0800 Subject: [PATCH 028/101] rt: use internal ThreadId implementation (#5329) The version provided by `std` has limitations, including no way to try to get a thread ID without panicking. --- tokio/src/runtime/context.rs | 22 +++++++++++++++++++++- tokio/src/runtime/mod.rs | 3 +++ tokio/src/runtime/thread_id.rs | 31 +++++++++++++++++++++++++++++++ tokio/src/task/local.rs | 34 ++++++++-------------------------- tokio/tests/rt_metrics.rs | 6 +++--- 5 files changed, 66 insertions(+), 30 deletions(-) create mode 100644 tokio/src/runtime/thread_id.rs diff --git a/tokio/src/runtime/context.rs b/tokio/src/runtime/context.rs index 2e54c8ba366..fef53cab8a6 100644 --- a/tokio/src/runtime/context.rs +++ b/tokio/src/runtime/context.rs @@ -15,6 +15,10 @@ cfg_rt! { } struct Context { + /// Uniquely identifies the current thread + #[cfg(feature = "rt")] + thread_id: Cell>, + /// Handle to the runtime scheduler running on the current thread. #[cfg(feature = "rt")] handle: RefCell>, @@ -46,6 +50,9 @@ struct Context { tokio_thread_local! 
{ static CONTEXT: Context = { Context { + #[cfg(feature = "rt")] + thread_id: Cell::new(None), + /// Tracks the current runtime handle to use when spawning, /// accessing drivers, etc... #[cfg(feature = "rt")] @@ -82,10 +89,23 @@ pub(super) fn budget(f: impl FnOnce(&Cell) -> R) -> Result Result { + CONTEXT.try_with(|ctx| { + match ctx.thread_id.get() { + Some(id) => id, + None => { + let id = ThreadId::next(); + ctx.thread_id.set(Some(id)); + id + } + } + }) + } + #[derive(Debug, Clone, Copy)] #[must_use] pub(crate) enum EnterRuntime { diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 45b79b0ac81..b6f43ea1754 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -237,6 +237,9 @@ cfg_rt! { mod runtime; pub use runtime::{Runtime, RuntimeFlavor}; + mod thread_id; + pub(crate) use thread_id::ThreadId; + cfg_metrics! { mod metrics; pub use metrics::RuntimeMetrics; diff --git a/tokio/src/runtime/thread_id.rs b/tokio/src/runtime/thread_id.rs new file mode 100644 index 00000000000..ef392897963 --- /dev/null +++ b/tokio/src/runtime/thread_id.rs @@ -0,0 +1,31 @@ +use std::num::NonZeroU64; + +#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] +pub(crate) struct ThreadId(NonZeroU64); + +impl ThreadId { + pub(crate) fn next() -> Self { + use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64}; + + static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(0); + + let mut last = NEXT_ID.load(Relaxed); + loop { + let id = match last.checked_add(1) { + Some(id) => id, + None => exhausted(), + }; + + match NEXT_ID.compare_exchange_weak(last, id, Relaxed, Relaxed) { + Ok(_) => return ThreadId(NonZeroU64::new(id).unwrap()), + Err(id) => last = id, + } + } + } +} + +#[cold] +#[allow(dead_code)] +fn exhausted() -> ! { + panic!("failed to generate unique thread ID: bitspace exhausted") +} diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index e4a198bd053..cc4500a58e7 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -1,8 +1,8 @@ //! Runs `!Send` futures on the current thread. use crate::loom::cell::UnsafeCell; use crate::loom::sync::{Arc, Mutex}; -use crate::loom::thread::{self, ThreadId}; use crate::runtime::task::{self, JoinHandle, LocalOwnedTasks, Task}; +use crate::runtime::{context, ThreadId}; use crate::sync::AtomicWaker; use crate::util::RcCell; @@ -277,12 +277,10 @@ pin_project! { } tokio_thread_local!(static CURRENT: LocalData = const { LocalData { - thread_id: Cell::new(None), ctx: RcCell::new(), } }); struct LocalData { - thread_id: Cell>, ctx: RcCell, } @@ -379,12 +377,14 @@ impl fmt::Debug for LocalEnterGuard { impl LocalSet { /// Returns a new local task set. pub fn new() -> LocalSet { + let owner = context::thread_id().expect("cannot create LocalSet during thread shutdown"); + LocalSet { tick: Cell::new(0), context: Rc::new(Context { shared: Arc::new(Shared { local_state: LocalState { - owner: thread_id().expect("cannot create LocalSet during thread shutdown"), + owner, owned: LocalOwnedTasks::new(), local_queue: UnsafeCell::new(VecDeque::with_capacity(INITIAL_CAPACITY)), }, @@ -949,7 +949,7 @@ impl Shared { // We are on the thread that owns the `LocalSet`, so we can // wake to the local queue. - _ if localdata.get_id() == Some(self.local_state.owner) => { + _ if context::thread_id().ok() == Some(self.local_state.owner) => { unsafe { // Safety: we just checked that the thread ID matches // the localset's owner, so this is safe. 
@@ -1093,7 +1093,9 @@ impl LocalState {
             // if we couldn't get the thread ID because we're dropping the local
             // data, skip the assertion --- the `Drop` impl is not going to be
             // called from another thread, because `LocalSet` is `!Send`
-            thread_id().map(|id| id == self.owner).unwrap_or(true),
+            context::thread_id()
+                .map(|id| id == self.owner)
+                .unwrap_or(true),
             "`LocalSet`'s local run queue must not be accessed by another thread!"
         );
     }
@@ -1103,26 +1105,6 @@ impl LocalState {
 // ensure they are on the same thread that owns the `LocalSet`.
 unsafe impl Send for LocalState {}
 
-impl LocalData {
-    fn get_id(&self) -> Option<ThreadId> {
-        self.thread_id.get()
-    }
-
-    fn get_or_insert_id(&self) -> ThreadId {
-        self.thread_id.get().unwrap_or_else(|| {
-            let id = thread::current().id();
-            self.thread_id.set(Some(id));
-            id
-        })
-    }
-}
-
-fn thread_id() -> Option<ThreadId> {
-    CURRENT
-        .try_with(|localdata| localdata.get_or_insert_id())
-        .ok()
-}
-
 #[cfg(all(test, not(loom)))]
 mod tests {
     use super::*;
diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs
index 4b98d234c41..fdb2fb5f551 100644
--- a/tokio/tests/rt_metrics.rs
+++ b/tokio/tests/rt_metrics.rs
@@ -141,7 +141,7 @@ fn worker_noop_count() {
         time::sleep(Duration::from_millis(1)).await;
     });
     drop(rt);
-    assert!(2 <= metrics.worker_noop_count(0));
+    assert!(0 < metrics.worker_noop_count(0));
 
     let rt = threaded();
     let metrics = rt.metrics();
@@ -149,8 +149,8 @@ fn worker_noop_count() {
         time::sleep(Duration::from_millis(1)).await;
     });
     drop(rt);
-    assert!(1 <= metrics.worker_noop_count(0));
-    assert!(1 <= metrics.worker_noop_count(1));
+    assert!(0 < metrics.worker_noop_count(0));
+    assert!(0 < metrics.worker_noop_count(1));
 }
 
 #[test]

From 699573d550fabf4bfb45d82505d6709faaae9037 Mon Sep 17 00:00:00 2001
From: Carl Lerche
Date: Tue, 3 Jan 2023 12:06:04 -0800
Subject: [PATCH 029/101] net: fix named pipes server configuration builder

The `pipe_mode` function would erase any previously set configuration
option stored in the `pipe_mode` bit field. This patch fixes the builder to
maintain the bit field when changing the pipe mode.
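As a rough sketch of the bit-field idea described above (a standalone illustration only: the `Options` type and the flag constants are made up for the example, not Tokio's real `ServerOptions` or the actual `winbase` values), toggling the pipe mode flips just the mode bit, so flags that were set earlier survive the call:

```rust
// Illustrative flag values; the real ones come from the Windows API.
const PIPE_TYPE_MESSAGE: u32 = 0x4;
const PIPE_REJECT_REMOTE_CLIENTS: u32 = 0x8;

struct Options {
    pipe_mode: u32,
}

impl Options {
    fn new() -> Self {
        // Byte mode (mode bit unset) with remote clients rejected by default.
        Options {
            pipe_mode: PIPE_REJECT_REMOTE_CLIENTS,
        }
    }

    // Buggy shape: assigning a fresh value erases every other flag.
    fn pipe_mode_overwriting(&mut self, message: bool) -> &mut Self {
        self.pipe_mode = if message { PIPE_TYPE_MESSAGE } else { 0 };
        self
    }

    // Fixed shape: only the mode bit is set or cleared.
    fn pipe_mode(&mut self, message: bool) -> &mut Self {
        if message {
            self.pipe_mode |= PIPE_TYPE_MESSAGE;
        } else {
            self.pipe_mode &= !PIPE_TYPE_MESSAGE;
        }
        self
    }
}

fn main() {
    let mut opts = Options::new();
    opts.pipe_mode_overwriting(true);
    // The reject-remote-clients bit is gone.
    assert_eq!(opts.pipe_mode, PIPE_TYPE_MESSAGE);

    let mut opts = Options::new();
    opts.pipe_mode(true);
    // The reject-remote-clients bit is preserved alongside the mode bit.
    assert_eq!(opts.pipe_mode, PIPE_TYPE_MESSAGE | PIPE_REJECT_REMOTE_CLIENTS);
}
```

The patch below expresses the same idea through the crate's internal `bool_flag!` helper rather than open-coded masking.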
--- .github/workflows/ci.yml | 17 --------- tokio/src/net/windows/named_pipe.rs | 54 ++++++++++++++++++++++++++--- 2 files changed, 49 insertions(+), 22 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8b67a0dd17d..ae99b17b7e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,7 +33,6 @@ jobs: - features - minrust - fmt - - clippy - docs - valgrind - loom-compile @@ -341,22 +340,6 @@ jobs: exit 1 fi - clippy: - name: clippy - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Install Rust ${{ env.rust_clippy }} - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ env.rust_clippy }} - override: true - components: clippy - - uses: Swatinem/rust-cache@v1 - # Run clippy - - name: "clippy --all" - run: cargo clippy --all --tests --all-features - docs: name: docs runs-on: ubuntu-latest diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 695b8eb3d39..51c625e8db6 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -1681,11 +1681,10 @@ impl ServerOptions { /// /// [`dwPipeMode`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea pub fn pipe_mode(&mut self, pipe_mode: PipeMode) -> &mut Self { - self.pipe_mode = match pipe_mode { - PipeMode::Byte => winbase::PIPE_TYPE_BYTE, - PipeMode::Message => winbase::PIPE_TYPE_MESSAGE, - }; - + let is_msg = matches!(pipe_mode, PipeMode::Message); + // Pipe mode is implemented as a bit flag 0x4. Set is message and unset + // is byte. + bool_flag!(self.pipe_mode, is_msg, winbase::PIPE_TYPE_MESSAGE); self } @@ -2412,3 +2411,48 @@ unsafe fn named_pipe_info(handle: RawHandle) -> io::Result { max_instances, }) } + +#[cfg(test)] +mod test { + use self::winbase::{PIPE_REJECT_REMOTE_CLIENTS, PIPE_TYPE_BYTE, PIPE_TYPE_MESSAGE}; + use super::*; + + #[test] + fn opts_default_pipe_mode() { + let opts = ServerOptions::new(); + assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); + } + + #[test] + fn opts_unset_reject_remote() { + let mut opts = ServerOptions::new(); + opts.reject_remote_clients(false); + assert_eq!(opts.pipe_mode & PIPE_REJECT_REMOTE_CLIENTS, 0); + } + + #[test] + fn opts_set_pipe_mode_maintains_reject_remote_clients() { + let mut opts = ServerOptions::new(); + opts.pipe_mode(PipeMode::Byte); + assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); + + opts.reject_remote_clients(false); + opts.pipe_mode(PipeMode::Byte); + assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE); + + opts.reject_remote_clients(true); + opts.pipe_mode(PipeMode::Byte); + assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); + + opts.reject_remote_clients(false); + opts.pipe_mode(PipeMode::Message); + assert_eq!(opts.pipe_mode, PIPE_TYPE_MESSAGE); + + opts.reject_remote_clients(true); + opts.pipe_mode(PipeMode::Message); + assert_eq!( + opts.pipe_mode, + PIPE_TYPE_MESSAGE | PIPE_REJECT_REMOTE_CLIENTS + ); + } +} From 9241c3eddf4a6a218681b088d71f7191513e2376 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 3 Jan 2023 12:34:56 -0800 Subject: [PATCH 030/101] chore: prepare Tokio v1.18.4 release --- README.md | 2 +- tokio/CHANGELOG.md | 9 +++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 81529078ab3..0ef20ec0b18 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on 
Cargo.toml: ```toml [dependencies] -tokio = { version = "1.18.3", features = ["full"] } +tokio = { version = "1.18.4", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 7c4076eed2c..8652e4a0e49 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,12 @@ +# 1.18.4 (January 3, 2022) + +### Fixed + +- net: fix Windows named pipe server builder to maintain option when toggling + pipe mode ([#5336]). + +[#5336]: https://github.com/tokio-rs/tokio/pull/5336 + # 1.18.3 (September 27, 2022) This release removes the dependency on the `once_cell` crate to restore the MSRV diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index cf1c2094cc8..42beeb73d64 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. -version = "1.18.3" +version = "1.18.4" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 81529078ab3..0ef20ec0b18 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.18.3", features = ["full"] } +tokio = { version = "1.18.4", features = ["full"] } ``` Then, on your main.rs: From 763bdc967e3e128d1e6e000238f1d257a81bf59a Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 3 Jan 2023 13:27:58 -0800 Subject: [PATCH 031/101] ci: run WASI tasks using latest Rust This should let CI to pass. --- .github/workflows/ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7454732ac59..fe1a56c4771 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -453,10 +453,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Install Rust ${{ env.rust_stable }} + - name: Install Rust stable uses: actions-rs/toolchain@v1 with: - toolchain: ${{ env.rust_stable }} + toolchain: stable override: true - uses: Swatinem/rust-cache@v1 @@ -481,3 +481,5 @@ jobs: # TODO: this should become: `cargo hack wasi test --each-feature` run: cargo wasi test --test rt_yield --features wasi-rt working-directory: tests-integration + env: + RUSTFLAGS: "" From ba81945ffc2695b71f2bbcadbfb5e46ec55aaef3 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 3 Jan 2023 13:15:13 -0800 Subject: [PATCH 032/101] chore: prepare Tokio 1.20.3 release --- README.md | 2 +- tokio/CHANGELOG.md | 11 +++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4fb3abe9f8c..3113fadd8ab 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.20.2", features = ["full"] } +tokio = { version = "1.20.3", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 6f0c07d7b10..42d1f6b8f2a 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,14 @@ +# 1.20.3 (January 3, 2022) + +This release forward ports changes from 1.18.4. + +### Fixed + +- net: fix Windows named pipe server builder to maintain option when toggling + pipe mode ([#5336]). 
+ +[#5336]: https://github.com/tokio-rs/tokio/pull/5336 + # 1.20.2 (September 27, 2022) This release removes the dependency on the `once_cell` crate to restore the MSRV diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 0dc0de79ce0..a83937445b6 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. -version = "1.20.2" +version = "1.20.3" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 4fb3abe9f8c..3113fadd8ab 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.20.2", features = ["full"] } +tokio = { version = "1.20.3", features = ["full"] } ``` Then, on your main.rs: From 1a997ffbd62334af2553775234e75ede2d7d949f Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Wed, 4 Jan 2023 10:32:38 -0800 Subject: [PATCH 033/101] chore: prepare Tokio v1.23.1 release --- README.md | 2 +- tokio/CHANGELOG.md | 11 +++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3e51cf546b7..67d931d1a8d 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.23.0", features = ["full"] } +tokio = { version = "1.23.1", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 80835dc1e1e..0f1ad8bc61a 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,14 @@ +# 1.23.1 (January 4, 2022) + +This release forward ports changes from 1.18.4. + +### Fixed + +- net: fix Windows named pipe server builder to maintain option when toggling + pipe mode ([#5336]). + +[#5336]: https://github.com/tokio-rs/tokio/pull/5336 + # 1.23.0 (December 5, 2022) ### Fixed diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 78cba811156..4dd60339863 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.23.0" +version = "1.23.1" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 3e51cf546b7..67d931d1a8d 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.23.0", features = ["full"] } +tokio = { version = "1.23.1", features = ["full"] } ``` Then, on your main.rs: From 21b233fa9c55ac69c8c74f0ca720aeba41c5c439 Mon Sep 17 00:00:00 2001 From: Paul Loyd Date: Thu, 5 Jan 2023 18:02:07 +0800 Subject: [PATCH 034/101] test: bump version of async-stream (#5347) --- tokio-test/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-test/Cargo.toml b/tokio-test/Cargo.toml index e889dcec85a..30cacead51f 100644 --- a/tokio-test/Cargo.toml +++ b/tokio-test/Cargo.toml @@ -19,7 +19,7 @@ categories = ["asynchronous", "testing"] [dependencies] tokio = { version = "1.2.0", path = "../tokio", features = ["rt", "sync", "time", "test-util"] } tokio-stream = { version = "0.1.1", path = "../tokio-stream" } -async-stream = "0.3" +async-stream = "0.3.3" bytes = "1.0.0" futures-core = "0.3.0" From dfe252d1fa82bdfefd7cba20f9a88cdd27e310cc Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Thu, 5 Jan 2023 11:20:35 -0800 Subject: [PATCH 035/101] chore: prepare Tokio v1.24.0 release (#5353) --- README.md | 2 +- tokio/CHANGELOG.md | 27 +++++++++++++++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 67d931d1a8d..dd4a96a2f18 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.23.1", features = ["full"] } +tokio = { version = "1.24.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 0f1ad8bc61a..8428aad36dc 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,30 @@ +# 1.24.0 (January 5, 2022) + +### Fixed + - rt: improve native `AtomicU64` support detection ([#5284]) + +### Added + - rt: add configuration option for max number of I/O events polled from the OS + per tick ([#5186]) + - rt: add an environment variable for configuring the default number of worker + threads per runtime instance ([#4250]) + +### Changed + - sync: reduce MPSC channel stack usage ([#5294]) + - io: reduce lock contention in I/O operations ([#5300]) + - fs: speed up `read_dir()` by chunking operations ([#5309]) + - rt: use internal `ThreadId` implementation ([#5329]) + - test: don't auto-advance time when a `spawn_blocking` task is running ([#5115]) + +[#5186]: https://github.com/tokio-rs/tokio/pull/5186 +[#5294]: https://github.com/tokio-rs/tokio/pull/5294 +[#5284]: https://github.com/tokio-rs/tokio/pull/5284 +[#4250]: https://github.com/tokio-rs/tokio/pull/4250 +[#5300]: https://github.com/tokio-rs/tokio/pull/5300 +[#5329]: https://github.com/tokio-rs/tokio/pull/5329 +[#5115]: https://github.com/tokio-rs/tokio/pull/5115 +[#5309]: https://github.com/tokio-rs/tokio/pull/5309 + # 1.23.1 (January 4, 2022) This release forward ports changes from 1.18.4. diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 1b635360055..6055e78520a 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.23.1" +version = "1.24.0" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 67d931d1a8d..dd4a96a2f18 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.23.1", features = ["full"] } +tokio = { version = "1.24.0", features = ["full"] } ``` Then, on your main.rs: From 8d8db27442de797999f2facd44ec6bdb59143efc Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Thu, 5 Jan 2023 22:19:31 -0800 Subject: [PATCH 036/101] tokio: add load and compare_exchange_weak to loom StaticAtomicU64 (#5356) --- .../loom/std/atomic_u64_static_once_cell.rs | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tokio/src/loom/std/atomic_u64_static_once_cell.rs b/tokio/src/loom/std/atomic_u64_static_once_cell.rs index 14b3a54937b..40c6172a524 100644 --- a/tokio/src/loom/std/atomic_u64_static_once_cell.rs +++ b/tokio/src/loom/std/atomic_u64_static_once_cell.rs @@ -23,6 +23,10 @@ impl StaticAtomicU64 { } } + pub(crate) fn load(&self, order: Ordering) -> u64 { + *self.inner().lock() + } + pub(crate) fn fetch_add(&self, val: u64, order: Ordering) -> u64 { let mut lock = self.inner().lock(); let prev = *lock; @@ -30,6 +34,23 @@ impl StaticAtomicU64 { prev } + pub(crate) fn compare_exchange_weak( + &self, + current: u64, + new: u64, + _success: Ordering, + _failure: Ordering, + ) -> Result { + let mut lock = self.inner().lock(); + + if *lock == current { + *lock = new; + Ok(current) + } else { + Err(*lock) + } + } + fn inner(&self) -> &Mutex { self.cell.get(|| Mutex::new(self.init)) } From 31c7e8291993f42b27f5306fd0c33848c9fc796f Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 6 Jan 2023 11:48:42 +0100 Subject: [PATCH 037/101] chore: prepare Tokio v1.24.1 (#5357) --- README.md | 2 +- tokio/CHANGELOG.md | 6 ++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index dd4a96a2f18..7bf8befff04 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.24.0", features = ["full"] } +tokio = { version = "1.24.1", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 8428aad36dc..65a600a8947 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,9 @@ +# 1.24.1 (January 6, 2022) + +This release fixes a compilation failure on targets without `AtomicU64` when using rustc older than 1.63. ([#5356]) + +[#5356]: https://github.com/tokio-rs/tokio/pull/5356 + # 1.24.0 (January 5, 2022) ### Fixed diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 6055e78520a..3b685e2e9c3 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.24.0" +version = "1.24.1" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index dd4a96a2f18..7bf8befff04 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.24.0", features = ["full"] } +tokio = { version = "1.24.1", features = ["full"] } ``` Then, on your main.rs: From f9dbfa82513c346940a6255336ed8a3e0f89b5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tymoteusz=20Wi=C5=9Bniewski?= Date: Wed, 11 Jan 2023 13:56:27 +0100 Subject: [PATCH 038/101] net: improve `from_std` docs regarding non-blocking IO (#5332) --- tokio/src/net/tcp/listener.rs | 13 ++++++-- tokio/src/net/tcp/socket.rs | 11 ++++++- tokio/src/net/tcp/stream.rs | 13 ++++++-- tokio/src/net/udp.rs | 13 ++++++-- tokio/src/net/unix/datagram/socket.rs | 31 ++++++++++-------- tokio/src/net/unix/listener.rs | 46 +++++++++++++++++++-------- tokio/src/net/unix/stream.rs | 28 ++++++++++++++-- 7 files changed, 116 insertions(+), 39 deletions(-) diff --git a/tokio/src/net/tcp/listener.rs b/tokio/src/net/tcp/listener.rs index 4a022fa2a2c..4441313703e 100644 --- a/tokio/src/net/tcp/listener.rs +++ b/tokio/src/net/tcp/listener.rs @@ -195,15 +195,22 @@ impl TcpListener { /// Creates new `TcpListener` from a `std::net::TcpListener`. /// /// This function is intended to be used to wrap a TCP listener from the - /// standard library in the Tokio equivalent. The conversion assumes nothing - /// about the underlying listener; it is left up to the user to set it in - /// non-blocking mode. + /// standard library in the Tokio equivalent. /// /// This API is typically paired with the `socket2` crate and the `Socket` /// type to build up and customize a listener before it's shipped off to the /// backing event loop. This allows configuration of options like /// `SO_REUSEPORT`, binding to multiple addresses, etc. /// + /// # Notes + /// + /// The caller is responsible for ensuring that the listener is in + /// non-blocking mode. Otherwise all I/O operations on the listener + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. + /// + /// [`set_nonblocking`]: std::net::TcpListener::set_nonblocking + /// /// # Examples /// /// ```rust,no_run diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index 94534110460..09349fe53eb 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -670,6 +670,15 @@ impl TcpSocket { /// [`std::net::TcpStream`]: struct@std::net::TcpStream /// [`socket2`]: https://docs.rs/socket2/ /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. Otherwise all I/O operations on the socket + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. 
+ /// + /// [`set_nonblocking`]: std::net::TcpStream::set_nonblocking + /// /// # Examples /// /// ``` @@ -678,8 +687,8 @@ impl TcpSocket { /// /// #[tokio::main] /// async fn main() -> std::io::Result<()> { - /// /// let socket2_socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; + /// socket2_socket.set_nonblocking(true)?; /// /// let socket = TcpSocket::from_std_stream(socket2_socket.into()); /// diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index b7dd3377b75..b17d33feaea 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -165,9 +165,16 @@ impl TcpStream { /// Creates new `TcpStream` from a `std::net::TcpStream`. /// /// This function is intended to be used to wrap a TCP stream from the - /// standard library in the Tokio equivalent. The conversion assumes nothing - /// about the underlying stream; it is left up to the user to set it in - /// non-blocking mode. + /// standard library in the Tokio equivalent. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the stream is in + /// non-blocking mode. Otherwise all I/O operations on the stream + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. + /// + /// [`set_nonblocking`]: std::net::TcpStream::set_nonblocking /// /// # Examples /// diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index af343f20090..213d9149dad 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -179,14 +179,21 @@ impl UdpSocket { /// Creates new `UdpSocket` from a previously bound `std::net::UdpSocket`. /// /// This function is intended to be used to wrap a UDP socket from the - /// standard library in the Tokio equivalent. The conversion assumes nothing - /// about the underlying socket; it is left up to the user to set it in - /// non-blocking mode. + /// standard library in the Tokio equivalent. /// /// This can be used in conjunction with socket2's `Socket` interface to /// configure a socket before it's handed off, such as setting options like /// `reuse_address` or binding to multiple addresses. /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. Otherwise all I/O operations on the socket + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. + /// + /// [`set_nonblocking`]: std::net::UdpSocket::set_nonblocking + /// /// # Panics /// /// This function panics if thread-local runtime is not set. diff --git a/tokio/src/net/unix/datagram/socket.rs b/tokio/src/net/unix/datagram/socket.rs index 5e1453e380d..76c6b191283 100644 --- a/tokio/src/net/unix/datagram/socket.rs +++ b/tokio/src/net/unix/datagram/socket.rs @@ -426,9 +426,16 @@ impl UnixDatagram { /// Creates new `UnixDatagram` from a `std::os::unix::net::UnixDatagram`. /// /// This function is intended to be used to wrap a UnixDatagram from the - /// standard library in the Tokio equivalent. The conversion assumes - /// nothing about the underlying datagram; it is left up to the user to set - /// it in non-blocking mode. + /// standard library in the Tokio equivalent. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socker is in + /// non-blocking mode. Otherwise all I/O operations on the socket + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. 
+ /// + /// [`set_nonblocking`]: std::os::unix::net::UnixDatagram::set_nonblocking /// /// # Panics /// @@ -470,21 +477,19 @@ impl UnixDatagram { /// Turns a [`tokio::net::UnixDatagram`] into a [`std::os::unix::net::UnixDatagram`]. /// /// The returned [`std::os::unix::net::UnixDatagram`] will have nonblocking - /// mode set as `true`. Use [`set_nonblocking`] to change the blocking mode + /// mode set as `true`. Use [`set_nonblocking`] to change the blocking mode /// if needed. /// /// # Examples /// /// ```rust,no_run - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let tokio_socket = tokio::net::UnixDatagram::bind("127.0.0.1:0")?; - /// let std_socket = tokio_socket.into_std()?; - /// std_socket.set_nonblocking(false)?; - /// Ok(()) - /// } + /// # use std::error::Error; + /// # async fn dox() -> Result<(), Box> { + /// let tokio_socket = tokio::net::UnixDatagram::bind("/path/to/the/socket")?; + /// let std_socket = tokio_socket.into_std()?; + /// std_socket.set_nonblocking(false)?; + /// # Ok(()) + /// # } /// ``` /// /// [`tokio::net::UnixDatagram`]: UnixDatagram diff --git a/tokio/src/net/unix/listener.rs b/tokio/src/net/unix/listener.rs index fbea3e76a1a..9887f7343be 100644 --- a/tokio/src/net/unix/listener.rs +++ b/tokio/src/net/unix/listener.rs @@ -73,9 +73,31 @@ impl UnixListener { /// Creates new `UnixListener` from a `std::os::unix::net::UnixListener `. /// /// This function is intended to be used to wrap a UnixListener from the - /// standard library in the Tokio equivalent. The conversion assumes - /// nothing about the underlying listener; it is left up to the user to set - /// it in non-blocking mode. + /// standard library in the Tokio equivalent. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the listener is in + /// non-blocking mode. Otherwise all I/O operations on the listener + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. + /// + /// [`set_nonblocking`]: std::os::unix::net::UnixListener::set_nonblocking + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::UnixListener; + /// use std::os::unix::net::UnixListener as StdUnixListener; + /// # use std::error::Error; + /// + /// # async fn dox() -> Result<(), Box> { + /// let std_listener = StdUnixListener::bind("/path/to/the/socket")?; + /// std_listener.set_nonblocking(true)?; + /// let listener = UnixListener::from_std(std_listener)?; + /// # Ok(()) + /// # } + /// ``` /// /// # Panics /// @@ -95,20 +117,18 @@ impl UnixListener { /// Turns a [`tokio::net::UnixListener`] into a [`std::os::unix::net::UnixListener`]. /// /// The returned [`std::os::unix::net::UnixListener`] will have nonblocking mode - /// set as `true`. Use [`set_nonblocking`] to change the blocking mode if needed. + /// set as `true`. Use [`set_nonblocking`] to change the blocking mode if needed. 
/// /// # Examples /// /// ```rust,no_run - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let tokio_listener = tokio::net::UnixListener::bind("127.0.0.1:0")?; - /// let std_listener = tokio_listener.into_std()?; - /// std_listener.set_nonblocking(false)?; - /// Ok(()) - /// } + /// # use std::error::Error; + /// # async fn dox() -> Result<(), Box> { + /// let tokio_listener = tokio::net::UnixListener::bind("/path/to/the/socket")?; + /// let std_listener = tokio_listener.into_std()?; + /// std_listener.set_nonblocking(false)?; + /// # Ok(()) + /// # } /// ``` /// /// [`tokio::net::UnixListener`]: UnixListener diff --git a/tokio/src/net/unix/stream.rs b/tokio/src/net/unix/stream.rs index 2d278986c97..c249bf4b269 100644 --- a/tokio/src/net/unix/stream.rs +++ b/tokio/src/net/unix/stream.rs @@ -709,9 +709,31 @@ impl UnixStream { /// Creates new `UnixStream` from a `std::os::unix::net::UnixStream`. /// /// This function is intended to be used to wrap a UnixStream from the - /// standard library in the Tokio equivalent. The conversion assumes - /// nothing about the underlying stream; it is left up to the user to set - /// it in non-blocking mode. + /// standard library in the Tokio equivalent. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the stream is in + /// non-blocking mode. Otherwise all I/O operations on the stream + /// will block the thread, which will cause unexpected behavior. + /// Non-blocking mode can be set using [`set_nonblocking`]. + /// + /// [`set_nonblocking`]: std::os::unix::net::UnixStream::set_nonblocking + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::UnixStream; + /// use std::os::unix::net::UnixStream as StdUnixStream; + /// # use std::error::Error; + /// + /// # async fn dox() -> Result<(), Box> { + /// let std_stream = StdUnixStream::connect("/path/to/the/socket")?; + /// std_stream.set_nonblocking(true)?; + /// let stream = UnixStream::from_std(std_stream)?; + /// # Ok(()) + /// # } + /// ``` /// /// # Panics /// From c390a62387fe7346951c8bc57ea2761614b83e82 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 12 Jan 2023 13:53:31 -0500 Subject: [PATCH 039/101] Add broadcast::Sender::len (#5343) * Add broadcast::Sender::len * Add a randomized test for broadcast::Sender::len * fix wasm build * less silly cfg * review feedback * grammar? --- tokio/src/sync/broadcast.rs | 95 ++++++++++++++++++++++++++++++++++- tokio/tests/sync_broadcast.rs | 60 ++++++++++++++++++++++ 2 files changed, 153 insertions(+), 2 deletions(-) diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index ede990b046e..1c6b2caa3bb 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -603,6 +603,97 @@ impl Sender { new_receiver(shared) } + /// Returns the number of queued values. + /// + /// A value is queued until it has either been seen by all receivers that were alive at the time + /// it was sent, or has been evicted from the queue by subsequent sends that exceeded the + /// queue's capacity. + /// + /// # Note + /// + /// In contrast to [`Receiver::len`], this method only reports queued values and not values that + /// have been evicted from the queue before being seen by all receivers. 
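// Illustrative sketch (assumed usage, not taken from the patch): eviction caps
// the count — values pushed out of a full channel stop contributing to `len`,
// even though a lagging receiver never saw them.
fn eviction_caps_len() {
    let (tx, _rx) = tokio::sync::broadcast::channel::<i32>(2);
    tx.send(1).unwrap();
    tx.send(2).unwrap();
    tx.send(3).unwrap(); // evicts `1`; `_rx` will hit `Lagged` on its next recv
    assert_eq!(tx.len(), 2);
}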
+ /// + /// # Examples + /// + /// ``` + /// use tokio::sync::broadcast; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx1) = broadcast::channel(16); + /// let mut rx2 = tx.subscribe(); + /// + /// tx.send(10).unwrap(); + /// tx.send(20).unwrap(); + /// tx.send(30).unwrap(); + /// + /// assert_eq!(tx.len(), 3); + /// + /// rx1.recv().await.unwrap(); + /// + /// // The len is still 3 since rx2 hasn't seen the first value yet. + /// assert_eq!(tx.len(), 3); + /// + /// rx2.recv().await.unwrap(); + /// + /// assert_eq!(tx.len(), 2); + /// } + /// ``` + pub fn len(&self) -> usize { + let tail = self.shared.tail.lock(); + + let base_idx = (tail.pos & self.shared.mask as u64) as usize; + let mut low = 0; + let mut high = self.shared.buffer.len(); + while low < high { + let mid = low + (high - low) / 2; + let idx = base_idx.wrapping_add(mid) & self.shared.mask; + if self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 { + low = mid + 1; + } else { + high = mid; + } + } + + self.shared.buffer.len() - low + } + + /// Returns true if there are no queued values. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::broadcast; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx1) = broadcast::channel(16); + /// let mut rx2 = tx.subscribe(); + /// + /// assert!(tx.is_empty()); + /// + /// tx.send(10).unwrap(); + /// + /// assert!(!tx.is_empty()); + /// + /// rx1.recv().await.unwrap(); + /// + /// // The queue is still not empty since rx2 hasn't seen the value. + /// assert!(!tx.is_empty()); + /// + /// rx2.recv().await.unwrap(); + /// + /// assert!(tx.is_empty()); + /// } + /// ``` + pub fn is_empty(&self) -> bool { + let tail = self.shared.tail.lock(); + + let idx = (tail.pos.wrapping_sub(1) & self.shared.mask as u64) as usize; + self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 + } + /// Returns the number of active receivers /// /// An active receiver is a [`Receiver`] handle returned from [`channel`] or @@ -731,7 +822,7 @@ impl Receiver { /// assert_eq!(rx1.len(), 2); /// assert_eq!(rx1.recv().await.unwrap(), 10); /// assert_eq!(rx1.len(), 1); - /// assert_eq!(rx1.recv().await.unwrap(), 20); + /// assert_eq!(rx1.recv().await.unwrap(), 20); /// assert_eq!(rx1.len(), 0); /// } /// ``` @@ -761,7 +852,7 @@ impl Receiver { /// /// assert!(!rx1.is_empty()); /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.recv().await.unwrap(), 20); + /// assert_eq!(rx1.recv().await.unwrap(), 20); /// assert!(rx1.is_empty()); /// } /// ``` diff --git a/tokio/tests/sync_broadcast.rs b/tokio/tests/sync_broadcast.rs index 9aa34841e26..67c378b84a6 100644 --- a/tokio/tests/sync_broadcast.rs +++ b/tokio/tests/sync_broadcast.rs @@ -526,3 +526,63 @@ fn resubscribe_to_closed_channel() { let mut rx_resub = rx.resubscribe(); assert_closed!(rx_resub.try_recv()); } + +#[test] +fn sender_len() { + let (tx, mut rx1) = broadcast::channel(4); + let mut rx2 = tx.subscribe(); + + assert_eq!(tx.len(), 0); + assert!(tx.is_empty()); + + tx.send(1).unwrap(); + tx.send(2).unwrap(); + tx.send(3).unwrap(); + + assert_eq!(tx.len(), 3); + assert!(!tx.is_empty()); + + assert_recv!(rx1); + assert_recv!(rx1); + + assert_eq!(tx.len(), 3); + assert!(!tx.is_empty()); + + assert_recv!(rx2); + + assert_eq!(tx.len(), 2); + assert!(!tx.is_empty()); + + tx.send(4).unwrap(); + tx.send(5).unwrap(); + tx.send(6).unwrap(); + + assert_eq!(tx.len(), 4); + assert!(!tx.is_empty()); +} + +#[test] +#[cfg(not(tokio_wasm_not_wasi))] +fn sender_len_random() { + use rand::Rng; + + 
let (tx, mut rx1) = broadcast::channel(16); + let mut rx2 = tx.subscribe(); + + for _ in 0..1000 { + match rand::thread_rng().gen_range(0..4) { + 0 => { + let _ = rx1.try_recv(); + } + 1 => { + let _ = rx2.try_recv(); + } + _ => { + tx.send(0).unwrap(); + } + } + + let expected_len = usize::min(usize::max(rx1.len(), rx2.len()), 16); + assert_eq!(tx.len(), expected_len); + } +} From 40782efb769ef8d0df070089f9bca8b6125d6421 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 15 Jan 2023 04:11:05 +0900 Subject: [PATCH 040/101] tokio: fix remaining issues about atomic_u64_static_once_cell.rs (#5374) Fixes #5373 Closes #5358 - Add check for no_atomic_u64 & no_const_mutex_new (condition to atomic_u64_static_once_cell.rs is compiled) - Allow unused_imports in TARGET_ATOMIC_U64_PROBE. I also tested other *_PROBE and found no other errors triggered by -D warning. - Fix cfg of util::once_cell module --- .github/workflows/ci.yml | 10 ++++++++++ tokio/build.rs | 3 ++- tokio/src/util/mod.rs | 7 ++++++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88fa10f5fca..2813ede8a86 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -300,6 +300,8 @@ jobs: with: toolchain: ${{ env.rust_nightly }} components: rust-src + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack # Install linker and libraries for i686-unknown-linux-gnu - uses: taiki-e/setup-cross-toolchain-action@v1 with: @@ -307,6 +309,14 @@ jobs: - run: cargo test -Zbuild-std --target target-specs/i686-unknown-linux-gnu.json -p tokio --all-features env: RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_atomic_u64 + # https://github.com/tokio-rs/tokio/pull/5356 + # https://github.com/tokio-rs/tokio/issues/5373 + - run: cargo hack build -p tokio --feature-powerset --depth 2 -Z avoid-dev-deps --keep-going + env: + RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_atomic_u64 --cfg tokio_no_const_mutex_new + - run: cargo hack build -p tokio --feature-powerset --depth 2 -Z avoid-dev-deps --keep-going + env: + RUSTFLAGS: --cfg tokio_unstable -Dwarnings --cfg tokio_no_atomic_u64 features: name: features diff --git a/tokio/build.rs b/tokio/build.rs index 503c0242fc8..ddade2876bb 100644 --- a/tokio/build.rs +++ b/tokio/build.rs @@ -27,12 +27,13 @@ const CONST_MUTEX_NEW_PROBE: &str = r#" const TARGET_HAS_ATOMIC_PROBE: &str = r#" { #[cfg(target_has_atomic = "ptr")] - let _ = (); + let _ = (); } "#; const TARGET_ATOMIC_U64_PROBE: &str = r#" { + #[allow(unused_imports)] use std::sync::atomic::AtomicU64 as _; } "#; diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs index 245e64de6b4..9f6119acbb5 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -6,7 +6,12 @@ cfg_io_driver! 
{ #[cfg(feature = "rt")] pub(crate) mod atomic_cell; -#[cfg(any(feature = "rt", feature = "signal", feature = "process"))] +#[cfg(any( + feature = "rt", + feature = "signal", + feature = "process", + tokio_no_const_mutex_new, +))] pub(crate) mod once_cell; #[cfg(any( From 06f1a601bb05b1aba9f95020a7fa7572899c588f Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 14 Jan 2023 20:11:45 +0100 Subject: [PATCH 041/101] task: clarify doc about tasks starting immediately (#5364) --- tokio/src/runtime/handle.rs | 6 +++--- tokio/src/runtime/runtime.rs | 6 +++--- tokio/src/runtime/task/join.rs | 6 +++--- tokio/src/task/join_set.rs | 24 +++++++++++------------- tokio/src/task/local.rs | 13 ++++++------- tokio/src/task/spawn.rs | 6 +++--- 6 files changed, 29 insertions(+), 32 deletions(-) diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index da47ecb27b2..c5dc65f6e81 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -118,9 +118,9 @@ impl Handle { /// thread pool. The thread pool is then responsible for polling the future /// until it completes. /// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the background - /// immediately when `spawn` is called. + /// The provided future will start running in the background immediately + /// when `spawn` is called, even if you don't await the returned + /// `JoinHandle`. /// /// See [module level][mod] documentation for more details. /// diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index 9ede0a7b0b5..198567390a8 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -163,9 +163,9 @@ impl Runtime { /// thread pool. The thread pool is then responsible for polling the future /// until it completes. /// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the - /// background immediately when `spawn` is called. + /// The provided future will start running in the background immediately + /// when `spawn` is called, even if you don't await the returned + /// `JoinHandle`. /// /// See [module level][mod] documentation for more details. /// diff --git a/tokio/src/runtime/task/join.rs b/tokio/src/runtime/task/join.rs index 5660575504e..11c4b9ba311 100644 --- a/tokio/src/runtime/task/join.rs +++ b/tokio/src/runtime/task/join.rs @@ -11,9 +11,9 @@ cfg_rt! { /// An owned permission to join on a task (await its termination). /// /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] - /// for a Tokio task rather than a thread. You do not need to `.await` the - /// `JoinHandle` to make the task execute — it will start running in the - /// background immediately. + /// for a Tokio task rather than a thread. Note that the background task + /// associated with this `JoinHandle` started running immediately when you + /// called spawn, even if you have not yet awaited the `JoinHandle`. /// /// A `JoinHandle` *detaches* the associated task when it is dropped, which /// means that there is no longer any handle to the task, and no way to `join` diff --git a/tokio/src/task/join_set.rs b/tokio/src/task/join_set.rs index f18130228dc..e6d8d62c3dc 100644 --- a/tokio/src/task/join_set.rs +++ b/tokio/src/task/join_set.rs @@ -120,9 +120,9 @@ impl JoinSet { /// Spawn the provided task on the `JoinSet`, returning an [`AbortHandle`] /// that can be used to remotely cancel the task. 
/// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the background - /// immediately when `spawn` is called. + /// The provided future will start running in the background immediately + /// when this method is called, even if you don't await anything on this + /// `JoinSet`. /// /// # Panics /// @@ -143,9 +143,9 @@ impl JoinSet { /// `JoinSet` returning an [`AbortHandle`] that can be used to remotely /// cancel the task. /// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the background - /// immediately when `spawn_on` is called. + /// The provided future will start running in the background immediately + /// when this method is called, even if you don't await anything on this + /// `JoinSet`. /// /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] @@ -162,9 +162,9 @@ impl JoinSet { /// `JoinSet`, returning an [`AbortHandle`] that can be used to remotely /// cancel the task. /// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the background - /// immediately when `spawn_local` is called. + /// The provided future will start running in the background immediately + /// when this method is called, even if you don't await anything on this + /// `JoinSet`. /// /// # Panics /// @@ -186,10 +186,8 @@ impl JoinSet { /// remotely cancel the task. /// /// Unlike the [`spawn_local`] method, this method may be used to spawn local - /// tasks when the `LocalSet` is _not_ running. You do not have to `.await` - /// the returned `JoinHandle` to make the provided future start execution. - /// It will start running immediately whenever the `LocalSet` is next - /// started. + /// tasks on a `LocalSet` that is _not_ currently running. The provided + /// future will start running whenever the `LocalSet` is next started. /// /// [`LocalSet`]: crate::task::LocalSet /// [`AbortHandle`]: crate::task::AbortHandle diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index cc4500a58e7..0675faa1884 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -289,9 +289,9 @@ cfg_rt! { /// /// The spawned future will run on the same thread that called `spawn_local`. /// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the background - /// immediately when `spawn_local` is called. + /// The provided future will start running in the background immediately + /// when `spawn_local` is called, even if you don't await the returned + /// `JoinHandle`. /// /// # Panics /// @@ -417,10 +417,9 @@ impl LocalSet { /// This task is guaranteed to be run on the current thread. /// /// Unlike the free function [`spawn_local`], this method may be used to - /// spawn local tasks when the `LocalSet` is _not_ running. You do not have - /// to `.await` the returned `JoinHandle` to make the provided future start - /// execution. It will start running immediately whenever the `LocalSet` is - /// next started. + /// spawn local tasks when the `LocalSet` is _not_ running. The provided + /// future will start running once the `LocalSet` is next started, even if + /// you don't await the returned `JoinHandle`. 
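// Illustrative sketch (assumed usage): a task spawned on an idle `LocalSet` is
// only queued; it starts once the set itself is driven — here by awaiting the
// `LocalSet`, which runs its tasks to completion.
#[tokio::main]
async fn main() {
    let local = tokio::task::LocalSet::new();
    local.spawn_local(async {
        println!("runs once the LocalSet is started");
    });
    // Nothing has run yet at this point.
    local.await;
}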
/// /// # Examples /// diff --git a/tokio/src/task/spawn.rs b/tokio/src/task/spawn.rs index 5db11a47994..66b0d673177 100644 --- a/tokio/src/task/spawn.rs +++ b/tokio/src/task/spawn.rs @@ -7,9 +7,9 @@ cfg_rt! { /// Spawns a new asynchronous task, returning a /// [`JoinHandle`](super::JoinHandle) for it. /// - /// You do not have to `.await` the returned `JoinHandle` to make the - /// provided future start execution. It will start running in the background - /// immediately when `spawn` is called. + /// The provided future will start running in the background immediately + /// when `spawn` is called, even if you don't await the returned + /// `JoinHandle`. /// /// Spawning a task enables the task to execute concurrently to other tasks. The /// spawned task may execute on the current thread, or it may be sent to a From d6ea7a742b92d3e516035a584ab9347a96be363a Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 14 Jan 2023 04:58:34 +0900 Subject: [PATCH 042/101] Add `T: Unpin` bound to ReadHalf::unsplit --- tokio/src/io/split.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tokio/src/io/split.rs b/tokio/src/io/split.rs index 8258a0f7a08..a3aa9d60c07 100644 --- a/tokio/src/io/split.rs +++ b/tokio/src/io/split.rs @@ -74,7 +74,10 @@ impl ReadHalf { /// same `split` operation this method will panic. /// This can be checked ahead of time by comparing the stream ID /// of the two halves. - pub fn unsplit(self, wr: WriteHalf) -> T { + pub fn unsplit(self, wr: WriteHalf) -> T + where + T: Unpin, + { if self.is_pair_of(&wr) { drop(wr); From 171ce0ff8d38b87a39c1e6a16fedc9a2373720e0 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 17 Jan 2023 23:00:38 +0900 Subject: [PATCH 043/101] chore: prepare Tokio v1.18.5 release --- README.md | 2 +- tokio/CHANGELOG.md | 10 +++++++++- tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0ef20ec0b18..46b1e089cfd 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.18.4", features = ["full"] } +tokio = { version = "1.18.5", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 8652e4a0e49..05f4d8152e0 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,11 @@ +# 1.18.5 (January 17, 2023) + +### Fixed + +- io: fix unsoundness in `ReadHalf::unsplit` ([#5375]) + +[#5375]: https://github.com/tokio-rs/tokio/pull/5375 + # 1.18.4 (January 3, 2022) ### Fixed @@ -136,7 +144,7 @@ performance improvements. - time: use bit manipulation instead of modulo to improve performance ([#4480]) - net: use `std::future::Ready` instead of our own `Ready` future ([#4271]) - replace deprecated `atomic::spin_loop_hint` with `hint::spin_loop` ([#4491]) -- fix miri failures in intrusive linked lists ([#4397]) +- fix miri failures in intrusive linked lists ([#4397]) ### Documented diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 42beeb73d64..3e6b95784b6 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. 
-version = "1.18.4" +version = "1.18.5" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 0ef20ec0b18..46b1e089cfd 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.18.4", features = ["full"] } +tokio = { version = "1.18.5", features = ["full"] } ``` Then, on your main.rs: From f3ce29a0038c04094c39915c0221c8e428fe7ac3 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 17 Jan 2023 11:09:42 -0800 Subject: [PATCH 044/101] chore: prepare Tokio v1.20.4 release --- README.md | 2 +- tokio/CHANGELOG.md | 10 ++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3113fadd8ab..288662716ef 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.20.3", features = ["full"] } +tokio = { version = "1.20.4", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 60266e42b35..ad6034a6aa2 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,13 @@ +# 1.20.4 (January 17, 2023) + +Forward ports 1.18.5 changes. + +### Fixed + +- io: fix unsoundness in `ReadHalf::unsplit` ([#5375]) + +[#5375]: https://github.com/tokio-rs/tokio/pull/5375 + # 1.20.3 (January 3, 2022) This release forward ports changes from 1.18.4. diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index a83937445b6..121cd5df5c6 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. -version = "1.20.3" +version = "1.20.4" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 3113fadd8ab..288662716ef 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.20.3", features = ["full"] } +tokio = { version = "1.20.4", features = ["full"] } ``` Then, on your main.rs: From 4f6a95badc181ff0fa3b8462350bf6cd45547792 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 17 Jan 2023 12:26:13 -0800 Subject: [PATCH 045/101] chore: prepare Tokio v1.24.2 release --- README.md | 2 +- tokio/CHANGELOG.md | 10 ++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7bf8befff04..a2edc8671e3 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.24.1", features = ["full"] } +tokio = { version = "1.24.2", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index fb6b1aea5c2..14d6a1c4193 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,13 @@ +# 1.24.2 (January 17, 2023) + +Forward ports 1.18.5 changes. + +### Fixed + +- io: fix unsoundness in `ReadHalf::unsplit` ([#5375]) + +[#5375]: https://github.com/tokio-rs/tokio/pull/5375 + # 1.24.1 (January 6, 2022) This release fixes a compilation failure on targets without `AtomicU64` when using rustc older than 1.63. 
([#5356]) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 3b685e2e9c3..df87cb68e55 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. -version = "1.24.1" +version = "1.24.2" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 7bf8befff04..a2edc8671e3 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.24.1", features = ["full"] } +tokio = { version = "1.24.2", features = ["full"] } ``` Then, on your main.rs: From f3f8e4f17fba222a3fe729a05c70e1dd99584e01 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 21 Jan 2023 12:17:04 +0900 Subject: [PATCH 046/101] chore: update nix to 0.26 (#5385) --- tokio/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index df87cb68e55..9fb0891d930 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -122,7 +122,7 @@ signal-hook-registry = { version = "1.1.1", optional = true } [target.'cfg(unix)'.dev-dependencies] libc = { version = "0.2.42" } -nix = { version = "0.24", default-features = false, features = ["fs", "socket"] } +nix = { version = "0.26", default-features = false, features = ["fs", "socket"] } [target.'cfg(windows)'.dependencies.windows-sys] version = "0.42.0" From c90757f07a1b15d7e26a710003d8e98a83db1ffc Mon Sep 17 00:00:00 2001 From: Chris Wailes Date: Sat, 21 Jan 2023 02:12:24 -0800 Subject: [PATCH 047/101] tests: condition unwinding tests on `cfg(panic = "unwind")` (#5384) --- tokio/tests/sync_broadcast.rs | 1 + tokio/tests/sync_watch.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/tokio/tests/sync_broadcast.rs b/tokio/tests/sync_broadcast.rs index 67c378b84a6..cd6692448bb 100644 --- a/tokio/tests/sync_broadcast.rs +++ b/tokio/tests/sync_broadcast.rs @@ -291,6 +291,7 @@ fn capacity_too_big() { } #[test] +#[cfg(panic = "unwind")] #[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding fn panic_in_clone() { use std::panic::{self, AssertUnwindSafe}; diff --git a/tokio/tests/sync_watch.rs b/tokio/tests/sync_watch.rs index 34f9b786710..d4f8ce87d95 100644 --- a/tokio/tests/sync_watch.rs +++ b/tokio/tests/sync_watch.rs @@ -213,6 +213,7 @@ fn reopened_after_subscribe() { } #[test] +#[cfg(panic = "unwind")] #[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding fn send_modify_panic() { let (tx, mut rx) = watch::channel("one"); From fe2dcb9453d6c7fbab0e32861c6bff65d77b172a Mon Sep 17 00:00:00 2001 From: jake <77554505+brxken128@users.noreply.github.com> Date: Fri, 27 Jan 2023 12:50:55 +0000 Subject: [PATCH 048/101] io: increase `MAX_BUF` from 16384 to 2MiB (#5397) --- tokio/src/fs/file/tests.rs | 8 ++++---- tokio/src/io/blocking.rs | 2 +- tokio/src/io/stdio_common.rs | 3 +-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/tokio/src/fs/file/tests.rs b/tokio/src/fs/file/tests.rs index 1c90a8d1541..7c61b3c4b31 100644 --- a/tokio/src/fs/file/tests.rs +++ b/tokio/src/fs/file/tests.rs @@ -231,12 +231,12 @@ fn flush_while_idle() { #[cfg_attr(miri, ignore)] // takes a really long time with miri fn read_with_buffer_larger_than_max() { // Chunks - let chunk_a = 16 * 1024; + let chunk_a = crate::io::blocking::MAX_BUF; let chunk_b = chunk_a * 2; let chunk_c = chunk_a * 3; let chunk_d = chunk_a * 4; - assert_eq!(chunk_d / 1024, 64); + 
assert_eq!(chunk_d / 1024 / 1024, 8); let mut data = vec![]; for i in 0..(chunk_d - 1) { @@ -303,12 +303,12 @@ fn read_with_buffer_larger_than_max() { #[cfg_attr(miri, ignore)] // takes a really long time with miri fn write_with_buffer_larger_than_max() { // Chunks - let chunk_a = 16 * 1024; + let chunk_a = crate::io::blocking::MAX_BUF; let chunk_b = chunk_a * 2; let chunk_c = chunk_a * 3; let chunk_d = chunk_a * 4; - assert_eq!(chunk_d / 1024, 64); + assert_eq!(chunk_d / 1024 / 1024, 8); let mut data = vec![]; for i in 0..(chunk_d - 1) { diff --git a/tokio/src/io/blocking.rs b/tokio/src/io/blocking.rs index f6db4500af1..416573e9732 100644 --- a/tokio/src/io/blocking.rs +++ b/tokio/src/io/blocking.rs @@ -26,7 +26,7 @@ pub(crate) struct Buf { pos: usize, } -pub(crate) const MAX_BUF: usize = 16 * 1024; +pub(crate) const MAX_BUF: usize = 2 * 1024 * 1024; #[derive(Debug)] enum State { diff --git a/tokio/src/io/stdio_common.rs b/tokio/src/io/stdio_common.rs index 2715ba7923a..b1cc61d7ac4 100644 --- a/tokio/src/io/stdio_common.rs +++ b/tokio/src/io/stdio_common.rs @@ -108,14 +108,13 @@ where #[cfg(test)] #[cfg(not(loom))] mod tests { + use crate::io::blocking::MAX_BUF; use crate::io::AsyncWriteExt; use std::io; use std::pin::Pin; use std::task::Context; use std::task::Poll; - const MAX_BUF: usize = 16 * 1024; - struct TextMockWriter; impl crate::io::AsyncWrite for TextMockWriter { From a18b3645f3b206eaa63200707422a4deaea984e8 Mon Sep 17 00:00:00 2001 From: Flavio Moreira <34199219+fdvmoreira@users.noreply.github.com> Date: Fri, 27 Jan 2023 15:05:48 +0000 Subject: [PATCH 049/101] chore: update year in LICENSE files (#5402) --- LICENSE | 2 +- tokio-macros/LICENSE | 2 +- tokio-stream/LICENSE | 2 +- tokio-test/LICENSE | 2 +- tokio-util/LICENSE | 2 +- tokio/LICENSE | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/LICENSE b/LICENSE index 8af5baf01ea..8bdf6bd60d3 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2022 Tokio Contributors +Copyright (c) 2023 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/tokio-macros/LICENSE b/tokio-macros/LICENSE index a3753c034a9..12d1037fd0c 100644 --- a/tokio-macros/LICENSE +++ b/tokio-macros/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2022 Tokio Contributors +Copyright (c) 2023 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/tokio-stream/LICENSE b/tokio-stream/LICENSE index 8af5baf01ea..8bdf6bd60d3 100644 --- a/tokio-stream/LICENSE +++ b/tokio-stream/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2022 Tokio Contributors +Copyright (c) 2023 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/tokio-test/LICENSE b/tokio-test/LICENSE index 8af5baf01ea..8bdf6bd60d3 100644 --- a/tokio-test/LICENSE +++ b/tokio-test/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2022 Tokio Contributors +Copyright (c) 2023 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/tokio-util/LICENSE b/tokio-util/LICENSE index 8af5baf01ea..8bdf6bd60d3 100644 --- a/tokio-util/LICENSE +++ b/tokio-util/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2022 Tokio Contributors +Copyright (c) 2023 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git 
a/tokio/LICENSE b/tokio/LICENSE index 8af5baf01ea..8bdf6bd60d3 100644 --- a/tokio/LICENSE +++ b/tokio/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2022 Tokio Contributors +Copyright (c) 2023 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated From 1f50c57185d28a1d118adc22bf587541e3b7edcc Mon Sep 17 00:00:00 2001 From: Jonathan Schwender <55576758+jschwe@users.noreply.github.com> Date: Fri, 27 Jan 2023 20:44:47 +0100 Subject: [PATCH 050/101] metrics: fix `steal_count` docs, add `steal_operations` (#5330) --- tokio/src/runtime/metrics/batch.rs | 13 +++- tokio/src/runtime/metrics/mock.rs | 1 + tokio/src/runtime/metrics/runtime.rs | 59 +++++++++++++++++-- tokio/src/runtime/metrics/worker.rs | 6 +- .../runtime/scheduler/multi_thread/queue.rs | 1 + 5 files changed, 72 insertions(+), 8 deletions(-) diff --git a/tokio/src/runtime/metrics/batch.rs b/tokio/src/runtime/metrics/batch.rs index f1c3fa6b747..4e6b28d68ed 100644 --- a/tokio/src/runtime/metrics/batch.rs +++ b/tokio/src/runtime/metrics/batch.rs @@ -11,9 +11,12 @@ pub(crate) struct MetricsBatch { /// Number of times the worker woke w/o doing work. noop_count: u64, - /// Number of times stolen. + /// Number of tasks stolen. steal_count: u64, + /// Number of times tasks where stolen. + steal_operations: u64, + /// Number of tasks that were polled by the worker. poll_count: u64, @@ -39,6 +42,7 @@ impl MetricsBatch { park_count: 0, noop_count: 0, steal_count: 0, + steal_operations: 0, poll_count: 0, poll_count_on_last_park: 0, local_schedule_count: 0, @@ -52,6 +56,9 @@ impl MetricsBatch { worker.park_count.store(self.park_count, Relaxed); worker.noop_count.store(self.noop_count, Relaxed); worker.steal_count.store(self.steal_count, Relaxed); + worker + .steal_operations + .store(self.steal_operations, Relaxed); worker.poll_count.store(self.poll_count, Relaxed); worker @@ -98,6 +105,10 @@ cfg_rt_multi_thread! { self.steal_count += by as u64; } + pub(crate) fn incr_steal_operations(&mut self) { + self.steal_operations += 1; + } + pub(crate) fn incr_overflow_count(&mut self) { self.overflow_count += 1; } diff --git a/tokio/src/runtime/metrics/mock.rs b/tokio/src/runtime/metrics/mock.rs index 6b9cf704f42..c388dc06981 100644 --- a/tokio/src/runtime/metrics/mock.rs +++ b/tokio/src/runtime/metrics/mock.rs @@ -38,6 +38,7 @@ impl MetricsBatch { cfg_rt_multi_thread! { impl MetricsBatch { pub(crate) fn incr_steal_count(&mut self, _by: u16) {} + pub(crate) fn incr_steal_operations(&mut self) {} pub(crate) fn incr_overflow_count(&mut self) {} } } diff --git a/tokio/src/runtime/metrics/runtime.rs b/tokio/src/runtime/metrics/runtime.rs index dee14a45729..d29cb3d48ff 100644 --- a/tokio/src/runtime/metrics/runtime.rs +++ b/tokio/src/runtime/metrics/runtime.rs @@ -210,10 +210,57 @@ impl RuntimeMetrics { .load(Relaxed) } + /// Returns the number of tasks the given worker thread stole from + /// another worker thread. + /// + /// This metric only applies to the **multi-threaded** runtime and will + /// always return `0` when using the current thread runtime. + /// + /// The worker steal count starts at zero when the runtime is created and + /// increases by `N` each time the worker has processed its scheduled queue + /// and successfully steals `N` more pending tasks from another worker. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. 
The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_steal_count(0); + /// println!("worker 0 has stolen {} tasks", n); + /// } + /// ``` + pub fn worker_steal_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .steal_count + .load(Relaxed) + } + /// Returns the number of times the given worker thread stole tasks from /// another worker thread. /// - /// This metric only applies to the **multi-threaded** runtime and will always return `0` when using the current thread runtime. + /// This metric only applies to the **multi-threaded** runtime and will + /// always return `0` when using the current thread runtime. /// /// The worker steal count starts at zero when the runtime is created and /// increases by one each time the worker has processed its scheduled queue @@ -243,15 +290,15 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_noop_count(0); + /// let n = metrics.worker_steal_operations(0); /// println!("worker 0 has stolen tasks {} times", n); /// } /// ``` - pub fn worker_steal_count(&self, worker: usize) -> u64 { + pub fn worker_steal_operations(&self, worker: usize) -> u64 { self.handle .inner .worker_metrics(worker) - .steal_count + .steal_operations .load(Relaxed) } @@ -328,8 +375,8 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_poll_count(0); - /// println!("worker 0 has polled {} tasks", n); + /// let n = metrics.worker_total_busy_duration(0); + /// println!("worker 0 was busy for a total of {:?}", n); /// } /// ``` pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { diff --git a/tokio/src/runtime/metrics/worker.rs b/tokio/src/runtime/metrics/worker.rs index ec58de6b3a0..a40c76effbf 100644 --- a/tokio/src/runtime/metrics/worker.rs +++ b/tokio/src/runtime/metrics/worker.rs @@ -17,9 +17,12 @@ pub(crate) struct WorkerMetrics { /// Number of times the worker woke then parked again without doing work. pub(crate) noop_count: AtomicU64, - /// Number of times the worker attempted to steal. + /// Number of tasks the worker stole. pub(crate) steal_count: AtomicU64, + /// Number of times the worker stole + pub(crate) steal_operations: AtomicU64, + /// Number of tasks the worker polled. 
pub(crate) poll_count: AtomicU64, @@ -43,6 +46,7 @@ impl WorkerMetrics { park_count: AtomicU64::new(0), noop_count: AtomicU64::new(0), steal_count: AtomicU64::new(0), + steal_operations: AtomicU64::new(0), poll_count: AtomicU64::new(0), overflow_count: AtomicU64::new(0), busy_duration_total: AtomicU64::new(0), diff --git a/tokio/src/runtime/scheduler/multi_thread/queue.rs b/tokio/src/runtime/scheduler/multi_thread/queue.rs index 958c32716f4..faf56db2e91 100644 --- a/tokio/src/runtime/scheduler/multi_thread/queue.rs +++ b/tokio/src/runtime/scheduler/multi_thread/queue.rs @@ -353,6 +353,7 @@ impl Steal { } dst_metrics.incr_steal_count(n as u16); + dst_metrics.incr_steal_operations(); // We are returning a task here n -= 1; From 88b1eb54fb66461b9f3524f4b5316241a019279a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 29 Jan 2023 16:44:31 -0500 Subject: [PATCH 051/101] chore: prepare Tokio v1.25.0 release (#5408) --- README.md | 2 +- tokio/CHANGELOG.md | 18 ++++++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a2edc8671e3..462e6e8b0b7 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.24.2", features = ["full"] } +tokio = { version = "1.25.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 14d6a1c4193..39a57fde717 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,21 @@ +# 1.25.0 (January 28, 2023) + +### Fixed + +- rt: fix runtime metrics reporting ([#5330]) + +### Added + +- sync: add `broadcast::Sender::len` ([#5343]) + +### Changed + +- fs: increase maximum read buffer size to 2MiB ([#5397]) + +[#5330]: https://github.com/tokio-rs/tokio/pull/5330 +[#5343]: https://github.com/tokio-rs/tokio/pull/5343 +[#5397]: https://github.com/tokio-rs/tokio/pull/5397 + # 1.24.2 (January 17, 2023) Forward ports 1.18.5 changes. diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 9fb0891d930..0f6d30a687d 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.24.2" +version = "1.25.0" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index a2edc8671e3..462e6e8b0b7 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.24.2", features = ["full"] } +tokio = { version = "1.25.0", features = ["full"] } ``` Then, on your main.rs: From 80ec80165bfa41c197353983edcbeb64bd45a869 Mon Sep 17 00:00:00 2001 From: mTsBucy1 <85930053+mTsBucy1@users.noreply.github.com> Date: Mon, 30 Jan 2023 16:47:04 +0100 Subject: [PATCH 052/101] task: clarify `process::Command` docs (#5406) (#5413) --- tokio/src/process/mod.rs | 95 ++++++++++++++++++++++++++++------------ 1 file changed, 67 insertions(+), 28 deletions(-) diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 66e42127717..feada06f6d3 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -309,7 +309,8 @@ impl Command { /// /// ```no_run /// use tokio::process::Command; - /// let command = Command::new("sh"); + /// let mut command = Command::new("sh"); + /// # let _ = command.output(); // assert borrow checker /// ``` /// /// [rust-lang/rust#37519]: https://github.com/rust-lang/rust/issues/37519 @@ -328,16 +329,20 @@ impl Command { /// Only one argument can be passed per use. So instead of: /// /// ```no_run - /// tokio::process::Command::new("sh") - /// .arg("-C /path/to/repo"); + /// let mut command = tokio::process::Command::new("sh"); + /// command.arg("-C /path/to/repo"); + /// + /// # let _ = command.output(); // assert borrow checker /// ``` /// /// usage would be: /// /// ```no_run - /// tokio::process::Command::new("sh") - /// .arg("-C") - /// .arg("/path/to/repo"); + /// let mut command = tokio::process::Command::new("sh"); + /// command.arg("-C"); + /// command.arg("/path/to/repo"); + /// + /// # let _ = command.output(); // assert borrow checker /// ``` /// /// To pass multiple arguments see [`args`]. 
@@ -349,11 +354,15 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") + /// let output = Command::new("ls") /// .arg("-l") - /// .arg("-a"); + /// .arg("-a") + /// .output().await.unwrap(); + /// # } + /// /// ``` pub fn arg>(&mut self, arg: S) -> &mut Command { self.std.arg(arg); @@ -371,10 +380,13 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .args(&["-l", "-a"]); + /// let output = Command::new("ls") + /// .args(&["-l", "-a"]) + /// .output().await.unwrap(); + /// # } /// ``` pub fn args(&mut self, args: I) -> &mut Command where @@ -395,10 +407,13 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .env("PATH", "/bin"); + /// let output = Command::new("ls") + /// .env("PATH", "/bin") + /// .output().await.unwrap(); + /// # } /// ``` pub fn env(&mut self, key: K, val: V) -> &mut Command where @@ -416,6 +431,7 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// use std::process::{Stdio}; /// use std::env; @@ -426,11 +442,13 @@ impl Command { /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH" /// ).collect(); /// - /// let command = Command::new("printenv") + /// let output = Command::new("printenv") /// .stdin(Stdio::null()) /// .stdout(Stdio::inherit()) /// .env_clear() - /// .envs(&filtered_env); + /// .envs(&filtered_env) + /// .output().await.unwrap(); + /// # } /// ``` pub fn envs(&mut self, vars: I) -> &mut Command where @@ -449,10 +467,13 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .env_remove("PATH"); + /// let output = Command::new("ls") + /// .env_remove("PATH") + /// .output().await.unwrap(); + /// # } /// ``` pub fn env_remove>(&mut self, key: K) -> &mut Command { self.std.env_remove(key); @@ -466,10 +487,13 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .env_clear(); + /// let output = Command::new("ls") + /// .env_clear() + /// .output().await.unwrap(); + /// # } /// ``` pub fn env_clear(&mut self) -> &mut Command { self.std.env_clear(); @@ -493,10 +517,13 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .current_dir("/bin"); + /// let output = Command::new("ls") + /// .current_dir("/bin") + /// .output().await.unwrap(); + /// # } /// ``` pub fn current_dir>(&mut self, dir: P) -> &mut Command { self.std.current_dir(dir); @@ -516,11 +543,14 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use std::process::{Stdio}; /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .stdin(Stdio::null()); + /// let output = Command::new("ls") + /// .stdin(Stdio::null()) + /// .output().await.unwrap(); + /// # } /// ``` pub fn stdin>(&mut self, cfg: T) -> &mut Command { self.std.stdin(cfg); @@ -540,11 +570,14 @@ impl Command { /// Basic 
usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// use std::process::Stdio; /// - /// let command = Command::new("ls") - /// .stdout(Stdio::null()); + /// let output = Command::new("ls") + /// .stdout(Stdio::null()) + /// .output().await.unwrap(); + /// # } /// ``` pub fn stdout>(&mut self, cfg: T) -> &mut Command { self.std.stdout(cfg); @@ -564,11 +597,14 @@ impl Command { /// Basic usage: /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// use std::process::{Stdio}; /// - /// let command = Command::new("ls") - /// .stderr(Stdio::null()); + /// let output = Command::new("ls") + /// .stderr(Stdio::null()) + /// .output().await.unwrap(); + /// # } /// ``` pub fn stderr>(&mut self, cfg: T) -> &mut Command { self.std.stderr(cfg); @@ -707,10 +743,13 @@ impl Command { /// [`tokio::process::Command`]: crate::process::Command /// /// ```no_run + /// # async fn test() { // allow using await /// use tokio::process::Command; /// - /// let command = Command::new("ls") - /// .process_group(0); + /// let output = Command::new("ls") + /// .process_group(0) + /// .output().await.unwrap(); + /// # } /// ``` #[cfg(unix)] #[cfg(tokio_unstable)] From a7945b469d634cf205094d8a1661720358622cc0 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 7 Feb 2023 03:58:19 +0900 Subject: [PATCH 053/101] ci: update Cirrus CI config (#5428) * Use image_family for FreeBSD image in Cirrus CI * Do not trigger Cirrus CI on branches other than master and tokio-.* --- .cirrus.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index bdc44e0c93c..416ad879fc4 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,5 +1,7 @@ +only_if: $CIRRUS_TAG == '' && ($CIRRUS_PR != '' || $CIRRUS_BRANCH == 'master' || $CIRRUS_BRANCH =~ 'tokio-.*') +auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' freebsd_instance: - image: freebsd-12-4-release-amd64 + image_family: freebsd-12-4 env: RUST_STABLE: stable RUST_NIGHTLY: nightly-2022-10-25 @@ -11,7 +13,6 @@ env: # the system's binaries, so the environment shouldn't matter. task: name: FreeBSD 64-bit - auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh @@ -26,7 +27,6 @@ task: task: name: FreeBSD docs - auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' env: RUSTFLAGS: --cfg docsrs --cfg tokio_unstable RUSTDOCFLAGS: --cfg docsrs --cfg tokio_unstable -Dwarnings @@ -44,7 +44,6 @@ task: task: name: FreeBSD 32-bit - auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh From abf5d28f2ccaf55ea264f1bea7a1ac1bac6fe98b Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Tue, 7 Feb 2023 09:45:14 -0800 Subject: [PATCH 054/101] rt: remove Arc from Clock (#5434) This patch removes `Arc` from Tokio's internal clock source. Instead of cloning `Clock` when needed, a reference is passed into functions that need to get the current instant. 
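// A minimal sketch (simplified stand-in types, not the real internals) of the
// ownership change described above: `TimeSource` no longer stores a cloned
// clock handle; callers pass `&Clock` at each call site instead.
use std::time::Instant;

struct Clock(Instant);

impl Clock {
    fn now(&self) -> Instant {
        self.0
    }
}

struct TimeSource {
    start_time: Instant,
}

impl TimeSource {
    // Borrow the clock only long enough to read the start time.
    fn new(clock: &Clock) -> Self {
        TimeSource {
            start_time: clock.now(),
        }
    }

    // The current tick is computed from a clock reference supplied by the caller.
    fn now_ms(&self, clock: &Clock) -> u128 {
        clock.now().duration_since(self.start_time).as_millis()
    }
}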
--- tokio/src/runtime/driver.rs | 13 ++-- tokio/src/runtime/time/entry.rs | 5 ++ tokio/src/runtime/time/mod.rs | 16 +++-- tokio/src/runtime/time/source.rs | 8 +-- tokio/src/runtime/time/tests/mod.rs | 15 ++-- tokio/src/time/clock.rs | 103 +++++++++++++++++++--------- tokio/src/time/sleep.rs | 6 +- 7 files changed, 104 insertions(+), 62 deletions(-) diff --git a/tokio/src/runtime/driver.rs b/tokio/src/runtime/driver.rs index 4fb6b8783f4..572fdefb0da 100644 --- a/tokio/src/runtime/driver.rs +++ b/tokio/src/runtime/driver.rs @@ -45,8 +45,7 @@ impl Driver { let clock = create_clock(cfg.enable_pause_time, cfg.start_paused); - let (time_driver, time_handle) = - create_time_driver(cfg.enable_time, io_stack, clock.clone()); + let (time_driver, time_handle) = create_time_driver(cfg.enable_time, io_stack, &clock); Ok(( Self { inner: time_driver }, @@ -111,10 +110,8 @@ impl Handle { .expect("A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers.") } - cfg_test_util! { - pub(crate) fn clock(&self) -> &Clock { - &self.clock - } + pub(crate) fn clock(&self) -> &Clock { + &self.clock } } } @@ -289,7 +286,7 @@ cfg_time! { fn create_time_driver( enable: bool, io_stack: IoStack, - clock: Clock, + clock: &Clock, ) -> (TimeDriver, TimeHandle) { if enable { let (driver, handle) = crate::runtime::time::Driver::new(io_stack, clock); @@ -337,7 +334,7 @@ cfg_not_time! { fn create_time_driver( _enable: bool, io_stack: IoStack, - _clock: Clock, + _clock: &Clock, ) -> (TimeDriver, TimeHandle) { (io_stack, ()) } diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index f0d613a3bb4..69f93823551 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -579,6 +579,11 @@ impl TimerEntry { pub(crate) fn driver(&self) -> &super::Handle { self.driver.driver().time() } + + #[cfg(all(tokio_unstable, feature = "tracing"))] + pub(crate) fn clock(&self) -> &super::Clock { + self.driver.driver().clock() + } } impl TimerHandle { diff --git a/tokio/src/runtime/time/mod.rs b/tokio/src/runtime/time/mod.rs index f81cab8cc35..215714dd576 100644 --- a/tokio/src/runtime/time/mod.rs +++ b/tokio/src/runtime/time/mod.rs @@ -125,7 +125,7 @@ impl Driver { /// thread and `time_source` to get the current time and convert to ticks. /// /// Specifying the source of time is useful when testing. - pub(crate) fn new(park: IoStack, clock: Clock) -> (Driver, Handle) { + pub(crate) fn new(park: IoStack, clock: &Clock) -> (Driver, Handle) { let time_source = TimeSource::new(clock); let handle = Handle { @@ -186,7 +186,7 @@ impl Driver { match next_wake { Some(when) => { - let now = handle.time_source.now(); + let now = handle.time_source.now(rt_handle.clock()); // Note that we effectively round up to 1ms here - this avoids // very short-duration microsecond-resolution sleeps that the OS // might treat as zero-length. @@ -214,13 +214,13 @@ impl Driver { } // Process pending timers after waking up - handle.process(); + handle.process(rt_handle.clock()); } cfg_test_util! { fn park_thread_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) { let handle = rt_handle.time(); - let clock = &handle.time_source.clock; + let clock = rt_handle.clock(); if clock.can_auto_advance() { self.park.park_timeout(rt_handle, Duration::from_secs(0)); @@ -231,7 +231,9 @@ impl Driver { // advance the clock. 
if !handle.did_wake() { // Simulate advancing time - clock.advance(duration); + if let Err(msg) = clock.advance(duration) { + panic!("{}", msg); + } } } else { self.park.park_timeout(rt_handle, duration); @@ -248,8 +250,8 @@ impl Driver { impl Handle { /// Runs timer related logic, and returns the next wakeup time - pub(self) fn process(&self) { - let now = self.time_source().now(); + pub(self) fn process(&self, clock: &Clock) { + let now = self.time_source().now(clock); self.process_at_time(now) } diff --git a/tokio/src/runtime/time/source.rs b/tokio/src/runtime/time/source.rs index e6788edcaf8..39483b5c0ad 100644 --- a/tokio/src/runtime/time/source.rs +++ b/tokio/src/runtime/time/source.rs @@ -5,15 +5,13 @@ use std::convert::TryInto; /// A structure which handles conversion from Instants to u64 timestamps. #[derive(Debug)] pub(crate) struct TimeSource { - pub(crate) clock: Clock, start_time: Instant, } impl TimeSource { - pub(crate) fn new(clock: Clock) -> Self { + pub(crate) fn new(clock: &Clock) -> Self { Self { start_time: clock.now(), - clock, } } @@ -36,7 +34,7 @@ impl TimeSource { Duration::from_millis(t) } - pub(crate) fn now(&self) -> u64 { - self.instant_to_tick(self.clock.now()) + pub(crate) fn now(&self, clock: &Clock) -> u64 { + self.instant_to_tick(clock.now()) } } diff --git a/tokio/src/runtime/time/tests/mod.rs b/tokio/src/runtime/time/tests/mod.rs index 88c7d768d46..2468a1ae67b 100644 --- a/tokio/src/runtime/time/tests/mod.rs +++ b/tokio/src/runtime/time/tests/mod.rs @@ -62,12 +62,13 @@ fn single_timer() { thread::yield_now(); - let handle = handle.inner.driver().time(); + let time = handle.inner.driver().time(); + let clock = handle.inner.driver().clock(); // This may or may not return Some (depending on how it races with the // thread). If it does return None, however, the timer should complete // synchronously. - handle.process_at_time(handle.time_source().now() + 2_000_000_000); + time.process_at_time(time.time_source().now(clock) + 2_000_000_000); jh.join().unwrap(); }) @@ -97,10 +98,11 @@ fn drop_timer() { thread::yield_now(); - let handle = handle.inner.driver().time(); + let time = handle.inner.driver().time(); + let clock = handle.inner.driver().clock(); // advance 2s in the future. - handle.process_at_time(handle.time_source().now() + 2_000_000_000); + time.process_at_time(time.time_source().now(clock) + 2_000_000_000); jh.join().unwrap(); }) @@ -132,10 +134,11 @@ fn change_waker() { thread::yield_now(); - let handle = handle.inner.driver().time(); + let time = handle.inner.driver().time(); + let clock = handle.inner.driver().clock(); // advance 2s - handle.process_at_time(handle.time_source().now() + 2_000_000_000); + time.process_at_time(time.time_source().now(clock) + 2_000_000_000); jh.join().unwrap(); }) diff --git a/tokio/src/time/clock.rs b/tokio/src/time/clock.rs index cd11a67527f..1e273554ec7 100644 --- a/tokio/src/time/clock.rs +++ b/tokio/src/time/clock.rs @@ -29,30 +29,40 @@ cfg_not_test_util! { cfg_test_util! { use crate::time::{Duration, Instant}; - use crate::loom::sync::{Arc, Mutex}; + use crate::loom::sync::Mutex; cfg_rt! 
{ - fn clock() -> Option { + #[track_caller] + fn with_clock(f: impl FnOnce(Option<&Clock>) -> Result) -> R { use crate::runtime::Handle; - match Handle::try_current() { - Ok(handle) => Some(handle.inner.driver().clock().clone()), - Err(ref e) if e.is_missing_context() => None, + let res = match Handle::try_current() { + Ok(handle) => f(Some(handle.inner.driver().clock())), + Err(ref e) if e.is_missing_context() => f(None), Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), + }; + + match res { + Ok(ret) => ret, + Err(msg) => panic!("{}", msg), } } } cfg_not_rt! { - fn clock() -> Option { - None + #[track_caller] + fn with_clock(f: impl FnOnce(Option<&Clock>) -> Result) -> R { + match f(None) { + Ok(ret) => ret, + Err(msg) => panic!("{}", msg), + } } } /// A handle to a source of time. - #[derive(Debug, Clone)] + #[derive(Debug)] pub(crate) struct Clock { - inner: Arc>, + inner: Mutex, } #[derive(Debug)] @@ -107,8 +117,12 @@ cfg_test_util! { /// [`advance`]: crate::time::advance #[track_caller] pub fn pause() { - let clock = clock().expect("time cannot be frozen from outside the Tokio runtime"); - clock.pause(); + with_clock(|maybe_clock| { + match maybe_clock { + Some(clock) => clock.pause(), + None => Err("time cannot be frozen from outside the Tokio runtime"), + } + }) } /// Resumes time. @@ -122,14 +136,21 @@ cfg_test_util! { /// runtime. #[track_caller] pub fn resume() { - let clock = clock().expect("time cannot be frozen from outside the Tokio runtime"); - let mut inner = clock.inner.lock(); + with_clock(|maybe_clock| { + let clock = match maybe_clock { + Some(clock) => clock, + None => return Err("time cannot be frozen from outside the Tokio runtime"), + }; - if inner.unfrozen.is_some() { - panic!("time is not frozen"); - } + let mut inner = clock.inner.lock(); - inner.unfrozen = Some(std::time::Instant::now()); + if inner.unfrozen.is_some() { + return Err("time is not frozen"); + } + + inner.unfrozen = Some(std::time::Instant::now()); + Ok(()) + }) } /// Advances time. @@ -164,19 +185,27 @@ cfg_test_util! { /// /// [`sleep`]: fn@crate::time::sleep pub async fn advance(duration: Duration) { - let clock = clock().expect("time cannot be frozen from outside the Tokio runtime"); - clock.advance(duration); + with_clock(|maybe_clock| { + let clock = match maybe_clock { + Some(clock) => clock, + None => return Err("time cannot be frozen from outside the Tokio runtime"), + }; + + clock.advance(duration) + }); crate::task::yield_now().await; } /// Returns the current instant, factoring in frozen time. pub(crate) fn now() -> Instant { - if let Some(clock) = clock() { - clock.now() - } else { - Instant::from_std(std::time::Instant::now()) - } + with_clock(|maybe_clock| { + Ok(if let Some(clock) = maybe_clock { + clock.now() + } else { + Instant::from_std(std::time::Instant::now()) + }) + }) } impl Clock { @@ -186,34 +215,40 @@ cfg_test_util! { let now = std::time::Instant::now(); let clock = Clock { - inner: Arc::new(Mutex::new(Inner { + inner: Mutex::new(Inner { enable_pausing, base: now, unfrozen: Some(now), auto_advance_inhibit_count: 0, - })), + }), }; if start_paused { - clock.pause(); + if let Err(msg) = clock.pause() { + panic!("{}", msg); + } } clock } - #[track_caller] - pub(crate) fn pause(&self) { + pub(crate) fn pause(&self) -> Result<(), &'static str> { let mut inner = self.inner.lock(); if !inner.enable_pausing { drop(inner); // avoid poisoning the lock - panic!("`time::pause()` requires the `current_thread` Tokio runtime. 
\ + return Err("`time::pause()` requires the `current_thread` Tokio runtime. \ This is the default Runtime used by `#[tokio::test]."); } - let elapsed = inner.unfrozen.as_ref().expect("time is already frozen").elapsed(); + let elapsed = match inner.unfrozen.as_ref() { + Some(v) => v.elapsed(), + None => return Err("time is already frozen") + }; inner.base += elapsed; inner.unfrozen = None; + + Ok(()) } /// Temporarily stop auto-advancing the clock (see `tokio::time::pause`). @@ -232,15 +267,15 @@ cfg_test_util! { inner.unfrozen.is_none() && inner.auto_advance_inhibit_count == 0 } - #[track_caller] - pub(crate) fn advance(&self, duration: Duration) { + pub(crate) fn advance(&self, duration: Duration) -> Result<(), &'static str> { let mut inner = self.inner.lock(); if inner.unfrozen.is_some() { - panic!("time is not frozen"); + return Err("time is not frozen"); } inner.base += duration; + Ok(()) } pub(crate) fn now(&self) -> Instant { diff --git a/tokio/src/time/sleep.rs b/tokio/src/time/sleep.rs index 0a012e25015..ee46a186c01 100644 --- a/tokio/src/time/sleep.rs +++ b/tokio/src/time/sleep.rs @@ -261,10 +261,11 @@ impl Sleep { #[cfg(all(tokio_unstable, feature = "tracing"))] let inner = { + let clock = handle.driver().clock(); let handle = &handle.driver().time(); let time_source = handle.time_source(); let deadline_tick = time_source.deadline_to_tick(deadline); - let duration = deadline_tick.saturating_sub(time_source.now()); + let duration = deadline_tick.saturating_sub(time_source.now(clock)); let location = location.expect("should have location if tracing"); let resource_span = tracing::trace_span!( @@ -370,8 +371,9 @@ impl Sleep { tracing::trace_span!("runtime.resource.async_op.poll"); let duration = { + let clock = me.entry.clock(); let time_source = me.entry.driver().time_source(); - let now = time_source.now(); + let now = time_source.now(clock); let deadline_tick = time_source.deadline_to_tick(deadline); deadline_tick.saturating_sub(now) }; From 5653b4583ccab2e0065a3fc0950de8718149df56 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 7 Feb 2023 21:32:27 +0300 Subject: [PATCH 055/101] io: remove erroneous wake call in `SinkWriter` (#5436) --- tokio-util/src/io/sink_writer.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/tokio-util/src/io/sink_writer.rs b/tokio-util/src/io/sink_writer.rs index 5d1acc499cc..f2af262c4d0 100644 --- a/tokio-util/src/io/sink_writer.rs +++ b/tokio-util/src/io/sink_writer.rs @@ -1,3 +1,4 @@ +use futures_core::ready; use futures_sink::Sink; use pin_project_lite::pin_project; @@ -98,19 +99,11 @@ where buf: &[u8], ) -> Poll> { let mut this = self.project(); - match this.inner.as_mut().poll_ready(cx) { - Poll::Ready(Ok(())) => { - if let Err(e) = this.inner.as_mut().start_send(buf) { - Poll::Ready(Err(e.into())) - } else { - Poll::Ready(Ok(buf.len())) - } - } - Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), - Poll::Pending => { - cx.waker().wake_by_ref(); - Poll::Pending - } + + ready!(this.inner.as_mut().poll_ready(cx).map_err(Into::into))?; + match this.inner.as_mut().start_send(buf) { + Ok(()) => Poll::Ready(Ok(buf.len())), + Err(e) => Poll::Ready(Err(e.into())), } } From 1dcfe1cc9bb32fdc57ead3ff642d58bc8f282fd4 Mon Sep 17 00:00:00 2001 From: Finomnis Date: Wed, 8 Feb 2023 16:45:02 +0100 Subject: [PATCH 056/101] ci: add semver checking to CI (#5437) --- .github/workflows/ci.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 
2813ede8a86..704f7b4e56f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,6 +42,7 @@ jobs: - test-unstable - miri - asan + - semver - cross-check - cross-test - no-atomic-u64 @@ -230,6 +231,28 @@ jobs: # Ignore `trybuild` errors as they are irrelevant and flaky on nightly TRYBUILD: overwrite + semver: + name: semver + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install Rust ${{ env.rust_stable }} + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.rust_stable }} + - name: Install cargo-semver-checks + uses: taiki-e/install-action@v2 + with: + tool: cargo-semver-checks + - name: Check semver compatibility + run: | + cargo semver-checks check-release \ + --exclude benches \ + --exclude examples \ + --exclude stress-test \ + --exclude tests-build \ + --exclude tests-integration + cross-check: name: cross-check runs-on: ubuntu-latest From d7d5d05333f7970c2d75bfb20371450b5ad838d7 Mon Sep 17 00:00:00 2001 From: Nathaniel Brough Date: Thu, 9 Feb 2023 02:08:50 -0800 Subject: [PATCH 057/101] tests: port proptest fuzz harnesses to use cargo-fuzz (#5392) This change ports fuzz tests from the black-box fuzzing framework, proptest-rs over to use the grey-box fuzzing framework cargo-fuzz. Refs: #5391 --- CONTRIBUTING.md | 27 ++++++- tokio-stream/Cargo.toml | 3 - tokio-stream/fuzz/.gitignore | 4 + tokio-stream/fuzz/Cargo.toml | 29 +++++++ .../fuzz/fuzz_targets/fuzz_stream_map.rs | 80 +++++++++++++++++++ tokio-stream/tests/stream_stream_map.rs | 57 ------------- tokio/Cargo.toml | 1 - tokio/fuzz/.gitignore | 4 + tokio/fuzz/Cargo.toml | 29 +++++++ tokio/fuzz/fuzz_targets/fuzz_linked_list.rs | 7 ++ tokio/src/fuzz.rs | 1 + tokio/src/lib.rs | 3 + tokio/src/util/linked_list.rs | 24 ++---- 13 files changed, 190 insertions(+), 79 deletions(-) create mode 100644 tokio-stream/fuzz/.gitignore create mode 100644 tokio-stream/fuzz/Cargo.toml create mode 100644 tokio-stream/fuzz/fuzz_targets/fuzz_stream_map.rs create mode 100644 tokio/fuzz/.gitignore create mode 100644 tokio/fuzz/Cargo.toml create mode 100644 tokio/fuzz/fuzz_targets/fuzz_linked_list.rs create mode 100644 tokio/src/fuzz.rs diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a3c1c91084e..7681ef9da23 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -187,7 +187,7 @@ LOOM_MAX_PREEMPTIONS=1 RUSTFLAGS="--cfg loom" \ You can run miri tests with ``` -MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-tag-raw-pointers" PROPTEST_CASES=10 \ +MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-tag-raw-pointers" \ cargo +nightly miri test --features full --lib ``` @@ -209,6 +209,31 @@ utilities available to use in tests, no matter the crate being tested. The best strategy for writing a new integration test is to look at existing integration tests in the crate and follow the style. +#### Fuzz tests + +Some of our crates include a set of fuzz tests, this will be marked by a +directory `fuzz`. It is a good idea to run fuzz tests after each change. +To get started with fuzz testing you'll need to install +[cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). + +`cargo install cargo-fuzz` + +To list the available fuzzing harnesses you can run; + +```bash +$ cd tokio +$ cargo fuzz list +fuzz_linked_list +```` + +Running a fuzz test is as simple as; + +`cargo fuzz run fuzz_linked_list` + +**NOTE**: Keep in mind that by default when running a fuzz test the fuzz +harness will run forever and will only exit if you `ctrl-c` or it finds +a bug. 
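To make the workflow above concrete, this is roughly what a harness file looks like; the `fuzz_example` target name and its body are hypothetical, while the real targets added by this change are `fuzz_linked_list` and `fuzz_stream_map`:

```rust
// fuzz/fuzz_targets/fuzz_example.rs -- hypothetical target, for illustration only.
#![no_main]

use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // Exercise the code under test with the fuzzer-chosen bytes. Any panic or
    // sanitizer report counts as a crash, and the offending input is saved
    // under `fuzz/artifacts/` so it can be replayed later.
    let _ = std::str::from_utf8(data);
});
```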
+ #### Documentation tests Ideally, every API has at least one [documentation test] that demonstrates how to diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 6dfa9784f79..01acec3cd73 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -38,9 +38,6 @@ parking_lot = "0.12.0" tokio-test = { path = "../tokio-test" } futures = { version = "0.3", default-features = false } -[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] -proptest = "1" - [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] diff --git a/tokio-stream/fuzz/.gitignore b/tokio-stream/fuzz/.gitignore new file mode 100644 index 00000000000..1a45eee7760 --- /dev/null +++ b/tokio-stream/fuzz/.gitignore @@ -0,0 +1,4 @@ +target +corpus +artifacts +coverage diff --git a/tokio-stream/fuzz/Cargo.toml b/tokio-stream/fuzz/Cargo.toml new file mode 100644 index 00000000000..e1003ee5783 --- /dev/null +++ b/tokio-stream/fuzz/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tokio-stream-fuzz" +version = "0.0.0" +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" +tokio-test = { path = "../../tokio-test" } + +[dependencies.tokio-stream] +path = ".." + + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "fuzz_stream_map" +path = "fuzz_targets/fuzz_stream_map.rs" +test = false +doc = false diff --git a/tokio-stream/fuzz/fuzz_targets/fuzz_stream_map.rs b/tokio-stream/fuzz/fuzz_targets/fuzz_stream_map.rs new file mode 100644 index 00000000000..4c3a2d04aaf --- /dev/null +++ b/tokio-stream/fuzz/fuzz_targets/fuzz_stream_map.rs @@ -0,0 +1,80 @@ +#![no_main] + +use libfuzzer_sys::fuzz_target; +use std::pin::Pin; + +use tokio_stream::{self as stream, pending, Stream, StreamExt, StreamMap}; +use tokio_test::{assert_ok, assert_pending, assert_ready, task}; + +macro_rules! assert_ready_some { + ($($t:tt)*) => { + match assert_ready!($($t)*) { + Some(v) => v, + None => panic!("expected `Some`, got `None`"), + } + }; +} + +macro_rules! 
assert_ready_none { + ($($t:tt)*) => { + match assert_ready!($($t)*) { + None => {} + Some(v) => panic!("expected `None`, got `Some({:?})`", v), + } + }; +} + +fn pin_box + 'static, U>(s: T) -> Pin>> { + Box::pin(s) +} + +fuzz_target!(|data: &[u8]| { + use std::task::{Context, Poll}; + + struct DidPoll { + did_poll: bool, + inner: T, + } + + impl Stream for DidPoll { + type Item = T::Item; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.did_poll = true; + Pin::new(&mut self.inner).poll_next(cx) + } + } + + for _ in 0..10 { + let mut map = task::spawn(StreamMap::new()); + let mut expect = 0; + + for (i, is_empty) in data.iter().map(|x| *x != 0).enumerate() { + let inner = if is_empty { + pin_box(stream::empty::<()>()) + } else { + expect += 1; + pin_box(stream::pending::<()>()) + }; + + let stream = DidPoll { + did_poll: false, + inner, + }; + + map.insert(i, stream); + } + + if expect == 0 { + assert_ready_none!(map.poll_next()); + } else { + assert_pending!(map.poll_next()); + + assert_eq!(expect, map.values().count()); + + for stream in map.values() { + assert!(stream.did_poll); + } + } + } +}); diff --git a/tokio-stream/tests/stream_stream_map.rs b/tokio-stream/tests/stream_stream_map.rs index ffc489b32ef..b6b87e9d0ac 100644 --- a/tokio-stream/tests/stream_stream_map.rs +++ b/tokio-stream/tests/stream_stream_map.rs @@ -325,63 +325,6 @@ fn one_ready_many_none() { } } -#[cfg(not(target_os = "wasi"))] -proptest::proptest! { - #[test] - fn fuzz_pending_complete_mix(kinds: Vec) { - use std::task::{Context, Poll}; - - struct DidPoll { - did_poll: bool, - inner: T, - } - - impl Stream for DidPoll { - type Item = T::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll> - { - self.did_poll = true; - Pin::new(&mut self.inner).poll_next(cx) - } - } - - for _ in 0..10 { - let mut map = task::spawn(StreamMap::new()); - let mut expect = 0; - - for (i, &is_empty) in kinds.iter().enumerate() { - let inner = if is_empty { - pin_box(stream::empty::<()>()) - } else { - expect += 1; - pin_box(stream::pending::<()>()) - }; - - let stream = DidPoll { - did_poll: false, - inner, - }; - - map.insert(i, stream); - } - - if expect == 0 { - assert_ready_none!(map.poll_next()); - } else { - assert_pending!(map.poll_next()); - - assert_eq!(expect, map.values().count()); - - for stream in map.values() { - assert!(stream.did_poll); - } - } - } - } -} - fn pin_box + 'static, U>(s: T) -> Pin>> { Box::pin(s) } diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 0f6d30a687d..fcf1bea4bf8 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -147,7 +147,6 @@ tempfile = "3.1.0" async-stream = "0.3" [target.'cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))'.dev-dependencies] -proptest = "1" socket2 = "0.4" [target.'cfg(not(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown")))'.dev-dependencies] diff --git a/tokio/fuzz/.gitignore b/tokio/fuzz/.gitignore new file mode 100644 index 00000000000..1a45eee7760 --- /dev/null +++ b/tokio/fuzz/.gitignore @@ -0,0 +1,4 @@ +target +corpus +artifacts +coverage diff --git a/tokio/fuzz/Cargo.toml b/tokio/fuzz/Cargo.toml new file mode 100644 index 00000000000..4b47d7bdf94 --- /dev/null +++ b/tokio/fuzz/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tokio-fuzz" +version = "0.0.0" +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" + +[dependencies.tokio] +path = ".." 
+features = ["fs","net","process","rt","sync","signal","time"] + + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "fuzz_linked_list" +path = "fuzz_targets/fuzz_linked_list.rs" +test = false +doc = false diff --git a/tokio/fuzz/fuzz_targets/fuzz_linked_list.rs b/tokio/fuzz/fuzz_targets/fuzz_linked_list.rs new file mode 100644 index 00000000000..276e16dc775 --- /dev/null +++ b/tokio/fuzz/fuzz_targets/fuzz_linked_list.rs @@ -0,0 +1,7 @@ +#![no_main] + +use libfuzzer_sys::fuzz_target; + +fuzz_target!(|data: &[u8]| { + tokio::fuzz::fuzz_linked_list(data); +}); diff --git a/tokio/src/fuzz.rs b/tokio/src/fuzz.rs new file mode 100644 index 00000000000..d89718f0e0e --- /dev/null +++ b/tokio/src/fuzz.rs @@ -0,0 +1 @@ +pub use crate::util::linked_list::tests::fuzz_linked_list; diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 05767d017bc..aa94ff020da 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -631,3 +631,6 @@ cfg_macros! { #[cfg(feature = "io-util")] #[cfg(test)] fn is_unpin() {} + +#[cfg(fuzzing)] +pub mod fuzz; diff --git a/tokio/src/util/linked_list.rs b/tokio/src/util/linked_list.rs index b46bd6d4d9e..c9d99e97247 100644 --- a/tokio/src/util/linked_list.rs +++ b/tokio/src/util/linked_list.rs @@ -352,9 +352,9 @@ impl fmt::Debug for Pointers { } } -#[cfg(test)] +#[cfg(any(test, fuzzing))] #[cfg(not(loom))] -mod tests { +pub(crate) mod tests { use super::*; use std::pin::Pin; @@ -623,31 +623,21 @@ mod tests { } } - #[cfg(not(tokio_wasm))] - proptest::proptest! { - #[test] - fn fuzz_linked_list(ops: Vec) { - run_fuzz(ops); - } - } - - #[cfg(not(tokio_wasm))] - fn run_fuzz(ops: Vec) { - use std::collections::VecDeque; - - #[derive(Debug)] + #[cfg(fuzzing)] + pub fn fuzz_linked_list(ops: &[u8]) { enum Op { Push, Pop, Remove(usize), } + use std::collections::VecDeque; let ops = ops .iter() - .map(|i| match i % 3 { + .map(|i| match i % 3u8 { 0 => Op::Push, 1 => Op::Pop, - 2 => Op::Remove(i / 3), + 2 => Op::Remove((i / 3u8) as usize), _ => unreachable!(), }) .collect::>(); From 061325ba7ea952c78b23abe685e40a3aac132a2b Mon Sep 17 00:00:00 2001 From: Valentin Date: Thu, 9 Feb 2023 11:14:01 +0100 Subject: [PATCH 058/101] task: clarify what happens to spawned work during runtime shutdown (#5394) --- tokio/src/runtime/mod.rs | 8 ++++--- tokio/src/runtime/runtime.rs | 46 ++++++++++++++++++++---------------- 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index b6f43ea1754..8cd99917546 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -156,9 +156,11 @@ //! multi-thread scheduler spawns threads to schedule tasks and for `spawn_blocking` //! calls. //! -//! While the `Runtime` is active, threads may shutdown after periods of being -//! idle. Once `Runtime` is dropped, all runtime threads are forcibly shutdown. -//! Any tasks that have not yet completed will be dropped. +//! While the `Runtime` is active, threads may shut down after periods of being +//! idle. Once `Runtime` is dropped, all runtime threads have usually been +//! terminated, but in the presence of unstoppable spawned work are not +//! guaranteed to have been terminated. See the +//! [struct level documentation](Runtime#shutdown) for more details. //! //! [tasks]: crate::task //! 
[`Runtime`]: Runtime diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index 198567390a8..c11fc585363 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -24,16 +24,29 @@ cfg_rt_multi_thread! { /// /// # Shutdown /// -/// Shutting down the runtime is done by dropping the value. The current -/// thread will block until the shut down operation has completed. +/// Shutting down the runtime is done by dropping the value, or calling +/// [`Runtime::shutdown_background`] or [`Runtime::shutdown_timeout`]. /// -/// * Drain any scheduled work queues. -/// * Drop any futures that have not yet completed. -/// * Drop the reactor. +/// Tasks spawned through [`Runtime::spawn`] keep running until they yield. +/// Then they are dropped. They are not *guaranteed* to run to completion, but +/// *might* do so if they do not yield until completion. /// -/// Once the reactor has dropped, any outstanding I/O resources bound to -/// that reactor will no longer function. Calling any method on them will -/// result in an error. +/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running +/// until they return. +/// +/// The thread initiating the shutdown blocks until all spawned work has been +/// stopped. This can take an indefinite amount of time. The `Drop` +/// implementation waits forever for this. +/// +/// `shutdown_background` and `shutdown_timeout` can be used if waiting forever +/// is undesired. When the timeout is reached, spawned work that did not stop +/// in time and threads running it are leaked. The work continues to run until +/// one of the stopping conditions is fulfilled, but the thread initiating the +/// shutdown is unblocked. +/// +/// Once the runtime has been dropped, any outstanding I/O resources bound to +/// it will no longer function. Calling any method on them will result in an +/// error. /// /// # Sharing /// @@ -322,18 +335,9 @@ impl Runtime { } /// Shuts down the runtime, waiting for at most `duration` for all spawned - /// task to shutdown. + /// work to stop. /// - /// Usually, dropping a `Runtime` handle is sufficient as tasks are able to - /// shutdown in a timely fashion. However, dropping a `Runtime` will wait - /// indefinitely for all tasks to terminate, and there are cases where a long - /// blocking task has been spawned, which can block dropping `Runtime`. - /// - /// In this case, calling `shutdown_timeout` with an explicit wait timeout - /// can work. The `shutdown_timeout` will signal all tasks to shutdown and - /// will wait for at most `duration` for all spawned tasks to terminate. If - /// `timeout` elapses before all tasks are dropped, the function returns and - /// outstanding tasks are potentially leaked. + /// See the [struct level documentation](Runtime#shutdown) for more details. /// /// # Examples /// @@ -362,7 +366,7 @@ impl Runtime { self.blocking_pool.shutdown(Some(duration)); } - /// Shuts down the runtime, without waiting for any spawned tasks to shutdown. + /// Shuts down the runtime, without waiting for any spawned work to stop. /// /// This can be useful if you want to drop a runtime from within another runtime. /// Normally, dropping a runtime will block indefinitely for spawned blocking tasks @@ -373,6 +377,8 @@ impl Runtime { /// may result in a resource leak (in that any blocking tasks are still running until they /// return. /// + /// See the [struct level documentation](Runtime#shutdown) for more details. 
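To make the trade-off described above concrete, here is a small usage sketch (not part of the patch) that bounds shutdown with `shutdown_timeout`; any work still running when the timeout fires is leaked rather than waited on:

```rust
use std::time::Duration;
use tokio::runtime::Runtime;

fn main() {
    let rt = Runtime::new().unwrap();

    // Unstoppable spawned work: a blocking task that never returns.
    rt.spawn_blocking(|| loop {
        std::thread::sleep(Duration::from_secs(1));
    });

    // Dropping `rt` would block this thread forever. Instead, give spawned
    // work 100ms to stop, then unblock and leak whatever is still running.
    rt.shutdown_timeout(Duration::from_millis(100));
}
```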
+ /// /// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`. /// /// ``` From 09b2653e71f6518bf6a62ac054612e129da13a38 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Thu, 9 Feb 2023 19:16:12 +0900 Subject: [PATCH 059/101] chore: update windows-sys to 0.45 (#5386) --- examples/Cargo.toml | 2 +- tokio/Cargo.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b35c587b6f3..ceb2a36a18e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -25,7 +25,7 @@ once_cell = "1.5.2" rand = "0.8.3" [target.'cfg(windows)'.dev-dependencies.windows-sys] -version = "0.42.0" +version = "0.45" [[example]] name = "chat" diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index fcf1bea4bf8..0e96ccb7cd7 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -125,11 +125,11 @@ libc = { version = "0.2.42" } nix = { version = "0.26", default-features = false, features = ["fs", "socket"] } [target.'cfg(windows)'.dependencies.windows-sys] -version = "0.42.0" +version = "0.45" optional = true [target.'cfg(docsrs)'.dependencies.windows-sys] -version = "0.42.0" +version = "0.45" features = [ "Win32_Foundation", "Win32_Security_Authorization", From d96bbf04655bc7e40f87967996f0ae8976fe5aae Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 9 Feb 2023 10:19:02 +0000 Subject: [PATCH 060/101] time: don't store deadline twice in sleep entries (#5410) --- tokio/src/runtime/time/entry.rs | 21 +++++++++++++++------ tokio/src/time/sleep.rs | 9 +++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index 69f93823551..4780cfcad32 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -298,9 +298,11 @@ pub(crate) struct TimerEntry { /// This is manipulated only under the inner mutex. TODO: Can we use loom /// cells for this? inner: StdUnsafeCell, - /// Initial deadline for the timer. This is used to register on the first + /// Deadline for the timer. This is used to register on the first /// poll, as we can't register prior to being pinned. - initial_deadline: Option, + deadline: Instant, + /// Whether the deadline has been registered. + registered: bool, /// Ensure the type is !Unpin _m: std::marker::PhantomPinned, } @@ -504,7 +506,8 @@ impl TimerEntry { Self { driver, inner: StdUnsafeCell::new(TimerShared::new()), - initial_deadline: Some(deadline), + deadline, + registered: false, _m: std::marker::PhantomPinned, } } @@ -513,8 +516,12 @@ impl TimerEntry { unsafe { &*self.inner.get() } } + pub(crate) fn deadline(&self) -> Instant { + self.deadline + } + pub(crate) fn is_elapsed(&self) -> bool { - !self.inner().state.might_be_registered() && self.initial_deadline.is_none() + !self.inner().state.might_be_registered() && self.registered } /// Cancels and deregisters the timer. This operation is irreversible. 
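From the caller's side, the single stored deadline is what `Sleep::deadline` and `reset` now operate on. A usage sketch with the public API (illustration only, not from the patch):

```rust
use tokio::time::{sleep, Duration, Instant};

#[tokio::main]
async fn main() {
    let sleep = sleep(Duration::from_millis(10));
    tokio::pin!(sleep);

    // `deadline()` reads the one copy kept in the timer entry.
    let first = sleep.deadline();

    // `reset` rewrites that same copy; there is no second field to keep in sync.
    sleep.as_mut().reset(Instant::now() + Duration::from_millis(50));
    assert!(sleep.deadline() > first);

    sleep.await;
}
```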
@@ -545,7 +552,8 @@ impl TimerEntry { } pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant) { - unsafe { self.as_mut().get_unchecked_mut() }.initial_deadline = None; + unsafe { self.as_mut().get_unchecked_mut() }.deadline = new_time; + unsafe { self.as_mut().get_unchecked_mut() }.registered = true; let tick = self.driver().time_source().deadline_to_tick(new_time); @@ -567,7 +575,8 @@ impl TimerEntry { panic!("{}", crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR); } - if let Some(deadline) = self.initial_deadline { + if !self.registered { + let deadline = self.deadline; self.as_mut().reset(deadline); } diff --git a/tokio/src/time/sleep.rs b/tokio/src/time/sleep.rs index ee46a186c01..370a98b9902 100644 --- a/tokio/src/time/sleep.rs +++ b/tokio/src/time/sleep.rs @@ -235,7 +235,6 @@ pin_project! { cfg_trace! { #[derive(Debug)] struct Inner { - deadline: Instant, ctx: trace::AsyncOpTracingCtx, } } @@ -243,7 +242,6 @@ cfg_trace! { cfg_not_trace! { #[derive(Debug)] struct Inner { - deadline: Instant, } } @@ -297,11 +295,11 @@ impl Sleep { resource_span, }; - Inner { deadline, ctx } + Inner { ctx } }; #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = Inner { deadline }; + let inner = Inner {}; Sleep { inner, entry } } @@ -312,7 +310,7 @@ impl Sleep { /// Returns the instant at which the future will complete. pub fn deadline(&self) -> Instant { - self.inner.deadline + self.entry.deadline() } /// Returns `true` if `Sleep` has elapsed. @@ -358,7 +356,6 @@ impl Sleep { fn reset_inner(self: Pin<&mut Self>, deadline: Instant) { let mut me = self.project(); me.entry.as_mut().reset(deadline); - (me.inner).deadline = deadline; #[cfg(all(tokio_unstable, feature = "tracing"))] { From d6dbefcdc043da552615bf2c8ad1f3a9580a1735 Mon Sep 17 00:00:00 2001 From: Caio Date: Thu, 9 Feb 2023 07:20:09 -0300 Subject: [PATCH 061/101] sync: mark lock guards with `#[clippy::has_significant_drop]` (#5422) --- tokio/src/sync/mutex.rs | 3 +++ tokio/src/sync/rwlock/owned_read_guard.rs | 1 + tokio/src/sync/rwlock/owned_write_guard.rs | 1 + tokio/src/sync/rwlock/owned_write_guard_mapped.rs | 1 + tokio/src/sync/rwlock/read_guard.rs | 1 + tokio/src/sync/rwlock/write_guard.rs | 1 + tokio/src/sync/rwlock/write_guard_mapped.rs | 1 + tokio/src/sync/semaphore.rs | 2 ++ 8 files changed, 11 insertions(+) diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 024755c83b5..2243566509a 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -141,6 +141,7 @@ pub struct Mutex { /// /// The lock is automatically released whenever the guard is dropped, at which /// point `lock` will succeed yet again. +#[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] @@ -163,6 +164,7 @@ pub struct MutexGuard<'a, T: ?Sized> { /// point `lock` will succeed yet again. /// /// [`Arc`]: std::sync::Arc +#[clippy::has_significant_drop] pub struct OwnedMutexGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] resource_span: tracing::Span, @@ -174,6 +176,7 @@ pub struct OwnedMutexGuard { /// This can be used to hold a subfield of the protected data. 
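For context on what `#[clippy::has_significant_drop]` buys users of these guards: it feeds clippy's significant-drop lints (for example `clippy::significant_drop_in_scrutinee`, a clippy lint rather than anything added by this patch), which can then warn when a guard is held longer than intended. A hedged sketch of the pattern those lints care about:

```rust
use tokio::sync::Mutex;

async fn bump(counter: &Mutex<u32>) -> u32 {
    // Keeping the guard scoped to the critical section makes the unlock point
    // obvious; because the guard type is now marked as having a significant
    // drop, clippy can flag cases where a guard is accidentally kept alive,
    // e.g. when it is created inside a `match` scrutinee.
    let mut guard = counter.lock().await;
    *guard += 1;
    *guard
} // guard dropped here, releasing the lock
```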
/// /// [`MutexGuard::map`]: method@MutexGuard::map +#[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct MappedMutexGuard<'a, T: ?Sized> { s: &'a semaphore::Semaphore, diff --git a/tokio/src/sync/rwlock/owned_read_guard.rs b/tokio/src/sync/rwlock/owned_read_guard.rs index 27b71bd988b..e457a1b663a 100644 --- a/tokio/src/sync/rwlock/owned_read_guard.rs +++ b/tokio/src/sync/rwlock/owned_read_guard.rs @@ -14,6 +14,7 @@ use std::sync::Arc; /// /// [`read_owned`]: method@crate::sync::RwLock::read_owned /// [`RwLock`]: struct@crate::sync::RwLock +#[clippy::has_significant_drop] pub struct OwnedRwLockReadGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, diff --git a/tokio/src/sync/rwlock/owned_write_guard.rs b/tokio/src/sync/rwlock/owned_write_guard.rs index dbedab4cbb2..0a8d7db6107 100644 --- a/tokio/src/sync/rwlock/owned_write_guard.rs +++ b/tokio/src/sync/rwlock/owned_write_guard.rs @@ -15,6 +15,7 @@ use std::sync::Arc; /// /// [`write_owned`]: method@crate::sync::RwLock::write_owned /// [`RwLock`]: struct@crate::sync::RwLock +#[clippy::has_significant_drop] pub struct OwnedRwLockWriteGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, diff --git a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs index 55a24d96ac3..c986fd5eee8 100644 --- a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs @@ -14,6 +14,7 @@ use std::sync::Arc; /// /// [mapping]: method@crate::sync::OwnedRwLockWriteGuard::map /// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard +#[clippy::has_significant_drop] pub struct OwnedRwLockMappedWriteGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, diff --git a/tokio/src/sync/rwlock/read_guard.rs b/tokio/src/sync/rwlock/read_guard.rs index f5fc1d6de81..6f2eed1c864 100644 --- a/tokio/src/sync/rwlock/read_guard.rs +++ b/tokio/src/sync/rwlock/read_guard.rs @@ -12,6 +12,7 @@ use std::ops; /// /// [`read`]: method@crate::sync::RwLock::read /// [`RwLock`]: struct@crate::sync::RwLock +#[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockReadGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] diff --git a/tokio/src/sync/rwlock/write_guard.rs b/tokio/src/sync/rwlock/write_guard.rs index cefa183d996..d584bb49c56 100644 --- a/tokio/src/sync/rwlock/write_guard.rs +++ b/tokio/src/sync/rwlock/write_guard.rs @@ -14,6 +14,7 @@ use std::ops; /// /// [`write`]: method@crate::sync::RwLock::write /// [`RwLock`]: struct@crate::sync::RwLock +#[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockWriteGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] diff --git a/tokio/src/sync/rwlock/write_guard_mapped.rs b/tokio/src/sync/rwlock/write_guard_mapped.rs index b5c644a9e83..1f5d279111f 100644 --- a/tokio/src/sync/rwlock/write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/write_guard_mapped.rs @@ -13,6 +13,7 @@ use std::ops; /// /// [mapping]: method@crate::sync::RwLockWriteGuard::map /// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard +#[clippy::has_significant_drop] pub struct RwLockMappedWriteGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, diff --git 
a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index 6e5a1a88abb..1eee175088e 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -89,6 +89,7 @@ pub struct Semaphore { /// /// [`acquire`]: crate::sync::Semaphore::acquire() #[must_use] +#[clippy::has_significant_drop] #[derive(Debug)] pub struct SemaphorePermit<'a> { sem: &'a Semaphore, @@ -101,6 +102,7 @@ pub struct SemaphorePermit<'a> { /// /// [`acquire_owned`]: crate::sync::Semaphore::acquire_owned() #[must_use] +#[clippy::has_significant_drop] #[derive(Debug)] pub struct OwnedSemaphorePermit { sem: Arc, From 8b44077ebcf1b1077e76958af3f4bf865406f6de Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 9 Feb 2023 13:07:52 +0100 Subject: [PATCH 062/101] sync: make CancellationToken UnwindSafe (#5438) --- tokio-util/src/sync/cancellation_token.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tokio-util/src/sync/cancellation_token.rs b/tokio-util/src/sync/cancellation_token.rs index fbceb2eeba2..c44be69864f 100644 --- a/tokio-util/src/sync/cancellation_token.rs +++ b/tokio-util/src/sync/cancellation_token.rs @@ -55,6 +55,9 @@ pub struct CancellationToken { inner: Arc, } +impl std::panic::UnwindSafe for CancellationToken {} +impl std::panic::RefUnwindSafe for CancellationToken {} + pin_project! { /// A Future that is resolved once the corresponding [`CancellationToken`] /// is cancelled. From 74fb9e387aa3604193ac7da0b103c96ff90c73ee Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 9 Feb 2023 17:35:53 +0100 Subject: [PATCH 063/101] chore: prepare tokio-util v0.7.5 (#5442) --- tokio-util/CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ tokio-util/Cargo.toml | 2 +- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/tokio-util/CHANGELOG.md b/tokio-util/CHANGELOG.md index ca378b1ed20..d6dd694e322 100644 --- a/tokio-util/CHANGELOG.md +++ b/tokio-util/CHANGELOG.md @@ -1,3 +1,38 @@ +# 0.7.5 (February 9, 2023) + +This release fixes an accidental breaking change where `UnwindSafe` was +accidentally removed from `CancellationToken`. 
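The restored impls matter for code that crosses an unwind boundary. A small sketch (not from the patch) of what compiles again now that `CancellationToken` is `UnwindSafe` and `RefUnwindSafe`:

```rust
use std::panic::catch_unwind;
use tokio_util::sync::CancellationToken;

fn main() {
    let token = CancellationToken::new();
    let handle = token.clone();

    // Without the `UnwindSafe`/`RefUnwindSafe` impls, capturing the token here
    // would require wrapping the closure in `AssertUnwindSafe`.
    let result = catch_unwind(move || handle.cancel());

    assert!(result.is_ok());
    // Clones share the same underlying token, so the original sees the cancellation.
    assert!(token.is_cancelled());
}
```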
+ +### Added +- codec: add `Framed::backpressure_boundary` ([#5124]) +- io: add `InspectReader` and `InspectWriter` ([#5033]) +- io: add `tokio_util::io::{CopyToBytes, SinkWriter}` ([#5070], [#5436]) +- io: impl `std::io::BufRead` on `SyncIoBridge` ([#5265]) +- sync: add `PollSemaphore::poll_acquire_many` ([#5137]) +- sync: add owned future for `CancellationToken` ([#5153]) +- time: add `DelayQueue::try_remove` ([#5052]) + +### Fixed +- codec: fix `LengthDelimitedCodec` buffer over-reservation ([#4997]) +- sync: impl `UnwindSafe` on `CancellationToken` ([#5438]) +- util: remove `Encoder` bound on `FramedParts` constructor ([#5280]) + +### Documented +- io: add lines example for `StreamReader` ([#5145]) + +[#4997]: https://github.com/tokio-rs/tokio/pull/4997 +[#5033]: https://github.com/tokio-rs/tokio/pull/5033 +[#5052]: https://github.com/tokio-rs/tokio/pull/5052 +[#5070]: https://github.com/tokio-rs/tokio/pull/5070 +[#5124]: https://github.com/tokio-rs/tokio/pull/5124 +[#5137]: https://github.com/tokio-rs/tokio/pull/5137 +[#5145]: https://github.com/tokio-rs/tokio/pull/5145 +[#5153]: https://github.com/tokio-rs/tokio/pull/5153 +[#5265]: https://github.com/tokio-rs/tokio/pull/5265 +[#5280]: https://github.com/tokio-rs/tokio/pull/5280 +[#5436]: https://github.com/tokio-rs/tokio/pull/5436 +[#5438]: https://github.com/tokio-rs/tokio/pull/5438 + # 0.7.4 (September 8, 2022) ### Added diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index d5a9f748c18..6ba99ec050e 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-util" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-util-0.7.x" git tag. -version = "0.7.4" +version = "0.7.5" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] From 36d2233579a179ba078b44a7e0d91206fe959bf0 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 10 Feb 2023 10:13:37 +0100 Subject: [PATCH 064/101] chore: fix dependency on Tokio (#5445) --- tokio-util/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 6ba99ec050e..6a42184014a 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -34,7 +34,7 @@ rt = ["tokio/rt", "tokio/sync", "futures-util", "hashbrown"] __docs_rs = ["futures-util"] [dependencies] -tokio = { version = "1.21.0", path = "../tokio", features = ["sync"] } +tokio = { version = "1.22.0", path = "../tokio", features = ["sync"] } bytes = "1.0.0" futures-core = "0.3.0" futures-sink = "0.3.0" From 01bb1ecf4dee073da65dfede4c845ac27c555af0 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 10 Feb 2023 10:48:36 +0100 Subject: [PATCH 065/101] chore: prepare tokio-util v0.7.6 (#5447) --- tokio-util/CHANGELOG.md | 7 +++++++ tokio-util/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tokio-util/CHANGELOG.md b/tokio-util/CHANGELOG.md index d6dd694e322..bcbba174668 100644 --- a/tokio-util/CHANGELOG.md +++ b/tokio-util/CHANGELOG.md @@ -1,3 +1,10 @@ +# 0.7.6 (February 10, 2023) + +This release fixes a compilation failure in 0.7.5 when it is used together with +Tokio version 1.21 and unstable features are enabled. 
([#5445]) + +[#5445]: https://github.com/tokio-rs/tokio/pull/5445 + # 0.7.5 (February 9, 2023) This release fixes an accidental breaking change where `UnwindSafe` was diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 6a42184014a..e9a840f97e2 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-util" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-util-0.7.x" git tag. -version = "0.7.5" +version = "0.7.6" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] From 36fdccc3bcaad6d5ff5a046ce5b4e28a5e23dfd3 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 12 Feb 2023 11:55:41 +0100 Subject: [PATCH 066/101] util: Revert "remove `Encoder` bound on `FramedParts` constructor" (#5450) This reverts commit ae69d11d1f9f17c536f35369ab597cebb4bd0159. --- tokio-util/src/codec/framed.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tokio-util/src/codec/framed.rs b/tokio-util/src/codec/framed.rs index 043b4384193..8a344f90db2 100644 --- a/tokio-util/src/codec/framed.rs +++ b/tokio-util/src/codec/framed.rs @@ -368,7 +368,10 @@ pub struct FramedParts { impl FramedParts { /// Create a new, default, `FramedParts` - pub fn new(io: T, codec: U) -> FramedParts { + pub fn new(io: T, codec: U) -> FramedParts + where + U: Encoder, + { FramedParts { io, codec, From e629ad7c9ac48df0cede0620c44e0ea652f45af9 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 12 Feb 2023 12:42:38 +0100 Subject: [PATCH 067/101] chore: prepare tokio-util v0.7.7 (#5451) --- tokio-util/CHANGELOG.md | 7 +++++++ tokio-util/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tokio-util/CHANGELOG.md b/tokio-util/CHANGELOG.md index bcbba174668..0c11b2144eb 100644 --- a/tokio-util/CHANGELOG.md +++ b/tokio-util/CHANGELOG.md @@ -1,3 +1,10 @@ +# 0.7.7 (February 12, 2023) + +This release reverts the removal of the `Encoder` bound on the `FramedParts` +constructor from [#5280] since it turned out to be a breaking change. ([#5450]) + +[#5450]: https://github.com/tokio-rs/tokio/pull/5450 + # 0.7.6 (February 10, 2023) This release fixes a compilation failure in 0.7.5 when it is used together with diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index e9a840f97e2..267662b4d5b 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-util" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-util-0.7.x" git tag. 
-version = "0.7.6" +version = "0.7.7" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] From d1da6c20d8dc2add746a8194ec1c6517cea20d59 Mon Sep 17 00:00:00 2001 From: Finomnis Date: Tue, 14 Feb 2023 10:09:59 +0100 Subject: [PATCH 068/101] ci: always assume minor release in semver check (#5455) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 704f7b4e56f..7da76abb1b7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -247,6 +247,7 @@ jobs: - name: Check semver compatibility run: | cargo semver-checks check-release \ + --release-type minor \ --exclude benches \ --exclude examples \ --exclude stress-test \ From 28d6f4d5093ba5f565e6836b81e3db79ca7a196a Mon Sep 17 00:00:00 2001 From: Tim de Jager Date: Tue, 14 Feb 2023 13:44:05 +0100 Subject: [PATCH 069/101] task: fix wording with 'unsend' (#5452) --- tokio/src/task/local.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index 0675faa1884..b709c215579 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -34,14 +34,14 @@ cfg_rt! { /// async fn main() { /// // `Rc` does not implement `Send`, and thus may not be sent between /// // threads safely. - /// let unsend_data = Rc::new("my unsend data..."); + /// let nonsend_data = Rc::new("my nonsend data..."); /// - /// let unsend_data = unsend_data.clone(); - /// // Because the `async` block here moves `unsend_data`, the future is `!Send`. + /// let nonsend_data = nonsend_data.clone(); + /// // Because the `async` block here moves `nonsend_data`, the future is `!Send`. /// // Since `tokio::spawn` requires the spawned future to implement `Send`, this /// // will not compile. /// tokio::spawn(async move { - /// println!("{}", unsend_data); + /// println!("{}", nonsend_data); /// // ... /// }).await.unwrap(); /// } @@ -60,18 +60,18 @@ cfg_rt! { /// /// #[tokio::main] /// async fn main() { - /// let unsend_data = Rc::new("my unsend data..."); + /// let nonsend_data = Rc::new("my nonsend data..."); /// /// // Construct a local task set that can run `!Send` futures. /// let local = task::LocalSet::new(); /// /// // Run the local task set. /// local.run_until(async move { - /// let unsend_data = unsend_data.clone(); + /// let nonsend_data = nonsend_data.clone(); /// // `spawn_local` ensures that the future is spawned on the local /// // task set. /// task::spawn_local(async move { - /// println!("{}", unsend_data); + /// println!("{}", nonsend_data); /// // ... /// }).await.unwrap(); /// }).await; @@ -94,18 +94,18 @@ cfg_rt! { /// /// #[tokio::main] /// async fn main() { - /// let unsend_data = Rc::new("world"); + /// let nonsend_data = Rc::new("world"); /// let local = task::LocalSet::new(); /// - /// let unsend_data2 = unsend_data.clone(); + /// let nonsend_data2 = nonsend_data.clone(); /// local.spawn_local(async move { /// // ... - /// println!("hello {}", unsend_data2) + /// println!("hello {}", nonsend_data2) /// }); /// /// local.spawn_local(async move { /// time::sleep(time::Duration::from_millis(100)).await; - /// println!("goodbye {}", unsend_data) + /// println!("goodbye {}", nonsend_data) /// }); /// /// // ... @@ -309,15 +309,15 @@ cfg_rt! 
{ /// /// #[tokio::main] /// async fn main() { - /// let unsend_data = Rc::new("my unsend data..."); + /// let nonsend_data = Rc::new("my nonsend data..."); /// /// let local = task::LocalSet::new(); /// /// // Run the local task set. /// local.run_until(async move { - /// let unsend_data = unsend_data.clone(); + /// let nonsend_data = nonsend_data.clone(); /// task::spawn_local(async move { - /// println!("{}", unsend_data); + /// println!("{}", nonsend_data); /// // ... /// }).await.unwrap(); /// }).await; From e106c4d32b9f32b5b49d409ddabd1bee7a21c80e Mon Sep 17 00:00:00 2001 From: Christopher Hunt Date: Wed, 15 Feb 2023 10:05:10 +1100 Subject: [PATCH 070/101] benches: benchmark for things in block_on (#5440) This additional benchmark exercises a common request/reply pattern using an MPSC for requests along with a oneshot payload as a reply mechanism. When used in a current threaded scenario, the bench is 17 times faster on my machine than when using the multi-threaded runtime and one worker thread. Not only that, but if I increase the number of worker threads to 6, performance degrades further. Does this suggest a scheduling problem with the multi-threaded runtime? No matter what, hopefully the benchmarks are a useful addition. --- benches/Cargo.toml | 5 ++++ benches/sync_mpsc_oneshot.rs | 53 ++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 benches/sync_mpsc_oneshot.rs diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 78e80fb1d3c..29017323390 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -27,6 +27,11 @@ name = "sync_mpsc" path = "sync_mpsc.rs" harness = false +[[bench]] +name = "sync_mpsc_oneshot" +path = "sync_mpsc_oneshot.rs" +harness = false + [[bench]] name = "rt_multi_threaded" path = "rt_multi_threaded.rs" diff --git a/benches/sync_mpsc_oneshot.rs b/benches/sync_mpsc_oneshot.rs new file mode 100644 index 00000000000..04b783b9d41 --- /dev/null +++ b/benches/sync_mpsc_oneshot.rs @@ -0,0 +1,53 @@ +use bencher::{benchmark_group, benchmark_main, Bencher}; +use tokio::{ + runtime::Runtime, + sync::{mpsc, oneshot}, +}; + +fn request_reply_current_thread(b: &mut Bencher) { + let rt = tokio::runtime::Builder::new_current_thread() + .build() + .unwrap(); + + request_reply(b, rt); +} + +fn request_reply_multi_threaded(b: &mut Bencher) { + let rt = tokio::runtime::Builder::new_multi_thread() + .worker_threads(1) + .build() + .unwrap(); + + request_reply(b, rt); +} + +fn request_reply(b: &mut Bencher, rt: Runtime) { + let tx = rt.block_on(async move { + let (tx, mut rx) = mpsc::channel::>(10); + tokio::spawn(async move { + while let Some(reply) = rx.recv().await { + reply.send(()).unwrap(); + } + }); + tx + }); + + b.iter(|| { + let task_tx = tx.clone(); + rt.block_on(async move { + for _ in 0..1_000 { + let (o_tx, o_rx) = oneshot::channel(); + task_tx.send(o_tx).await.unwrap(); + let _ = o_rx.await; + } + }) + }); +} + +benchmark_group!( + sync_mpsc_oneshot_group, + request_reply_current_thread, + request_reply_multi_threaded, +); + +benchmark_main!(sync_mpsc_oneshot_group); From d19f2f2d395a4d6befb5f66ff87a19172aede2ee Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 17 Feb 2023 16:23:57 +0100 Subject: [PATCH 071/101] sync: add doc aliases for `blocking_*` methods (#5448) --- tokio/src/sync/mpsc/bounded.rs | 2 ++ tokio/src/sync/mpsc/unbounded.rs | 1 + tokio/src/sync/mutex.rs | 1 + tokio/src/sync/oneshot.rs | 1 + 4 files changed, 5 insertions(+) diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs 
index 8babdc7e076..30bad6aa8e8 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs @@ -326,6 +326,7 @@ impl Receiver { /// ``` #[track_caller] #[cfg(feature = "sync")] + #[cfg_attr(docsrs, doc(alias = "recv_blocking"))] pub fn blocking_recv(&mut self) -> Option { crate::future::block_on(self.recv()) } @@ -696,6 +697,7 @@ impl Sender { /// ``` #[track_caller] #[cfg(feature = "sync")] + #[cfg_attr(docsrs, doc(alias = "send_blocking"))] pub fn blocking_send(&self, value: T) -> Result<(), SendError> { crate::future::block_on(self.send(value)) } diff --git a/tokio/src/sync/mpsc/unbounded.rs b/tokio/src/sync/mpsc/unbounded.rs index 501020433cf..cd83fc12513 100644 --- a/tokio/src/sync/mpsc/unbounded.rs +++ b/tokio/src/sync/mpsc/unbounded.rs @@ -243,6 +243,7 @@ impl UnboundedReceiver { /// ``` #[track_caller] #[cfg(feature = "sync")] + #[cfg_attr(docsrs, doc(alias = "recv_blocking"))] pub fn blocking_recv(&mut self) -> Option { crate::future::block_on(self.recv()) } diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 2243566509a..6377daff360 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -417,6 +417,7 @@ impl Mutex { /// ``` #[track_caller] #[cfg(feature = "sync")] + #[cfg_attr(docsrs, doc(alias = "lock_blocking"))] pub fn blocking_lock(&self) -> MutexGuard<'_, T> { crate::future::block_on(self.lock()) } diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index fcd7a32e8e8..a900dbfef24 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -1056,6 +1056,7 @@ impl Receiver { /// ``` #[track_caller] #[cfg(feature = "sync")] + #[cfg_attr(docsrs, doc(alias = "recv_blocking"))] pub fn blocking_recv(self) -> Result { crate::future::block_on(self) } From 0dc1b71e6e53782ed2314935a70631b667686805 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 17 Feb 2023 22:49:45 +0100 Subject: [PATCH 072/101] time: remove cache padding in timer entries (#5468) --- tokio/src/runtime/time/entry.rs | 171 +++++++------------------------- tokio/src/util/pad.rs | 52 ---------- 2 files changed, 38 insertions(+), 185 deletions(-) delete mode 100644 tokio/src/util/pad.rs diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index 4780cfcad32..f86d9ed97f0 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -94,7 +94,7 @@ pub(super) struct StateCell { /// without holding the driver lock is undefined behavior. result: UnsafeCell, /// The currently-registered waker - waker: CachePadded, + waker: AtomicWaker, } impl Default for StateCell { @@ -114,7 +114,7 @@ impl StateCell { Self { state: AtomicU64::new(STATE_DEREGISTERED), result: UnsafeCell::new(Ok(())), - waker: CachePadded(AtomicWaker::new()), + waker: AtomicWaker::new(), } } @@ -139,7 +139,7 @@ impl StateCell { // We must register first. This ensures that either `fire` will // observe the new waker, or we will observe a racing fire to have set // the state, or both. - self.waker.0.register_by_ref(waker); + self.waker.register_by_ref(waker); self.read_state() } @@ -227,7 +227,7 @@ impl StateCell { self.state.store(STATE_DEREGISTERED, Ordering::Release); - self.waker.0.take_waker() + self.waker.take_waker() } /// Marks the timer as registered (poll will return None) and sets the @@ -331,11 +331,20 @@ pub(super) type EntryList = crate::util::linked_list::LinkedList, + /// A link within the doubly-linked list of timers on a particular level and + /// slot. Valid only if state is equal to Registered. 
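Looping back to the `blocking_*` doc aliases a few hunks above: those methods exist for synchronous code running outside the runtime, as in this usage sketch (public API only, not part of the patch):

```rust
use tokio::runtime::Runtime;
use tokio::sync::mpsc;

fn main() {
    let rt = Runtime::new().unwrap();
    let (tx, mut rx) = mpsc::channel::<u32>(8);

    // A plain OS thread, with no runtime context, uses the blocking variant.
    let producer = std::thread::spawn(move || {
        tx.blocking_send(42).unwrap();
    });

    rt.block_on(async move {
        assert_eq!(rx.recv().await, Some(42));
    });
    producer.join().unwrap();
}
```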
+ /// + /// Only accessed under the entry lock. + pointers: linked_list::Pointers, + + /// The expiration time for which this entry is currently registered. + /// Generally owned by the driver, but is accessed by the entry when not + /// registered. + cached_when: AtomicU64, + + /// The true expiration time. Set by the timer future, read by the driver. + true_when: AtomicU64, /// Current state. This records whether the timer entry is currently under /// the ownership of the driver, and if not, its current state (not @@ -345,10 +354,23 @@ pub(crate) struct TimerShared { _p: PhantomPinned, } +unsafe impl Send for TimerShared {} +unsafe impl Sync for TimerShared {} + +impl std::fmt::Debug for TimerShared { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TimerShared") + .field("when", &self.true_when.load(Ordering::Relaxed)) + .field("cached_when", &self.cached_when.load(Ordering::Relaxed)) + .field("state", &self.state) + .finish() + } +} + generate_addr_of_methods! { impl<> TimerShared { unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { - &self.driver_state.0.pointers + &self.pointers } } } @@ -356,8 +378,10 @@ generate_addr_of_methods! { impl TimerShared { pub(super) fn new() -> Self { Self { + cached_when: AtomicU64::new(0), + true_when: AtomicU64::new(0), + pointers: linked_list::Pointers::new(), state: StateCell::default(), - driver_state: CachePadded(TimerSharedPadded::new()), _p: PhantomPinned, } } @@ -365,7 +389,7 @@ impl TimerShared { /// Gets the cached time-of-expiration value. pub(super) fn cached_when(&self) -> u64 { // Cached-when is only accessed under the driver lock, so we can use relaxed - self.driver_state.0.cached_when.load(Ordering::Relaxed) + self.cached_when.load(Ordering::Relaxed) } /// Gets the true time-of-expiration value, and copies it into the cached @@ -376,10 +400,7 @@ impl TimerShared { pub(super) unsafe fn sync_when(&self) -> u64 { let true_when = self.true_when(); - self.driver_state - .0 - .cached_when - .store(true_when, Ordering::Relaxed); + self.cached_when.store(true_when, Ordering::Relaxed); true_when } @@ -389,10 +410,7 @@ impl TimerShared { /// SAFETY: Must be called with the driver lock held, and when this entry is /// not in any timer wheel lists. unsafe fn set_cached_when(&self, when: u64) { - self.driver_state - .0 - .cached_when - .store(when, Ordering::Relaxed); + self.cached_when.store(when, Ordering::Relaxed); } /// Returns the true time-of-expiration value, with relaxed memory ordering. @@ -407,7 +425,7 @@ impl TimerShared { /// in the timer wheel. pub(super) unsafe fn set_expiration(&self, t: u64) { self.state.set_expiration(t); - self.driver_state.0.cached_when.store(t, Ordering::Relaxed); + self.cached_when.store(t, Ordering::Relaxed); } /// Sets the true time-of-expiration only if it is after the current. @@ -431,48 +449,6 @@ impl TimerShared { } } -/// Additional shared state between the driver and the timer which is cache -/// padded. This contains the information that the driver thread accesses most -/// frequently to minimize contention. In particular, we move it away from the -/// waker, as the waker is updated on every poll. -struct TimerSharedPadded { - /// A link within the doubly-linked list of timers on a particular level and - /// slot. Valid only if state is equal to Registered. - /// - /// Only accessed under the entry lock. - pointers: linked_list::Pointers, - - /// The expiration time for which this entry is currently registered. 
- /// Generally owned by the driver, but is accessed by the entry when not - /// registered. - cached_when: AtomicU64, - - /// The true expiration time. Set by the timer future, read by the driver. - true_when: AtomicU64, -} - -impl std::fmt::Debug for TimerSharedPadded { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TimerSharedPadded") - .field("when", &self.true_when.load(Ordering::Relaxed)) - .field("cached_when", &self.cached_when.load(Ordering::Relaxed)) - .finish() - } -} - -impl TimerSharedPadded { - fn new() -> Self { - Self { - cached_when: AtomicU64::new(0), - true_when: AtomicU64::new(0), - pointers: linked_list::Pointers::new(), - } - } -} - -unsafe impl Send for TimerShared {} -unsafe impl Sync for TimerShared {} - unsafe impl linked_list::Link for TimerShared { type Handle = TimerHandle; @@ -660,74 +636,3 @@ impl Drop for TimerEntry { unsafe { Pin::new_unchecked(self) }.as_mut().cancel() } } - -// Copied from [crossbeam/cache_padded](https://github.com/crossbeam-rs/crossbeam/blob/fa35346b7c789bba045ad789e894c68c466d1779/crossbeam-utils/src/cache_padded.rs#L62-L127) -// -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. -// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -// -// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. -// -// Sources: -// - https://www.mono-project.com/news/2016/09/12/arm64-icache/ -// -// powerpc64 has 128-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 -#[cfg_attr( - any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - ), - repr(align(128)) -)] -// arm, mips, mips64, and riscv64 have 32-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_riscv64.go#L7 -#[cfg_attr( - any( - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "riscv64", - ), - repr(align(32)) -)] -// s390x has 256-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 -#[cfg_attr(target_arch = "s390x", repr(align(256)))] -// x86 and wasm have 64-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 -// -// All others are assumed to have 64-byte cache line size. 
-#[cfg_attr( - not(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "riscv64", - target_arch = "s390x", - )), - repr(align(64)) -)] -#[derive(Debug, Default)] -struct CachePadded(T); diff --git a/tokio/src/util/pad.rs b/tokio/src/util/pad.rs deleted file mode 100644 index bf0913ca853..00000000000 --- a/tokio/src/util/pad.rs +++ /dev/null @@ -1,52 +0,0 @@ -use core::fmt; -use core::ops::{Deref, DerefMut}; - -#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. -// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -#[cfg_attr(target_arch = "x86_64", repr(align(128)))] -#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))] -pub(crate) struct CachePadded { - value: T, -} - -unsafe impl Send for CachePadded {} -unsafe impl Sync for CachePadded {} - -impl CachePadded { - pub(crate) fn new(t: T) -> CachePadded { - CachePadded:: { value: t } - } -} - -impl Deref for CachePadded { - type Target = T; - - fn deref(&self) -> &T { - &self.value - } -} - -impl DerefMut for CachePadded { - fn deref_mut(&mut self) -> &mut T { - &mut self.value - } -} - -impl fmt::Debug for CachePadded { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CachePadded") - .field("value", &self.value) - .finish() - } -} - -impl From for CachePadded { - fn from(t: T) -> Self { - CachePadded::new(t) - } -} From b921fe45ac9fc49e18bc6e834065b61d246f56e0 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 17 Feb 2023 23:56:56 +0100 Subject: [PATCH 073/101] sync: reduce contention in watch channel (#5464) --- tokio/src/runtime/context.rs | 2 +- tokio/src/sync/watch.rs | 74 ++++++++++++++++++++++++++++++++++-- tokio/src/util/rand.rs | 6 ++- 3 files changed, 77 insertions(+), 5 deletions(-) diff --git a/tokio/src/runtime/context.rs b/tokio/src/runtime/context.rs index fef53cab8a6..a2401306634 100644 --- a/tokio/src/runtime/context.rs +++ b/tokio/src/runtime/context.rs @@ -79,7 +79,7 @@ tokio_thread_local! { } } -#[cfg(feature = "macros")] +#[cfg(any(feature = "macros", all(feature = "sync", feature = "rt")))] pub(crate) fn thread_rng_n(n: u32) -> u32 { CONTEXT.with(|ctx| ctx.rng.fastrand_n(n)) } diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index 6db46b71fc5..f8250edd6f3 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -58,6 +58,7 @@ use crate::sync::notify::Notify; use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::atomic::Ordering::Relaxed; use crate::loom::sync::{Arc, RwLock, RwLockReadGuard}; +use std::fmt; use std::mem; use std::ops; use std::panic; @@ -166,7 +167,6 @@ impl<'a, T> Ref<'a, T> { } } -#[derive(Debug)] struct Shared { /// The most recent value. value: RwLock, @@ -181,12 +181,24 @@ struct Shared { ref_count_rx: AtomicUsize, /// Notifies waiting receivers that the value changed. - notify_rx: Notify, + notify_rx: big_notify::BigNotify, /// Notifies any task listening for `Receiver` dropped events. 
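The `big_notify` module added by this patch spreads waiters over eight `Notify` instances so receivers do not all contend on a single waiter list. A freestanding sketch of the same idea built on the public `tokio::sync::Notify` (illustrative only; names, the shard count of 8, and the round-robin fallback mirror the patch but this is not the internal type):

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tokio::sync::Notify;

struct ShardedNotify {
    next: AtomicUsize,
    shards: [Notify; 8],
}

impl ShardedNotify {
    fn new() -> Self {
        Self {
            next: AtomicUsize::new(0),
            shards: Default::default(),
        }
    }

    // A waiter registers on one shard only; round-robin here, where the patch
    // prefers a cheap thread-local RNG when one is compiled in.
    async fn notified(&self) {
        let i = self.next.fetch_add(1, Ordering::Relaxed) % 8;
        self.shards[i].notified().await;
    }

    // The notifier wakes every shard, since it cannot know which one a given
    // waiter picked.
    fn notify_waiters(&self) {
        for shard in &self.shards {
            shard.notify_waiters();
        }
    }
}

#[tokio::main]
async fn main() {
    let n = Arc::new(ShardedNotify::new());
    let waiter = {
        let n = n.clone();
        tokio::spawn(async move { n.notified().await })
    };

    // Keep waking until the waiter has registered on its shard and finished.
    while !waiter.is_finished() {
        n.notify_waiters();
        tokio::task::yield_now().await;
    }
    waiter.await.unwrap();
}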
notify_tx: Notify, } +impl fmt::Debug for Shared { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let state = self.state.load(); + f.debug_struct("Shared") + .field("value", &self.value) + .field("version", &state.version()) + .field("is_closed", &state.is_closed()) + .field("ref_count_rx", &self.ref_count_rx) + .finish() + } +} + pub mod error { //! Watch error types. @@ -221,6 +233,62 @@ pub mod error { impl std::error::Error for RecvError {} } +mod big_notify { + use super::*; + use crate::sync::notify::Notified; + + // To avoid contention on the lock inside the `Notify`, we store multiple + // copies of it. Then, we use either circular access or randomness to spread + // out threads over different `Notify` objects. + // + // Some simple benchmarks show that randomness performs slightly better than + // circular access (probably due to contention on `next`), so we prefer to + // use randomness when Tokio is compiled with a random number generator. + // + // When the random number generator is not available, we fall back to + // circular access. + + pub(super) struct BigNotify { + #[cfg(not(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros"))))] + next: AtomicUsize, + inner: [Notify; 8], + } + + impl BigNotify { + pub(super) fn new() -> Self { + Self { + #[cfg(not(all( + not(loom), + feature = "sync", + any(feature = "rt", feature = "macros") + )))] + next: AtomicUsize::new(0), + inner: Default::default(), + } + } + + pub(super) fn notify_waiters(&self) { + for notify in &self.inner { + notify.notify_waiters(); + } + } + + /// This function implements the case where randomness is not available. + #[cfg(not(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros"))))] + pub(super) fn notified(&self) -> Notified<'_> { + let i = self.next.fetch_add(1, Relaxed) % 8; + self.inner[i].notified() + } + + /// This function implements the case where randomness is available. + #[cfg(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros")))] + pub(super) fn notified(&self) -> Notified<'_> { + let i = crate::runtime::context::thread_rng_n(8) as usize; + self.inner[i].notified() + } + } +} + use self::state::{AtomicState, Version}; mod state { use crate::loom::sync::atomic::AtomicUsize; @@ -320,7 +388,7 @@ pub fn channel(init: T) -> (Sender, Receiver) { value: RwLock::new(init), state: AtomicState::new(), ref_count_rx: AtomicUsize::new(1), - notify_rx: Notify::new(), + notify_rx: big_notify::BigNotify::new(), notify_tx: Notify::new(), }); diff --git a/tokio/src/util/rand.rs b/tokio/src/util/rand.rs index 749da6bcac4..7018c1ee0e5 100644 --- a/tokio/src/util/rand.rs +++ b/tokio/src/util/rand.rs @@ -135,7 +135,11 @@ impl FastRand { old_seed } - #[cfg(any(feature = "macros", feature = "rt-multi-thread"))] + #[cfg(any( + feature = "macros", + feature = "rt-multi-thread", + all(feature = "sync", feature = "rt") + ))] pub(crate) fn fastrand_n(&self, n: u32) -> u32 { // This is similar to fastrand() % n, but faster. 
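        // An illustrative sketch of the reduction being described here (not
        // Tokio's exact code): a widening multiply and a shift map a
        // full-range `u32` into `[0, n)` without a division, e.g.
        //
        //     fn reduce(x: u32, n: u32) -> u32 {
        //         ((x as u64).wrapping_mul(n as u64) >> 32) as u32
        //     }
        //
        // For `n = 8` this is how `thread_rng_n(8)` picks one of the eight
        // `Notify` shards above without the cost of `%`.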
// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ From 24aac0add3547803b571e9d5c6d6c3ecbd09bb5b Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 18 Feb 2023 10:39:25 +0100 Subject: [PATCH 074/101] sync: don't leak tracing spans in mutex guards (#5469) --- tokio/src/sync/mutex.rs | 170 +++++++----- tokio/src/sync/rwlock.rs | 255 ++++++++++-------- tokio/src/sync/rwlock/owned_read_guard.rs | 57 ++-- tokio/src/sync/rwlock/owned_write_guard.rs | 96 ++++--- .../sync/rwlock/owned_write_guard_mapped.rs | 61 +++-- tokio/src/sync/rwlock/read_guard.rs | 54 ++-- tokio/src/sync/rwlock/write_guard.rs | 96 ++++--- tokio/src/sync/rwlock/write_guard_mapped.rs | 62 +++-- 8 files changed, 492 insertions(+), 359 deletions(-) diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 6377daff360..c33021acced 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -6,9 +6,10 @@ use crate::util::trace; use std::cell::UnsafeCell; use std::error::Error; +use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use std::{fmt, marker, mem}; +use std::{fmt, mem}; /// An asynchronous `Mutex`-like type. /// @@ -144,6 +145,8 @@ pub struct Mutex { #[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, T: ?Sized> { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] resource_span: tracing::Span, lock: &'a Mutex, @@ -179,10 +182,29 @@ pub struct OwnedMutexGuard { #[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct MappedMutexGuard<'a, T: ?Sized> { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. s: &'a semaphore::Semaphore, data: *mut T, // Needed to tell the borrow checker that we are holding a `&mut T` - marker: marker::PhantomData<&'a mut T>, + marker: PhantomData<&'a mut T>, +} + +/// A helper type used when taking apart a `MutexGuard` without running its +/// Drop implementation. +#[allow(dead_code)] // Unused fields are still used in Drop. +struct MutexGuardInner<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + lock: &'a Mutex, +} + +/// A helper type used when taking apart a `MappedMutexGuard` without running +/// its Drop implementation. +#[allow(dead_code)] // Unused fields are still used in Drop. +struct MappedMutexGuardInner<'a, T: ?Sized> { + s: &'a semaphore::Semaphore, + data: *mut T, } // As long as T: Send, it's fine to send and share Mutex between threads. 
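Every guard type touched by this patch gains a `skip_drop` helper built on the same idiom: wrap the guard in `ManuallyDrop`, read the fields out, and never run the guard's `Drop`. A self-contained sketch of that idiom with made-up types (illustrative, not Tokio's guards):

use std::mem::ManuallyDrop;
use std::ptr;

struct Guard {
    label: String,
    count: u32,
}

impl Drop for Guard {
    fn drop(&mut self) {
        println!("releasing {}", self.label);
    }
}

// The fields of `Guard`, but with no `Drop` attached.
struct GuardInner {
    label: String,
    count: u32,
}

impl Guard {
    // Take the guard apart without running `Guard::drop`.
    fn skip_drop(self) -> GuardInner {
        let me = ManuallyDrop::new(self);
        // SAFETY: each non-Copy field is read exactly once and the original is
        // never dropped, so no value is duplicated or leaked.
        unsafe {
            GuardInner {
                label: ptr::read(&me.label),
                count: me.count,
            }
        }
    }
}

fn main() {
    let guard = Guard {
        label: "lock A".to_string(),
        count: 1,
    };
    // `Guard::drop` does not run here; we now own the pieces directly.
    let inner = guard.skip_drop();
    assert_eq!(inner.count, 1);
    println!("took {} apart without releasing it", inner.label);
}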
@@ -340,15 +362,27 @@ impl Mutex { /// } /// ``` pub async fn lock(&self) -> MutexGuard<'_, T> { + let acquire_fut = async { + self.acquire().await; + + MutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + } + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] - trace::async_op( - || self.acquire(), + let acquire_fut = trace::async_op( + move || acquire_fut, self.resource_span.clone(), "Mutex::lock", "poll", false, - ) - .await; + ); + + #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing + let guard = acquire_fut.await; #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { @@ -358,14 +392,7 @@ impl Mutex { ); }); - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - self.acquire().await; - - MutexGuard { - lock: self, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - } + guard } /// Blockingly locks this `Mutex`. When the lock has been acquired, function returns a @@ -512,34 +539,39 @@ impl Mutex { /// [`Arc`]: std::sync::Arc pub async fn lock_owned(self: Arc) -> OwnedMutexGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] - trace::async_op( - || self.acquire(), - self.resource_span.clone(), + let resource_span = self.resource_span.clone(); + + let acquire_fut = async { + self.acquire().await; + + OwnedMutexGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + lock: self, + } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let acquire_fut = trace::async_op( + move || acquire_fut, + resource_span, "Mutex::lock_owned", "poll", false, - ) - .await; + ); + + #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing + let guard = acquire_fut.await; #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", locked = true, ); }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - self.acquire().await; - - OwnedMutexGuard { - lock: self, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } + guard } async fn acquire(&self) { @@ -570,6 +602,12 @@ impl Mutex { pub fn try_lock(&self) -> Result, TryLockError> { match self.s.try_acquire(1) { Ok(_) => { + let guard = MutexGuard { + lock: self, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -578,11 +616,7 @@ impl Mutex { ); }); - Ok(MutexGuard { - lock: self, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - }) + Ok(guard) } Err(_) => Err(TryLockError(())), } @@ -639,22 +673,21 @@ impl Mutex { pub fn try_lock_owned(self: Arc) -> Result, TryLockError> { match self.s.try_acquire(1) { Ok(_) => { + let guard = OwnedMutexGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + lock: self, + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", locked = true, ); }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = 
self.resource_span.clone(); - - Ok(OwnedMutexGuard { - lock: self, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - }) + Ok(guard) } Err(_) => Err(TryLockError(())), } @@ -714,6 +747,17 @@ where // === impl MutexGuard === impl<'a, T: ?Sized> MutexGuard<'a, T> { + fn skip_drop(self) -> MutexGuardInner<'a, T> { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the `resource_span` and then forgets the + // original. In the end, we have not duplicated or forgotten any values. + MutexGuardInner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: unsafe { std::ptr::read(&me.resource_span) }, + lock: me.lock, + } + } + /// Makes a new [`MappedMutexGuard`] for a component of the locked data. /// /// This operation cannot fail as the [`MutexGuard`] passed in already locked the mutex. @@ -750,12 +794,11 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { F: FnOnce(&mut T) -> &mut U, { let data = f(&mut *this) as *mut U; - let s = &this.lock.s; - mem::forget(this); + let inner = this.skip_drop(); MappedMutexGuard { - s, + s: &inner.lock.s, data, - marker: marker::PhantomData, + marker: PhantomData, } } @@ -800,12 +843,11 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { Some(data) => data as *mut U, None => return Err(this), }; - let s = &this.lock.s; - mem::forget(this); + let inner = this.skip_drop(); Ok(MappedMutexGuard { - s, + s: &inner.lock.s, data, - marker: marker::PhantomData, + marker: PhantomData, }) } @@ -946,6 +988,14 @@ impl fmt::Display for OwnedMutexGuard { // === impl MappedMutexGuard === impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { + fn skip_drop(self) -> MappedMutexGuardInner<'a, T> { + let me = mem::ManuallyDrop::new(self); + MappedMutexGuardInner { + s: me.s, + data: me.data, + } + } + /// Makes a new [`MappedMutexGuard`] for a component of the locked data. /// /// This operation cannot fail as the [`MappedMutexGuard`] passed in already locked the mutex. @@ -960,12 +1010,11 @@ impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { F: FnOnce(&mut T) -> &mut U, { let data = f(&mut *this) as *mut U; - let s = this.s; - mem::forget(this); + let inner = this.skip_drop(); MappedMutexGuard { - s, + s: inner.s, data, - marker: marker::PhantomData, + marker: PhantomData, } } @@ -987,12 +1036,11 @@ impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { Some(data) => data as *mut U, None => return Err(this), }; - let s = this.s; - mem::forget(this); + let inner = this.skip_drop(); Ok(MappedMutexGuard { - s, + s: inner.s, data, - marker: marker::PhantomData, + marker: PhantomData, }) } } diff --git a/tokio/src/sync/rwlock.rs b/tokio/src/sync/rwlock.rs index 0492e4e9d39..c332ede2bbe 100644 --- a/tokio/src/sync/rwlock.rs +++ b/tokio/src/sync/rwlock.rs @@ -5,7 +5,6 @@ use crate::util::trace; use std::cell::UnsafeCell; use std::marker; use std::marker::PhantomData; -use std::mem::ManuallyDrop; use std::sync::Arc; pub(crate) mod owned_read_guard; @@ -423,23 +422,33 @@ impl RwLock { /// } /// ``` pub async fn read(&self) -> RwLockReadGuard<'_, T> { + let acquire_fut = async { + self.s.acquire(1).await.unwrap_or_else(|_| { + // The semaphore was closed. but, we never explicitly close it, and we have a + // handle to it through the Arc, which means that this can never happen. 
+ unreachable!() + }); + + RwLockReadGuard { + s: &self.s, + data: self.c.get(), + marker: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + } + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.s.acquire(1), + let acquire_fut = trace::async_op( + move || acquire_fut, self.resource_span.clone(), "RwLock::read", "poll", false, ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.s.acquire(1); - - inner.await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); + #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing + let guard = acquire_fut.await; #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { @@ -450,13 +459,7 @@ impl RwLock { ) }); - RwLockReadGuard { - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - } + guard } /// Blockingly locks this `RwLock` with shared read access. @@ -565,25 +568,38 @@ impl RwLock { /// ``` pub async fn read_owned(self: Arc) -> OwnedRwLockReadGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.s.acquire(1), - self.resource_span.clone(), + let resource_span = self.resource_span.clone(); + + let acquire_fut = async { + self.s.acquire(1).await.unwrap_or_else(|_| { + // The semaphore was closed. but, we never explicitly close it, and we have a + // handle to it through the Arc, which means that this can never happen. + unreachable!() + }); + + OwnedRwLockReadGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + data: self.c.get(), + lock: self, + _p: PhantomData, + } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let acquire_fut = trace::async_op( + move || acquire_fut, + resource_span, "RwLock::read_owned", "poll", false, ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.s.acquire(1); - - inner.await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); + #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing + let guard = acquire_fut.await; #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", current_readers = 1, @@ -591,16 +607,7 @@ impl RwLock { ) }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - OwnedRwLockReadGuard { - data: self.c.get(), - lock: ManuallyDrop::new(self), - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } + guard } /// Attempts to acquire this `RwLock` with shared read access. 
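A recurring shape in these hunks is that the guard is now constructed inside the acquire future itself, and the tracing wrapper is applied to that whole future only when instrumentation is enabled, so no extra span handle escapes into a separate code path. Roughly the same shape using the public `tracing` crate in place of Tokio's internal `trace::async_op` (a sketch under that substitution):

use tracing::Instrument;

async fn acquire() -> u32 {
    // Stand-in for the real semaphore acquire; the value plays the guard's role.
    42
}

async fn lock_like() -> u32 {
    let resource_span = tracing::info_span!("resource", kind = "example_lock");

    // Build the "guard" inside the future so it exists in exactly one place.
    let acquire_fut = async { acquire().await };

    // Wrap the whole future in the span, then await it.
    let acquire_fut = acquire_fut.instrument(resource_span);
    acquire_fut.await
}

#[tokio::main]
async fn main() {
    assert_eq!(lock_like().await, 42);
}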
@@ -642,6 +649,14 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + let guard = RwLockReadGuard { + s: &self.s, + data: self.c.get(), + marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -651,13 +666,7 @@ impl RwLock { ) }); - Ok(RwLockReadGuard { - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - }) + Ok(guard) } /// Attempts to acquire this `RwLock` with shared read access. @@ -705,8 +714,16 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + let guard = OwnedRwLockReadGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + data: self.c.get(), + lock: self, + _p: PhantomData, + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", current_readers = 1, @@ -714,16 +731,7 @@ impl RwLock { ) }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - Ok(OwnedRwLockReadGuard { - data: self.c.get(), - lock: ManuallyDrop::new(self), - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - }) + Ok(guard) } /// Locks this `RwLock` with exclusive write access, causing the current @@ -755,23 +763,34 @@ impl RwLock { ///} /// ``` pub async fn write(&self) -> RwLockWriteGuard<'_, T> { + let acquire_fut = async { + self.s.acquire(self.mr).await.unwrap_or_else(|_| { + // The semaphore was closed. but, we never explicitly close it, and we have a + // handle to it through the Arc, which means that this can never happen. + unreachable!() + }); + + RwLockWriteGuard { + permits_acquired: self.mr, + s: &self.s, + data: self.c.get(), + marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + } + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.s.acquire(self.mr), + let acquire_fut = trace::async_op( + move || acquire_fut, self.resource_span.clone(), "RwLock::write", "poll", false, ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.s.acquire(self.mr); - - inner.await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); + #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing + let guard = acquire_fut.await; #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { @@ -782,14 +801,7 @@ impl RwLock { ) }); - RwLockWriteGuard { - permits_acquired: self.mr, - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - } + guard } /// Blockingly locks this `RwLock` with exclusive write access. 
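The hunks above make the underlying model easy to see: the lock is a counting semaphore in which a reader holds one permit and a writer holds all `mr` (max-readers) permits. The same idea in miniature with the public `tokio::sync::Semaphore` (illustrative; the real code uses the internal batch semaphore):

use std::sync::Arc;
use tokio::sync::Semaphore;

const MAX_READERS: u32 = 32;

#[tokio::main]
async fn main() {
    let sem = Arc::new(Semaphore::new(MAX_READERS as usize));

    // A "read lock" is one permit, so up to MAX_READERS readers coexist.
    let r1 = sem.clone().acquire_owned().await.unwrap();
    let r2 = sem.clone().acquire_owned().await.unwrap();
    drop((r1, r2));

    // A "write lock" is every permit at once, which excludes all readers and
    // any other writer for as long as it is held.
    let w = sem.clone().acquire_many_owned(MAX_READERS).await.unwrap();
    drop(w);
}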
@@ -884,25 +896,39 @@ impl RwLock { /// ``` pub async fn write_owned(self: Arc) -> OwnedRwLockWriteGuard { #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.s.acquire(self.mr), - self.resource_span.clone(), + let resource_span = self.resource_span.clone(); + + let acquire_fut = async { + self.s.acquire(self.mr).await.unwrap_or_else(|_| { + // The semaphore was closed. but, we never explicitly close it, and we have a + // handle to it through the Arc, which means that this can never happen. + unreachable!() + }); + + OwnedRwLockWriteGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + permits_acquired: self.mr, + data: self.c.get(), + lock: self, + _p: PhantomData, + } + }; + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let acquire_fut = trace::async_op( + move || acquire_fut, + resource_span, "RwLock::write_owned", "poll", false, ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.s.acquire(self.mr); - - inner.await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); + #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing + let guard = acquire_fut.await; #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", write_locked = true, @@ -910,17 +936,7 @@ impl RwLock { ) }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - OwnedRwLockWriteGuard { - permits_acquired: self.mr, - data: self.c.get(), - lock: ManuallyDrop::new(self), - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } + guard } /// Attempts to acquire this `RwLock` with exclusive write access. @@ -953,6 +969,15 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + let guard = RwLockWriteGuard { + permits_acquired: self.mr, + s: &self.s, + data: self.c.get(), + marker: marker::PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -962,14 +987,7 @@ impl RwLock { ) }); - Ok(RwLockWriteGuard { - permits_acquired: self.mr, - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - }) + Ok(guard) } /// Attempts to acquire this `RwLock` with exclusive write access. 
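The `downgrade` implementations rewritten further down keep exactly one permit and release the other `permits_acquired - 1`, which is what lets a writer atomically become a reader. A short usage example of the public API (illustrative):

use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
    let lock = RwLock::new(1u32);

    let mut write = lock.write().await;
    *write += 1;

    // All but one of the writer's permits are released here, so new readers
    // can proceed, but no writer can slip in between the write and the read.
    let read = write.downgrade();
    assert_eq!(*read, 2);
}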
@@ -1009,8 +1027,17 @@ impl RwLock { Err(TryAcquireError::Closed) => unreachable!(), } + let guard = OwnedRwLockWriteGuard { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: self.resource_span.clone(), + permits_acquired: self.mr, + data: self.c.get(), + lock: self, + _p: PhantomData, + }; + #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", write_locked = true, @@ -1018,17 +1045,7 @@ impl RwLock { ) }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - Ok(OwnedRwLockWriteGuard { - permits_acquired: self.mr, - data: self.c.get(), - lock: ManuallyDrop::new(self), - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - }) + Ok(guard) } /// Returns a mutable reference to the underlying data. diff --git a/tokio/src/sync/rwlock/owned_read_guard.rs b/tokio/src/sync/rwlock/owned_read_guard.rs index e457a1b663a..273e7b86f2f 100644 --- a/tokio/src/sync/rwlock/owned_read_guard.rs +++ b/tokio/src/sync/rwlock/owned_read_guard.rs @@ -1,10 +1,7 @@ use crate::sync::rwlock::RwLock; -use std::fmt; use std::marker::PhantomData; -use std::mem; -use std::mem::ManuallyDrop; -use std::ops; use std::sync::Arc; +use std::{fmt, mem, ops, ptr}; /// Owned RAII structure used to release the shared read access of a lock when /// dropped. @@ -16,15 +13,38 @@ use std::sync::Arc; /// [`RwLock`]: struct@crate::sync::RwLock #[clippy::has_significant_drop] pub struct OwnedRwLockReadGuard { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, - // ManuallyDrop allows us to destructure into this field without running the destructor. - pub(super) lock: ManuallyDrop>>, + pub(super) lock: Arc>, pub(super) data: *const U, pub(super) _p: PhantomData, } +#[allow(dead_code)] // Unused fields are still used in Drop. +struct Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + lock: Arc>, + data: *const U, +} + impl OwnedRwLockReadGuard { + fn skip_drop(self) -> Inner { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + unsafe { + Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: ptr::read(&me.resource_span), + lock: ptr::read(&me.lock), + data: me.data, + } + } + } + /// Makes a new `OwnedRwLockReadGuard` for a component of the locked data. /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in /// already locked the data. @@ -53,23 +73,19 @@ impl OwnedRwLockReadGuard { /// # } /// ``` #[inline] - pub fn map(mut this: Self, f: F) -> OwnedRwLockReadGuard + pub fn map(this: Self, f: F) -> OwnedRwLockReadGuard where F: FnOnce(&U) -> &V, { let data = f(&*this) as *const V; - let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. 
- mem::forget(this); + let this = this.skip_drop(); OwnedRwLockReadGuard { - lock: ManuallyDrop::new(lock), + lock: this.lock, data, _p: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, } } @@ -104,7 +120,7 @@ impl OwnedRwLockReadGuard { /// # } /// ``` #[inline] - pub fn try_map(mut this: Self, f: F) -> Result, Self> + pub fn try_map(this: Self, f: F) -> Result, Self> where F: FnOnce(&U) -> Option<&V>, { @@ -112,18 +128,14 @@ impl OwnedRwLockReadGuard { Some(data) => data as *const V, None => return Err(this), }; - let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); Ok(OwnedRwLockReadGuard { - lock: ManuallyDrop::new(lock), + lock: this.lock, data, _p: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, }) } } @@ -157,7 +169,6 @@ where impl Drop for OwnedRwLockReadGuard { fn drop(&mut self) { self.lock.s.release(1); - unsafe { ManuallyDrop::drop(&mut self.lock) }; #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { diff --git a/tokio/src/sync/rwlock/owned_write_guard.rs b/tokio/src/sync/rwlock/owned_write_guard.rs index 0a8d7db6107..cbc77f6f4a2 100644 --- a/tokio/src/sync/rwlock/owned_write_guard.rs +++ b/tokio/src/sync/rwlock/owned_write_guard.rs @@ -1,11 +1,9 @@ use crate::sync::rwlock::owned_read_guard::OwnedRwLockReadGuard; use crate::sync::rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard; use crate::sync::rwlock::RwLock; -use std::fmt; use std::marker::PhantomData; -use std::mem::{self, ManuallyDrop}; -use std::ops; use std::sync::Arc; +use std::{fmt, mem, ops, ptr}; /// Owned RAII structure used to release the exclusive write access of a lock when /// dropped. @@ -17,16 +15,41 @@ use std::sync::Arc; /// [`RwLock`]: struct@crate::sync::RwLock #[clippy::has_significant_drop] pub struct OwnedRwLockWriteGuard { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, - // ManuallyDrop allows us to destructure into this field without running the destructor. - pub(super) lock: ManuallyDrop>>, + pub(super) lock: Arc>, pub(super) data: *mut T, pub(super) _p: PhantomData, } +#[allow(dead_code)] // Unused fields are still used in Drop. +struct Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + permits_acquired: u32, + lock: Arc>, + data: *const T, +} + impl OwnedRwLockWriteGuard { + fn skip_drop(self) -> Inner { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + unsafe { + Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: ptr::read(&me.resource_span), + permits_acquired: me.permits_acquired, + lock: ptr::read(&me.lock), + data: me.data, + } + } + } + /// Makes a new [`OwnedRwLockMappedWriteGuard`] for a component of the locked /// data. 
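From the caller's perspective none of the mapping behavior changes: `map` and `try_map` still narrow a guard to one component while the lock stays held, they simply route through `skip_drop` internally now. A brief usage example of the public mapping API (illustrative):

use std::sync::Arc;
use tokio::sync::{OwnedRwLockReadGuard, RwLock};

#[derive(Debug)]
struct Config {
    name: String,
    retries: u32,
}

#[tokio::main]
async fn main() {
    let lock = Arc::new(RwLock::new(Config {
        name: "example".to_string(),
        retries: 3,
    }));

    // Map an owned read guard down to a single field; the lock stays held
    // until the mapped guard is dropped.
    let guard = lock.clone().read_owned().await;
    let name: OwnedRwLockReadGuard<Config, String> =
        OwnedRwLockReadGuard::map(guard, |c| &c.name);
    assert_eq!(&*name, "example");
    drop(name);

    // The write-side equivalent maps to a mutable component.
    let guard = lock.write().await;
    let mut retries = tokio::sync::RwLockWriteGuard::map(guard, |c| &mut c.retries);
    *retries += 1;
}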
/// @@ -65,20 +88,15 @@ impl OwnedRwLockWriteGuard { F: FnOnce(&mut T) -> &mut U, { let data = f(&mut *this) as *mut U; - let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); OwnedRwLockMappedWriteGuard { - permits_acquired, - lock: ManuallyDrop::new(lock), + permits_acquired: this.permits_acquired, + lock: this.lock, data, _p: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, } } @@ -129,21 +147,15 @@ impl OwnedRwLockWriteGuard { Some(data) => data as *mut U, None => return Err(this), }; - let permits_acquired = this.permits_acquired; - let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); Ok(OwnedRwLockMappedWriteGuard { - permits_acquired, - lock: ManuallyDrop::new(lock), + permits_acquired: this.permits_acquired, + lock: this.lock, data, _p: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, }) } @@ -192,15 +204,22 @@ impl OwnedRwLockWriteGuard { /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock"); /// # } /// ``` - pub fn downgrade(mut self) -> OwnedRwLockReadGuard { - let lock = unsafe { ManuallyDrop::take(&mut self.lock) }; - let data = self.data; - let to_release = (self.permits_acquired - 1) as usize; + pub fn downgrade(self) -> OwnedRwLockReadGuard { + let this = self.skip_drop(); + let guard = OwnedRwLockReadGuard { + lock: this.lock, + data: this.data, + _p: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: this.resource_span, + }; // Release all but one of the permits held by the write guard - lock.s.release(to_release); + let to_release = (this.permits_acquired - 1) as usize; + guard.lock.s.release(to_release); + #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", write_locked = false, @@ -209,7 +228,7 @@ impl OwnedRwLockWriteGuard { }); #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", current_readers = 1, @@ -217,18 +236,7 @@ impl OwnedRwLockWriteGuard { ) }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. 
- mem::forget(self); - - OwnedRwLockReadGuard { - lock: ManuallyDrop::new(lock), - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } + guard } } @@ -267,6 +275,7 @@ where impl Drop for OwnedRwLockWriteGuard { fn drop(&mut self) { self.lock.s.release(self.permits_acquired as usize); + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -275,6 +284,5 @@ impl Drop for OwnedRwLockWriteGuard { write_locked.op = "override", ) }); - unsafe { ManuallyDrop::drop(&mut self.lock) }; } } diff --git a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs index c986fd5eee8..9f4952100a5 100644 --- a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs @@ -1,9 +1,7 @@ use crate::sync::rwlock::RwLock; -use std::fmt; use std::marker::PhantomData; -use std::mem::{self, ManuallyDrop}; -use std::ops; use std::sync::Arc; +use std::{fmt, mem, ops, ptr}; /// Owned RAII structure used to release the exclusive write access of a lock when /// dropped. @@ -16,16 +14,41 @@ use std::sync::Arc; /// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard #[clippy::has_significant_drop] pub struct OwnedRwLockMappedWriteGuard { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, - // ManuallyDrop allows us to destructure into this field without running the destructor. - pub(super) lock: ManuallyDrop>>, + pub(super) lock: Arc>, pub(super) data: *mut U, pub(super) _p: PhantomData, } +#[allow(dead_code)] // Unused fields are still used in Drop. +struct Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + permits_acquired: u32, + lock: Arc>, + data: *const U, +} + impl OwnedRwLockMappedWriteGuard { + fn skip_drop(self) -> Inner { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + unsafe { + Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: ptr::read(&me.resource_span), + permits_acquired: me.permits_acquired, + lock: ptr::read(&me.lock), + data: me.data, + } + } + } + /// Makes a new `OwnedRwLockMappedWriteGuard` for a component of the locked /// data. /// @@ -64,20 +87,15 @@ impl OwnedRwLockMappedWriteGuard { F: FnOnce(&mut U) -> &mut V, { let data = f(&mut *this) as *mut V; - let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. 
- mem::forget(this); + let this = this.skip_drop(); OwnedRwLockMappedWriteGuard { - permits_acquired, - lock: ManuallyDrop::new(lock), + permits_acquired: this.permits_acquired, + lock: this.lock, data, _p: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, } } @@ -126,20 +144,15 @@ impl OwnedRwLockMappedWriteGuard { Some(data) => data as *mut V, None => return Err(this), }; - let lock = unsafe { ManuallyDrop::take(&mut this.lock) }; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); Ok(OwnedRwLockMappedWriteGuard { - permits_acquired, - lock: ManuallyDrop::new(lock), + permits_acquired: this.permits_acquired, + lock: this.lock, data, _p: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, }) } } @@ -179,6 +192,7 @@ where impl Drop for OwnedRwLockMappedWriteGuard { fn drop(&mut self) { self.lock.s.release(self.permits_acquired as usize); + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -187,6 +201,5 @@ impl Drop for OwnedRwLockMappedWriteGuard { write_locked.op = "override", ) }); - unsafe { ManuallyDrop::drop(&mut self.lock) }; } } diff --git a/tokio/src/sync/rwlock/read_guard.rs b/tokio/src/sync/rwlock/read_guard.rs index 6f2eed1c864..a04b59588d5 100644 --- a/tokio/src/sync/rwlock/read_guard.rs +++ b/tokio/src/sync/rwlock/read_guard.rs @@ -1,8 +1,6 @@ use crate::sync::batch_semaphore::Semaphore; -use std::fmt; -use std::marker; -use std::mem; -use std::ops; +use std::marker::PhantomData; +use std::{fmt, mem, ops}; /// RAII structure used to release the shared read access of a lock when /// dropped. @@ -15,14 +13,36 @@ use std::ops; #[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockReadGuard<'a, T: ?Sized> { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, pub(super) s: &'a Semaphore, pub(super) data: *const T, - pub(super) marker: marker::PhantomData<&'a T>, + pub(super) marker: PhantomData<&'a T>, +} + +#[allow(dead_code)] // Unused fields are still used in Drop. +struct Inner<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + s: &'a Semaphore, + data: *const T, } impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { + fn skip_drop(self) -> Inner<'a, T> { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: unsafe { std::ptr::read(&me.resource_span) }, + s: me.s, + data: me.data, + } + } + /// Makes a new `RwLockReadGuard` for a component of the locked data. /// /// This operation cannot fail as the `RwLockReadGuard` passed in already @@ -62,18 +82,14 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { F: FnOnce(&T) -> &U, { let data = f(&*this) as *const U; - let s = this.s; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. 
- mem::forget(this); + let this = this.skip_drop(); RwLockReadGuard { - s, + s: this.s, data, - marker: marker::PhantomData, + marker: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, } } @@ -121,18 +137,14 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { Some(data) => data as *const U, None => return Err(this), }; - let s = this.s; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); Ok(RwLockReadGuard { - s, + s: this.s, data, - marker: marker::PhantomData, + marker: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, }) } } diff --git a/tokio/src/sync/rwlock/write_guard.rs b/tokio/src/sync/rwlock/write_guard.rs index d584bb49c56..4527e5581c9 100644 --- a/tokio/src/sync/rwlock/write_guard.rs +++ b/tokio/src/sync/rwlock/write_guard.rs @@ -1,10 +1,8 @@ use crate::sync::batch_semaphore::Semaphore; use crate::sync::rwlock::read_guard::RwLockReadGuard; use crate::sync::rwlock::write_guard_mapped::RwLockMappedWriteGuard; -use std::fmt; -use std::marker; -use std::mem; -use std::ops; +use std::marker::PhantomData; +use std::{fmt, mem, ops}; /// RAII structure used to release the exclusive write access of a lock when /// dropped. @@ -17,15 +15,39 @@ use std::ops; #[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockWriteGuard<'a, T: ?Sized> { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, pub(super) s: &'a Semaphore, pub(super) data: *mut T, - pub(super) marker: marker::PhantomData<&'a mut T>, + pub(super) marker: PhantomData<&'a mut T>, +} + +#[allow(dead_code)] // Unused fields are still used in Drop. +struct Inner<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + permits_acquired: u32, + s: &'a Semaphore, + data: *mut T, } impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { + fn skip_drop(self) -> Inner<'a, T> { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: unsafe { std::ptr::read(&me.resource_span) }, + permits_acquired: me.permits_acquired, + s: me.s, + data: me.data, + } + } + /// Makes a new [`RwLockMappedWriteGuard`] for a component of the locked data. /// /// This operation cannot fail as the `RwLockWriteGuard` passed in already @@ -68,19 +90,15 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { F: FnOnce(&mut T) -> &mut U, { let data = f(&mut *this) as *mut U; - let s = this.s; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. 
- mem::forget(this); + let this = this.skip_drop(); + RwLockMappedWriteGuard { - permits_acquired, - s, + permits_acquired: this.permits_acquired, + s: this.s, data, - marker: marker::PhantomData, + marker: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, } } @@ -135,19 +153,15 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { Some(data) => data as *mut U, None => return Err(this), }; - let s = this.s; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); + Ok(RwLockMappedWriteGuard { - permits_acquired, - s, + permits_acquired: this.permits_acquired, + s: this.s, data, - marker: marker::PhantomData, + marker: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, }) } @@ -199,12 +213,21 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// /// [`RwLock`]: struct@crate::sync::RwLock pub fn downgrade(self) -> RwLockReadGuard<'a, T> { - let RwLockWriteGuard { s, data, .. } = self; - let to_release = (self.permits_acquired - 1) as usize; + let this = self.skip_drop(); + let guard = RwLockReadGuard { + s: this.s, + data: this.data, + marker: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: this.resource_span, + }; + // Release all but one of the permits held by the write guard - s.release(to_release); + let to_release = (this.permits_acquired - 1) as usize; + this.s.release(to_release); + #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", write_locked = false, @@ -213,7 +236,7 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { }); #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { + guard.resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", current_readers = 1, @@ -221,18 +244,7 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { ) }); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(self); - - RwLockReadGuard { - s, - data, - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } + guard } } diff --git a/tokio/src/sync/rwlock/write_guard_mapped.rs b/tokio/src/sync/rwlock/write_guard_mapped.rs index 1f5d279111f..1248c4c7af3 100644 --- a/tokio/src/sync/rwlock/write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/write_guard_mapped.rs @@ -1,8 +1,6 @@ use crate::sync::batch_semaphore::Semaphore; -use std::fmt; -use std::marker; -use std::mem; -use std::ops; +use std::marker::PhantomData; +use std::{fmt, mem, ops}; /// RAII structure used to release the exclusive write access of a lock when /// dropped. @@ -15,15 +13,39 @@ use std::ops; /// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard #[clippy::has_significant_drop] pub struct RwLockMappedWriteGuard<'a, T: ?Sized> { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. 
#[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, pub(super) permits_acquired: u32, pub(super) s: &'a Semaphore, pub(super) data: *mut T, - pub(super) marker: marker::PhantomData<&'a mut T>, + pub(super) marker: PhantomData<&'a mut T>, +} + +#[allow(dead_code)] // Unused fields are still used in Drop. +struct Inner<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + permits_acquired: u32, + s: &'a Semaphore, + data: *mut T, } impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { + fn skip_drop(self) -> Inner<'a, T> { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + Inner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: unsafe { std::ptr::read(&me.resource_span) }, + permits_acquired: me.permits_acquired, + s: me.s, + data: me.data, + } + } + /// Makes a new `RwLockMappedWriteGuard` for a component of the locked data. /// /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already @@ -65,20 +87,15 @@ impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { F: FnOnce(&mut T) -> &mut U, { let data = f(&mut *this) as *mut U; - let s = this.s; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. - mem::forget(this); + let this = this.skip_drop(); RwLockMappedWriteGuard { - permits_acquired, - s, + permits_acquired: this.permits_acquired, + s: this.s, data, - marker: marker::PhantomData, + marker: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, } } @@ -132,20 +149,15 @@ impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { Some(data) => data as *mut U, None => return Err(this), }; - let s = this.s; - let permits_acquired = this.permits_acquired; - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = this.resource_span.clone(); - // NB: Forget to avoid drop impl from being called. 
- mem::forget(this); + let this = this.skip_drop(); Ok(RwLockMappedWriteGuard { - permits_acquired, - s, + permits_acquired: this.permits_acquired, + s: this.s, data, - marker: marker::PhantomData, + marker: PhantomData, #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, + resource_span: this.resource_span, }) } } From d7abdbb315785c35b4dacad1cd817e4a2a0cbfea Mon Sep 17 00:00:00 2001 From: tijsvd <34085809+tijsvd@users.noreply.github.com> Date: Sat, 18 Feb 2023 15:51:08 +0100 Subject: [PATCH 075/101] benches: mutex contention in `watch::Receiver` bench (#5472) --- benches/Cargo.toml | 5 ++++ benches/sync_watch.rs | 64 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 benches/sync_watch.rs diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 29017323390..10c32de0f3f 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -32,6 +32,11 @@ name = "sync_mpsc_oneshot" path = "sync_mpsc_oneshot.rs" harness = false +[[bench]] +name = "sync_watch" +path = "sync_watch.rs" +harness = false + [[bench]] name = "rt_multi_threaded" path = "rt_multi_threaded.rs" diff --git a/benches/sync_watch.rs b/benches/sync_watch.rs new file mode 100644 index 00000000000..401f83c82e7 --- /dev/null +++ b/benches/sync_watch.rs @@ -0,0 +1,64 @@ +use bencher::{black_box, Bencher}; +use rand::prelude::*; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use tokio::sync::{watch, Notify}; + +fn rt() -> tokio::runtime::Runtime { + tokio::runtime::Builder::new_multi_thread() + .worker_threads(6) + .build() + .unwrap() +} + +fn do_work(rng: &mut impl RngCore) -> u32 { + use std::fmt::Write; + let mut message = String::new(); + for i in 1..=10 { + let _ = write!(&mut message, " {i}={}", rng.gen::()); + } + message + .as_bytes() + .iter() + .map(|&c| c as u32) + .fold(0, u32::wrapping_add) +} + +fn contention_resubscribe(b: &mut Bencher) { + const NTASK: u64 = 1000; + + let rt = rt(); + let (snd, rcv) = watch::channel(0i32); + let wg = Arc::new((AtomicU64::new(0), Notify::new())); + for n in 0..NTASK { + let mut rcv = rcv.clone(); + let wg = wg.clone(); + let mut rng = rand::rngs::StdRng::seed_from_u64(n); + rt.spawn(async move { + while rcv.changed().await.is_ok() { + let _ = *rcv.borrow(); // contend on rwlock + let r = do_work(&mut rng); + let _ = black_box(r); + if wg.0.fetch_sub(1, Ordering::Release) == 1 { + wg.1.notify_one(); + } + } + }); + } + + b.iter(|| { + rt.block_on(async { + for _ in 0..100 { + assert_eq!(wg.0.fetch_add(NTASK, Ordering::Relaxed), 0); + let _ = snd.send(black_box(42)); + while wg.0.load(Ordering::Acquire) > 0 { + wg.1.notified().await; + } + } + }); + }); +} + +bencher::benchmark_group!(contention, contention_resubscribe); + +bencher::benchmark_main!(contention); From a8fda870582a1049bdc31284bc3bb82969014895 Mon Sep 17 00:00:00 2001 From: Maximilian Hils Date: Sat, 18 Feb 2023 20:03:16 +0100 Subject: [PATCH 076/101] net: use Message Read Mode for named pipes (#5350) --- tokio/src/net/windows/named_pipe.rs | 44 +++++++++++++++++++++++--- tokio/tests/net_named_pipe.rs | 48 ++++++++++++++++++++++++----- 2 files changed, 80 insertions(+), 12 deletions(-) diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 9ede94ea6a0..2107c1cdfce 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -1701,14 +1701,18 @@ impl ServerOptions { /// The default pipe mode is [`PipeMode::Byte`]. See [`PipeMode`] for /// documentation of what each mode means. 
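With this change message framing must be requested on both ends: the server builder now sets `PIPE_READMODE_MESSAGE` alongside `PIPE_TYPE_MESSAGE`, and the new `ClientOptions::pipe_mode` switches the client's read mode via `SetNamedPipeHandleState` after connecting. A rough usage sketch of the resulting public API (Windows only; the pipe name is made up):

use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::windows::named_pipe::{ClientOptions, PipeMode, ServerOptions};

const PIPE: &str = r"\\.\pipe\example-message-pipe";

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Both halves opt into message framing.
    let mut server = ServerOptions::new()
        .pipe_mode(PipeMode::Message)
        .create(PIPE)?;
    let mut client = ClientOptions::new()
        .pipe_mode(PipeMode::Message)
        .open(PIPE)?;

    server.connect().await?;

    client.write_all(b"hello").await?;
    client.write_all(b"world").await?;

    // In message mode each read returns at most one written message, so the
    // two writes do not get merged into a single "helloworld" buffer.
    let mut buf = [0u8; 32];
    let n = server.read(&mut buf).await?;
    assert_eq!(&buf[..n], b"hello");
    Ok(())
}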
/// - /// This corresponding to specifying [`dwPipeMode`]. + /// This corresponds to specifying `PIPE_TYPE_` and `PIPE_READMODE_` in [`dwPipeMode`]. /// /// [`dwPipeMode`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea pub fn pipe_mode(&mut self, pipe_mode: PipeMode) -> &mut Self { let is_msg = matches!(pipe_mode, PipeMode::Message); // Pipe mode is implemented as a bit flag 0x4. Set is message and unset // is byte. - bool_flag!(self.pipe_mode, is_msg, windows_sys::PIPE_TYPE_MESSAGE); + bool_flag!( + self.pipe_mode, + is_msg, + windows_sys::PIPE_TYPE_MESSAGE | windows_sys::PIPE_READMODE_MESSAGE + ); self } @@ -2268,6 +2272,7 @@ impl ServerOptions { pub struct ClientOptions { desired_access: u32, security_qos_flags: u32, + pipe_mode: PipeMode, } impl ClientOptions { @@ -2289,6 +2294,7 @@ impl ClientOptions { desired_access: windows_sys::GENERIC_READ | windows_sys::GENERIC_WRITE, security_qos_flags: windows_sys::SECURITY_IDENTIFICATION | windows_sys::SECURITY_SQOS_PRESENT, + pipe_mode: PipeMode::Byte, } } @@ -2341,6 +2347,15 @@ impl ClientOptions { self } + /// The pipe mode. + /// + /// The default pipe mode is [`PipeMode::Byte`]. See [`PipeMode`] for + /// documentation of what each mode means. + pub fn pipe_mode(&mut self, pipe_mode: PipeMode) -> &mut Self { + self.pipe_mode = pipe_mode; + self + } + /// Opens the named pipe identified by `addr`. /// /// This opens the client using [`CreateFile`] with the @@ -2437,6 +2452,20 @@ impl ClientOptions { return Err(io::Error::last_os_error()); } + if matches!(self.pipe_mode, PipeMode::Message) { + let mut mode = windows_sys::PIPE_READMODE_MESSAGE; + let result = windows_sys::SetNamedPipeHandleState( + h, + &mut mode, + ptr::null_mut(), + ptr::null_mut(), + ); + + if result == 0 { + return Err(io::Error::last_os_error()); + } + } + NamedPipeClient::from_raw_handle(h as _) } @@ -2556,7 +2585,9 @@ unsafe fn named_pipe_info(handle: RawHandle) -> io::Result { #[cfg(test)] mod test { - use self::windows_sys::{PIPE_REJECT_REMOTE_CLIENTS, PIPE_TYPE_BYTE, PIPE_TYPE_MESSAGE}; + use self::windows_sys::{ + PIPE_READMODE_MESSAGE, PIPE_REJECT_REMOTE_CLIENTS, PIPE_TYPE_BYTE, PIPE_TYPE_MESSAGE, + }; use super::*; #[test] @@ -2588,13 +2619,16 @@ mod test { opts.reject_remote_clients(false); opts.pipe_mode(PipeMode::Message); - assert_eq!(opts.pipe_mode, PIPE_TYPE_MESSAGE); + assert_eq!(opts.pipe_mode, PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE); opts.reject_remote_clients(true); opts.pipe_mode(PipeMode::Message); assert_eq!( opts.pipe_mode, - PIPE_TYPE_MESSAGE | PIPE_REJECT_REMOTE_CLIENTS + PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_REJECT_REMOTE_CLIENTS ); + + opts.pipe_mode(PipeMode::Byte); + assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); } } diff --git a/tokio/tests/net_named_pipe.rs b/tokio/tests/net_named_pipe.rs index c42122465c0..3ddc4c8a9bf 100644 --- a/tokio/tests/net_named_pipe.rs +++ b/tokio/tests/net_named_pipe.rs @@ -5,7 +5,7 @@ use std::io; use std::mem; use std::os::windows::io::AsRawHandle; use std::time::Duration; -use tokio::io::AsyncWriteExt; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::windows::named_pipe::{ClientOptions, PipeMode, ServerOptions}; use tokio::time; use windows_sys::Win32::Foundation::{ERROR_NO_DATA, ERROR_PIPE_BUSY, NO_ERROR, UNICODE_STRING}; @@ -327,17 +327,51 @@ async fn test_named_pipe_multi_client_ready() -> io::Result<()> { Ok(()) } -// This tests what happens when a client tries to disconnect. 
+// This tests that message mode works as expected. #[tokio::test] async fn test_named_pipe_mode_message() -> io::Result<()> { - const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-mode-message"; + // it's easy to accidentally get a seemingly working test here because byte pipes + // often return contents at write boundaries. to make sure we're doing the right thing we + // explicitly test that it doesn't work in byte mode. + _named_pipe_mode_message(PipeMode::Message).await?; + _named_pipe_mode_message(PipeMode::Byte).await +} + +async fn _named_pipe_mode_message(mode: PipeMode) -> io::Result<()> { + let pipe_name = format!( + r"\\.\pipe\test-named-pipe-mode-message-{}", + matches!(mode, PipeMode::Message) + ); + let mut buf = [0u8; 32]; - let server = ServerOptions::new() - .pipe_mode(PipeMode::Message) - .create(PIPE_NAME)?; + let mut server = ServerOptions::new() + .first_pipe_instance(true) + .pipe_mode(mode) + .create(&pipe_name)?; + + let mut client = ClientOptions::new().pipe_mode(mode).open(&pipe_name)?; - let _ = ClientOptions::new().open(PIPE_NAME)?; server.connect().await?; + + // this needs a few iterations, presumably Windows waits for a few calls before merging buffers + for _ in 0..10 { + client.write_all(b"hello").await?; + server.write_all(b"world").await?; + } + for _ in 0..10 { + let n = server.read(&mut buf).await?; + if buf[..n] != b"hello"[..] { + assert!(matches!(mode, PipeMode::Byte)); + return Ok(()); + } + let n = client.read(&mut buf).await?; + if buf[..n] != b"world"[..] { + assert!(matches!(mode, PipeMode::Byte)); + return Ok(()); + } + } + // byte mode should have errored before. + assert!(matches!(mode, PipeMode::Message)); Ok(()) } From 901f6d26c6aefd25a8f180e7a12ea8ed3c88ace2 Mon Sep 17 00:00:00 2001 From: amab8901 <83634595+amab8901@users.noreply.github.com> Date: Sun, 19 Feb 2023 11:16:59 +0100 Subject: [PATCH 077/101] sync: drop wakers outside lock in semaphore (#5475) --- tokio/src/sync/batch_semaphore.rs | 5 ++++- tokio/src/sync/tests/semaphore_batch.rs | 29 +++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index 57493f4bd2c..a7885bdf172 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -444,6 +444,7 @@ impl Semaphore { } assert_eq!(acquired, 0); + let mut old_waker = None; // Otherwise, register the waker & enqueue the node. 
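The `old_waker` variable introduced just above exists because dropping a `Waker` can run arbitrary code (the new test below drops a semaphore permit from inside a waker), and that must not happen while the waiter lock is held. The same pattern in miniature with a plain `std::sync::Mutex` (illustrative, not Tokio internals):

use std::sync::Mutex;

struct Slot {
    value: Option<String>,
}

fn replace_outside_lock(slot: &Mutex<Slot>, new: String) {
    let mut guard = slot.lock().unwrap();
    // Swap while holding the lock, but keep the old value alive...
    let old = guard.value.replace(new);
    drop(guard);
    // ...and only drop it here, so any Drop impl runs without the lock held.
    drop(old);
}

fn main() {
    let slot = Mutex::new(Slot { value: None });
    replace_outside_lock(&slot, "first".to_string());
    replace_outside_lock(&slot, "second".to_string());
    assert_eq!(slot.lock().unwrap().value.as_deref(), Some("second"));
}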
node.waker.with_mut(|waker| { @@ -455,7 +456,7 @@ impl Semaphore { .map(|waker| !waker.will_wake(cx.waker())) .unwrap_or(true) { - *waker = Some(cx.waker().clone()); + old_waker = std::mem::replace(waker, Some(cx.waker().clone())); } }); @@ -468,6 +469,8 @@ impl Semaphore { waiters.queue.push_front(node); } + drop(waiters); + drop(old_waker); Pending } diff --git a/tokio/src/sync/tests/semaphore_batch.rs b/tokio/src/sync/tests/semaphore_batch.rs index d4e35aa7b2f..c9e9d05ec8f 100644 --- a/tokio/src/sync/tests/semaphore_batch.rs +++ b/tokio/src/sync/tests/semaphore_batch.rs @@ -252,3 +252,32 @@ fn cancel_acquire_releases_permits() { assert_eq!(6, s.available_permits()); assert_ok!(s.try_acquire(6)); } + +#[test] +fn release_permits_at_drop() { + use crate::sync::semaphore::*; + use futures::task::ArcWake; + use std::future::Future; + use std::sync::Arc; + + let sem = Arc::new(Semaphore::new(1)); + + struct ReleaseOnDrop(Option); + + impl ArcWake for ReleaseOnDrop { + fn wake_by_ref(_arc_self: &Arc) {} + } + + let mut fut = Box::pin(async { + let _permit = sem.acquire().await.unwrap(); + }); + + // Second iteration shouldn't deadlock. + for _ in 0..=1 { + let waker = futures::task::waker(Arc::new(ReleaseOnDrop( + sem.clone().try_acquire_owned().ok(), + ))); + let mut cx = std::task::Context::from_waker(&waker); + assert!(fut.as_mut().poll(&mut cx).is_pending()); + } +} From 2e7f996f17ff9cac761c775d359355d4e37555b1 Mon Sep 17 00:00:00 2001 From: Maximilian Hils Date: Sun, 19 Feb 2023 11:59:28 +0100 Subject: [PATCH 078/101] net: refactor named pipe builders to not use bitfields (#5477) --- tokio/src/net/windows/named_pipe.rs | 194 ++++++++++++---------------- 1 file changed, 86 insertions(+), 108 deletions(-) diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 2107c1cdfce..356d5e4bd26 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -1645,19 +1645,6 @@ impl AsRawHandle for NamedPipeClient { } } -// Helper to set a boolean flag as a bitfield. -macro_rules! bool_flag { - ($f:expr, $t:expr, $flag:expr) => {{ - let current = $f; - - if $t { - $f = current | $flag; - } else { - $f = current & !$flag; - }; - }}; -} - /// A builder structure for construct a named pipe with named pipe-specific /// options. This is required to use for named pipe servers who wants to modify /// pipe-related options. @@ -1665,8 +1652,17 @@ macro_rules! bool_flag { /// See [`ServerOptions::create`]. 
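// Each option is stored as a separate field; the `dwOpenMode` and `dwPipeMode`
// bit masks expected by `CreateNamedPipeW` are assembled from these fields when
// the pipe is created.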
#[derive(Debug, Clone)] pub struct ServerOptions { - open_mode: u32, - pipe_mode: u32, + // dwOpenMode + access_inbound: bool, + access_outbound: bool, + first_pipe_instance: bool, + write_dac: bool, + write_owner: bool, + access_system_security: bool, + // dwPipeMode + pipe_mode: PipeMode, + reject_remote_clients: bool, + // other options max_instances: u32, out_buffer_size: u32, in_buffer_size: u32, @@ -1687,8 +1683,14 @@ impl ServerOptions { /// ``` pub fn new() -> ServerOptions { ServerOptions { - open_mode: windows_sys::PIPE_ACCESS_DUPLEX | windows_sys::FILE_FLAG_OVERLAPPED, - pipe_mode: windows_sys::PIPE_TYPE_BYTE | windows_sys::PIPE_REJECT_REMOTE_CLIENTS, + access_inbound: true, + access_outbound: true, + first_pipe_instance: false, + write_dac: false, + write_owner: false, + access_system_security: false, + pipe_mode: PipeMode::Byte, + reject_remote_clients: true, max_instances: windows_sys::PIPE_UNLIMITED_INSTANCES, out_buffer_size: 65536, in_buffer_size: 65536, @@ -1705,14 +1707,7 @@ impl ServerOptions { /// /// [`dwPipeMode`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea pub fn pipe_mode(&mut self, pipe_mode: PipeMode) -> &mut Self { - let is_msg = matches!(pipe_mode, PipeMode::Message); - // Pipe mode is implemented as a bit flag 0x4. Set is message and unset - // is byte. - bool_flag!( - self.pipe_mode, - is_msg, - windows_sys::PIPE_TYPE_MESSAGE | windows_sys::PIPE_READMODE_MESSAGE - ); + self.pipe_mode = pipe_mode; self } @@ -1808,7 +1803,7 @@ impl ServerOptions { /// # Ok(()) } /// ``` pub fn access_inbound(&mut self, allowed: bool) -> &mut Self { - bool_flag!(self.open_mode, allowed, windows_sys::PIPE_ACCESS_INBOUND); + self.access_inbound = allowed; self } @@ -1906,7 +1901,7 @@ impl ServerOptions { /// # Ok(()) } /// ``` pub fn access_outbound(&mut self, allowed: bool) -> &mut Self { - bool_flag!(self.open_mode, allowed, windows_sys::PIPE_ACCESS_OUTBOUND); + self.access_outbound = allowed; self } @@ -1974,11 +1969,7 @@ impl ServerOptions { /// [`create`]: ServerOptions::create /// [`FILE_FLAG_FIRST_PIPE_INSTANCE`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea#pipe_first_pipe_instance pub fn first_pipe_instance(&mut self, first: bool) -> &mut Self { - bool_flag!( - self.open_mode, - first, - windows_sys::FILE_FLAG_FIRST_PIPE_INSTANCE - ); + self.first_pipe_instance = first; self } @@ -2060,7 +2051,7 @@ impl ServerOptions { /// /// [`WRITE_DAC`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea pub fn write_dac(&mut self, requested: bool) -> &mut Self { - bool_flag!(self.open_mode, requested, windows_sys::WRITE_DAC); + self.write_dac = requested; self } @@ -2070,7 +2061,7 @@ impl ServerOptions { /// /// [`WRITE_OWNER`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea pub fn write_owner(&mut self, requested: bool) -> &mut Self { - bool_flag!(self.open_mode, requested, windows_sys::WRITE_OWNER); + self.write_owner = requested; self } @@ -2080,11 +2071,7 @@ impl ServerOptions { /// /// [`ACCESS_SYSTEM_SECURITY`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea pub fn access_system_security(&mut self, requested: bool) -> &mut Self { - bool_flag!( - self.open_mode, - requested, - windows_sys::ACCESS_SYSTEM_SECURITY - ); + self.access_system_security = requested; self } @@ -2095,11 +2082,7 @@ impl ServerOptions { /// /// [`PIPE_REJECT_REMOTE_CLIENTS`]: 
https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea#pipe_reject_remote_clients pub fn reject_remote_clients(&mut self, reject: bool) -> &mut Self { - bool_flag!( - self.pipe_mode, - reject, - windows_sys::PIPE_REJECT_REMOTE_CLIENTS - ); + self.reject_remote_clients = reject; self } @@ -2245,10 +2228,46 @@ impl ServerOptions { ) -> io::Result { let addr = encode_addr(addr); + let pipe_mode = { + let mut mode = if matches!(self.pipe_mode, PipeMode::Message) { + windows_sys::PIPE_TYPE_MESSAGE | windows_sys::PIPE_READMODE_MESSAGE + } else { + windows_sys::PIPE_TYPE_BYTE | windows_sys::PIPE_READMODE_BYTE + }; + if self.reject_remote_clients { + mode |= windows_sys::PIPE_REJECT_REMOTE_CLIENTS; + } else { + mode |= windows_sys::PIPE_ACCEPT_REMOTE_CLIENTS; + } + mode + }; + let open_mode = { + let mut mode = windows_sys::FILE_FLAG_OVERLAPPED; + if self.access_inbound { + mode |= windows_sys::PIPE_ACCESS_INBOUND; + } + if self.access_outbound { + mode |= windows_sys::PIPE_ACCESS_OUTBOUND; + } + if self.first_pipe_instance { + mode |= windows_sys::FILE_FLAG_FIRST_PIPE_INSTANCE; + } + if self.write_dac { + mode |= windows_sys::WRITE_DAC; + } + if self.write_owner { + mode |= windows_sys::WRITE_OWNER; + } + if self.access_system_security { + mode |= windows_sys::ACCESS_SYSTEM_SECURITY; + } + mode + }; + let h = windows_sys::CreateNamedPipeW( addr.as_ptr(), - self.open_mode, - self.pipe_mode, + open_mode, + pipe_mode, self.max_instances, self.out_buffer_size, self.in_buffer_size, @@ -2270,7 +2289,8 @@ impl ServerOptions { /// See [`ClientOptions::open`]. #[derive(Debug, Clone)] pub struct ClientOptions { - desired_access: u32, + generic_read: bool, + generic_write: bool, security_qos_flags: u32, pipe_mode: PipeMode, } @@ -2291,7 +2311,8 @@ impl ClientOptions { /// ``` pub fn new() -> Self { Self { - desired_access: windows_sys::GENERIC_READ | windows_sys::GENERIC_WRITE, + generic_read: true, + generic_write: true, security_qos_flags: windows_sys::SECURITY_IDENTIFICATION | windows_sys::SECURITY_SQOS_PRESENT, pipe_mode: PipeMode::Byte, @@ -2305,7 +2326,7 @@ impl ClientOptions { /// [`GENERIC_READ`]: https://docs.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew pub fn read(&mut self, allowed: bool) -> &mut Self { - bool_flag!(self.desired_access, allowed, windows_sys::GENERIC_READ); + self.generic_read = allowed; self } @@ -2316,7 +2337,7 @@ impl ClientOptions { /// [`GENERIC_WRITE`]: https://docs.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew pub fn write(&mut self, allowed: bool) -> &mut Self { - bool_flag!(self.desired_access, allowed, windows_sys::GENERIC_WRITE); + self.generic_write = allowed; self } @@ -2434,13 +2455,24 @@ impl ClientOptions { ) -> io::Result { let addr = encode_addr(addr); + let desired_access = { + let mut access = 0; + if self.generic_read { + access |= windows_sys::GENERIC_READ; + } + if self.generic_write { + access |= windows_sys::GENERIC_WRITE; + } + access + }; + // NB: We could use a platform specialized `OpenOptions` here, but since // we have access to windows_sys it ultimately doesn't hurt to use // `CreateFile` explicitly since it allows the use of our already // well-structured wide `addr` to pass into CreateFileW. 
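        // `desired_access` is built above from the `generic_read` and
        // `generic_write` flags before being passed to `CreateFileW`.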
let h = windows_sys::CreateFileW( addr.as_ptr(), - self.desired_access, + desired_access, 0, attrs as *mut _, windows_sys::OPEN_EXISTING, @@ -2453,13 +2485,9 @@ impl ClientOptions { } if matches!(self.pipe_mode, PipeMode::Message) { - let mut mode = windows_sys::PIPE_READMODE_MESSAGE; - let result = windows_sys::SetNamedPipeHandleState( - h, - &mut mode, - ptr::null_mut(), - ptr::null_mut(), - ); + let mode = windows_sys::PIPE_READMODE_MESSAGE; + let result = + windows_sys::SetNamedPipeHandleState(h, &mode, ptr::null_mut(), ptr::null_mut()); if result == 0 { return Err(io::Error::last_os_error()); @@ -2582,53 +2610,3 @@ unsafe fn named_pipe_info(handle: RawHandle) -> io::Result { max_instances, }) } - -#[cfg(test)] -mod test { - use self::windows_sys::{ - PIPE_READMODE_MESSAGE, PIPE_REJECT_REMOTE_CLIENTS, PIPE_TYPE_BYTE, PIPE_TYPE_MESSAGE, - }; - use super::*; - - #[test] - fn opts_default_pipe_mode() { - let opts = ServerOptions::new(); - assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); - } - - #[test] - fn opts_unset_reject_remote() { - let mut opts = ServerOptions::new(); - opts.reject_remote_clients(false); - assert_eq!(opts.pipe_mode & PIPE_REJECT_REMOTE_CLIENTS, 0); - } - - #[test] - fn opts_set_pipe_mode_maintains_reject_remote_clients() { - let mut opts = ServerOptions::new(); - opts.pipe_mode(PipeMode::Byte); - assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); - - opts.reject_remote_clients(false); - opts.pipe_mode(PipeMode::Byte); - assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE); - - opts.reject_remote_clients(true); - opts.pipe_mode(PipeMode::Byte); - assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); - - opts.reject_remote_clients(false); - opts.pipe_mode(PipeMode::Message); - assert_eq!(opts.pipe_mode, PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE); - - opts.reject_remote_clients(true); - opts.pipe_mode(PipeMode::Message); - assert_eq!( - opts.pipe_mode, - PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_REJECT_REMOTE_CLIENTS - ); - - opts.pipe_mode(PipeMode::Byte); - assert_eq!(opts.pipe_mode, PIPE_TYPE_BYTE | PIPE_REJECT_REMOTE_CLIENTS); - } -} From 0f17d69303dd47503da89b32c274f9f6a404cb5f Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 19 Feb 2023 12:11:22 +0100 Subject: [PATCH 079/101] ci: remove PROPTEST_CASES from miri (#5478) This option doesn't do anything anymore. 
--- .github/workflows/ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7da76abb1b7..54f91488e90 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -209,7 +209,6 @@ jobs: working-directory: tokio env: MIRIFLAGS: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-retag-fields - PROPTEST_CASES: 10 asan: name: asan From 795754a846a29a0c785d4dadfb7a9136f8aba1ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tymoteusz=20Wi=C5=9Bniewski?= Date: Sun, 19 Feb 2023 14:10:38 +0100 Subject: [PATCH 080/101] sync: make `notify_waiters` calls atomic (#5458) --- tokio/src/sync/notify.rs | 190 +++++++++++++++++++++------ tokio/src/sync/tests/loom_notify.rs | 191 ++++++++++++++++++++++++++++ tokio/src/sync/tests/notify.rs | 39 ++++++ tokio/src/util/linked_list.rs | 98 +++++++++++++- 4 files changed, 474 insertions(+), 44 deletions(-) diff --git a/tokio/src/sync/notify.rs b/tokio/src/sync/notify.rs index efe16f9f8ef..8e83a76b7af 100644 --- a/tokio/src/sync/notify.rs +++ b/tokio/src/sync/notify.rs @@ -7,7 +7,7 @@ use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::Mutex; -use crate::util::linked_list::{self, LinkedList}; +use crate::util::linked_list::{self, GuardedLinkedList, LinkedList}; use crate::util::WakeList; use std::cell::UnsafeCell; @@ -20,6 +20,7 @@ use std::sync::atomic::Ordering::SeqCst; use std::task::{Context, Poll, Waker}; type WaitList = LinkedList::Target>; +type GuardedWaitList = GuardedLinkedList::Target>; /// Notifies a single task to wake up. /// @@ -198,10 +199,16 @@ type WaitList = LinkedList::Target>; /// [`Semaphore`]: crate::sync::Semaphore #[derive(Debug)] pub struct Notify { - // This uses 2 bits to store one of `EMPTY`, + // `state` uses 2 bits to store one of `EMPTY`, // `WAITING` or `NOTIFIED`. The rest of the bits // are used to store the number of times `notify_waiters` // was called. + // + // Throughout the code there are two assumptions: + // - state can be transitioned *from* `WAITING` only if + // `waiters` lock is held + // - number of times `notify_waiters` was called can + // be modified only if `waiters` lock is held state: AtomicUsize, waiters: Mutex, } @@ -229,6 +236,17 @@ struct Waiter { _p: PhantomPinned, } +impl Waiter { + fn new() -> Waiter { + Waiter { + pointers: linked_list::Pointers::new(), + waker: None, + notified: None, + _p: PhantomPinned, + } + } +} + generate_addr_of_methods! { impl<> Waiter { unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { @@ -237,6 +255,59 @@ generate_addr_of_methods! { } } +/// List used in `Notify::notify_waiters`. It wraps a guarded linked list +/// and gates the access to it on `notify.waiters` mutex. It also empties +/// the list on drop. +struct NotifyWaitersList<'a> { + list: GuardedWaitList, + is_empty: bool, + notify: &'a Notify, +} + +impl<'a> NotifyWaitersList<'a> { + fn new( + unguarded_list: WaitList, + guard: Pin<&'a mut UnsafeCell>, + notify: &'a Notify, + ) -> NotifyWaitersList<'a> { + // Safety: pointer to the guarding waiter is not null. + let guard_ptr = unsafe { NonNull::new_unchecked(guard.get()) }; + let list = unguarded_list.into_guarded(guard_ptr); + NotifyWaitersList { + list, + is_empty: false, + notify, + } + } + + /// Removes the last element from the guarded list. Modifying this list + /// requires an exclusive access to the main list in `Notify`. 
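    /// The otherwise unused `_waiters` argument serves as proof that the
    /// caller holds that exclusive access.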
+ fn pop_back_locked(&mut self, _waiters: &mut WaitList) -> Option> { + let result = self.list.pop_back(); + if result.is_none() { + // Save information about emptiness to avoid waiting for lock + // in the destructor. + self.is_empty = true; + } + result + } +} + +impl Drop for NotifyWaitersList<'_> { + fn drop(&mut self) { + // If the list is not empty, we unlink all waiters from it. + // We do not wake the waiters to avoid double panics. + if !self.is_empty { + let _lock_guard = self.notify.waiters.lock(); + while let Some(mut waiter) = self.list.pop_back() { + // Safety: we hold the lock. + let waiter = unsafe { waiter.as_mut() }; + waiter.notified = Some(NotificationType::AllWaiters); + } + } + } +} + /// Future returned from [`Notify::notified()`]. /// /// This future is fused, so once it has completed, any future calls to poll @@ -249,6 +320,9 @@ pub struct Notified<'a> { /// The current state of the receiving process. state: State, + /// Number of calls to `notify_waiters` at the time of creation. + notify_waiters_calls: usize, + /// Entry in the waiter `LinkedList`. waiter: UnsafeCell, } @@ -258,7 +332,7 @@ unsafe impl<'a> Sync for Notified<'a> {} #[derive(Debug)] enum State { - Init(usize), + Init, Waiting, Done, } @@ -383,17 +457,13 @@ impl Notify { /// ``` pub fn notified(&self) -> Notified<'_> { // we load the number of times notify_waiters - // was called and store that in our initial state + // was called and store that in the future. let state = self.state.load(SeqCst); Notified { notify: self, - state: State::Init(state >> NOTIFY_WAITERS_SHIFT), - waiter: UnsafeCell::new(Waiter { - pointers: linked_list::Pointers::new(), - waker: None, - notified: None, - _p: PhantomPinned, - }), + state: State::Init, + notify_waiters_calls: get_num_notify_waiters_calls(state), + waiter: UnsafeCell::new(Waiter::new()), } } @@ -500,12 +570,9 @@ impl Notify { /// } /// ``` pub fn notify_waiters(&self) { - let mut wakers = WakeList::new(); - - // There are waiters, the lock must be acquired to notify. let mut waiters = self.waiters.lock(); - // The state must be reloaded while the lock is held. The state may only + // The state must be loaded while the lock is held. The state may only // transition out of WAITING while the lock is held. let curr = self.state.load(SeqCst); @@ -516,12 +583,30 @@ impl Notify { return; } - // At this point, it is guaranteed that the state will not - // concurrently change, as holding the lock is required to - // transition **out** of `WAITING`. + // Increment the number of times this method was called + // and transition to empty. + let new_state = set_state(inc_num_notify_waiters_calls(curr), EMPTY); + self.state.store(new_state, SeqCst); + + // It is critical for `GuardedLinkedList` safety that the guard node is + // pinned in memory and is not dropped until the guarded list is dropped. + let guard = UnsafeCell::new(Waiter::new()); + pin!(guard); + + // We move all waiters to a secondary list. It uses a `GuardedLinkedList` + // underneath to allow every waiter to safely remove itself from it. + // + // * This list will be still guarded by the `waiters` lock. + // `NotifyWaitersList` wrapper makes sure we hold the lock to modify it. + // * This wrapper will empty the list on drop. It is critical for safety + // that we will not leave any list entry with a pointer to the local + // guard node after this function returns / panics. 
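        // * `std::mem::take` leaves `self.waiters` empty, so any waiter that is
        //   enqueued after this point lands in a fresh list and is not drained
        //   by the loop below.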
+ let mut list = NotifyWaitersList::new(std::mem::take(&mut *waiters), guard, self); + + let mut wakers = WakeList::new(); 'outer: loop { while wakers.can_push() { - match waiters.pop_back() { + match list.pop_back_locked(&mut waiters) { Some(mut waiter) => { // Safety: `waiters` lock is still held. let waiter = unsafe { waiter.as_mut() }; @@ -540,20 +625,17 @@ impl Notify { } } + // Release the lock before notifying. drop(waiters); + // One of the wakers may panic, but the remaining waiters will still + // be unlinked from the list in `NotifyWaitersList` destructor. wakers.wake_all(); // Acquire the lock again. waiters = self.waiters.lock(); } - // All waiters will be notified, the state must be transitioned to - // `EMPTY`. As transitioning **from** `WAITING` requires the lock to be - // held, a `store` is sufficient. - let new = set_state(inc_num_notify_waiters_calls(curr), EMPTY); - self.state.store(new, SeqCst); - // Release the lock before notifying drop(waiters); @@ -730,26 +812,32 @@ impl Notified<'_> { /// A custom `project` implementation is used in place of `pin-project-lite` /// as a custom drop implementation is needed. - fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &UnsafeCell) { + fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &usize, &UnsafeCell) { unsafe { - // Safety: both `notify` and `state` are `Unpin`. + // Safety: `notify`, `state` and `notify_waiters_calls` are `Unpin`. is_unpin::<&Notify>(); is_unpin::(); + is_unpin::(); let me = self.get_unchecked_mut(); - (me.notify, &mut me.state, &me.waiter) + ( + me.notify, + &mut me.state, + &me.notify_waiters_calls, + &me.waiter, + ) } } fn poll_notified(self: Pin<&mut Self>, waker: Option<&Waker>) -> Poll<()> { use State::*; - let (notify, state, waiter) = self.project(); + let (notify, state, notify_waiters_calls, waiter) = self.project(); loop { match *state { - Init(initial_notify_waiters_calls) => { + Init => { let curr = notify.state.load(SeqCst); // Optimistically try acquiring a pending notification @@ -779,7 +867,7 @@ impl Notified<'_> { // if notify_waiters has been called after the future // was created, then we are done - if get_num_notify_waiters_calls(curr) != initial_notify_waiters_calls { + if get_num_notify_waiters_calls(curr) != *notify_waiters_calls { *state = Done; return Poll::Ready(()); } @@ -846,21 +934,37 @@ impl Notified<'_> { return Poll::Pending; } Waiting => { - // Currently in the "Waiting" state, implying the caller has - // a waiter stored in the waiter list (guarded by - // `notify.waiters`). In order to access the waker fields, - // we must hold the lock. + // Currently in the "Waiting" state, implying the caller has a waiter stored in + // a waiter list (guarded by `notify.waiters`). In order to access the waker + // fields, we must acquire the lock. - let waiters = notify.waiters.lock(); + let mut waiters = notify.waiters.lock(); + + // Load the state with the lock held. + let curr = notify.state.load(SeqCst); // Safety: called while locked let w = unsafe { &mut *waiter.get() }; if w.notified.is_some() { - // Our waker has been notified. Reset the fields and - // remove it from the list. - w.waker = None; + // Our waker has been notified and our waiter is already removed from + // the list. Reset the notification and convert to `Done`. 
w.notified = None; + w.waker = None; + *state = Done; + } else if get_num_notify_waiters_calls(curr) != *notify_waiters_calls { + // Before we add a waiter to the list we check if these numbers are + // different while holding the lock. If these numbers are different now, + // it means that there is a call to `notify_waiters` in progress and this + // waiter must be contained by a guarded list used in `notify_waiters`. + // We can treat the waiter as notified and remove it from the list, as + // it would have been notified in the `notify_waiters` call anyways. + + w.waker = None; + + // Safety: we hold the lock, so we have an exclusive access to the list. + // The list is used in `notify_waiters`, so it must be guarded. + unsafe { waiters.remove(NonNull::new_unchecked(w)) }; *state = Done; } else { @@ -906,7 +1010,7 @@ impl Drop for Notified<'_> { use State::*; // Safety: The type only transitions to a "Waiting" state when pinned. - let (notify, state, waiter) = unsafe { Pin::new_unchecked(self).project() }; + let (notify, state, _, waiter) = unsafe { Pin::new_unchecked(self).project() }; // This is where we ensure safety. The `Notified` value is being // dropped, which means we must ensure that the waiter entry is no @@ -917,8 +1021,10 @@ impl Drop for Notified<'_> { // remove the entry from the list (if not already removed) // - // safety: the waiter is only added to `waiters` by virtue of it - // being the only `LinkedList` available to the type. + // Safety: we hold the lock, so we have an exclusive access to every list the + // waiter may be contained in. If the node is not contained in the `waiters` + // list, then it is contained by a guarded list used by `notify_waiters` and + // in such case it must be a middle node. unsafe { waiters.remove(NonNull::new_unchecked(waiter.get())) }; if waiters.is_empty() && get_state(notify_state) == WAITING { diff --git a/tokio/src/sync/tests/loom_notify.rs b/tokio/src/sync/tests/loom_notify.rs index d484a758172..a4ded1d35bc 100644 --- a/tokio/src/sync/tests/loom_notify.rs +++ b/tokio/src/sync/tests/loom_notify.rs @@ -4,6 +4,11 @@ use loom::future::block_on; use loom::sync::Arc; use loom::thread; +use tokio_test::{assert_pending, assert_ready}; + +/// `util::wake_list::NUM_WAKERS` +const WAKE_LIST_SIZE: usize = 32; + #[test] fn notify_one() { loom::model(|| { @@ -138,3 +143,189 @@ fn notify_drop() { th2.join().unwrap(); }); } + +/// Polls two `Notified` futures and checks if poll results are consistent +/// with each other. If the first future is notified by a `notify_waiters` +/// call, then the second one must be notified as well. +#[test] +fn notify_waiters_poll_consistency() { + fn notify_waiters_poll_consistency_variant(poll_setting: [bool; 2]) { + let notify = Arc::new(Notify::new()); + let mut notified = [ + tokio_test::task::spawn(notify.notified()), + tokio_test::task::spawn(notify.notified()), + ]; + for i in 0..2 { + if poll_setting[i] { + assert_pending!(notified[i].poll()); + } + } + + let tx = notify.clone(); + let th = thread::spawn(move || { + tx.notify_waiters(); + }); + + let res1 = notified[0].poll(); + let res2 = notified[1].poll(); + + // If res1 is ready, then res2 must also be ready. + assert!(res1.is_pending() || res2.is_ready()); + + th.join().unwrap(); + } + + // We test different scenarios in which pending futures had or had not + // been polled before the call to `notify_waiters`. 
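    // Each combination runs under its own `loom::model` call so that loom can
    // exhaustively explore the interleavings for that particular polling order.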
+ loom::model(|| notify_waiters_poll_consistency_variant([false, false])); + loom::model(|| notify_waiters_poll_consistency_variant([true, false])); + loom::model(|| notify_waiters_poll_consistency_variant([false, true])); + loom::model(|| notify_waiters_poll_consistency_variant([true, true])); +} + +/// Polls two `Notified` futures and checks if poll results are consistent +/// with each other. If the first future is notified by a `notify_waiters` +/// call, then the second one must be notified as well. +/// +/// Here we also add other `Notified` futures in between to force the two +/// tested futures to end up in different chunks. +#[test] +fn notify_waiters_poll_consistency_many() { + fn notify_waiters_poll_consistency_many_variant(order: [usize; 2]) { + let notify = Arc::new(Notify::new()); + + let mut futs = (0..WAKE_LIST_SIZE + 1) + .map(|_| tokio_test::task::spawn(notify.notified())) + .collect::>(); + + assert_pending!(futs[order[0]].poll()); + for i in 2..futs.len() { + assert_pending!(futs[i].poll()); + } + assert_pending!(futs[order[1]].poll()); + + let tx = notify.clone(); + let th = thread::spawn(move || { + tx.notify_waiters(); + }); + + let res1 = futs[0].poll(); + let res2 = futs[1].poll(); + + // If res1 is ready, then res2 must also be ready. + assert!(res1.is_pending() || res2.is_ready()); + + th.join().unwrap(); + } + + // We test different scenarios in which futures are polled in different order. + loom::model(|| notify_waiters_poll_consistency_many_variant([0, 1])); + loom::model(|| notify_waiters_poll_consistency_many_variant([1, 0])); +} + +/// Checks if a call to `notify_waiters` is observed as atomic when combined +/// with a concurrent call to `notify_one`. +#[test] +fn notify_waiters_is_atomic() { + fn notify_waiters_is_atomic_variant(tested_fut_index: usize) { + let notify = Arc::new(Notify::new()); + + let mut futs = (0..WAKE_LIST_SIZE + 1) + .map(|_| tokio_test::task::spawn(notify.notified())) + .collect::>(); + + for fut in &mut futs { + assert_pending!(fut.poll()); + } + + let tx = notify.clone(); + let th = thread::spawn(move || { + tx.notify_waiters(); + }); + + block_on(async { + // If awaiting one of the futures completes, then we should be + // able to assume that all pending futures are notified. Therefore + // a notification from a subsequent `notify_one` call should not + // be consumed by an old future. + futs.remove(tested_fut_index).await; + + let mut new_fut = tokio_test::task::spawn(notify.notified()); + assert_pending!(new_fut.poll()); + + notify.notify_one(); + + // `new_fut` must consume the notification from `notify_one`. + assert_ready!(new_fut.poll()); + }); + + th.join().unwrap(); + } + + // We test different scenarios in which the tested future is at the beginning + // or at the end of the waiters queue used by `Notify`. 
+ loom::model(|| notify_waiters_is_atomic_variant(0)); + loom::model(|| notify_waiters_is_atomic_variant(32)); +} + +/// Checks if a single call to `notify_waiters` does not get through two `Notified` +/// futures created and awaited sequentially like this: +/// ```ignore +/// notify.notified().await; +/// notify.notified().await; +/// ``` +#[test] +fn notify_waiters_sequential_notified_await() { + use crate::sync::oneshot; + + loom::model(|| { + let notify = Arc::new(Notify::new()); + + let (tx_fst, rx_fst) = oneshot::channel(); + let (tx_snd, rx_snd) = oneshot::channel(); + + let receiver = thread::spawn({ + let notify = notify.clone(); + move || { + block_on(async { + // Poll the first `Notified` to put it as the first waiter + // in the queue. + let mut first_notified = tokio_test::task::spawn(notify.notified()); + assert_pending!(first_notified.poll()); + + // Create additional waiters to force `notify_waiters` to + // release the lock at least once. + let _task_pile = (0..WAKE_LIST_SIZE + 1) + .map(|_| { + let mut fut = tokio_test::task::spawn(notify.notified()); + assert_pending!(fut.poll()); + fut + }) + .collect::>(); + + // We are ready for the notify_waiters call. + tx_fst.send(()).unwrap(); + + first_notified.await; + + // Poll the second `Notified` future to try to insert + // it to the waiters queue. + let mut second_notified = tokio_test::task::spawn(notify.notified()); + assert_pending!(second_notified.poll()); + + // Wait for the `notify_waiters` to end and check if we + // are woken up. + rx_snd.await.unwrap(); + assert_pending!(second_notified.poll()); + }); + } + }); + + // Wait for the signal and call `notify_waiters`. + block_on(rx_fst).unwrap(); + notify.notify_waiters(); + tx_snd.send(()).unwrap(); + + receiver.join().unwrap(); + }); +} diff --git a/tokio/src/sync/tests/notify.rs b/tokio/src/sync/tests/notify.rs index eb0da8faad6..4b598959765 100644 --- a/tokio/src/sync/tests/notify.rs +++ b/tokio/src/sync/tests/notify.rs @@ -46,6 +46,45 @@ fn notify_clones_waker_before_lock() { let _ = future.poll(&mut cx); } +#[cfg(panic = "unwind")] +#[test] +fn notify_waiters_handles_panicking_waker() { + use futures::task::ArcWake; + + let notify = Arc::new(Notify::new()); + + struct PanickingWaker(Arc); + + impl ArcWake for PanickingWaker { + fn wake_by_ref(_arc_self: &Arc) { + panic!("waker panicked"); + } + } + + let bad_fut = notify.notified(); + pin!(bad_fut); + + let waker = futures::task::waker(Arc::new(PanickingWaker(notify.clone()))); + let mut cx = Context::from_waker(&waker); + let _ = bad_fut.poll(&mut cx); + + let mut futs = Vec::new(); + for _ in 0..32 { + let mut fut = tokio_test::task::spawn(notify.notified()); + assert!(fut.poll().is_pending()); + futs.push(fut); + } + + assert!(std::panic::catch_unwind(|| { + notify.notify_waiters(); + }) + .is_err()); + + for mut fut in futs { + assert!(fut.poll().is_ready()); + } +} + #[test] fn notify_simple() { let notify = Notify::new(); diff --git a/tokio/src/util/linked_list.rs b/tokio/src/util/linked_list.rs index c9d99e97247..460e2564d4e 100644 --- a/tokio/src/util/linked_list.rs +++ b/tokio/src/util/linked_list.rs @@ -178,8 +178,12 @@ impl LinkedList { /// /// # Safety /// - /// The caller **must** ensure that `node` is currently contained by - /// `self` or not contained by any other list. 
+ /// The caller **must** ensure that exactly one of the following is true: + /// - `node` is currently contained by `self`, + /// - `node` is not contained by any list, + /// - `node` is currently contained by some other `GuardedLinkedList` **and** + /// the caller has an exclusive access to that list. This condition is + /// used by the linked list in `sync::Notify`. pub(crate) unsafe fn remove(&mut self, node: NonNull) -> Option { if let Some(prev) = L::pointers(node).as_ref().get_prev() { debug_assert_eq!(L::pointers(prev).as_ref().get_next(), Some(node)); @@ -290,6 +294,96 @@ cfg_io_readiness! { } } +// ===== impl GuardedLinkedList ===== + +feature! { + #![any( + feature = "process", + feature = "sync", + feature = "rt", + feature = "signal", + )] + + /// An intrusive linked list, but instead of keeping pointers to the head + /// and tail nodes, it uses a special guard node linked with those nodes. + /// It means that the list is circular and every pointer of a node from + /// the list is not `None`, including pointers from the guard node. + /// + /// If a list is empty, then both pointers of the guard node are pointing + /// at the guard node itself. + pub(crate) struct GuardedLinkedList { + /// Pointer to the guard node. + guard: NonNull, + + /// Node type marker. + _marker: PhantomData<*const L>, + } + + impl>> LinkedList { + /// Turns a linked list into the guarded version by linking the guard node + /// with the head and tail nodes. Like with other nodes, you should guarantee + /// that the guard node is pinned in memory. + pub(crate) fn into_guarded(self, guard_handle: L::Handle) -> GuardedLinkedList { + // `guard_handle` is a NonNull pointer, we don't have to care about dropping it. + let guard = L::as_raw(&guard_handle); + + unsafe { + if let Some(head) = self.head { + debug_assert!(L::pointers(head).as_ref().get_prev().is_none()); + L::pointers(head).as_mut().set_prev(Some(guard)); + L::pointers(guard).as_mut().set_next(Some(head)); + + // The list is not empty, so the tail cannot be `None`. + let tail = self.tail.unwrap(); + debug_assert!(L::pointers(tail).as_ref().get_next().is_none()); + L::pointers(tail).as_mut().set_next(Some(guard)); + L::pointers(guard).as_mut().set_prev(Some(tail)); + } else { + // The list is empty. + L::pointers(guard).as_mut().set_prev(Some(guard)); + L::pointers(guard).as_mut().set_next(Some(guard)); + } + } + + GuardedLinkedList { guard, _marker: PhantomData } + } + } + + impl GuardedLinkedList { + fn tail(&self) -> Option> { + let tail_ptr = unsafe { + L::pointers(self.guard).as_ref().get_prev().unwrap() + }; + + // Compare the tail pointer with the address of the guard node itself. + // If the guard points at itself, then there are no other nodes and + // the list is considered empty. + if tail_ptr != self.guard { + Some(tail_ptr) + } else { + None + } + } + + /// Removes the last element from a list and returns it, or None if it is + /// empty. 
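    /// Because the list is circular through the guard node, the `prev`
    /// pointers read below are never `None`, which is why they can be
    /// unwrapped.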
+ pub(crate) fn pop_back(&mut self) -> Option { + unsafe { + let last = self.tail()?; + let before_last = L::pointers(last).as_ref().get_prev().unwrap(); + + L::pointers(self.guard).as_mut().set_prev(Some(before_last)); + L::pointers(before_last).as_mut().set_next(Some(self.guard)); + + L::pointers(last).as_mut().set_prev(None); + L::pointers(last).as_mut().set_next(None); + + Some(L::from_raw(last)) + } + } + } +} + // ===== impl Pointers ===== impl Pointers { From eca24068f718f3edbdbb6615fb1523b4578a7ecb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tymoteusz=20Wi=C5=9Bniewski?= Date: Sun, 19 Feb 2023 14:11:42 +0100 Subject: [PATCH 081/101] sync: fix docs for Send/Sync bounds in broadcast (#5480) --- tokio/src/sync/broadcast.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 1c6b2caa3bb..82239c4119c 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -4,7 +4,7 @@ //! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`] //! values. [`Sender`] handles are clone-able, allowing concurrent send and //! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as -//! long as `T` is also `Send` or `Sync` respectively. +//! long as `T` is `Send`. //! //! When a value is sent, **all** [`Receiver`] handles are notified and will //! receive the value. The value is stored once inside the channel and cloned on From 2e0372be6f2fd03e5534aa73e6970c9a6e7bcd69 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 19 Feb 2023 14:12:32 +0100 Subject: [PATCH 082/101] sync: add `MappedOwnedMutexGuard` (#5474) --- tokio/src/sync/mod.rs | 2 +- tokio/src/sync/mutex.rs | 300 ++++++++++++++++++++++++++++++++- tokio/tests/async_send_sync.rs | 9 + 3 files changed, 306 insertions(+), 5 deletions(-) diff --git a/tokio/src/sync/mod.rs b/tokio/src/sync/mod.rs index 8fba196e381..70fd9b9e32d 100644 --- a/tokio/src/sync/mod.rs +++ b/tokio/src/sync/mod.rs @@ -449,7 +449,7 @@ cfg_sync! { pub mod mpsc; mod mutex; - pub use mutex::{Mutex, MutexGuard, TryLockError, OwnedMutexGuard, MappedMutexGuard}; + pub use mutex::{Mutex, MutexGuard, TryLockError, OwnedMutexGuard, MappedMutexGuard, OwnedMappedMutexGuard}; pub(crate) mod notify; pub use notify::Notify; diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index c33021acced..a378116f857 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -9,7 +9,7 @@ use std::error::Error; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use std::{fmt, mem}; +use std::{fmt, mem, ptr}; /// An asynchronous `Mutex`-like type. /// @@ -169,6 +169,8 @@ pub struct MutexGuard<'a, T: ?Sized> { /// [`Arc`]: std::sync::Arc #[clippy::has_significant_drop] pub struct OwnedMutexGuard { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. #[cfg(all(tokio_unstable, feature = "tracing"))] resource_span: tracing::Span, lock: Arc>, @@ -184,12 +186,31 @@ pub struct OwnedMutexGuard { pub struct MappedMutexGuard<'a, T: ?Sized> { // When changing the fields in this struct, make sure to update the // `skip_drop` method. + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, s: &'a semaphore::Semaphore, data: *mut T, // Needed to tell the borrow checker that we are holding a `&mut T` marker: PhantomData<&'a mut T>, } +/// A owned handle to a held `Mutex` that has had a function applied to it via +/// [`OwnedMutexGuard::map`]. 
+/// +/// This can be used to hold a subfield of the protected data. +/// +/// [`OwnedMutexGuard::map`]: method@OwnedMutexGuard::map +#[clippy::has_significant_drop] +#[must_use = "if unused the Mutex will immediately unlock"] +pub struct OwnedMappedMutexGuard { + // When changing the fields in this struct, make sure to update the + // `skip_drop` method. + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + data: *mut U, + lock: Arc>, +} + /// A helper type used when taking apart a `MutexGuard` without running its /// Drop implementation. #[allow(dead_code)] // Unused fields are still used in Drop. @@ -199,14 +220,34 @@ struct MutexGuardInner<'a, T: ?Sized> { lock: &'a Mutex, } +/// A helper type used when taking apart a `OwnedMutexGuard` without running +/// its Drop implementation. +struct OwnedMutexGuardInner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + lock: Arc>, +} + /// A helper type used when taking apart a `MappedMutexGuard` without running /// its Drop implementation. #[allow(dead_code)] // Unused fields are still used in Drop. struct MappedMutexGuardInner<'a, T: ?Sized> { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, s: &'a semaphore::Semaphore, data: *mut T, } +/// A helper type used when taking apart a `OwnedMappedMutexGuard` without running +/// its Drop implementation. +#[allow(dead_code)] // Unused fields are still used in Drop. +struct OwnedMappedMutexGuardInner { + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: tracing::Span, + data: *mut U, + lock: Arc>, +} + // As long as T: Send, it's fine to send and share Mutex between threads. // If T was not Send, sending and sharing a Mutex would be bad, since you can // access T through Mutex. @@ -217,6 +258,19 @@ unsafe impl Sync for OwnedMutexGuard where T: ?Sized + Send + Sync {} unsafe impl<'a, T> Sync for MappedMutexGuard<'a, T> where T: ?Sized + Sync + 'a {} unsafe impl<'a, T> Send for MappedMutexGuard<'a, T> where T: ?Sized + Send + 'a {} +unsafe impl Sync for OwnedMappedMutexGuard +where + T: ?Sized + Send + Sync, + U: ?Sized + Send + Sync, +{ +} +unsafe impl Send for OwnedMappedMutexGuard +where + T: ?Sized + Send, + U: ?Sized + Send, +{ +} + /// Error returned from the [`Mutex::try_lock`], [`RwLock::try_read`] and /// [`RwLock::try_write`] functions. /// @@ -799,6 +853,8 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { s: &inner.lock.s, data, marker: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, } } @@ -848,6 +904,8 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { s: &inner.lock.s, data, marker: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, }) } @@ -880,6 +938,8 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { impl Drop for MutexGuard<'_, T> { fn drop(&mut self) { + self.lock.s.release(1); + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -887,7 +947,6 @@ impl Drop for MutexGuard<'_, T> { locked = false, ); }); - self.lock.s.release(1); } } @@ -919,6 +978,116 @@ impl fmt::Display for MutexGuard<'_, T> { // === impl OwnedMutexGuard === impl OwnedMutexGuard { + fn skip_drop(self) -> OwnedMutexGuardInner { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. 
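        // Skipping the `Drop` impl here is what lets `map` and `try_map` hand
        // the still-held lock over to the new guard without releasing the
        // semaphore permit.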
+ unsafe { + OwnedMutexGuardInner { + lock: ptr::read(&me.lock), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: ptr::read(&me.resource_span), + } + } + } + + /// Makes a new [`OwnedMappedMutexGuard`] for a component of the locked data. + /// + /// This operation cannot fail as the [`OwnedMutexGuard`] passed in already locked the mutex. + /// + /// This is an associated function that needs to be used as `OwnedMutexGuard::map(...)`. A method + /// would interfere with methods of the same name on the contents of the locked data. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::{Mutex, OwnedMutexGuard}; + /// use std::sync::Arc; + /// + /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] + /// struct Foo(u32); + /// + /// # #[tokio::main] + /// # async fn main() { + /// let foo = Arc::new(Mutex::new(Foo(1))); + /// + /// { + /// let mut mapped = OwnedMutexGuard::map(foo.clone().lock_owned().await, |f| &mut f.0); + /// *mapped = 2; + /// } + /// + /// assert_eq!(Foo(2), *foo.lock().await); + /// # } + /// ``` + /// + /// [`OwnedMutexGuard`]: struct@OwnedMutexGuard + /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard + #[inline] + pub fn map(mut this: Self, f: F) -> OwnedMappedMutexGuard + where + F: FnOnce(&mut T) -> &mut U, + { + let data = f(&mut *this) as *mut U; + let inner = this.skip_drop(); + OwnedMappedMutexGuard { + data, + lock: inner.lock, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, + } + } + + /// Attempts to make a new [`OwnedMappedMutexGuard`] for a component of the locked data. The + /// original guard is returned if the closure returns `None`. + /// + /// This operation cannot fail as the [`OwnedMutexGuard`] passed in already locked the mutex. + /// + /// This is an associated function that needs to be used as `OwnedMutexGuard::try_map(...)`. A + /// method would interfere with methods of the same name on the contents of the locked data. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::{Mutex, OwnedMutexGuard}; + /// use std::sync::Arc; + /// + /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] + /// struct Foo(u32); + /// + /// # #[tokio::main] + /// # async fn main() { + /// let foo = Arc::new(Mutex::new(Foo(1))); + /// + /// { + /// let mut mapped = OwnedMutexGuard::try_map(foo.clone().lock_owned().await, |f| Some(&mut f.0)) + /// .expect("should not fail"); + /// *mapped = 2; + /// } + /// + /// assert_eq!(Foo(2), *foo.lock().await); + /// # } + /// ``` + /// + /// [`OwnedMutexGuard`]: struct@OwnedMutexGuard + /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard + #[inline] + pub fn try_map(mut this: Self, f: F) -> Result, Self> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + let data = match f(&mut *this) { + Some(data) => data as *mut U, + None => return Err(this), + }; + let inner = this.skip_drop(); + Ok(OwnedMappedMutexGuard { + data, + lock: inner.lock, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, + }) + } + /// Returns a reference to the original `Arc`. 
/// /// ``` @@ -949,6 +1118,8 @@ impl OwnedMutexGuard { impl Drop for OwnedMutexGuard { fn drop(&mut self) { + self.lock.s.release(1); + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -956,7 +1127,6 @@ impl Drop for OwnedMutexGuard { locked = false, ); }); - self.lock.s.release(1) } } @@ -993,6 +1163,8 @@ impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { MappedMutexGuardInner { s: me.s, data: me.data, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: unsafe { std::ptr::read(&me.resource_span) }, } } @@ -1015,6 +1187,8 @@ impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { s: inner.s, data, marker: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, } } @@ -1041,13 +1215,23 @@ impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { s: inner.s, data, marker: PhantomData, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, }) } } impl<'a, T: ?Sized> Drop for MappedMutexGuard<'a, T> { fn drop(&mut self) { - self.s.release(1) + self.s.release(1); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + }); } } @@ -1075,3 +1259,111 @@ impl<'a, T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'a, T> { fmt::Display::fmt(&**self, f) } } + +// === impl OwnedMappedMutexGuard === + +impl OwnedMappedMutexGuard { + fn skip_drop(self) -> OwnedMappedMutexGuardInner { + let me = mem::ManuallyDrop::new(self); + // SAFETY: This duplicates the values in every field of the guard, then + // forgets the originals, so in the end no value is duplicated. + unsafe { + OwnedMappedMutexGuardInner { + data: me.data, + lock: ptr::read(&me.lock), + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: ptr::read(&me.resource_span), + } + } + } + + /// Makes a new [`OwnedMappedMutexGuard`] for a component of the locked data. + /// + /// This operation cannot fail as the [`OwnedMappedMutexGuard`] passed in already locked the mutex. + /// + /// This is an associated function that needs to be used as `OwnedMappedMutexGuard::map(...)`. A method + /// would interfere with methods of the same name on the contents of the locked data. + /// + /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard + #[inline] + pub fn map(mut this: Self, f: F) -> OwnedMappedMutexGuard + where + F: FnOnce(&mut U) -> &mut S, + { + let data = f(&mut *this) as *mut S; + let inner = this.skip_drop(); + OwnedMappedMutexGuard { + data, + lock: inner.lock, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, + } + } + + /// Attempts to make a new [`OwnedMappedMutexGuard`] for a component of the locked data. The + /// original guard is returned if the closure returns `None`. + /// + /// This operation cannot fail as the [`OwnedMutexGuard`] passed in already locked the mutex. + /// + /// This is an associated function that needs to be used as `OwnedMutexGuard::try_map(...)`. A + /// method would interfere with methods of the same name on the contents of the locked data. 
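    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{Mutex, OwnedMutexGuard, OwnedMappedMutexGuard};
    /// use std::sync::Arc;
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let foo = Arc::new(Mutex::new(Some(Foo(1))));
    ///
    /// {
    ///     let mapped = OwnedMutexGuard::map(foo.clone().lock_owned().await, |f| f);
    ///     let mut mapped = OwnedMappedMutexGuard::try_map(mapped, |f| f.as_mut().map(|f| &mut f.0))
    ///         .expect("should not fail");
    ///     *mapped = 2;
    /// }
    ///
    /// assert_eq!(Some(Foo(2)), *foo.lock().await);
    /// # }
    /// ```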
+ /// + /// [`OwnedMutexGuard`]: struct@OwnedMutexGuard + /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard + #[inline] + pub fn try_map(mut this: Self, f: F) -> Result, Self> + where + F: FnOnce(&mut U) -> Option<&mut S>, + { + let data = match f(&mut *this) { + Some(data) => data as *mut S, + None => return Err(this), + }; + let inner = this.skip_drop(); + Ok(OwnedMappedMutexGuard { + data, + lock: inner.lock, + #[cfg(all(tokio_unstable, feature = "tracing"))] + resource_span: inner.resource_span, + }) + } +} + +impl Drop for OwnedMappedMutexGuard { + fn drop(&mut self) { + self.lock.s.release(1); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + self.resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + locked = false, + ); + }); + } +} + +impl Deref for OwnedMappedMutexGuard { + type Target = U; + fn deref(&self) -> &Self::Target { + unsafe { &*self.data } + } +} + +impl DerefMut for OwnedMappedMutexGuard { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.data } + } +} + +impl fmt::Debug for OwnedMappedMutexGuard { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl fmt::Display for OwnedMappedMutexGuard { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} diff --git a/tokio/tests/async_send_sync.rs b/tokio/tests/async_send_sync.rs index e46d5c85a8f..e9c4040c0ca 100644 --- a/tokio/tests/async_send_sync.rs +++ b/tokio/tests/async_send_sync.rs @@ -334,6 +334,15 @@ assert_value!(tokio::sync::OnceCell: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedMutexGuard: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMutexGuard: Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMutexGuard: Send & Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: !Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: !Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: !Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: !Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: !Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: Send & !Sync & Unpin); +assert_value!(tokio::sync::OwnedMappedMutexGuard: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard: Send & Sync & Unpin); From d07027f5bcce4400cb0ec0bd42eda26edf0b62dc Mon Sep 17 00:00:00 2001 From: Chris Brody Date: Sun, 19 Feb 2023 10:16:12 -0500 Subject: [PATCH 083/101] sync: add `WatchStream::from_changes` (#5432) --- tokio-stream/src/wrappers/watch.rs | 34 ++++++++++++++++++++++++++++-- tokio-stream/tests/watch.rs | 30 +++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/tokio-stream/src/wrappers/watch.rs b/tokio-stream/src/wrappers/watch.rs index c682c9c271d..ec8ead06da0 100644 --- a/tokio-stream/src/wrappers/watch.rs +++ b/tokio-stream/src/wrappers/watch.rs @@ -10,8 +10,9 @@ use tokio::sync::watch::error::RecvError; /// A wrapper around [`tokio::sync::watch::Receiver`] that implements [`Stream`]. 
/// -/// This stream will always start by yielding the current value when the WatchStream is polled, -/// regardless of whether it was the initial value or sent afterwards. +/// This stream will start by yielding the current value when the WatchStream is polled, +/// regardless of whether it was the initial value or sent afterwards, +/// unless you use [`WatchStream::from_changes`]. /// /// # Examples /// @@ -40,6 +41,28 @@ use tokio::sync::watch::error::RecvError; /// let (tx, rx) = watch::channel("hello"); /// let mut rx = WatchStream::new(rx); /// +/// // existing rx output with "hello" is ignored here +/// +/// tx.send("goodbye").unwrap(); +/// assert_eq!(rx.next().await, Some("goodbye")); +/// # } +/// ``` +/// +/// Example with [`WatchStream::from_changes`]: +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use futures::future::FutureExt; +/// use tokio::sync::watch; +/// use tokio_stream::{StreamExt, wrappers::WatchStream}; +/// +/// let (tx, rx) = watch::channel("hello"); +/// let mut rx = WatchStream::from_changes(rx); +/// +/// // no output from rx is available at this point - let's check this: +/// assert!(rx.next().now_or_never().is_none()); +/// /// tx.send("goodbye").unwrap(); /// assert_eq!(rx.next().await, Some("goodbye")); /// # } @@ -66,6 +89,13 @@ impl WatchStream { inner: ReusableBoxFuture::new(async move { (Ok(()), rx) }), } } + + /// Create a new `WatchStream` that waits for the value to be changed. + pub fn from_changes(rx: Receiver) -> Self { + Self { + inner: ReusableBoxFuture::new(make_future(rx)), + } + } } impl Stream for WatchStream { diff --git a/tokio-stream/tests/watch.rs b/tokio-stream/tests/watch.rs index a56254edefd..3a39aaf3db7 100644 --- a/tokio-stream/tests/watch.rs +++ b/tokio-stream/tests/watch.rs @@ -3,9 +3,11 @@ use tokio::sync::watch; use tokio_stream::wrappers::WatchStream; use tokio_stream::StreamExt; +use tokio_test::assert_pending; +use tokio_test::task::spawn; #[tokio::test] -async fn message_not_twice() { +async fn watch_stream_message_not_twice() { let (tx, rx) = watch::channel("hello"); let mut counter = 0; @@ -27,3 +29,29 @@ async fn message_not_twice() { drop(tx); task.await.unwrap(); } + +#[tokio::test] +async fn watch_stream_from_rx() { + let (tx, rx) = watch::channel("hello"); + + let mut stream = WatchStream::from(rx); + + assert_eq!(stream.next().await.unwrap(), "hello"); + + tx.send("bye").unwrap(); + + assert_eq!(stream.next().await.unwrap(), "bye"); +} + +#[tokio::test] +async fn watch_stream_from_changes() { + let (tx, rx) = watch::channel("hello"); + + let mut stream = WatchStream::from_changes(rx); + + assert_pending!(spawn(&mut stream).poll_next()); + + tx.send("bye").unwrap(); + + assert_eq!(stream.next().await.unwrap(), "bye"); +} From ee09e04c31d1c6938f835354cef6b23f1b13b8e7 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 19 Feb 2023 22:16:24 +0100 Subject: [PATCH 084/101] sync: drop wakers after unlocking the mutex in Notify (#5471) --- tokio/src/sync/notify.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/tokio/src/sync/notify.rs b/tokio/src/sync/notify.rs index 8e83a76b7af..9044eda7fd8 100644 --- a/tokio/src/sync/notify.rs +++ b/tokio/src/sync/notify.rs @@ -917,10 +917,14 @@ impl Notified<'_> { } } + let mut old_waker = None; if waker.is_some() { // Safety: called while locked. + // + // The use of `old_waiter` here is not necessary, as the field is always + // None when we reach this line. 
unsafe { - (*waiter.get()).waker = waker; + old_waker = std::mem::replace(&mut (*waiter.get()).waker, waker); } } @@ -931,6 +935,9 @@ impl Notified<'_> { *state = Waiting; + drop(waiters); + drop(old_waker); + return Poll::Pending; } Waiting => { @@ -945,12 +952,13 @@ impl Notified<'_> { // Safety: called while locked let w = unsafe { &mut *waiter.get() }; + let mut old_waker = None; if w.notified.is_some() { // Our waker has been notified and our waiter is already removed from // the list. Reset the notification and convert to `Done`. + old_waker = std::mem::take(&mut w.waker); w.notified = None; - w.waker = None; *state = Done; } else if get_num_notify_waiters_calls(curr) != *notify_waiters_calls { // Before we add a waiter to the list we check if these numbers are @@ -960,7 +968,7 @@ impl Notified<'_> { // We can treat the waiter as notified and remove it from the list, as // it would have been notified in the `notify_waiters` call anyways. - w.waker = None; + old_waker = std::mem::take(&mut w.waker); // Safety: we hold the lock, so we have an exclusive access to the list. // The list is used in `notify_waiters`, so it must be guarded. @@ -975,10 +983,14 @@ impl Notified<'_> { None => true, }; if should_update { - w.waker = Some(waker.clone()); + old_waker = std::mem::replace(&mut w.waker, Some(waker.clone())); } } + // Drop the old waker after releasing the lock. + drop(waiters); + drop(old_waker); + return Poll::Pending; } @@ -988,6 +1000,9 @@ impl Notified<'_> { // is helpful to visualize the scope of the critical // section. drop(waiters); + + // Drop the old waker after releasing the lock. + drop(old_waker); } Done => { return Poll::Ready(()); From 018d0450c71f782f3833fde6346f7ef48fcb57a2 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 19 Feb 2023 23:40:56 +0100 Subject: [PATCH 085/101] io: improve AsyncFd example (#5481) --- tokio/src/io/async_fd.rs | 41 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/tokio/src/io/async_fd.rs b/tokio/src/io/async_fd.rs index 92fc6b38fd2..b71bcd56e70 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -65,8 +65,8 @@ use std::{task::Context, task::Poll}; /// # Examples /// /// This example shows how to turn [`std::net::TcpStream`] asynchronous using -/// `AsyncFd`. It implements `read` as an async fn, and `AsyncWrite` as a trait -/// to show how to implement both approaches. +/// `AsyncFd`. It implements the read/write operations both as an `async fn` +/// and using the IO traits [`AsyncRead`] and [`AsyncWrite`]. 
/// /// ```no_run /// use futures::ready; @@ -74,7 +74,7 @@ use std::{task::Context, task::Poll}; /// use std::net::TcpStream; /// use std::pin::Pin; /// use std::task::{Context, Poll}; -/// use tokio::io::AsyncWrite; +/// use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; /// use tokio::io::unix::AsyncFd; /// /// pub struct AsyncTcpStream { @@ -99,6 +99,39 @@ use std::{task::Context, task::Poll}; /// } /// } /// } +/// +/// pub async fn write(&self, buf: &[u8]) -> io::Result { +/// loop { +/// let mut guard = self.inner.writable().await?; +/// +/// match guard.try_io(|inner| inner.get_ref().write(buf)) { +/// Ok(result) => return result, +/// Err(_would_block) => continue, +/// } +/// } +/// } +/// } +/// +/// impl AsyncRead for AsyncTcpStream { +/// fn poll_read( +/// self: Pin<&mut Self>, +/// cx: &mut Context<'_>, +/// buf: &mut ReadBuf<'_> +/// ) -> Poll> { +/// loop { +/// let mut guard = ready!(self.inner.poll_read_ready(cx))?; +/// +/// let unfilled = buf.initialize_unfilled(); +/// match guard.try_io(|inner| inner.get_ref().read(unfilled)) { +/// Ok(Ok(len)) => { +/// buf.advance(len); +/// return Poll::Ready(Ok(())); +/// }, +/// Ok(Err(err)) => return Poll::Ready(Err(err)), +/// Err(_would_block) => continue, +/// } +/// } +/// } /// } /// /// impl AsyncWrite for AsyncTcpStream { @@ -139,6 +172,8 @@ use std::{task::Context, task::Poll}; /// [`writable`]: method@Self::writable /// [`AsyncFdReadyGuard`]: struct@self::AsyncFdReadyGuard /// [`TcpStream::poll_read_ready`]: struct@crate::net::TcpStream +/// [`AsyncRead`]: trait@crate::io::AsyncRead +/// [`AsyncWrite`]: trait@crate::io::AsyncWrite pub struct AsyncFd { registration: Registration, inner: Option, From 46f974d8cfcb56c251d80cf1dc4a6bcf9fd1d7a0 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 20 Feb 2023 10:18:18 +0100 Subject: [PATCH 086/101] chore: prepare tokio-stream v0.1.12 (#5484) --- tokio-stream/CHANGELOG.md | 8 ++++++++ tokio-stream/Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/tokio-stream/CHANGELOG.md b/tokio-stream/CHANGELOG.md index 05c2b18812c..c475c7c398c 100644 --- a/tokio-stream/CHANGELOG.md +++ b/tokio-stream/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.1.12 (January 20, 2022) + +- time: remove `Unpin` bound on `Throttle` methods ([#5105]) +- time: document that `throttle` operates on ms granularity ([#5101]) + +[#5105]: https://github.com/tokio-rs/tokio/pull/5105 +[#5101]: https://github.com/tokio-rs/tokio/pull/5101 + # 0.1.11 (October 11, 2022) - time: allow `StreamExt::chunks_timeout` outside of a runtime ([#5036]) diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 01acec3cd73..f87b59a3654 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-stream" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-stream-0.1.x" git tag. 
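// Illustrative usage sketch, not part of the patches above: the improved
// `AsyncFd` example earlier boils down to a readiness loop -- wait for
// readiness, attempt the non-blocking call via `try_io`, and retry on
// `WouldBlock`. The sketch below applies that loop directly to a
// `std::net::TcpStream`; the peer address is a made-up placeholder.
use std::io::{self, Write};
use std::net::TcpStream;
use tokio::io::unix::AsyncFd;

async fn write_once(stream: TcpStream, buf: &[u8]) -> io::Result<usize> {
    stream.set_nonblocking(true)?;
    let fd = AsyncFd::new(stream)?;
    loop {
        // Wait until the OS reports the socket as writable.
        let mut guard = fd.writable().await?;
        // `try_io` clears the stored readiness if the write would block.
        match guard.try_io(|inner| inner.get_ref().write(buf)) {
            Ok(result) => return result,
            Err(_would_block) => continue,
        }
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let stream = TcpStream::connect("127.0.0.1:8080")?; // placeholder peer
    let written = write_once(stream, b"hello").await?;
    println!("wrote {} bytes", written);
    Ok(())
}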
-version = "0.1.11" +version = "0.1.12" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] From fa31cd999094b6e59e01dc4e47732b731dd18dd3 Mon Sep 17 00:00:00 2001 From: Konrad Borowski Date: Mon, 20 Feb 2023 15:38:40 +0100 Subject: [PATCH 087/101] io: use `poll_fn` in `copy_bidirectional` (#5486) --- tokio/src/io/util/copy_bidirectional.rs | 57 ++++++------------------- 1 file changed, 14 insertions(+), 43 deletions(-) diff --git a/tokio/src/io/util/copy_bidirectional.rs b/tokio/src/io/util/copy_bidirectional.rs index c93060b361a..e1a7db127a7 100644 --- a/tokio/src/io/util/copy_bidirectional.rs +++ b/tokio/src/io/util/copy_bidirectional.rs @@ -1,8 +1,8 @@ use super::copy::CopyBuffer; +use crate::future::poll_fn; use crate::io::{AsyncRead, AsyncWrite}; -use std::future::Future; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; @@ -13,13 +13,6 @@ enum TransferState { Done(u64), } -struct CopyBidirectional<'a, A: ?Sized, B: ?Sized> { - a: &'a mut A, - b: &'a mut B, - a_to_b: TransferState, - b_to_a: TransferState, -} - fn transfer_one_direction( cx: &mut Context<'_>, state: &mut TransferState, @@ -48,35 +41,6 @@ where } } } - -impl<'a, A, B> Future for CopyBidirectional<'a, A, B> -where - A: AsyncRead + AsyncWrite + Unpin + ?Sized, - B: AsyncRead + AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<(u64, u64)>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Unpack self into mut refs to each field to avoid borrow check issues. - let CopyBidirectional { - a, - b, - a_to_b, - b_to_a, - } = &mut *self; - - let a_to_b = transfer_one_direction(cx, a_to_b, &mut *a, &mut *b)?; - let b_to_a = transfer_one_direction(cx, b_to_a, &mut *b, &mut *a)?; - - // It is not a problem if ready! returns early because transfer_one_direction for the - // other direction will keep returning TransferState::Done(count) in future calls to poll - let a_to_b = ready!(a_to_b); - let b_to_a = ready!(b_to_a); - - Poll::Ready(Ok((a_to_b, b_to_a))) - } -} - /// Copies data in both directions between `a` and `b`. /// /// This function returns a future that will read from both streams, @@ -110,11 +74,18 @@ where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, { - CopyBidirectional { - a, - b, - a_to_b: TransferState::Running(CopyBuffer::new()), - b_to_a: TransferState::Running(CopyBuffer::new()), - } + let mut a_to_b = TransferState::Running(CopyBuffer::new()); + let mut b_to_a = TransferState::Running(CopyBuffer::new()); + poll_fn(|cx| { + let a_to_b = transfer_one_direction(cx, &mut a_to_b, a, b)?; + let b_to_a = transfer_one_direction(cx, &mut b_to_a, b, a)?; + + // It is not a problem if ready! 
returns early because transfer_one_direction for the + // other direction will keep returning TransferState::Done(count) in future calls to poll + let a_to_b = ready!(a_to_b); + let b_to_a = ready!(b_to_a); + + Poll::Ready(Ok((a_to_b, b_to_a))) + }) .await } From 3ea5cc5a821699f0f33236292e19574160dd1839 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 20 Feb 2023 17:19:56 +0100 Subject: [PATCH 088/101] stream: fix changelog for 0.1.12 (#5488) --- tokio-stream/CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tokio-stream/CHANGELOG.md b/tokio-stream/CHANGELOG.md index c475c7c398c..3c9bbc3b330 100644 --- a/tokio-stream/CHANGELOG.md +++ b/tokio-stream/CHANGELOG.md @@ -1,10 +1,12 @@ -# 0.1.12 (January 20, 2022) +# 0.1.12 (January 20, 2023) - time: remove `Unpin` bound on `Throttle` methods ([#5105]) - time: document that `throttle` operates on ms granularity ([#5101]) +- sync: add `WatchStream::from_changes` ([#5432]) [#5105]: https://github.com/tokio-rs/tokio/pull/5105 [#5101]: https://github.com/tokio-rs/tokio/pull/5101 +[#5432]: https://github.com/tokio-rs/tokio/pull/5432 # 0.1.11 (October 11, 2022) From 12f81ffa61aa2e18e2aa717a1bafd16dccf128bf Mon Sep 17 00:00:00 2001 From: "Kevin (Kun) \"Kassimo\" Qian" Date: Tue, 21 Feb 2023 05:15:54 -0800 Subject: [PATCH 089/101] fs: add `fs::try_exists` (#4299) --- tokio/src/fs/mod.rs | 3 +++ tokio/src/fs/try_exists.rs | 34 ++++++++++++++++++++++++++++ tokio/tests/fs_try_exists.rs | 44 ++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 tokio/src/fs/try_exists.rs create mode 100644 tokio/tests/fs_try_exists.rs diff --git a/tokio/src/fs/mod.rs b/tokio/src/fs/mod.rs index 3afefc6e3fb..30ce6a91a35 100644 --- a/tokio/src/fs/mod.rs +++ b/tokio/src/fs/mod.rs @@ -102,6 +102,9 @@ pub use self::write::write; mod copy; pub use self::copy::copy; +mod try_exists; +pub use self::try_exists::try_exists; + #[cfg(test)] mod mocks; diff --git a/tokio/src/fs/try_exists.rs b/tokio/src/fs/try_exists.rs new file mode 100644 index 00000000000..069518bf946 --- /dev/null +++ b/tokio/src/fs/try_exists.rs @@ -0,0 +1,34 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Returns `Ok(true)` if the path points at an existing entity. +/// +/// This function will traverse symbolic links to query information about the +/// destination file. In case of broken symbolic links this will return `Ok(false)`. +/// +/// This is the async equivalent of [`std::path::Path::try_exists`][std]. +/// +/// [std]: fn@std::path::Path::try_exists +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// +/// # async fn dox() -> std::io::Result<()> { +/// fs::try_exists("foo.txt").await?; +/// # Ok(()) +/// # } +/// ``` +pub async fn try_exists(path: impl AsRef) -> io::Result { + let path = path.as_ref().to_owned(); + // std's Path::try_exists is not available for current Rust min supported version. + // Current implementation is based on its internal implementation instead. 
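// Illustrative sketch, not part of the patches above: the rewrite of
// `copy_bidirectional` earlier replaces a hand-written `Future` struct with
// local state captured by a `poll_fn` closure. The same refactor in
// miniature, using a made-up counter future:
use std::future::poll_fn;
use std::task::Poll;

async fn count_to(target: u32) -> u32 {
    // State that would otherwise live in fields of a dedicated struct.
    let mut current = 0;
    poll_fn(|cx| {
        if current < target {
            current += 1;
            // Ask to be polled again and yield, like returning `Pending`
            // from a manual `Future::poll` implementation.
            cx.waker().wake_by_ref();
            Poll::Pending
        } else {
            Poll::Ready(current)
        }
    })
    .await
}

#[tokio::main]
async fn main() {
    assert_eq!(count_to(3).await, 3);
}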
+ match asyncify(move || std::fs::metadata(path)).await { + Ok(_) => Ok(true), + Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false), + Err(error) => Err(error), + } +} diff --git a/tokio/tests/fs_try_exists.rs b/tokio/tests/fs_try_exists.rs new file mode 100644 index 00000000000..d259b5caaf0 --- /dev/null +++ b/tokio/tests/fs_try_exists.rs @@ -0,0 +1,44 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations + +use tempfile::tempdir; +use tokio::fs; + +#[tokio::test] +async fn try_exists() { + let dir = tempdir().unwrap(); + + let existing_path = dir.path().join("foo.txt"); + fs::write(&existing_path, b"Hello File!").await.unwrap(); + let nonexisting_path = dir.path().join("bar.txt"); + + assert!(fs::try_exists(existing_path).await.unwrap()); + assert!(!fs::try_exists(nonexisting_path).await.unwrap()); + // FreeBSD root user always has permission to stat. + #[cfg(all(unix, not(target_os = "freebsd")))] + { + use std::os::unix::prelude::PermissionsExt; + let permission_denied_directory_path = dir.path().join("baz"); + fs::create_dir(&permission_denied_directory_path) + .await + .unwrap(); + let permission_denied_file_path = permission_denied_directory_path.join("baz.txt"); + fs::write(&permission_denied_file_path, b"Hello File!") + .await + .unwrap(); + let mut perms = tokio::fs::metadata(&permission_denied_directory_path) + .await + .unwrap() + .permissions(); + + perms.set_mode(0o244); + fs::set_permissions(&permission_denied_directory_path, perms) + .await + .unwrap(); + let permission_denied_result = fs::try_exists(permission_denied_file_path).await; + assert_eq!( + permission_denied_result.err().unwrap().kind(), + std::io::ErrorKind::PermissionDenied + ); + } +} From d7b7c6131774ab631be6529fef3680abfeeb4781 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 21 Feb 2023 19:57:19 +0100 Subject: [PATCH 090/101] tokio: document supported platforms (#5483) --- tokio/src/lib.rs | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index aa94ff020da..688dd0c4d42 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -384,7 +384,33 @@ //! [unstable features]: https://internals.rust-lang.org/t/feature-request-unstable-opt-in-non-transitive-crate-features/16193#why-not-a-crate-feature-2 //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section //! -//! ## WASM support +//! ## Supported platforms +//! +//! Tokio currently guarantees support for the following platforms: +//! +//! * Linux +//! * Windows +//! * Android (API level 21) +//! * macOS +//! * iOS +//! * FreeBSD +//! +//! Tokio will continue to support these platforms in the future. However, +//! future releases may change requirements such as the minimum required libc +//! version on Linux, the API level on Android, or the supported FreeBSD +//! release. +//! +//! Beyond the above platforms, Tokio is intended to work on all platforms +//! supported by the mio crate. You can find a longer list [in mio's +//! documentation][mio-supported]. However, these additional platforms may +//! become unsupported in the future. +//! +//! Note that Wine is considered to be a different platform from Windows. See +//! mio's documentation for more information on Wine support. +//! +//! [mio-supported]: https://crates.io/crates/mio#platforms +//! +//! ### WASM support //! //! Tokio has some limited support for the WASM platform. Without the //! 
`tokio_unstable` flag, the following features are supported: From cf486361d0346a49d2e6a5c001df09375a1afef8 Mon Sep 17 00:00:00 2001 From: Predrag Gruevski <2348618+obi1kenobi@users.noreply.github.com> Date: Wed, 22 Feb 2023 13:54:17 -0500 Subject: [PATCH 091/101] ci: remove cargo-semver-checks flags that are no longer necessary (#5496) These flags were previously only needed due to a bug in the `cargo-semver-checks` CLI logic. The correct behavior (available as of v0.18.3) for `cargo-semver-checks` is to ignore `publish = false` crates when scanning a workspace, *unless* those crates are specifically selected for checking. All the crates being excluded here are `publish = false` so they are already excluded by the default behavior, so all `--exclude` flags are no-ops. --- .github/workflows/ci.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 54f91488e90..0c00616005a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -245,13 +245,7 @@ jobs: tool: cargo-semver-checks - name: Check semver compatibility run: | - cargo semver-checks check-release \ - --release-type minor \ - --exclude benches \ - --exclude examples \ - --exclude stress-test \ - --exclude tests-build \ - --exclude tests-integration + cargo semver-checks check-release --release-type minor cross-check: name: cross-check From c89406965ffb4a64936d781c556a2c48855dfbdb Mon Sep 17 00:00:00 2001 From: Hayden Stainsby Date: Thu, 23 Feb 2023 11:04:12 +0100 Subject: [PATCH 092/101] sync: document drop behavior for channels (#5497) Some users mentioned that the behavior of a channel when the receivers and/or senders are dropped isn't explicitly documented. This change adds wording to the documentation for each channel in the sync module, explaining under which conditions messages in a channel are dropped with respect to dropping the senders and the receivers. Refs: #5490 --- tokio/src/sync/broadcast.rs | 4 ++++ tokio/src/sync/mpsc/mod.rs | 3 ++- tokio/src/sync/oneshot.rs | 4 ++++ tokio/src/sync/watch.rs | 3 +++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 82239c4119c..6e14ef1e1f4 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -54,6 +54,10 @@ //! all values retained by the channel, the next call to [`recv`] will return //! with [`RecvError::Closed`]. //! +//! When a [`Receiver`] handle is dropped, any messages not read by the receiver +//! will be marked as read. If this receiver was the only one not to have read +//! that message, the message will be dropped at this point. +//! //! [`Sender`]: crate::sync::broadcast::Sender //! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe //! [`Receiver`]: crate::sync::broadcast::Receiver diff --git a/tokio/src/sync/mpsc/mod.rs b/tokio/src/sync/mpsc/mod.rs index 33889fad766..b2af084b2ae 100644 --- a/tokio/src/sync/mpsc/mod.rs +++ b/tokio/src/sync/mpsc/mod.rs @@ -33,7 +33,8 @@ //! //! If the [`Receiver`] handle is dropped, then messages can no longer //! be read out of the channel. In this case, all further attempts to send will -//! result in an error. +//! result in an error. Additionally, all unread messages will be drained from the +//! channel and dropped. //! //! # Clean Shutdown //! 
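// Illustrative sketch, not part of the patch above: the mpsc drop behaviour
// documented in this change can be observed directly -- once the `Receiver`
// is dropped, buffered messages are dropped with it and later sends fail.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<String>(8);

    tx.send("buffered but never read".to_string()).await.unwrap();

    // Dropping the receiver closes the channel and drops the unread message.
    drop(rx);

    // Further sends fail, handing the rejected value back in the error.
    let err = tx.send("too late".to_string()).await.unwrap_err();
    assert_eq!(err.0, "too late");
}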
diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index a900dbfef24..da64899d095 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -12,6 +12,10 @@ //! Since the `send` method is not async, it can be used anywhere. This includes //! sending between two runtimes, and using it from non-async code. //! +//! If the [`Receiver`] is closed before receiving a message which has already +//! been sent, the message will remain in the channel until the receiver is +//! dropped, at which point the message will be dropped immediately. +//! //! # Examples //! //! ``` diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index f8250edd6f3..4341b642999 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -39,6 +39,9 @@ //! when all [`Receiver`] handles have been dropped. This indicates that there //! is no further interest in the values being produced and work can be stopped. //! +//! The value in the channel will not be dropped until the sender and all receivers +//! have been dropped. +//! //! # Thread safety //! //! Both [`Sender`] and [`Receiver`] are thread safe. They can be moved to other From ca9f7ee9f4750d6bb8a073ab4df1b7e4555857ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20Heine=20n=C3=A9=20Lang?= Date: Sat, 25 Feb 2023 18:06:17 +0100 Subject: [PATCH 093/101] macros: fix empty `join!` and `try_join!` (#5504) Fixes: #5502 --- tokio/src/macros/join.rs | 4 +++- tokio/src/macros/try_join.rs | 4 +++- tokio/tests/macros_join.rs | 6 ++++++ tokio/tests/macros_try_join.rs | 5 +++++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/tokio/src/macros/join.rs b/tokio/src/macros/join.rs index 7e85203c0c4..8a0198600b2 100644 --- a/tokio/src/macros/join.rs +++ b/tokio/src/macros/join.rs @@ -158,7 +158,9 @@ macro_rules! join { // ===== Entry point ===== - ( $($e:expr),* $(,)?) => { + ( $($e:expr),+ $(,)?) => { $crate::join!(@{ () (0) } $($e,)*) }; + + () => { async {}.await } } diff --git a/tokio/src/macros/try_join.rs b/tokio/src/macros/try_join.rs index 597cd5df021..7b123709231 100644 --- a/tokio/src/macros/try_join.rs +++ b/tokio/src/macros/try_join.rs @@ -210,7 +210,9 @@ macro_rules! try_join { // ===== Entry point ===== - ( $($e:expr),* $(,)?) => { + ( $($e:expr),+ $(,)?) 
=> { $crate::try_join!(@{ () (0) } $($e,)*) }; + + () => { async { Ok(()) }.await } } diff --git a/tokio/tests/macros_join.rs b/tokio/tests/macros_join.rs index a87c6a6f86e..7866ac086fa 100644 --- a/tokio/tests/macros_join.rs +++ b/tokio/tests/macros_join.rs @@ -153,3 +153,9 @@ async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() { *poll_order.lock().unwrap() ); } + +#[tokio::test] +#[allow(clippy::unit_cmp)] +async fn empty_join() { + assert_eq!(tokio::join!(), ()); +} diff --git a/tokio/tests/macros_try_join.rs b/tokio/tests/macros_try_join.rs index 6c432221df1..74b1c9f9481 100644 --- a/tokio/tests/macros_try_join.rs +++ b/tokio/tests/macros_try_join.rs @@ -183,3 +183,8 @@ async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() { *poll_order.lock().unwrap() ); } + +#[tokio::test] +async fn empty_try_join() { + assert_eq!(tokio::try_join!() as Result<_, ()>, Ok(())); +} From cadcd5da5ea4649b978c247ca11f270ddf54fa04 Mon Sep 17 00:00:00 2001 From: Chris Brody Date: Sun, 26 Feb 2023 10:48:09 -0500 Subject: [PATCH 094/101] fs: add more tests for filesystem functionality (#5493) --- tokio/tests/fs_canonicalize_dir.rs | 22 ++++++ tokio/tests/fs_copy.rs | 2 +- tokio/tests/fs_dir.rs | 41 +++++++++- tokio/tests/fs_file.rs | 105 ++++++++++++++++++++++++- tokio/tests/fs_link.rs | 59 ++++++-------- tokio/tests/fs_open_options.rs | 80 +++++++++++++++++++ tokio/tests/fs_open_options_windows.rs | 51 ++++++++++++ tokio/tests/fs_remove_dir_all.rs | 31 ++++++++ tokio/tests/fs_remove_file.rs | 24 ++++++ tokio/tests/fs_rename.rs | 28 +++++++ tokio/tests/fs_symlink_dir_windows.rs | 31 ++++++++ tokio/tests/fs_symlink_file_windows.rs | 24 ++++++ tokio/tests/fs_try_exists.rs | 2 +- 13 files changed, 462 insertions(+), 38 deletions(-) create mode 100644 tokio/tests/fs_canonicalize_dir.rs create mode 100644 tokio/tests/fs_open_options.rs create mode 100644 tokio/tests/fs_open_options_windows.rs create mode 100644 tokio/tests/fs_remove_dir_all.rs create mode 100644 tokio/tests/fs_remove_file.rs create mode 100644 tokio/tests/fs_rename.rs create mode 100644 tokio/tests/fs_symlink_dir_windows.rs create mode 100644 tokio/tests/fs_symlink_file_windows.rs diff --git a/tokio/tests/fs_canonicalize_dir.rs b/tokio/tests/fs_canonicalize_dir.rs new file mode 100644 index 00000000000..83f0d813737 --- /dev/null +++ b/tokio/tests/fs_canonicalize_dir.rs @@ -0,0 +1,22 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations + +use tokio::fs; + +#[tokio::test] +#[cfg(unix)] +async fn canonicalize_root_dir_unix() { + assert_eq!(fs::canonicalize("/.").await.unwrap().to_str().unwrap(), "/"); +} + +#[tokio::test] +#[cfg(windows)] +async fn canonicalize_root_dir_windows() { + // 2-step let bindings due to Rust memory semantics + let dir_path = fs::canonicalize("C:\\.\\").await.unwrap(); + + let dir_name = dir_path.to_str().unwrap(); + + assert!(dir_name.starts_with("\\\\")); + assert!(dir_name.ends_with("C:\\")); +} diff --git a/tokio/tests/fs_copy.rs b/tokio/tests/fs_copy.rs index 04678cf05b4..e6e5b666f4a 100644 --- a/tokio/tests/fs_copy.rs +++ b/tokio/tests/fs_copy.rs @@ -1,5 +1,5 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations use tempfile::tempdir; use tokio::fs; diff --git a/tokio/tests/fs_dir.rs b/tokio/tests/fs_dir.rs index f197a40ac95..5f653cb2135 100644 --- 
a/tokio/tests/fs_dir.rs +++ b/tokio/tests/fs_dir.rs @@ -1,5 +1,5 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support directory operations +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations use tokio::fs; use tokio_test::{assert_err, assert_ok}; @@ -45,6 +45,27 @@ async fn build_dir() { ); } +#[tokio::test] +#[cfg(unix)] +async fn build_dir_mode_read_only() { + let base_dir = tempdir().unwrap(); + let new_dir = base_dir.path().join("abc"); + + assert_ok!( + fs::DirBuilder::new() + .recursive(true) + .mode(0o444) + .create(&new_dir) + .await + ); + + assert!(fs::metadata(new_dir) + .await + .expect("metadata result") + .permissions() + .readonly()); +} + #[tokio::test] async fn remove() { let base_dir = tempdir().unwrap(); @@ -85,3 +106,21 @@ async fn read_inherent() { vec!["aa".to_string(), "bb".to_string(), "cc".to_string()] ); } + +#[tokio::test] +async fn read_dir_entry_info() { + let temp_dir = tempdir().unwrap(); + + let file_path = temp_dir.path().join("a.txt"); + + fs::write(&file_path, b"Hello File!").await.unwrap(); + + let mut dir = fs::read_dir(temp_dir.path()).await.unwrap(); + + let first_entry = dir.next_entry().await.unwrap().unwrap(); + + assert_eq!(first_entry.path(), file_path); + assert_eq!(first_entry.file_name(), "a.txt"); + assert!(first_entry.metadata().await.unwrap().is_file()); + assert!(first_entry.file_type().await.unwrap().is_file()); +} diff --git a/tokio/tests/fs_file.rs b/tokio/tests/fs_file.rs index 603ccad3802..dde09906246 100644 --- a/tokio/tests/fs_file.rs +++ b/tokio/tests/fs_file.rs @@ -1,5 +1,5 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations use std::io::prelude::*; use tempfile::NamedTempFile; @@ -87,13 +87,93 @@ async fn coop() { panic!("did not yield"); } +#[tokio::test] +async fn write_to_clone() { + let tempfile = tempfile(); + + let file = File::create(tempfile.path()).await.unwrap(); + let mut clone = file.try_clone().await.unwrap(); + + clone.write_all(HELLO).await.unwrap(); + clone.flush().await.unwrap(); + + let contents = std::fs::read(tempfile.path()).unwrap(); + assert_eq!(contents, HELLO); +} + +#[tokio::test] +async fn write_into_std() { + let tempfile = tempfile(); + + let file = File::create(tempfile.path()).await.unwrap(); + let mut std_file = file.into_std().await; + + std_file.write_all(HELLO).unwrap(); + + let contents = std::fs::read(tempfile.path()).unwrap(); + assert_eq!(contents, HELLO); +} + +#[tokio::test] +async fn write_into_std_immediate() { + let tempfile = tempfile(); + + let file = File::create(tempfile.path()).await.unwrap(); + let mut std_file = file.try_into_std().unwrap(); + + std_file.write_all(HELLO).unwrap(); + + let contents = std::fs::read(tempfile.path()).unwrap(); + assert_eq!(contents, HELLO); +} + +#[tokio::test] +async fn read_file_from_std() { + let mut tempfile = tempfile(); + tempfile.write_all(HELLO).unwrap(); + + let std_file = std::fs::File::open(tempfile.path()).unwrap(); + let mut file = File::from(std_file); + + let mut buf = [0; 1024]; + let n = file.read(&mut buf).await.unwrap(); + assert_eq!(n, HELLO.len()); + assert_eq!(&buf[..n], HELLO); +} + fn tempfile() -> NamedTempFile { NamedTempFile::new().unwrap() } #[tokio::test] #[cfg(unix)] -async fn unix_fd() { +async fn file_debug_fmt() { + let tempfile = tempfile(); + + let file = 
File::open(tempfile.path()).await.unwrap(); + + assert_eq!( + &format!("{:?}", file)[0..33], + "tokio::fs::File { std: File { fd:" + ); +} + +#[tokio::test] +#[cfg(windows)] +async fn file_debug_fmt() { + let tempfile = tempfile(); + + let file = File::open(tempfile.path()).await.unwrap(); + + assert_eq!( + &format!("{:?}", file)[0..37], + "tokio::fs::File { std: File { handle:" + ); +} + +#[tokio::test] +#[cfg(unix)] +async fn unix_fd_is_valid() { use std::os::unix::io::AsRawFd; let tempfile = tempfile(); @@ -101,6 +181,27 @@ async fn unix_fd() { assert!(file.as_raw_fd() as u64 > 0); } +#[tokio::test] +#[cfg(unix)] +async fn read_file_from_unix_fd() { + use std::os::unix::io::AsRawFd; + use std::os::unix::io::FromRawFd; + + let mut tempfile = tempfile(); + tempfile.write_all(HELLO).unwrap(); + + let file1 = File::open(tempfile.path()).await.unwrap(); + let raw_fd = file1.as_raw_fd(); + assert!(raw_fd > 0); + + let mut file2 = unsafe { File::from_raw_fd(raw_fd) }; + + let mut buf = [0; 1024]; + let n = file2.read(&mut buf).await.unwrap(); + assert_eq!(n, HELLO.len()); + assert_eq!(&buf[..n], HELLO); +} + #[tokio::test] #[cfg(windows)] async fn windows_handle() { diff --git a/tokio/tests/fs_link.rs b/tokio/tests/fs_link.rs index d198abc5182..681b566607e 100644 --- a/tokio/tests/fs_link.rs +++ b/tokio/tests/fs_link.rs @@ -1,10 +1,9 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations use tokio::fs; -use std::io::prelude::*; -use std::io::BufReader; +use std::io::Write; use tempfile::tempdir; #[tokio::test] @@ -13,24 +12,23 @@ async fn test_hard_link() { let src = dir.path().join("src.txt"); let dst = dir.path().join("dst.txt"); - { - let mut file = std::fs::File::create(&src).unwrap(); - file.write_all(b"hello").unwrap(); - } + std::fs::File::create(&src) + .unwrap() + .write_all(b"hello") + .unwrap(); - let dst_2 = dst.clone(); + fs::hard_link(&src, &dst).await.unwrap(); - assert!(fs::hard_link(src, dst_2.clone()).await.is_ok()); + std::fs::File::create(&src) + .unwrap() + .write_all(b"new-data") + .unwrap(); - let mut content = String::new(); + let content = fs::read(&dst).await.unwrap(); + assert_eq!(content, b"new-data"); - { - let file = std::fs::File::open(dst).unwrap(); - let mut reader = BufReader::new(file); - reader.read_to_string(&mut content).unwrap(); - } - - assert!(content == "hello"); + // test that this is not a symlink: + assert!(fs::read_link(&dst).await.is_err()); } #[cfg(unix)] @@ -40,25 +38,20 @@ async fn test_symlink() { let src = dir.path().join("src.txt"); let dst = dir.path().join("dst.txt"); - { - let mut file = std::fs::File::create(&src).unwrap(); - file.write_all(b"hello").unwrap(); - } - - let src_2 = src.clone(); - let dst_2 = dst.clone(); - - assert!(fs::symlink(src_2.clone(), dst_2.clone()).await.is_ok()); + std::fs::File::create(&src) + .unwrap() + .write_all(b"hello") + .unwrap(); - let mut content = String::new(); + fs::symlink(&src, &dst).await.unwrap(); - { - let file = std::fs::File::open(dst.clone()).unwrap(); - let mut reader = BufReader::new(file); - reader.read_to_string(&mut content).unwrap(); - } + std::fs::File::create(&src) + .unwrap() + .write_all(b"new-data") + .unwrap(); - assert!(content == "hello"); + let content = fs::read(&dst).await.unwrap(); + assert_eq!(content, b"new-data"); let read = fs::read_link(dst.clone()).await.unwrap(); assert!(read == src); diff --git 
a/tokio/tests/fs_open_options.rs b/tokio/tests/fs_open_options.rs new file mode 100644 index 00000000000..e8d8b51b39f --- /dev/null +++ b/tokio/tests/fs_open_options.rs @@ -0,0 +1,80 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations + +use std::io::Write; +use tempfile::NamedTempFile; +use tokio::fs::OpenOptions; +use tokio::io::AsyncReadExt; + +const HELLO: &[u8] = b"hello world..."; + +#[tokio::test] +async fn open_with_open_options_and_read() { + let mut tempfile = NamedTempFile::new().unwrap(); + tempfile.write_all(HELLO).unwrap(); + + let mut file = OpenOptions::new().read(true).open(tempfile).await.unwrap(); + + let mut buf = [0; 1024]; + let n = file.read(&mut buf).await.unwrap(); + + assert_eq!(n, HELLO.len()); + assert_eq!(&buf[..n], HELLO); +} + +#[tokio::test] +async fn open_options_write() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().write(true)).contains("write: true")); +} + +#[tokio::test] +async fn open_options_append() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().append(true)).contains("append: true")); +} + +#[tokio::test] +async fn open_options_truncate() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().truncate(true)).contains("truncate: true")); +} + +#[tokio::test] +async fn open_options_create() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().create(true)).contains("create: true")); +} + +#[tokio::test] +async fn open_options_create_new() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().create_new(true)).contains("create_new: true")); +} + +#[tokio::test] +#[cfg(unix)] +async fn open_options_mode() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().mode(0o644)).contains("mode: 420 ")); +} + +#[tokio::test] +#[cfg(target_os = "linux")] +async fn open_options_custom_flags_linux() { + // TESTING HACK: use Debug output to check the stored data + assert!( + format!("{:?}", OpenOptions::new().custom_flags(libc::O_TRUNC)) + .contains("custom_flags: 512,") + ); +} + +#[tokio::test] +#[cfg(any(target_os = "freebsd", target_os = "macos"))] +async fn open_options_custom_flags_bsd_family() { + // TESTING HACK: use Debug output to check the stored data + assert!( + format!("{:?}", OpenOptions::new().custom_flags(libc::O_NOFOLLOW)) + .contains("custom_flags: 256,") + ); +} diff --git a/tokio/tests/fs_open_options_windows.rs b/tokio/tests/fs_open_options_windows.rs new file mode 100644 index 00000000000..398e6a12b36 --- /dev/null +++ b/tokio/tests/fs_open_options_windows.rs @@ -0,0 +1,51 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations +#![cfg(windows)] + +use tokio::fs::OpenOptions; +use windows_sys::Win32::Storage::FileSystem; + +#[tokio::test] +#[cfg(windows)] +async fn open_options_windows_access_mode() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", OpenOptions::new().access_mode(0)).contains("access_mode: Some(0)")); +} + +#[tokio::test] +#[cfg(windows)] +async fn open_options_windows_share_mode() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!("{:?}", 
OpenOptions::new().share_mode(0)).contains("share_mode: 0,")); +} + +#[tokio::test] +#[cfg(windows)] +async fn open_options_windows_custom_flags() { + // TESTING HACK: use Debug output to check the stored data + assert!(format!( + "{:?}", + OpenOptions::new().custom_flags(FileSystem::FILE_FLAG_DELETE_ON_CLOSE) + ) + .contains("custom_flags: 67108864,")); +} + +#[tokio::test] +#[cfg(windows)] +async fn open_options_windows_attributes() { + assert!(format!( + "{:?}", + OpenOptions::new().attributes(FileSystem::FILE_ATTRIBUTE_HIDDEN) + ) + .contains("attributes: 2,")); +} + +#[tokio::test] +#[cfg(windows)] +async fn open_options_windows_security_qos_flags() { + assert!(format!( + "{:?}", + OpenOptions::new().security_qos_flags(FileSystem::SECURITY_IDENTIFICATION) + ) + .contains("security_qos_flags: 1114112,")); +} diff --git a/tokio/tests/fs_remove_dir_all.rs b/tokio/tests/fs_remove_dir_all.rs new file mode 100644 index 00000000000..53a2fd34f36 --- /dev/null +++ b/tokio/tests/fs_remove_dir_all.rs @@ -0,0 +1,31 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations + +use tempfile::tempdir; +use tokio::fs; + +#[tokio::test] +async fn remove_dir_all() { + let temp_dir = tempdir().unwrap(); + + let test_dir = temp_dir.path().join("test"); + fs::create_dir(&test_dir).await.unwrap(); + + let file_path = test_dir.as_path().join("a.txt"); + + fs::write(&file_path, b"Hello File!").await.unwrap(); + + fs::remove_dir_all(test_dir.as_path()).await.unwrap(); + + // test dir should no longer exist + match fs::try_exists(test_dir).await { + Ok(exists) => assert!(!exists), + Err(_) => println!("ignored try_exists error after remove_dir_all"), + }; + + // contents should no longer exist + match fs::try_exists(file_path).await { + Ok(exists) => assert!(!exists), + Err(_) => println!("ignored try_exists error after remove_dir_all"), + }; +} diff --git a/tokio/tests/fs_remove_file.rs b/tokio/tests/fs_remove_file.rs new file mode 100644 index 00000000000..14fd680dabe --- /dev/null +++ b/tokio/tests/fs_remove_file.rs @@ -0,0 +1,24 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations + +use tempfile::tempdir; +use tokio::fs; + +#[tokio::test] +async fn remove_file() { + let temp_dir = tempdir().unwrap(); + + let file_path = temp_dir.path().join("a.txt"); + + fs::write(&file_path, b"Hello File!").await.unwrap(); + + assert!(fs::try_exists(&file_path).await.unwrap()); + + fs::remove_file(&file_path).await.unwrap(); + + // should no longer exist + match fs::try_exists(file_path).await { + Ok(exists) => assert!(!exists), + Err(_) => println!("ignored try_exists error after remove_file"), + }; +} diff --git a/tokio/tests/fs_rename.rs b/tokio/tests/fs_rename.rs new file mode 100644 index 00000000000..382fea079bd --- /dev/null +++ b/tokio/tests/fs_rename.rs @@ -0,0 +1,28 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations + +use tempfile::tempdir; +use tokio::fs; + +#[tokio::test] +async fn rename_file() { + let temp_dir = tempdir().unwrap(); + + let file_path = temp_dir.path().join("a.txt"); + + fs::write(&file_path, b"Hello File!").await.unwrap(); + + assert!(fs::try_exists(&file_path).await.unwrap()); + + let new_file_path = temp_dir.path().join("b.txt"); + + fs::rename(&file_path, &new_file_path).await.unwrap(); + + assert!(fs::try_exists(new_file_path).await.unwrap()); + + // original file should no longer 
exist + match fs::try_exists(file_path).await { + Ok(exists) => assert!(!exists), + Err(_) => println!("ignored try_exists error after rename"), + }; +} diff --git a/tokio/tests/fs_symlink_dir_windows.rs b/tokio/tests/fs_symlink_dir_windows.rs new file mode 100644 index 00000000000..d80354268a2 --- /dev/null +++ b/tokio/tests/fs_symlink_dir_windows.rs @@ -0,0 +1,31 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations +#![cfg(windows)] + +use tempfile::tempdir; +use tokio::fs; + +#[tokio::test] +async fn symlink_file_windows() { + const FILE_NAME: &str = "abc.txt"; + + let temp_dir = tempdir().unwrap(); + + let dir1 = temp_dir.path().join("a"); + fs::create_dir(&dir1).await.unwrap(); + + let file1 = dir1.as_path().join(FILE_NAME); + fs::write(&file1, b"Hello File!").await.unwrap(); + + let dir2 = temp_dir.path().join("b"); + fs::symlink_dir(&dir1, &dir2).await.unwrap(); + + fs::write(&file1, b"new data!").await.unwrap(); + + let file2 = dir2.as_path().join(FILE_NAME); + + let from = fs::read(&file1).await.unwrap(); + let to = fs::read(&file2).await.unwrap(); + + assert_eq!(from, to); +} diff --git a/tokio/tests/fs_symlink_file_windows.rs b/tokio/tests/fs_symlink_file_windows.rs new file mode 100644 index 00000000000..50eaf8aab75 --- /dev/null +++ b/tokio/tests/fs_symlink_file_windows.rs @@ -0,0 +1,24 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations +#![cfg(windows)] + +use tempfile::tempdir; +use tokio::fs; + +#[tokio::test] +async fn symlink_file_windows() { + let dir = tempdir().unwrap(); + + let source_path = dir.path().join("foo.txt"); + let dest_path = dir.path().join("bar.txt"); + + fs::write(&source_path, b"Hello File!").await.unwrap(); + fs::symlink_file(&source_path, &dest_path).await.unwrap(); + + fs::write(&source_path, b"new data!").await.unwrap(); + + let from = fs::read(&source_path).await.unwrap(); + let to = fs::read(&dest_path).await.unwrap(); + + assert_eq!(from, to); +} diff --git a/tokio/tests/fs_try_exists.rs b/tokio/tests/fs_try_exists.rs index d259b5caaf0..00d23650015 100644 --- a/tokio/tests/fs_try_exists.rs +++ b/tokio/tests/fs_try_exists.rs @@ -1,5 +1,5 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations +#![cfg(all(feature = "full", not(tokio_wasi)))] // WASI does not support all fs operations use tempfile::tempdir; use tokio::fs; From 2298679af4f7b48ac181b54a3c526391d47c94b1 Mon Sep 17 00:00:00 2001 From: Christopher Hunt Date: Mon, 27 Feb 2023 02:48:56 +1100 Subject: [PATCH 095/101] runtime: document the nature of the main future (#5494) --- tokio-macros/src/lib.rs | 7 +++++++ tokio/src/runtime/runtime.rs | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/tokio-macros/src/lib.rs b/tokio-macros/src/lib.rs index 34041afa4c5..11bbbaec811 100644 --- a/tokio-macros/src/lib.rs +++ b/tokio-macros/src/lib.rs @@ -39,6 +39,13 @@ use proc_macro::TokenStream; /// function is called often, it is preferable to create the runtime using the /// runtime builder so the runtime can be reused across calls. /// +/// # Non-worker async function +/// +/// Note that the async function marked with this macro does not run as a +/// worker. The expectation is that other tasks are spawned by the function here. +/// Awaiting on other futures from the function provided here will not +/// perform as fast as those spawned as workers. 
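// Illustrative sketch, not part of the patch above: the note on non-worker
// futures suggests spawning concurrent work as tasks and letting the main
// future coordinate. A minimal version of that shape; `fetch` is a made-up
// stand-in for real per-item work.
async fn fetch(id: u32) -> u32 {
    // Imagine network or disk I/O happening here.
    id * 2
}

#[tokio::main]
async fn main() {
    // Spawn the work so it runs on the runtime's worker threads ...
    let handles: Vec<_> = (0..4).map(|id| tokio::spawn(fetch(id))).collect();

    // ... while the main future only awaits the results.
    let mut total = 0;
    for handle in handles {
        total += handle.await.expect("task panicked");
    }
    assert_eq!(total, 12);
}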
+/// /// # Multi-threaded runtime /// /// To use the multi-threaded runtime, the macro can be configured using diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index c11fc585363..60c3fc77670 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -240,6 +240,13 @@ impl Runtime { /// complete, and yielding its resolved result. Any tasks or timers /// which the future spawns internally will be executed on the runtime. /// + /// # Non-worker future + /// + /// Note that the future required by this function does not run as a + /// worker. The expectation is that other tasks are spawned by the future here. + /// Awaiting on other futures from the future provided here will not + /// perform as fast as those spawned as workers. + /// /// # Multi thread scheduler /// /// When the multi thread scheduler is used this will allow futures From 0a50cb3baa7493e82f5154e38d76a6cff7337676 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 26 Feb 2023 20:06:08 +0100 Subject: [PATCH 096/101] net: fix test compilation failure (#5506) --- tokio/Cargo.toml | 2 +- tokio/tests/net_named_pipe.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 0e96ccb7cd7..3087c3318e5 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -143,11 +143,11 @@ tokio-test = { version = "0.4.0", path = "../tokio-test" } tokio-stream = { version = "0.1", path = "../tokio-stream" } futures = { version = "0.3.0", features = ["async-await"] } mockall = "0.11.1" -tempfile = "3.1.0" async-stream = "0.3" [target.'cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))'.dev-dependencies] socket2 = "0.4" +tempfile = "3.1.0" [target.'cfg(not(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown")))'.dev-dependencies] rand = "0.8.0" diff --git a/tokio/tests/net_named_pipe.rs b/tokio/tests/net_named_pipe.rs index 3ddc4c8a9bf..02c8a0919f5 100644 --- a/tokio/tests/net_named_pipe.rs +++ b/tokio/tests/net_named_pipe.rs @@ -417,7 +417,7 @@ fn num_instances(pipe_name: impl AsRef) -> io::Result { let status = unsafe { ntioapi::NtQueryDirectoryFile( - root.as_raw_handle(), + root.as_raw_handle().cast(), std::ptr::null_mut(), None, std::ptr::null_mut(), From e23c6f3935b550ddb7587232c423145e06087551 Mon Sep 17 00:00:00 2001 From: Eric McBride Date: Sun, 26 Feb 2023 13:43:41 -0600 Subject: [PATCH 097/101] signal: updated Documentation for Signals (#5459) --- tokio/src/signal/unix.rs | 14 ++-- tokio/src/signal/windows.rs | 137 +++++++++++++++++++----------------- 2 files changed, 81 insertions(+), 70 deletions(-) diff --git a/tokio/src/signal/unix.rs b/tokio/src/signal/unix.rs index e5345fdfccc..ae5c13085e9 100644 --- a/tokio/src/signal/unix.rs +++ b/tokio/src/signal/unix.rs @@ -292,7 +292,11 @@ fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { } } -/// A stream of events for receiving a particular type of OS signal. +/// An listener for receiving a particular type of OS signal. +/// +/// The listener can be turned into a `Stream` using [`SignalStream`]. +/// +/// [`SignalStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.SignalStream.html /// /// In general signal handling on Unix is a pretty tricky topic, and this /// structure is no exception! 
There are some important limitations to keep in @@ -307,7 +311,7 @@ fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { /// Once `poll` has been called, however, a further signal is guaranteed to /// be yielded as an item. /// -/// Put another way, any element pulled off the returned stream corresponds to +/// Put another way, any element pulled off the returned listener corresponds to /// *at least one* signal, but possibly more. /// /// * Signal handling in general is relatively inefficient. Although some @@ -345,11 +349,11 @@ fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { /// #[tokio::main] /// async fn main() -> Result<(), Box> { /// // An infinite stream of hangup signals. -/// let mut stream = signal(SignalKind::hangup())?; +/// let mut sig = signal(SignalKind::hangup())?; /// /// // Print whenever a HUP signal is received /// loop { -/// stream.recv().await; +/// sig.recv().await; /// println!("got signal HUP"); /// } /// } @@ -360,7 +364,7 @@ pub struct Signal { inner: RxFuture, } -/// Creates a new stream which will receive notifications when the current +/// Creates a new listener which will receive notifications when the current /// process receives the specified signal `kind`. /// /// This function will create a new stream which binds to the default reactor. diff --git a/tokio/src/signal/windows.rs b/tokio/src/signal/windows.rs index 730f95d0591..2f70f98b15a 100644 --- a/tokio/src/signal/windows.rs +++ b/tokio/src/signal/windows.rs @@ -22,7 +22,7 @@ pub(crate) use self::imp::{OsExtraData, OsStorage}; #[path = "windows/stub.rs"] mod imp; -/// Creates a new stream which receives "ctrl-c" notifications sent to the +/// Creates a new listener which receives "ctrl-c" notifications sent to the /// process. /// /// # Examples @@ -32,12 +32,12 @@ mod imp; /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { -/// // An infinite stream of CTRL-C events. -/// let mut stream = ctrl_c()?; +/// // A listener of CTRL-C events. +/// let mut signal = ctrl_c()?; /// /// // Print whenever a CTRL-C event is received. /// for countdown in (0..3).rev() { -/// stream.recv().await; +/// signal.recv().await; /// println!("got CTRL-C. {} more to exit", countdown); /// } /// @@ -50,14 +50,18 @@ pub fn ctrl_c() -> io::Result { }) } -/// Represents a stream which receives "ctrl-c" notifications sent to the process +/// Represents a listener which receives "ctrl-c" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// -/// A notification to this process notifies *all* streams listening for +/// This event can be turned into a `Stream` using [`CtrlCStream`]. +/// +/// [`CtrlCStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.CtrlCStream.html +/// +/// A notification to this process notifies *all* receivers for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, -/// then the stream may only receive one item about the two notifications. -#[must_use = "streams do nothing unless polled"] +/// then the listener may only receive one item about the two notifications. +#[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlC { inner: RxFuture, @@ -66,7 +70,7 @@ pub struct CtrlC { impl CtrlC { /// Receives the next signal notification event. /// - /// `None` is returned if no more events can be received by this stream. 
+ /// `None` is returned if no more events can be received by the listener. /// /// # Examples /// @@ -75,12 +79,11 @@ impl CtrlC { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { - /// // An infinite stream of CTRL-C events. - /// let mut stream = ctrl_c()?; + /// let mut signal = ctrl_c()?; /// /// // Print whenever a CTRL-C event is received. /// for countdown in (0..3).rev() { - /// stream.recv().await; + /// signal.recv().await; /// println!("got CTRL-C. {} more to exit", countdown); /// } /// @@ -94,7 +97,7 @@ impl CtrlC { /// Polls to receive the next signal notification event, outside of an /// `async` context. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received. /// /// # Examples /// @@ -124,14 +127,18 @@ impl CtrlC { } } -/// Represents a stream which receives "ctrl-break" notifications sent to the process +/// Represents a listener which receives "ctrl-break" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// -/// A notification to this process notifies *all* streams listening for +/// This listener can be turned into a `Stream` using [`CtrlBreakStream`]. +/// +/// [`CtrlBreakStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.CtrlBreakStream.html +/// +/// A notification to this process notifies *all* receivers for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, -/// then the stream may only receive one item about the two notifications. -#[must_use = "streams do nothing unless polled"] +/// then the listener may only receive one item about the two notifications. +#[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlBreak { inner: RxFuture, @@ -140,7 +147,7 @@ pub struct CtrlBreak { impl CtrlBreak { /// Receives the next signal notification event. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -149,12 +156,12 @@ impl CtrlBreak { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { - /// // An infinite stream of CTRL-BREAK events. - /// let mut stream = ctrl_break()?; + /// // A listener of CTRL-BREAK events. + /// let mut signal = ctrl_break()?; /// /// // Print whenever a CTRL-BREAK event is received. /// loop { - /// stream.recv().await; + /// signal.recv().await; /// println!("got signal CTRL-BREAK"); /// } /// } @@ -166,7 +173,7 @@ impl CtrlBreak { /// Polls to receive the next signal notification event, outside of an /// `async` context. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -196,7 +203,7 @@ impl CtrlBreak { } } -/// Creates a new stream which receives "ctrl-break" notifications sent to the +/// Creates a new listener which receives "ctrl-break" notifications sent to the /// process. /// /// # Examples @@ -206,12 +213,12 @@ impl CtrlBreak { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { -/// // An infinite stream of CTRL-BREAK events. -/// let mut stream = ctrl_break()?; +/// // A listener of CTRL-BREAK events. +/// let mut signal = ctrl_break()?; /// /// // Print whenever a CTRL-BREAK event is received. 
/// loop { -/// stream.recv().await; +/// signal.recv().await; /// println!("got signal CTRL-BREAK"); /// } /// } @@ -222,7 +229,7 @@ pub fn ctrl_break() -> io::Result { }) } -/// Creates a new stream which receives "ctrl-close" notifications sent to the +/// Creates a new listener which receives "ctrl-close" notifications sent to the /// process. /// /// # Examples @@ -232,12 +239,12 @@ pub fn ctrl_break() -> io::Result { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { -/// // An infinite stream of CTRL-CLOSE events. -/// let mut stream = ctrl_close()?; +/// // A listener of CTRL-CLOSE events. +/// let mut signal = ctrl_close()?; /// /// // Print whenever a CTRL-CLOSE event is received. /// for countdown in (0..3).rev() { -/// stream.recv().await; +/// signal.recv().await; /// println!("got CTRL-CLOSE. {} more to exit", countdown); /// } /// @@ -250,14 +257,14 @@ pub fn ctrl_close() -> io::Result { }) } -/// Represents a stream which receives "ctrl-close" notitifications sent to the process +/// Represents a listener which receives "ctrl-close" notitifications sent to the process /// via 'SetConsoleCtrlHandler'. /// -/// A notification to this process notifies *all* streams listening for +/// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, -/// then the stream may only receive one item about the two notifications. -#[must_use = "streams do nothing unless polled"] +/// then the listener may only receive one item about the two notifications. +#[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlClose { inner: RxFuture, @@ -266,7 +273,7 @@ pub struct CtrlClose { impl CtrlClose { /// Receives the next signal notification event. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -275,11 +282,11 @@ impl CtrlClose { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { - /// // An infinite stream of CTRL-CLOSE events. - /// let mut stream = ctrl_close()?; + /// // A listener of CTRL-CLOSE events. + /// let mut signal = ctrl_close()?; /// /// // Print whenever a CTRL-CLOSE event is received. - /// stream.recv().await; + /// signal.recv().await; /// println!("got CTRL-CLOSE. Cleaning up before exiting"); /// /// Ok(()) @@ -292,7 +299,7 @@ impl CtrlClose { /// Polls to receive the next signal notification event, outside of an /// `async` context. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -322,7 +329,7 @@ impl CtrlClose { } } -/// Creates a new stream which receives "ctrl-shutdown" notifications sent to the +/// Creates a new listener which receives "ctrl-shutdown" notifications sent to the /// process. /// /// # Examples @@ -332,10 +339,10 @@ impl CtrlClose { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { -/// // An infinite stream of CTRL-SHUTDOWN events. -/// let mut stream = ctrl_shutdown()?; +/// // A listener of CTRL-SHUTDOWN events. +/// let mut signal = ctrl_shutdown()?; /// -/// stream.recv().await; +/// signal.recv().await; /// println!("got CTRL-SHUTDOWN. 
Cleaning up before exiting"); /// /// Ok(()) @@ -347,14 +354,14 @@ pub fn ctrl_shutdown() -> io::Result { }) } -/// Represents a stream which receives "ctrl-shutdown" notitifications sent to the process +/// Represents a listener which receives "ctrl-shutdown" notitifications sent to the process /// via 'SetConsoleCtrlHandler'. /// -/// A notification to this process notifies *all* streams listening for +/// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, -/// then the stream may only receive one item about the two notifications. -#[must_use = "streams do nothing unless polled"] +/// then the listener may only receive one item about the two notifications. +#[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlShutdown { inner: RxFuture, @@ -363,7 +370,7 @@ pub struct CtrlShutdown { impl CtrlShutdown { /// Receives the next signal notification event. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -372,11 +379,11 @@ impl CtrlShutdown { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { - /// // An infinite stream of CTRL-SHUTDOWN events. - /// let mut stream = ctrl_shutdown()?; + /// // A listener of CTRL-SHUTDOWN events. + /// let mut signal = ctrl_shutdown()?; /// /// // Print whenever a CTRL-SHUTDOWN event is received. - /// stream.recv().await; + /// signal.recv().await; /// println!("got CTRL-SHUTDOWN. Cleaning up before exiting"); /// /// Ok(()) @@ -389,7 +396,7 @@ impl CtrlShutdown { /// Polls to receive the next signal notification event, outside of an /// `async` context. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -419,7 +426,7 @@ impl CtrlShutdown { } } -/// Creates a new stream which receives "ctrl-logoff" notifications sent to the +/// Creates a new listener which receives "ctrl-logoff" notifications sent to the /// process. /// /// # Examples @@ -429,10 +436,10 @@ impl CtrlShutdown { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { -/// // An infinite stream of CTRL-LOGOFF events. -/// let mut stream = ctrl_logoff()?; +/// // A listener of CTRL-LOGOFF events. +/// let mut signal = ctrl_logoff()?; /// -/// stream.recv().await; +/// signal.recv().await; /// println!("got CTRL-LOGOFF. Cleaning up before exiting"); /// /// Ok(()) @@ -444,14 +451,14 @@ pub fn ctrl_logoff() -> io::Result { }) } -/// Represents a stream which receives "ctrl-logoff" notitifications sent to the process +/// Represents a listener which receives "ctrl-logoff" notitifications sent to the process /// via 'SetConsoleCtrlHandler'. /// -/// A notification to this process notifies *all* streams listening for +/// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, -/// then the stream may only receive one item about the two notifications. -#[must_use = "streams do nothing unless polled"] +/// then the listener may only receive one item about the two notifications. 
+#[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlLogoff { inner: RxFuture, @@ -460,7 +467,7 @@ pub struct CtrlLogoff { impl CtrlLogoff { /// Receives the next signal notification event. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// @@ -469,11 +476,11 @@ impl CtrlLogoff { /// /// #[tokio::main] /// async fn main() -> Result<(), Box> { - /// // An infinite stream of CTRL-LOGOFF events. - /// let mut stream = ctrl_logoff()?; + /// // An listener of CTRL-LOGOFF events. + /// let mut signal = ctrl_logoff()?; /// /// // Print whenever a CTRL-LOGOFF event is received. - /// stream.recv().await; + /// signal.recv().await; /// println!("got CTRL-LOGOFF. Cleaning up before exiting"); /// /// Ok(()) @@ -486,7 +493,7 @@ impl CtrlLogoff { /// Polls to receive the next signal notification event, outside of an /// `async` context. /// - /// `None` is returned if no more events can be received by this stream. + /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// From d44b1ca9c8fdf6392d7b5b625bef49d141de79f1 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 26 Feb 2023 22:44:44 +0100 Subject: [PATCH 098/101] io: ignore SplitByUtf8BoundaryIfWindows test on miri (#5507) These tests take a very long time under miri, but the code they're testing isn't unsafe, so there isn't any reason to run them under miri. --- tokio/src/io/stdio_common.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tokio/src/io/stdio_common.rs b/tokio/src/io/stdio_common.rs index b1cc61d7ac4..06da761b858 100644 --- a/tokio/src/io/stdio_common.rs +++ b/tokio/src/io/stdio_common.rs @@ -176,6 +176,7 @@ mod tests { } #[test] + #[cfg_attr(miri, ignore)] fn test_splitter() { let data = str::repeat("█", MAX_BUF); let mut wr = super::SplitByUtf8BoundaryIfWindows::new(TextMockWriter); @@ -189,6 +190,7 @@ mod tests { } #[test] + #[cfg_attr(miri, ignore)] fn test_pseudo_text() { // In this test we write a piece of binary data, whose beginning is // text though. We then validate that even in this corner case buffer From 5a3abe56eeb03b383a994375026b370161a05946 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tymoteusz=20Wi=C5=9Bniewski?= Date: Mon, 27 Feb 2023 09:39:07 +0100 Subject: [PATCH 099/101] net: add types for named unix pipes (#5351) --- tokio/src/net/unix/mod.rs | 4 +- tokio/src/net/unix/pipe.rs | 1206 ++++++++++++++++++++++++++++++++ tokio/tests/async_send_sync.rs | 13 + tokio/tests/net_unix_pipe.rs | 429 ++++++++++++ 4 files changed, 1651 insertions(+), 1 deletion(-) create mode 100644 tokio/src/net/unix/pipe.rs create mode 100644 tokio/tests/net_unix_pipe.rs diff --git a/tokio/src/net/unix/mod.rs b/tokio/src/net/unix/mod.rs index 97b632744dc..a49b70af34a 100644 --- a/tokio/src/net/unix/mod.rs +++ b/tokio/src/net/unix/mod.rs @@ -1,4 +1,4 @@ -//! Unix domain socket utility types. +//! Unix specific network types. // This module does not currently provide any public API, but it was // unintentionally defined as a public module. Hide it from the documentation // instead of changing it to a private module to avoid breakage. @@ -22,6 +22,8 @@ pub(crate) use stream::UnixStream; mod ucred; pub use ucred::UCred; +pub mod pipe; + /// A type representing process and process group IDs. 
#[allow(non_camel_case_types)] pub type uid_t = u32; diff --git a/tokio/src/net/unix/pipe.rs b/tokio/src/net/unix/pipe.rs new file mode 100644 index 00000000000..0775717b0c6 --- /dev/null +++ b/tokio/src/net/unix/pipe.rs @@ -0,0 +1,1206 @@ +//! Unix pipe types. + +use crate::io::interest::Interest; +use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf, Ready}; + +use mio::unix::pipe as mio_pipe; +use std::fs::File; +use std::io::{self, Read, Write}; +use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use std::path::Path; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + use bytes::BufMut; +} + +/// Options and flags which can be used to configure how a FIFO file is opened. +/// +/// This builder allows configuring how to create a pipe end from a FIFO file. +/// Generally speaking, when using `OpenOptions`, you'll first call [`new`], +/// then chain calls to methods to set each option, then call either +/// [`open_receiver`] or [`open_sender`], passing the path of the FIFO file you +/// are trying to open. This will give you a [`io::Result`][result] with a pipe +/// end inside that you can further operate on. +/// +/// [`new`]: OpenOptions::new +/// [`open_receiver`]: OpenOptions::open_receiver +/// [`open_sender`]: OpenOptions::open_sender +/// [result]: std::io::Result +/// +/// # Examples +/// +/// Opening a pair of pipe ends from a FIFO file: +/// +/// ```no_run +/// use tokio::net::unix::pipe; +/// # use std::error::Error; +/// +/// const FIFO_NAME: &str = "path/to/a/fifo"; +/// +/// # async fn dox() -> Result<(), Box> { +/// let rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; +/// let tx = pipe::OpenOptions::new().open_sender(FIFO_NAME)?; +/// # Ok(()) +/// # } +/// ``` +/// +/// Opening a [`Sender`] on Linux when you are sure the file is a FIFO: +/// +/// ```ignore +/// use tokio::net::unix::pipe; +/// use nix::{unistd::mkfifo, sys::stat::Mode}; +/// # use std::error::Error; +/// +/// // Our program has exclusive access to this path. +/// const FIFO_NAME: &str = "path/to/a/new/fifo"; +/// +/// # async fn dox() -> Result<(), Box> { +/// mkfifo(FIFO_NAME, Mode::S_IRWXU)?; +/// let tx = pipe::OpenOptions::new() +/// .read_write(true) +/// .unchecked(true) +/// .open_sender(FIFO_NAME)?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Debug)] +pub struct OpenOptions { + #[cfg(target_os = "linux")] + read_write: bool, + unchecked: bool, +} + +impl OpenOptions { + /// Creates a blank new set of options ready for configuration. + /// + /// All options are initially set to `false`. + pub fn new() -> OpenOptions { + OpenOptions { + #[cfg(target_os = "linux")] + read_write: false, + unchecked: false, + } + } + + /// Sets the option for read-write access. + /// + /// This option, when true, will indicate that a FIFO file will be opened + /// in read-write access mode. This operation is not defined by the POSIX + /// standard and is only guaranteed to work on Linux. + /// + /// # Examples + /// + /// Opening a [`Sender`] even if there are no open reading ends: + /// + /// ```ignore + /// use tokio::net::unix::pipe; + /// + /// let tx = pipe::OpenOptions::new() + /// .read_write(true) + /// .open_sender("path/to/a/fifo"); + /// ``` + /// + /// Opening a resilient [`Receiver`] i.e. a reading pipe end which will not + /// fail with [`UnexpectedEof`] during reading if all writing ends of the + /// pipe close the FIFO file. 
+ /// + /// [`UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof + /// + /// ```ignore + /// use tokio::net::unix::pipe; + /// + /// let tx = pipe::OpenOptions::new() + /// .read_write(true) + /// .open_receiver("path/to/a/fifo"); + /// ``` + #[cfg(target_os = "linux")] + #[cfg_attr(docsrs, doc(cfg(target_os = "linux")))] + pub fn read_write(&mut self, value: bool) -> &mut Self { + self.read_write = value; + self + } + + /// Sets the option to skip the check for FIFO file type. + /// + /// By default, [`open_receiver`] and [`open_sender`] functions will check + /// if the opened file is a FIFO file. Set this option to `true` if you are + /// sure the file is a FIFO file. + /// + /// [`open_receiver`]: OpenOptions::open_receiver + /// [`open_sender`]: OpenOptions::open_sender + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use nix::{unistd::mkfifo, sys::stat::Mode}; + /// # use std::error::Error; + /// + /// // Our program has exclusive access to this path. + /// const FIFO_NAME: &str = "path/to/a/new/fifo"; + /// + /// # async fn dox() -> Result<(), Box> { + /// mkfifo(FIFO_NAME, Mode::S_IRWXU)?; + /// let rx = pipe::OpenOptions::new() + /// .unchecked(true) + /// .open_receiver(FIFO_NAME)?; + /// # Ok(()) + /// # } + /// ``` + pub fn unchecked(&mut self, value: bool) -> &mut Self { + self.unchecked = value; + self + } + + /// Creates a [`Receiver`] from a FIFO file with the options specified by `self`. + /// + /// This function will open the FIFO file at the specified path, possibly + /// check if it is a pipe, and associate the pipe with the default event + /// loop for reading. + /// + /// # Errors + /// + /// If the file type check fails, this function will fail with `io::ErrorKind::InvalidInput`. + /// This function may also fail with other standard OS errors. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn open_receiver>(&self, path: P) -> io::Result { + let file = self.open(path.as_ref(), PipeEnd::Receiver)?; + Receiver::from_file_unchecked(file) + } + + /// Creates a [`Sender`] from a FIFO file with the options specified by `self`. + /// + /// This function will open the FIFO file at the specified path, possibly + /// check if it is a pipe, and associate the pipe with the default event + /// loop for writing. + /// + /// # Errors + /// + /// If the file type check fails, this function will fail with `io::ErrorKind::InvalidInput`. + /// If the file is not opened in read-write access mode and the file is not + /// currently open for reading, this function will fail with `ENXIO`. + /// This function may also fail with other standard OS errors. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
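The `open_receiver`/`open_sender` flow documented above can be exercised end to end roughly as follows. This is only a sketch, not part of the patch: it assumes the `tempfile` and `nix` crates (which the patch's own tests already use for FIFO setup), a throwaway path, and a Tokio runtime built with I/O enabled (e.g. the `full` feature).

```rust
use std::error::Error;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::unix::pipe;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // Hypothetical scratch location; any path the process may create a FIFO at works.
    let dir = tempfile::tempdir()?;
    let fifo = dir.path().join("demo.fifo");
    nix::unistd::mkfifo(&fifo, nix::sys::stat::Mode::S_IRWXU)?;

    // Open the reading end first so that open_sender does not fail with ENXIO.
    let mut rx = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let mut tx = pipe::OpenOptions::new().open_sender(&fifo)?;

    tx.write_all(b"ping").await?;

    let mut buf = [0u8; 4];
    rx.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"ping");
    Ok(())
}
```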
+ pub fn open_sender>(&self, path: P) -> io::Result { + let file = self.open(path.as_ref(), PipeEnd::Sender)?; + Sender::from_file_unchecked(file) + } + + fn open(&self, path: &Path, pipe_end: PipeEnd) -> io::Result { + let mut options = std::fs::OpenOptions::new(); + options + .read(pipe_end == PipeEnd::Receiver) + .write(pipe_end == PipeEnd::Sender) + .custom_flags(libc::O_NONBLOCK); + + #[cfg(target_os = "linux")] + if self.read_write { + options.read(true).write(true); + } + + let file = options.open(path)?; + + if !self.unchecked && !is_fifo(&file)? { + return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); + } + + Ok(file) + } +} + +impl Default for OpenOptions { + fn default() -> OpenOptions { + OpenOptions::new() + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +enum PipeEnd { + Sender, + Receiver, +} + +/// Writing end of a Unix pipe. +/// +/// It can be constructed from a FIFO file with [`OpenOptions::open_sender`]. +/// +/// Opening a named pipe for writing involves a few steps. +/// Call to [`OpenOptions::open_sender`] might fail with an error indicating +/// different things: +/// +/// * [`io::ErrorKind::NotFound`] - There is no file at the specified path. +/// * [`io::ErrorKind::InvalidInput`] - The file exists, but it is not a FIFO. +/// * [`ENXIO`] - The file is a FIFO, but no process has it open for reading. +/// Sleep for a while and try again. +/// * Other OS errors not specific to opening FIFO files. +/// +/// Opening a `Sender` from a FIFO file should look like this: +/// +/// ```no_run +/// use tokio::net::unix::pipe; +/// use tokio::time::{self, Duration}; +/// +/// const FIFO_NAME: &str = "path/to/a/fifo"; +/// +/// # async fn dox() -> Result<(), Box> { +/// // Wait for a reader to open the file. +/// let tx = loop { +/// match pipe::OpenOptions::new().open_sender(FIFO_NAME) { +/// Ok(tx) => break tx, +/// Err(e) if e.raw_os_error() == Some(libc::ENXIO) => {}, +/// Err(e) => return Err(e.into()), +/// } +/// +/// time::sleep(Duration::from_millis(50)).await; +/// }; +/// # Ok(()) +/// # } +/// ``` +/// +/// On Linux, it is possible to create a `Sender` without waiting in a sleeping +/// loop. This is done by opening a named pipe in read-write access mode with +/// `OpenOptions::read_write`. This way, a `Sender` can at the same time hold +/// both a writing end and a reading end, and the latter allows to open a FIFO +/// without [`ENXIO`] error since the pipe is open for reading as well. +/// +/// `Sender` cannot be used to read from a pipe, so in practice the read access +/// is only used when a FIFO is opened. However, using a `Sender` in read-write +/// mode **may lead to lost data**, because written data will be dropped by the +/// system as soon as all pipe ends are closed. To avoid lost data you have to +/// make sure that a reading end has been opened before dropping a `Sender`. +/// +/// Note that using read-write access mode with FIFO files is not defined by +/// the POSIX standard and it is only guaranteed to work on Linux. +/// +/// ```ignore +/// use tokio::io::AsyncWriteExt; +/// use tokio::net::unix::pipe; +/// +/// const FIFO_NAME: &str = "path/to/a/fifo"; +/// +/// # async fn dox() -> Result<(), Box> { +/// let mut tx = pipe::OpenOptions::new() +/// .read_write(true) +/// .open_sender(FIFO_NAME)?; +/// +/// // Asynchronously write to the pipe before a reader. 
+/// tx.write_all(b"hello world").await?; +/// # Ok(()) +/// # } +/// ``` +/// +/// [`ENXIO`]: https://docs.rs/libc/latest/libc/constant.ENXIO.html +#[derive(Debug)] +pub struct Sender { + io: PollEvented, +} + +impl Sender { + fn from_mio(mio_tx: mio_pipe::Sender) -> io::Result { + let io = PollEvented::new_with_interest(mio_tx, Interest::WRITABLE)?; + Ok(Sender { io }) + } + + /// Creates a new `Sender` from a [`File`]. + /// + /// This function is intended to construct a pipe from a [`File`] representing + /// a special FIFO file. It will check if the file is a pipe and has write access, + /// set it in non-blocking mode and perform the conversion. + /// + /// # Errors + /// + /// Fails with `io::ErrorKind::InvalidInput` if the file is not a pipe or it + /// does not have write access. Also fails with any standard OS error if it occurs. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_file(mut file: File) -> io::Result { + if !is_fifo(&file)? { + return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); + } + + let flags = get_file_flags(&file)?; + if has_write_access(flags) { + set_nonblocking(&mut file, flags)?; + Sender::from_file_unchecked(file) + } else { + Err(io::Error::new( + io::ErrorKind::InvalidInput, + "not in O_WRONLY or O_RDWR access mode", + )) + } + } + + /// Creates a new `Sender` from a [`File`] without checking pipe properties. + /// + /// This function is intended to construct a pipe from a File representing + /// a special FIFO file. The conversion assumes nothing about the underlying + /// file; it is left up to the user to make sure it is opened with write access, + /// represents a pipe and is set in non-blocking mode. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::fs::OpenOptions; + /// use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; + /// # use std::error::Error; + /// + /// const FIFO_NAME: &str = "path/to/a/fifo"; + /// + /// # async fn dox() -> Result<(), Box> { + /// let file = OpenOptions::new() + /// .write(true) + /// .custom_flags(libc::O_NONBLOCK) + /// .open(FIFO_NAME)?; + /// if file.metadata()?.file_type().is_fifo() { + /// let tx = pipe::Sender::from_file_unchecked(file)?; + /// /* use the Sender */ + /// } + /// # Ok(()) + /// # } + /// ``` + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_file_unchecked(file: File) -> io::Result { + let raw_fd = file.into_raw_fd(); + let mio_tx = unsafe { mio_pipe::Sender::from_raw_fd(raw_fd) }; + Sender::from_mio(mio_tx) + } + + /// Waits for any of the requested ready states. + /// + /// This function can be used instead of [`writable()`] to check the returned + /// ready set for [`Ready::WRITABLE`] and [`Ready::WRITE_CLOSED`] events. + /// + /// The function may complete without the pipe being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. 
The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// + /// [`writable()`]: Self::writable + /// + /// # Cancel safety + /// + /// This method is cancel safe. Once a readiness event occurs, the method + /// will continue to return immediately until the readiness event is + /// consumed by an attempt to write that fails with `WouldBlock` or + /// `Poll::Pending`. + pub async fn ready(&self, interest: Interest) -> io::Result { + let event = self.io.registration().readiness(interest).await?; + Ok(event.ready) + } + + /// Waits for the pipe to become writable. + /// + /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually + /// paired with [`try_write()`]. + /// + /// [`try_write()`]: Self::try_write + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a writing end of a fifo + /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; + /// + /// loop { + /// // Wait for the pipe to be writable + /// tx.writable().await?; + /// + /// // Try to write data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. + /// match tx.try_write(b"hello world") { + /// Ok(n) => { + /// break; + /// } + /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// Ok(()) + /// } + /// ``` + pub async fn writable(&self) -> io::Result<()> { + self.ready(Interest::WRITABLE).await?; + Ok(()) + } + + /// Polls for write readiness. + /// + /// If the pipe is not currently ready for writing, this method will + /// store a clone of the `Waker` from the provided `Context`. When the pipe + /// becomes ready for writing, `Waker::wake` will be called on the waker. + /// + /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only + /// the `Waker` from the `Context` passed to the most recent call is + /// scheduled to receive a wakeup. + /// + /// This function is intended for cases where creating and pinning a future + /// via [`writable`] is not feasible. Where possible, using [`writable`] is + /// preferred, as this supports polling from multiple tasks at once. + /// + /// [`writable`]: Self::writable + /// + /// # Return value + /// + /// The function returns: + /// + /// * `Poll::Pending` if the pipe is not ready for writing. + /// * `Poll::Ready(Ok(()))` if the pipe is ready for writing. + /// * `Poll::Ready(Err(e))` if an error is encountered. + /// + /// # Errors + /// + /// This function may encounter any standard I/O error except `WouldBlock`. + pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { + self.io.registration().poll_write_ready(cx).map_ok(|_| ()) + } + + /// Tries to write a buffer to the pipe, returning how many bytes were + /// written. + /// + /// The function will attempt to write the entire contents of `buf`, but + /// only part of the buffer may be written. If the length of `buf` is not + /// greater than `PIPE_BUF` (an OS constant, 4096 under Linux), then the + /// write is guaranteed to be atomic, i.e. either the entire content of + /// `buf` will be written or this method will fail with `WouldBlock`. There + /// is no such guarantee if `buf` is larger than `PIPE_BUF`. + /// + /// This function is usually paired with [`writable`]. 
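The `PIPE_BUF` atomicity described above means small writes never need short-count handling. A minimal sketch of that pattern, not taken from the patch, assuming the documented all-or-nothing guarantee for buffers of at most 512 bytes (the POSIX minimum for `PIPE_BUF`):

```rust
use std::io;
use tokio::net::unix::pipe;

/// Sends one fixed-size frame, relying on the atomicity of writes up to PIPE_BUF.
async fn send_frame(tx: &pipe::Sender, frame: &[u8; 512]) -> io::Result<()> {
    loop {
        // Wait until the pipe reports write readiness.
        tx.writable().await?;
        match tx.try_write(frame) {
            // Atomic: a successful write of a <= PIPE_BUF buffer is never partial.
            Ok(n) => {
                debug_assert_eq!(n, frame.len());
                return Ok(());
            }
            // The readiness event was a false positive or the buffer is full; retry.
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
}
```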
+ /// + /// [`writable`]: Self::writable + /// + /// # Return + /// + /// If data is successfully written, `Ok(n)` is returned, where `n` is the + /// number of bytes written. If the pipe is not ready to write data, + /// `Err(io::ErrorKind::WouldBlock)` is returned. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a writing end of a fifo + /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; + /// + /// loop { + /// // Wait for the pipe to be writable + /// tx.writable().await?; + /// + /// // Try to write data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. + /// match tx.try_write(b"hello world") { + /// Ok(n) => { + /// break; + /// } + /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// Ok(()) + /// } + /// ``` + pub fn try_write(&self, buf: &[u8]) -> io::Result { + self.io + .registration() + .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) + } + + /// Tries to write several buffers to the pipe, returning how many bytes + /// were written. + /// + /// Data is written from each buffer in order, with the final buffer read + /// from possible being only partially consumed. This method behaves + /// equivalently to a single call to [`try_write()`] with concatenated + /// buffers. + /// + /// If the total length of buffers is not greater than `PIPE_BUF` (an OS + /// constant, 4096 under Linux), then the write is guaranteed to be atomic, + /// i.e. either the entire contents of buffers will be written or this + /// method will fail with `WouldBlock`. There is no such guarantee if the + /// total length of buffers is greater than `PIPE_BUF`. + /// + /// This function is usually paired with [`writable`]. + /// + /// [`try_write()`]: Self::try_write() + /// [`writable`]: Self::writable + /// + /// # Return + /// + /// If data is successfully written, `Ok(n)` is returned, where `n` is the + /// number of bytes written. If the pipe is not ready to write data, + /// `Err(io::ErrorKind::WouldBlock)` is returned. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a writing end of a fifo + /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; + /// + /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; + /// + /// loop { + /// // Wait for the pipe to be writable + /// tx.writable().await?; + /// + /// // Try to write data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. 
+ /// match tx.try_write_vectored(&bufs) { + /// Ok(n) => { + /// break; + /// } + /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// Ok(()) + /// } + /// ``` + pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { + self.io + .registration() + .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) + } +} + +impl AsyncWrite for Sender { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + self.io.poll_write(cx, buf) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + self.io.poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + true + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +impl AsRawFd for Sender { + fn as_raw_fd(&self) -> RawFd { + self.io.as_raw_fd() + } +} + +/// Reading end of a Unix pipe. +/// +/// It can be constructed from a FIFO file with [`OpenOptions::open_receiver`]. +/// +/// # Examples +/// +/// Receiving messages from a named pipe in a loop: +/// +/// ```no_run +/// use tokio::net::unix::pipe; +/// use tokio::io::{self, AsyncReadExt}; +/// +/// const FIFO_NAME: &str = "path/to/a/fifo"; +/// +/// # async fn dox() -> Result<(), Box> { +/// let mut rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; +/// loop { +/// let mut msg = vec![0; 256]; +/// match rx.read_exact(&mut msg).await { +/// Ok(_) => { +/// /* handle the message */ +/// } +/// Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => { +/// // Writing end has been closed, we should reopen the pipe. +/// rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; +/// } +/// Err(e) => return Err(e.into()), +/// } +/// } +/// # } +/// ``` +/// +/// On Linux, you can use a `Receiver` in read-write access mode to implement +/// resilient reading from a named pipe. Unlike `Receiver` opened in read-only +/// mode, read from a pipe in read-write mode will not fail with `UnexpectedEof` +/// when the writing end is closed. This way, a `Receiver` can asynchronously +/// wait for the next writer to open the pipe. +/// +/// You should not use functions waiting for EOF such as [`read_to_end`] with +/// a `Receiver` in read-write access mode, since it **may wait forever**. +/// `Receiver` in this mode also holds an open writing end, which prevents +/// receiving EOF. +/// +/// To set the read-write access mode you can use `OpenOptions::read_write`. +/// Note that using read-write access mode with FIFO files is not defined by +/// the POSIX standard and it is only guaranteed to work on Linux. 
+/// +/// ```ignore +/// use tokio::net::unix::pipe; +/// use tokio::io::AsyncReadExt; +/// # use std::error::Error; +/// +/// const FIFO_NAME: &str = "path/to/a/fifo"; +/// +/// # async fn dox() -> Result<(), Box> { +/// let mut rx = pipe::OpenOptions::new() +/// .read_write(true) +/// .open_receiver(FIFO_NAME)?; +/// loop { +/// let mut msg = vec![0; 256]; +/// rx.read_exact(&mut msg).await?; +/// /* handle the message */ +/// } +/// # } +/// ``` +/// +/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end +#[derive(Debug)] +pub struct Receiver { + io: PollEvented, +} + +impl Receiver { + fn from_mio(mio_rx: mio_pipe::Receiver) -> io::Result { + let io = PollEvented::new_with_interest(mio_rx, Interest::READABLE)?; + Ok(Receiver { io }) + } + + /// Creates a new `Receiver` from a [`File`]. + /// + /// This function is intended to construct a pipe from a [`File`] representing + /// a special FIFO file. It will check if the file is a pipe and has read access, + /// set it in non-blocking mode and perform the conversion. + /// + /// # Errors + /// + /// Fails with `io::ErrorKind::InvalidInput` if the file is not a pipe or it + /// does not have read access. Also fails with any standard OS error if it occurs. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_file(mut file: File) -> io::Result { + if !is_fifo(&file)? { + return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); + } + + let flags = get_file_flags(&file)?; + if has_read_access(flags) { + set_nonblocking(&mut file, flags)?; + Receiver::from_file_unchecked(file) + } else { + Err(io::Error::new( + io::ErrorKind::InvalidInput, + "not in O_RDONLY or O_RDWR access mode", + )) + } + } + + /// Creates a new `Receiver` from a [`File`] without checking pipe properties. + /// + /// This function is intended to construct a pipe from a File representing + /// a special FIFO file. The conversion assumes nothing about the underlying + /// file; it is left up to the user to make sure it is opened with read access, + /// represents a pipe and is set in non-blocking mode. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::fs::OpenOptions; + /// use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; + /// # use std::error::Error; + /// + /// const FIFO_NAME: &str = "path/to/a/fifo"; + /// + /// # async fn dox() -> Result<(), Box> { + /// let file = OpenOptions::new() + /// .read(true) + /// .custom_flags(libc::O_NONBLOCK) + /// .open(FIFO_NAME)?; + /// if file.metadata()?.file_type().is_fifo() { + /// let rx = pipe::Receiver::from_file_unchecked(file)?; + /// /* use the Receiver */ + /// } + /// # Ok(()) + /// # } + /// ``` + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
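`Receiver::from_file`, documented above, has no example of its own; a rough sketch follows, not part of the patch. It assumes the `libc` crate for `O_NONBLOCK` (as in the patch's other examples), a placeholder path, and a running Tokio runtime with I/O enabled. Unlike `from_file_unchecked`, the checked constructor rejects non-FIFO files and write-only descriptors and switches the file to non-blocking mode itself.

```rust
use std::os::unix::fs::OpenOptionsExt;
use tokio::net::unix::pipe;

fn receiver_from_std_file(path: &str) -> std::io::Result<pipe::Receiver> {
    // O_NONBLOCK keeps the open call itself from blocking until a writer shows up.
    let file = std::fs::OpenOptions::new()
        .read(true)
        .custom_flags(libc::O_NONBLOCK)
        .open(path)?;

    // Fails with ErrorKind::InvalidInput if `file` is not a FIFO opened for reading.
    pipe::Receiver::from_file(file)
}
```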
+ pub fn from_file_unchecked(file: File) -> io::Result { + let raw_fd = file.into_raw_fd(); + let mio_rx = unsafe { mio_pipe::Receiver::from_raw_fd(raw_fd) }; + Receiver::from_mio(mio_rx) + } + + /// Waits for any of the requested ready states. + /// + /// This function can be used instead of [`readable()`] to check the returned + /// ready set for [`Ready::READABLE`] and [`Ready::READ_CLOSED`] events. + /// + /// The function may complete without the pipe being ready. This is a + /// false-positive and attempting an operation will return with + /// `io::ErrorKind::WouldBlock`. The function can also return with an empty + /// [`Ready`] set, so you should always check the returned value and possibly + /// wait again if the requested states are not set. + /// + /// [`readable()`]: Self::readable + /// + /// # Cancel safety + /// + /// This method is cancel safe. Once a readiness event occurs, the method + /// will continue to return immediately until the readiness event is + /// consumed by an attempt to read that fails with `WouldBlock` or + /// `Poll::Pending`. + pub async fn ready(&self, interest: Interest) -> io::Result { + let event = self.io.registration().readiness(interest).await?; + Ok(event.ready) + } + + /// Waits for the pipe to become readable. + /// + /// This function is equivalent to `ready(Interest::READABLE)` and is usually + /// paired with [`try_read()`]. + /// + /// [`try_read()`]: Self::try_read() + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a reading end of a fifo + /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; + /// + /// let mut msg = vec![0; 1024]; + /// + /// loop { + /// // Wait for the pipe to be readable + /// rx.readable().await?; + /// + /// // Try to read data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. + /// match rx.try_read(&mut msg) { + /// Ok(n) => { + /// msg.truncate(n); + /// break; + /// } + /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// println!("GOT = {:?}", msg); + /// Ok(()) + /// } + /// ``` + pub async fn readable(&self) -> io::Result<()> { + self.ready(Interest::READABLE).await?; + Ok(()) + } + + /// Polls for read readiness. + /// + /// If the pipe is not currently ready for reading, this method will + /// store a clone of the `Waker` from the provided `Context`. When the pipe + /// becomes ready for reading, `Waker::wake` will be called on the waker. + /// + /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only + /// the `Waker` from the `Context` passed to the most recent call is + /// scheduled to receive a wakeup. + /// + /// This function is intended for cases where creating and pinning a future + /// via [`readable`] is not feasible. Where possible, using [`readable`] is + /// preferred, as this supports polling from multiple tasks at once. + /// + /// [`readable`]: Self::readable + /// + /// # Return value + /// + /// The function returns: + /// + /// * `Poll::Pending` if the pipe is not ready for reading. + /// * `Poll::Ready(Ok(()))` if the pipe is ready for reading. + /// * `Poll::Ready(Err(e))` if an error is encountered. + /// + /// # Errors + /// + /// This function may encounter any standard I/O error except `WouldBlock`. 
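For the manual-polling case described above, `poll_read_ready` can be driven without hand-writing a `Future` by wrapping it in `std::future::poll_fn`. A minimal sketch, not part of the patch:

```rust
use std::future::poll_fn;
use std::io;
use tokio::net::unix::pipe;

/// Resolves once the pipe is ready for reading, using the poll-based API above.
async fn wait_until_readable(rx: &pipe::Receiver) -> io::Result<()> {
    poll_fn(|cx| rx.poll_read_ready(cx)).await
}
```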
+ pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { + self.io.registration().poll_read_ready(cx).map_ok(|_| ()) + } + + /// Tries to read data from the pipe into the provided buffer, returning how + /// many bytes were read. + /// + /// Reads any pending data from the pipe but does not wait for new data + /// to arrive. On success, returns the number of bytes read. Because + /// `try_read()` is non-blocking, the buffer does not have to be stored by + /// the async task and can exist entirely on the stack. + /// + /// Usually [`readable()`] is used with this function. + /// + /// [`readable()`]: Self::readable() + /// + /// # Return + /// + /// If data is successfully read, `Ok(n)` is returned, where `n` is the + /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: + /// + /// 1. The pipe's writing end is closed and will no longer write data. + /// 2. The specified buffer was 0 bytes in length. + /// + /// If the pipe is not ready to read data, + /// `Err(io::ErrorKind::WouldBlock)` is returned. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a reading end of a fifo + /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; + /// + /// let mut msg = vec![0; 1024]; + /// + /// loop { + /// // Wait for the pipe to be readable + /// rx.readable().await?; + /// + /// // Try to read data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. + /// match rx.try_read(&mut msg) { + /// Ok(n) => { + /// msg.truncate(n); + /// break; + /// } + /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// println!("GOT = {:?}", msg); + /// Ok(()) + /// } + /// ``` + pub fn try_read(&self, buf: &mut [u8]) -> io::Result { + self.io + .registration() + .try_io(Interest::READABLE, || (&*self.io).read(buf)) + } + + /// Tries to read data from the pipe into the provided buffers, returning + /// how many bytes were read. + /// + /// Data is copied to fill each buffer in order, with the final buffer + /// written to possibly being only partially filled. This method behaves + /// equivalently to a single call to [`try_read()`] with concatenated + /// buffers. + /// + /// Reads any pending data from the pipe but does not wait for new data + /// to arrive. On success, returns the number of bytes read. Because + /// `try_read_vectored()` is non-blocking, the buffer does not have to be + /// stored by the async task and can exist entirely on the stack. + /// + /// Usually, [`readable()`] is used with this function. + /// + /// [`try_read()`]: Self::try_read() + /// [`readable()`]: Self::readable() + /// + /// # Return + /// + /// If data is successfully read, `Ok(n)` is returned, where `n` is the + /// number of bytes read. `Ok(0)` indicates the pipe's writing end is + /// closed and will no longer write data. If the pipe is not ready to read + /// data `Err(io::ErrorKind::WouldBlock)` is returned. 
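The `Ok(0)`-versus-`WouldBlock` distinction spelled out above is what lets a caller tell EOF apart from a merely empty pipe. A sketch of that pattern using `readable` and `try_read`, not part of the patch; the buffer size is illustrative only:

```rust
use std::io;
use tokio::net::unix::pipe;

/// Reads until the writing end closes, treating WouldBlock as "no data yet".
async fn read_until_eof(rx: &pipe::Receiver) -> io::Result<Vec<u8>> {
    let mut data = Vec::new();
    let mut chunk = [0u8; 4096];
    loop {
        rx.readable().await?;
        match rx.try_read(&mut chunk) {
            // Ok(0) with a non-empty buffer means the writer closed the pipe.
            Ok(0) => return Ok(data),
            Ok(n) => data.extend_from_slice(&chunk[..n]),
            // Spurious readiness: nothing to read right now, keep waiting.
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
}
```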
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a reading end of a fifo + /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; + /// + /// loop { + /// // Wait for the pipe to be readable + /// rx.readable().await?; + /// + /// // Creating the buffer **after** the `await` prevents it from + /// // being stored in the async task. + /// let mut buf_a = [0; 512]; + /// let mut buf_b = [0; 1024]; + /// let mut bufs = [ + /// io::IoSliceMut::new(&mut buf_a), + /// io::IoSliceMut::new(&mut buf_b), + /// ]; + /// + /// // Try to read data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. + /// match rx.try_read_vectored(&mut bufs) { + /// Ok(0) => break, + /// Ok(n) => { + /// println!("read {} bytes", n); + /// } + /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// Ok(()) + /// } + /// ``` + pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { + self.io + .registration() + .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) + } + + cfg_io_util! { + /// Tries to read data from the pipe into the provided buffer, advancing the + /// buffer's internal cursor, returning how many bytes were read. + /// + /// Reads any pending data from the pipe but does not wait for new data + /// to arrive. On success, returns the number of bytes read. Because + /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by + /// the async task and can exist entirely on the stack. + /// + /// Usually, [`readable()`] or [`ready()`] is used with this function. + /// + /// [`readable()`]: Self::readable + /// [`ready()`]: Self::ready + /// + /// # Return + /// + /// If data is successfully read, `Ok(n)` is returned, where `n` is the + /// number of bytes read. `Ok(0)` indicates the pipe's writing end is + /// closed and will no longer write data. If the pipe is not ready to read + /// data `Err(io::ErrorKind::WouldBlock)` is returned. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::unix::pipe; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// // Open a reading end of a fifo + /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; + /// + /// loop { + /// // Wait for the pipe to be readable + /// rx.readable().await?; + /// + /// let mut buf = Vec::with_capacity(4096); + /// + /// // Try to read data, this may still fail with `WouldBlock` + /// // if the readiness event is a false positive. + /// match rx.try_read_buf(&mut buf) { + /// Ok(0) => break, + /// Ok(n) => { + /// println!("read {} bytes", n); + /// } + /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + /// continue; + /// } + /// Err(e) => { + /// return Err(e.into()); + /// } + /// } + /// } + /// + /// Ok(()) + /// } + /// ``` + pub fn try_read_buf(&self, buf: &mut B) -> io::Result { + self.io.registration().try_io(Interest::READABLE, || { + use std::io::Read; + + let dst = buf.chunk_mut(); + let dst = + unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; + + // Safety: `mio_pipe::Receiver` uses a `std::fs::File` underneath, + // which correctly handles reads into uninitialized memory. 
+ let n = (&*self.io).read(dst)?; + + unsafe { + buf.advance_mut(n); + } + + Ok(n) + }) + } + } +} + +impl AsyncRead for Receiver { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + // Safety: `mio_pipe::Receiver` uses a `std::fs::File` underneath, + // which correctly handles reads into uninitialized memory. + unsafe { self.io.poll_read(cx, buf) } + } +} + +impl AsRawFd for Receiver { + fn as_raw_fd(&self) -> RawFd { + self.io.as_raw_fd() + } +} + +/// Checks if file is a FIFO +fn is_fifo(file: &File) -> io::Result { + Ok(file.metadata()?.file_type().is_fifo()) +} + +/// Gets file descriptor's flags by fcntl. +fn get_file_flags(file: &File) -> io::Result { + let fd = file.as_raw_fd(); + let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if flags < 0 { + Err(io::Error::last_os_error()) + } else { + Ok(flags) + } +} + +/// Checks for O_RDONLY or O_RDWR access mode. +fn has_read_access(flags: libc::c_int) -> bool { + let mode = flags & libc::O_ACCMODE; + mode == libc::O_RDONLY || mode == libc::O_RDWR +} + +/// Checks for O_WRONLY or O_RDWR access mode. +fn has_write_access(flags: libc::c_int) -> bool { + let mode = flags & libc::O_ACCMODE; + mode == libc::O_WRONLY || mode == libc::O_RDWR +} + +/// Sets file's flags with O_NONBLOCK by fcntl. +fn set_nonblocking(file: &mut File, current_flags: libc::c_int) -> io::Result<()> { + let fd = file.as_raw_fd(); + + let flags = current_flags | libc::O_NONBLOCK; + + if flags != current_flags { + let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flags) }; + if ret < 0 { + return Err(io::Error::last_os_error()); + } + } + + Ok(()) +} diff --git a/tokio/tests/async_send_sync.rs b/tokio/tests/async_send_sync.rs index e9c4040c0ca..0c2c34a0796 100644 --- a/tokio/tests/async_send_sync.rs +++ b/tokio/tests/async_send_sync.rs @@ -263,6 +263,19 @@ mod unix_datagram { async_assert_fn!(UnixStream::writable(_): Send & Sync & !Unpin); } +#[cfg(unix)] +mod unix_pipe { + use super::*; + use tokio::net::unix::pipe::*; + assert_value!(OpenOptions: Send & Sync & Unpin); + assert_value!(Receiver: Send & Sync & Unpin); + assert_value!(Sender: Send & Sync & Unpin); + async_assert_fn!(Receiver::readable(_): Send & Sync & !Unpin); + async_assert_fn!(Receiver::ready(_, tokio::io::Interest): Send & Sync & !Unpin); + async_assert_fn!(Sender::ready(_, tokio::io::Interest): Send & Sync & !Unpin); + async_assert_fn!(Sender::writable(_): Send & Sync & !Unpin); +} + #[cfg(windows)] mod windows_named_pipe { use super::*; diff --git a/tokio/tests/net_unix_pipe.rs b/tokio/tests/net_unix_pipe.rs new file mode 100644 index 00000000000..c96d6e70fbd --- /dev/null +++ b/tokio/tests/net_unix_pipe.rs @@ -0,0 +1,429 @@ +#![cfg(feature = "full")] +#![cfg(unix)] + +use tokio::io::{AsyncReadExt, AsyncWriteExt, Interest}; +use tokio::net::unix::pipe; +use tokio_test::task; +use tokio_test::{assert_err, assert_ok, assert_pending, assert_ready_ok}; + +use std::fs::File; +use std::io; +use std::os::unix::fs::OpenOptionsExt; +use std::os::unix::io::AsRawFd; +use std::path::{Path, PathBuf}; + +/// Helper struct which will clean up temporary files once dropped. 
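Since `try_read_buf` above is generic over `bytes::BufMut`, it also works with `bytes::BytesMut`, avoiding a zero-initialized `Vec`. A small sketch, not part of the patch, assuming the `bytes` crate and the feature implied by the `cfg_io_util!` gate:

```rust
use bytes::BytesMut;
use std::io;
use tokio::net::unix::pipe;

/// Reads whatever is currently buffered in the pipe into a BytesMut.
async fn read_chunk(rx: &pipe::Receiver) -> io::Result<BytesMut> {
    let mut buf = BytesMut::with_capacity(4096);
    loop {
        rx.readable().await?;
        match rx.try_read_buf(&mut buf) {
            // Ok(0) here means either EOF or that the buffer had no spare capacity.
            Ok(_) => return Ok(buf),
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
}
```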
+struct TempFifo { + path: PathBuf, + _dir: tempfile::TempDir, +} + +impl TempFifo { + fn new(name: &str) -> io::Result { + let dir = tempfile::Builder::new() + .prefix("tokio-fifo-tests") + .tempdir()?; + let path = dir.path().join(name); + nix::unistd::mkfifo(&path, nix::sys::stat::Mode::S_IRWXU)?; + + Ok(TempFifo { path, _dir: dir }) + } +} + +impl AsRef for TempFifo { + fn as_ref(&self) -> &Path { + self.path.as_ref() + } +} + +#[tokio::test] +async fn fifo_simple_send() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + let fifo = TempFifo::new("simple_send")?; + + // Create a reading task which should wait for data from the pipe. + let mut reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + let mut read_fut = task::spawn(async move { + let mut buf = vec![0; DATA.len()]; + reader.read_exact(&mut buf).await?; + Ok::<_, io::Error>(buf) + }); + assert_pending!(read_fut.poll()); + + let mut writer = pipe::OpenOptions::new().open_sender(&fifo)?; + writer.write_all(DATA).await?; + + // Let the IO driver poll events for the reader. + while !read_fut.is_woken() { + tokio::task::yield_now().await; + } + + // Reading task should be ready now. + let read_data = assert_ready_ok!(read_fut.poll()); + assert_eq!(&read_data, DATA); + + Ok(()) +} + +#[tokio::test] +#[cfg(target_os = "linux")] +async fn fifo_simple_send_sender_first() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + // Create a new fifo file with *no reading ends open*. + let fifo = TempFifo::new("simple_send_sender_first")?; + + // Simple `open_sender` should fail with ENXIO (no such device or address). + let err = assert_err!(pipe::OpenOptions::new().open_sender(&fifo)); + assert_eq!(err.raw_os_error(), Some(libc::ENXIO)); + + // `open_sender` in read-write mode should succeed and the pipe should be ready to write. + let mut writer = pipe::OpenOptions::new() + .read_write(true) + .open_sender(&fifo)?; + writer.write_all(DATA).await?; + + // Read the written data and validate. + let mut reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + let mut read_data = vec![0; DATA.len()]; + reader.read_exact(&mut read_data).await?; + assert_eq!(&read_data, DATA); + + Ok(()) +} + +// Opens a FIFO file, write and *close the writer*. +async fn write_and_close(path: impl AsRef, msg: &[u8]) -> io::Result<()> { + let mut writer = pipe::OpenOptions::new().open_sender(path)?; + writer.write_all(msg).await?; + drop(writer); // Explicit drop. + Ok(()) +} + +/// Checks EOF behavior with single reader and writers sequentially opening +/// and closing a FIFO. +#[tokio::test] +async fn fifo_multiple_writes() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + let fifo = TempFifo::new("fifo_multiple_writes")?; + + let mut reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + + write_and_close(&fifo, DATA).await?; + let ev = reader.ready(Interest::READABLE).await?; + assert!(ev.is_readable()); + let mut read_data = vec![0; DATA.len()]; + assert_ok!(reader.read_exact(&mut read_data).await); + + // Check that reader hits EOF. + let err = assert_err!(reader.read_exact(&mut read_data).await); + assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof); + + // Write more data and read again. + write_and_close(&fifo, DATA).await?; + assert_ok!(reader.read_exact(&mut read_data).await); + + Ok(()) +} + +/// Checks behavior of a resilient reader (Receiver in O_RDWR access mode) +/// with writers sequentially opening and closing a FIFO. 
+#[tokio::test] +#[cfg(target_os = "linux")] +async fn fifo_resilient_reader() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + let fifo = TempFifo::new("fifo_resilient_reader")?; + + // Open reader in read-write access mode. + let mut reader = pipe::OpenOptions::new() + .read_write(true) + .open_receiver(&fifo)?; + + write_and_close(&fifo, DATA).await?; + let ev = reader.ready(Interest::READABLE).await?; + let mut read_data = vec![0; DATA.len()]; + reader.read_exact(&mut read_data).await?; + + // Check that reader didn't hit EOF. + assert!(!ev.is_read_closed()); + + // Resilient reader can asynchronously wait for the next writer. + let mut second_read_fut = task::spawn(reader.read_exact(&mut read_data)); + assert_pending!(second_read_fut.poll()); + + // Write more data and read again. + write_and_close(&fifo, DATA).await?; + assert_ok!(second_read_fut.await); + + Ok(()) +} + +#[tokio::test] +async fn open_detects_not_a_fifo() -> io::Result<()> { + let dir = tempfile::Builder::new() + .prefix("tokio-fifo-tests") + .tempdir() + .unwrap(); + let path = dir.path().join("not_a_fifo"); + + // Create an ordinary file. + File::create(&path)?; + + // Check if Sender detects invalid file type. + let err = assert_err!(pipe::OpenOptions::new().open_sender(&path)); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + + // Check if Receiver detects invalid file type. + let err = assert_err!(pipe::OpenOptions::new().open_sender(&path)); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + + Ok(()) +} + +#[tokio::test] +async fn from_file() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + let fifo = TempFifo::new("from_file")?; + + // Construct a Receiver from a File. + let file = std::fs::OpenOptions::new() + .read(true) + .custom_flags(libc::O_NONBLOCK) + .open(&fifo)?; + let mut reader = pipe::Receiver::from_file(file)?; + + // Construct a Sender from a File. + let file = std::fs::OpenOptions::new() + .write(true) + .custom_flags(libc::O_NONBLOCK) + .open(&fifo)?; + let mut writer = pipe::Sender::from_file(file)?; + + // Write and read some data to test async. + let mut read_fut = task::spawn(async move { + let mut buf = vec![0; DATA.len()]; + reader.read_exact(&mut buf).await?; + Ok::<_, io::Error>(buf) + }); + assert_pending!(read_fut.poll()); + + writer.write_all(DATA).await?; + + let read_data = assert_ok!(read_fut.await); + assert_eq!(&read_data, DATA); + + Ok(()) +} + +#[tokio::test] +async fn from_file_detects_not_a_fifo() -> io::Result<()> { + let dir = tempfile::Builder::new() + .prefix("tokio-fifo-tests") + .tempdir() + .unwrap(); + let path = dir.path().join("not_a_fifo"); + + // Create an ordinary file. + File::create(&path)?; + + // Check if Sender detects invalid file type. + let file = std::fs::OpenOptions::new().write(true).open(&path)?; + let err = assert_err!(pipe::Sender::from_file(file)); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + + // Check if Receiver detects invalid file type. + let file = std::fs::OpenOptions::new().read(true).open(&path)?; + let err = assert_err!(pipe::Receiver::from_file(file)); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + + Ok(()) +} + +#[tokio::test] +async fn from_file_detects_wrong_access_mode() -> io::Result<()> { + let fifo = TempFifo::new("wrong_access_mode")?; + + // Open a read end to open the fifo for writing. + let _reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + + // Check if Receiver detects write-only access mode. 
+ let wronly = std::fs::OpenOptions::new() + .write(true) + .custom_flags(libc::O_NONBLOCK) + .open(&fifo)?; + let err = assert_err!(pipe::Receiver::from_file(wronly)); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + + // Check if Sender detects read-only access mode. + let rdonly = std::fs::OpenOptions::new() + .read(true) + .custom_flags(libc::O_NONBLOCK) + .open(&fifo)?; + let err = assert_err!(pipe::Sender::from_file(rdonly)); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + + Ok(()) +} + +fn is_nonblocking(fd: &T) -> io::Result { + let flags = nix::fcntl::fcntl(fd.as_raw_fd(), nix::fcntl::F_GETFL)?; + Ok((flags & libc::O_NONBLOCK) != 0) +} + +#[tokio::test] +async fn from_file_sets_nonblock() -> io::Result<()> { + let fifo = TempFifo::new("sets_nonblock")?; + + // Open read and write ends to let blocking files open. + let _reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + let _writer = pipe::OpenOptions::new().open_sender(&fifo)?; + + // Check if Receiver sets the pipe in non-blocking mode. + let rdonly = std::fs::OpenOptions::new().read(true).open(&fifo)?; + assert!(!is_nonblocking(&rdonly)?); + let reader = pipe::Receiver::from_file(rdonly)?; + assert!(is_nonblocking(&reader)?); + + // Check if Sender sets the pipe in non-blocking mode. + let wronly = std::fs::OpenOptions::new().write(true).open(&fifo)?; + assert!(!is_nonblocking(&wronly)?); + let writer = pipe::Sender::from_file(wronly)?; + assert!(is_nonblocking(&writer)?); + + Ok(()) +} + +fn writable_by_poll(writer: &pipe::Sender) -> bool { + task::spawn(writer.writable()).poll().is_ready() +} + +#[tokio::test] +async fn try_read_write() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + // Create a pipe pair over a fifo file. + let fifo = TempFifo::new("try_read_write")?; + let reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + let writer = pipe::OpenOptions::new().open_sender(&fifo)?; + + // Fill the pipe buffer with `try_write`. + let mut write_data = Vec::new(); + while writable_by_poll(&writer) { + match writer.try_write(DATA) { + Ok(n) => write_data.extend(&DATA[..n]), + Err(e) => { + assert_eq!(e.kind(), io::ErrorKind::WouldBlock); + break; + } + } + } + + // Drain the pipe buffer with `try_read`. + let mut read_data = vec![0; write_data.len()]; + let mut i = 0; + while i < write_data.len() { + reader.readable().await?; + match reader.try_read(&mut read_data[i..]) { + Ok(n) => i += n, + Err(e) => { + assert_eq!(e.kind(), io::ErrorKind::WouldBlock); + continue; + } + } + } + + assert_eq!(read_data, write_data); + + Ok(()) +} + +#[tokio::test] +async fn try_read_write_vectored() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + // Create a pipe pair over a fifo file. + let fifo = TempFifo::new("try_read_write_vectored")?; + let reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + let writer = pipe::OpenOptions::new().open_sender(&fifo)?; + + let write_bufs: Vec<_> = DATA.chunks(3).map(io::IoSlice::new).collect(); + + // Fill the pipe buffer with `try_write_vectored`. + let mut write_data = Vec::new(); + while writable_by_poll(&writer) { + match writer.try_write_vectored(&write_bufs) { + Ok(n) => write_data.extend(&DATA[..n]), + Err(e) => { + assert_eq!(e.kind(), io::ErrorKind::WouldBlock); + break; + } + } + } + + // Drain the pipe buffer with `try_read_vectored`. 
+ let mut read_data = vec![0; write_data.len()]; + let mut i = 0; + while i < write_data.len() { + reader.readable().await?; + + let mut read_bufs: Vec<_> = read_data[i..] + .chunks_mut(0x10000) + .map(io::IoSliceMut::new) + .collect(); + match reader.try_read_vectored(&mut read_bufs) { + Ok(n) => i += n, + Err(e) => { + assert_eq!(e.kind(), io::ErrorKind::WouldBlock); + continue; + } + } + } + + assert_eq!(read_data, write_data); + + Ok(()) +} + +#[tokio::test] +async fn try_read_buf() -> std::io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the fifo"; + + // Create a pipe pair over a fifo file. + let fifo = TempFifo::new("try_read_write_vectored")?; + let reader = pipe::OpenOptions::new().open_receiver(&fifo)?; + let writer = pipe::OpenOptions::new().open_sender(&fifo)?; + + // Fill the pipe buffer with `try_write`. + let mut write_data = Vec::new(); + while writable_by_poll(&writer) { + match writer.try_write(DATA) { + Ok(n) => write_data.extend(&DATA[..n]), + Err(e) => { + assert_eq!(e.kind(), io::ErrorKind::WouldBlock); + break; + } + } + } + + // Drain the pipe buffer with `try_read_buf`. + let mut read_data = vec![0; write_data.len()]; + let mut i = 0; + while i < write_data.len() { + reader.readable().await?; + match reader.try_read_buf(&mut read_data) { + Ok(n) => i += n, + Err(e) => { + assert_eq!(e.kind(), io::ErrorKind::WouldBlock); + continue; + } + } + } + + assert_eq!(read_data, write_data); + + Ok(()) +} From 54aaf3d0e30f0d352a54e11f8543d96a380e6469 Mon Sep 17 00:00:00 2001 From: Daria Sukhonina Date: Mon, 27 Feb 2023 17:17:31 +0300 Subject: [PATCH 100/101] time: document immediate completion guarantee for timeouts (#5509) --- tokio/src/time/timeout.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tokio/src/time/timeout.rs b/tokio/src/time/timeout.rs index 3bb98ea6f92..b6837360edc 100644 --- a/tokio/src/time/timeout.rs +++ b/tokio/src/time/timeout.rs @@ -28,6 +28,11 @@ use std::task::{self, Poll}; /// This function returns a future whose return type is [`Result`]``, where `T` is the /// return type of the provided future. /// +/// If the provided future completes immediately, then the future returned from +/// this function is guaranteed to complete immediately with an [`Ok`] variant +/// no matter the provided duration. +/// +/// [`Ok`]: std::result::Result::Ok /// [`Result`]: std::result::Result /// [`Elapsed`]: crate::time::error::Elapsed /// @@ -100,6 +105,11 @@ where /// This function returns a future whose return type is [`Result`]``, where `T` is the /// return type of the provided future. /// +/// If the provided future completes immediately, then the future returned from +/// this function is guaranteed to complete immediately with an [`Ok`] variant +/// no matter the provided deadline. +/// +/// [`Ok`]: std::result::Result::Ok /// [`Result`]: std::result::Result /// [`Elapsed`]: crate::time::error::Elapsed /// From 815d89a407beef40e009efdc7c31716b34449630 Mon Sep 17 00:00:00 2001 From: Grachev Mikhail Date: Mon, 27 Feb 2023 17:41:17 +0300 Subject: [PATCH 101/101] runtime: remove extra period in docs (#5511) --- tokio/src/runtime/handle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index c5dc65f6e81..fca1cbb4e32 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -152,7 +152,7 @@ impl Handle { self.spawn_named(future, None) } - /// Runs the provided function on an executor dedicated to blocking.
+ /// Runs the provided function on an executor dedicated to blocking /// operations. /// /// # Examples
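The immediate-completion guarantee added to the timeout docs in PATCH 100/101 can be checked with a future that is already ready: even a zero duration yields `Ok`. A small sketch, not part of the patches, assuming a runtime with the `time` and `macros` features enabled:

```rust
use tokio::time::{timeout, Duration};

#[tokio::main]
async fn main() {
    // The inner future is already ready, so per the documented guarantee the
    // timeout completes with Ok even though the allowed duration is zero.
    let result = timeout(Duration::ZERO, async { 42 }).await;
    assert_eq!(result.unwrap(), 42);
}
```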