diff --git a/.cargo/config b/.cargo/config.toml similarity index 94% rename from .cargo/config rename to .cargo/config.toml index f210b8b5..c48d4d28 100644 --- a/.cargo/config +++ b/.cargo/config.toml @@ -8,3 +8,6 @@ ABQ_WORKSPACE_DIR = { value = "", relative = true } # easily hit the number of open FDs (especially on MacOS) during our tests. # Revisit this when we've made everything async on the worker side. RUST_TEST_THREADS = "1" + +[workspace] +resolver = "2" diff --git a/.github/workflows/build_and_upload.yml b/.github/workflows/build_and_upload.yml index 2d29617b..0152b7b7 100644 --- a/.github/workflows/build_and_upload.yml +++ b/.github/workflows/build_and_upload.yml @@ -5,26 +5,26 @@ on: workflow_dispatch: inputs: ref: - description: 'ref to build' + description: "ref to build" required: true type: string release_channel: - description: 'release channel' + description: "release channel" required: true type: choice options: - - "v1" - - "unstable" + - "v1" + - "unstable" # for automatic release from main on the unstable release channel workflow_call: inputs: ref: - description: 'ref to build' + description: "ref to build" required: true type: string release_channel: - description: 'release channel' + description: "release channel" required: true type: string secrets: @@ -61,24 +61,24 @@ jobs: deprecated-platform: linux_x86-64 os: linux architecture: x86_64 - cross-target: 'x86_64-unknown-linux-musl' + cross-target: "x86_64-unknown-linux-musl" install-musl-tools: true - runs-on: ubuntu-latest deprecated-platform: linux_aarch64 os: linux architecture: aarch64 - cross-target: 'aarch64-unknown-linux-musl' + cross-target: "aarch64-unknown-linux-musl" container: messense/rust-musl-cross:aarch64-musl@sha256:777bd4c61179c38dc213bb8472500584646d28fd4a7c3e0b30b9ef70cb446d58 - runs-on: macos-11 # use an older version for broader osx support deprecated-platform: darwin_x86-64 os: darwin architecture: x86_64 - cross-target: '' + cross-target: "" - runs-on: macos-11 # first OS X to support arm64 -- so the first os for cross compilation deprecated-platform: darwin_aarch64 os: darwin architecture: aarch64 - cross-target: 'aarch64-apple-darwin' + cross-target: "aarch64-apple-darwin" runs-on: ${{ matrix.runs-on }} container: ${{ matrix.container }} outputs: @@ -122,7 +122,7 @@ jobs: - name: Install Rust toolchain uses: rwx-research/rust-toolchain@abq with: - toolchain: 1.65.0 + toolchain: 1.81.0 target: ${{ matrix.cross-target }} # We don't build a musl ABQ on MacOS @@ -132,7 +132,7 @@ jobs: sudo apt-get install -y musl-tools - name: Build release - if: '!matrix.cross-target' + if: "!matrix.cross-target" run: cargo build --release --all-features - name: Build release diff --git a/.github/workflows/test_and_package_development.yml b/.github/workflows/test_and_package_development.yml index 79f5316b..a037094e 100644 --- a/.github/workflows/test_and_package_development.yml +++ b/.github/workflows/test_and_package_development.yml @@ -76,7 +76,7 @@ jobs: - name: Install Rust toolchain uses: rwx-research/rust-toolchain@abq with: - toolchain: 1.65.0 + toolchain: 1.81.0 target: ${{ env.RUST_TARGET }} components: clippy, rustfmt diff --git a/Cargo.toml b/Cargo.toml index e646354f..1066c5c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = [ "crates/abq_cli", diff --git a/crates/abq_cli/src/args.rs b/crates/abq_cli/src/args.rs index 1aff6ff4..faab5644 100644 --- a/crates/abq_cli/src/args.rs +++ b/crates/abq_cli/src/args.rs @@ -150,12 +150,12 @@ pub enum Command { /// 
/// The remote persistence options are:{n} /// - s3: files are remotely persisted to an S3 bucket. Requires `ABQ_REMOTE_PERSISTENCE_S3_BUCKET` - /// and `ABQ_REMOTE_PERSISTENCE_S3_KEY_PREFIX` to be set as well. AWS credentials and region - /// information are read from the environment, using the standard AWS environment variable - /// support (https://docs.aws.amazon.com/sdkref/latest/guide/environment-variables.html).{n} + /// and `ABQ_REMOTE_PERSISTENCE_S3_KEY_PREFIX` to be set as well. AWS credentials and region + /// information are read from the environment, using the standard AWS environment variable + /// support (https://docs.aws.amazon.com/sdkref/latest/guide/environment-variables.html).{n} /// /// - custom: files are remotely persisted by calling a provided executable. See - /// `--remote-persistence-command` for more information.{n} + /// `--remote-persistence-command` for more information.{n} #[clap(long, required = false, env(ENV_REMOTE_PERSISTENCE_STRATEGY))] remote_persistence_strategy: Option, @@ -169,7 +169,7 @@ pub enum Command { /// /// Where{n} /// - is either "store" or "load", depending on whether the file should be stored - /// into the remote location, or loaded from the remote location.{n} + /// into the remote location, or loaded from the remote location.{n} /// - is "manifest", "results", or "run_state".{n} /// - is the run ID of the test suite run.{n} /// - is the path to the file on the local filesystem.{n} @@ -331,7 +331,7 @@ pub enum Command { /// Options:{n} ///- by-test: distribute the next test to any worker.{n} ///- by-file: distribute all tests in a file to the same worker. This ensures that expensive per-file shared setups or - /// teardowns will run only once on one worker, however it may cause tests to be less evenly distributed. + /// teardowns will run only once on one worker, however it may cause tests to be less evenly distributed. /// /// Note: The Jest & Playwright test frameworks run with a by-file strategy regardless of the value of this flag. #[clap(long, default_value = "by-test")] @@ -341,8 +341,8 @@ pub enum Command { /// /// Options:{n} ///- auto: try to emit colors unless the output channel is detected - /// not to be a TTY, if (on Windows) the console isn't available, if NO_COLOR is set, if - /// TERM is set to `dumb`, amongst other heuristics. + /// not to be a TTY, if (on Windows) the console isn't available, if NO_COLOR is set, if + /// TERM is set to `dumb`, amongst other heuristics. ///- never: don't emit colors. #[clap(long, default_value = "auto")] color: ColorPreference, @@ -411,8 +411,8 @@ pub enum Command { /// /// Options:{n} ///- auto: try to emit colors unless the output channel is detected - /// not to be a TTY, if (on Windows) the console isn't available, if NO_COLOR is set, if - /// TERM is set to `dumb`, amongst other heuristics. + /// not to be a TTY, if (on Windows) the console isn't available, if NO_COLOR is set, if + /// TERM is set to `dumb`, amongst other heuristics. ///- never: don't emit colors. 
#[clap(long, default_value = "auto")] color: ColorPreference, diff --git a/crates/abq_cli/src/instance.rs b/crates/abq_cli/src/instance.rs index eb2d6ff1..98787476 100644 --- a/crates/abq_cli/src/instance.rs +++ b/crates/abq_cli/src/instance.rs @@ -19,7 +19,6 @@ use signal_hook::consts::TERM_SIGNALS; use signal_hook::iterator::Signals; use std::net::{IpAddr, SocketAddr}; use std::thread; -use tempfile::TempDir; use tokio_cron_scheduler::JobScheduler; use thiserror::Error; @@ -280,12 +279,7 @@ enum AbqLocator { queue_negotiator: QueueNegotiatorHandle, server_addr: SocketAddr, }, - Local(Abq, EphemeralAbqGuards), -} - -struct EphemeralAbqGuards { - _manifests_path: TempDir, - _results_path: TempDir, + Local(Abq), } #[derive(Debug, Error)] @@ -312,14 +306,14 @@ impl AbqInstance { AbqLocator::Remote { queue_negotiator, .. } => *queue_negotiator, - AbqLocator::Local(abq, _) => abq.get_negotiator_handle(), + AbqLocator::Local(abq) => abq.get_negotiator_handle(), } } pub fn server_addr(&self) -> SocketAddr { match &self.locator { AbqLocator::Remote { server_addr, .. } => *server_addr, - AbqLocator::Local(abq, _) => abq.server_addr(), + AbqLocator::Local(abq) => abq.server_addr(), } } @@ -359,13 +353,9 @@ impl AbqInstance { config.server_options = ServerOptions::new(server_auth, server_tls); let queue = Abq::start(config).await; - let guards = EphemeralAbqGuards { - _manifests_path: manifests_path, - _results_path: results_path, - }; AbqInstance { - locator: AbqLocator::Local(queue, guards), + locator: AbqLocator::Local(queue), client_options: ClientOptions::new(client_auth, client_tls), } } diff --git a/crates/abq_cli/tests/cli.rs b/crates/abq_cli/tests/cli.rs index 6d54b53f..8118cc4f 100644 --- a/crates/abq_cli/tests/cli.rs +++ b/crates/abq_cli/tests/cli.rs @@ -475,7 +475,7 @@ test_all_network_config_options! { exit_status, } = Abq::new(name) .args(args) - .working_dir(&testdata_project("jest/npm-jest-project")) + .working_dir(testdata_project("jest/npm-jest-project")) .run(); assert!(exit_status.success()); @@ -496,7 +496,7 @@ test_all_network_config_options! { exit_status, } = Abq::new(name) .args(args) - .working_dir(&testdata_project("jest/npm-jest-project")) + .working_dir(testdata_project("jest/npm-jest-project")) .run(); assert!(exit_status.success()); @@ -688,7 +688,7 @@ test_all_network_config_options! { let CmdOutput { stdout, stderr, exit_status } = Abq::new(name.to_string() + "_worker0") .args(test_args(0)) - .working_dir(&testdata_project("jest/npm-jest-project")) + .working_dir(testdata_project("jest/npm-jest-project")) .run(); assert!(!exit_status.success()); assert!(stdout.contains("-- Test Timeout --"), "STDOUT:\n{}STDERR:\n{}", stdout, stderr); @@ -718,7 +718,7 @@ test_all_network_config_options! 
{ exit_status, } = Abq::new(name) .args(test_args) - .working_dir(&testdata_project("jest/npm-jest-project-with-failures")) + .working_dir(testdata_project("jest/npm-jest-project-with-failures")) .run(); let code = exit_status.code().expect("process killed"); @@ -1542,7 +1542,7 @@ fn test_grouping_without_failures() { let stdout = String::from_utf8_lossy(&stdout).to_string(); let stderr = String::from_utf8_lossy(&stderr).to_string(); - let stdouts = vec![&stdout]; + let stdouts = [&stdout]; assert_sum_of_run_tests(stdouts.iter().map(|s| s.as_str()), 64); assert_sum_of_run_test_failures(stdouts.iter().map(|s| s.as_str()), 0); assert_sum_of_run_test_retries(stdouts.iter().map(|s| s.as_str()), 0); @@ -1672,7 +1672,7 @@ fn test_grouping_with_failures_without_retries() { let stdout = String::from_utf8_lossy(&stdout).to_string(); let stderr = String::from_utf8_lossy(&stderr).to_string(); - let stdouts = vec![&stdout]; + let stdouts = [&stdout]; assert_sum_of_run_tests(stdouts.iter().map(|s| s.as_str()), 64); assert_sum_of_run_test_failures(stdouts.iter().map(|s| s.as_str()), 64); assert_sum_of_run_test_retries(stdouts.iter().map(|s| s.as_str()), 0); @@ -1802,7 +1802,7 @@ fn test_grouping_failures_retries() { let stdout = String::from_utf8_lossy(&stdout).to_string(); let stderr = String::from_utf8_lossy(&stderr).to_string(); - let stdouts = vec![&stdout]; + let stdouts = [&stdout]; assert_sum_of_run_tests(stdouts.iter().map(|s| s.as_str()), 64); assert_sum_of_run_test_failures(stdouts.iter().map(|s| s.as_str()), 64); assert_sum_of_run_test_retries(stdouts.iter().map(|s| s.as_str()), 64); diff --git a/crates/abq_queue/src/job_queue.rs b/crates/abq_queue/src/job_queue.rs index 8ed3dc98..e13b2c3e 100644 --- a/crates/abq_queue/src/job_queue.rs +++ b/crates/abq_queue/src/job_queue.rs @@ -69,7 +69,7 @@ impl JobQueue { entity_tag: Tag, suggested_batch_size: NonZeroUsize, ) -> impl ExactSizeIterator + '_ { - let suggested_batch_size = suggested_batch_size.get() as usize; + let suggested_batch_size = suggested_batch_size.get(); let queue_len = self.queue.len(); // If the start index was past the end of the queue, return fast diff --git a/crates/abq_queue/src/persistence/manifest/fs.rs b/crates/abq_queue/src/persistence/manifest/fs.rs index 5139d23c..f31c14f7 100644 --- a/crates/abq_queue/src/persistence/manifest/fs.rs +++ b/crates/abq_queue/src/persistence/manifest/fs.rs @@ -89,6 +89,7 @@ impl FilesystemPersistor { tokio::task::spawn_blocking(move || { let fi = std::fs::OpenOptions::new() .create(true) + .truncate(false) .read(true) .write(true) .open(path) diff --git a/crates/abq_queue/src/persistence/remote/fake.rs b/crates/abq_queue/src/persistence/remote/fake.rs index f4e5734f..44353876 100644 --- a/crates/abq_queue/src/persistence/remote/fake.rs +++ b/crates/abq_queue/src/persistence/remote/fake.rs @@ -36,7 +36,6 @@ pub struct FakePersister { on_try_load_run_state: Arc, } -#[track_caller] pub async fn unreachable(_x: PersistenceKind, _y: RunId, _z: PathBuf) -> OpaqueResult<()> { unreachable!() } @@ -220,11 +219,11 @@ impl RemotePersistence for OneWriteFakePersister { _run_id: &RunId, _run_state: SerializableRunState, ) -> OpaqueResult<()> { - unimplemented!("FakePersister does not support storing run state."); + Err("FakePersister does not support storing run state.".located(here!())) } async fn try_load_run_state(&self, _run_id: &RunId) -> OpaqueResult { - unimplemented!("FakePersister does not support loading run state."); + Err("FakePersister does not support loading run state.".located(here!())) 
} fn boxed_clone(&self) -> Box { diff --git a/crates/abq_queue/src/persistence/results/fs.rs b/crates/abq_queue/src/persistence/results/fs.rs index 5c71338d..a5a8add9 100644 --- a/crates/abq_queue/src/persistence/results/fs.rs +++ b/crates/abq_queue/src/persistence/results/fs.rs @@ -410,7 +410,7 @@ impl PersistResults for FilesystemPersistor { async fn write_packed_line(fi: &mut File, packed: Vec) -> OpaqueResult<()> { fi.write_all(&packed).await.located(here!())?; - fi.write_all(&[b'\n']).await.located(here!())?; + fi.write_all(b"\n").await.located(here!())?; fi.flush().await.located(here!()) } diff --git a/crates/abq_queue/src/persistence/results/test_utils.rs b/crates/abq_queue/src/persistence/results/test_utils.rs index 74b14aee..cd459efe 100644 --- a/crates/abq_queue/src/persistence/results/test_utils.rs +++ b/crates/abq_queue/src/persistence/results/test_utils.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use std::sync::Arc; use abq_utils::{ diff --git a/crates/abq_queue/src/queue.rs b/crates/abq_queue/src/queue.rs index e07bdd66..5ca9c5b7 100644 --- a/crates/abq_queue/src/queue.rs +++ b/crates/abq_queue/src/queue.rs @@ -17,7 +17,7 @@ use abq_utils::net_protocol::queue::{ AssociatedTestResults, CancelReason, GroupId, NativeRunnerInfo, NegotiatorInfo, Request, TestResultsResponse, TestSpec, TestStrategy, }; -use abq_utils::net_protocol::results::{self, OpaqueLazyAssociatedTestResults}; +use abq_utils::net_protocol::results::{self}; use abq_utils::net_protocol::runners::{Manifest, MetadataMap, StdioOutput}; use abq_utils::net_protocol::work_server::{self, RetryManifestResponse}; use abq_utils::net_protocol::workers::{ @@ -1546,6 +1546,7 @@ impl Drop for Abq { if self.active { // Our user never called shutdown; try to perform a clean exit. // We can't do anything with an error, since this is a drop. + #[allow(clippy::let_underscore_future)] let _ = self.shutdown(); } } @@ -2315,10 +2316,6 @@ impl QueueServer { entity: Entity, mut stream: Box, ) -> OpaqueResult<()> { - enum Response { - One(TestResultsResponse), - Chunk(OpaqueLazyAssociatedTestResults), - } let results_cell = match queues.get_read_results_cell(&run_id).located(here!()) { Ok(state) => match state { ReadResultsState::ReadFromCell(cell) => cell, diff --git a/crates/abq_queue/src/queue/test_utils.rs b/crates/abq_queue/src/queue/test_utils.rs index e90b5211..ff508f3a 100644 --- a/crates/abq_queue/src/queue/test_utils.rs +++ b/crates/abq_queue/src/queue/test_utils.rs @@ -1,4 +1,3 @@ -#![cfg(test)] use abq_test_utils::one_nonzero_usize; use abq_utils::{ net_protocol::{ diff --git a/crates/abq_queue/tests/integration.rs b/crates/abq_queue/tests/integration.rs index 1313fc71..b1a885a8 100644 --- a/crates/abq_queue/tests/integration.rs +++ b/crates/abq_queue/tests/integration.rs @@ -71,14 +71,6 @@ struct Run(usize); #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] struct Wid(usize); -/// External party ID -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -struct ExternId(usize); - -/// ID of a spawned action -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -struct SpawnId(usize); - /// External dependencies that must be preserved while a test is ongoing. 
struct QueueExtDeps { _manifests_path: TempDir, @@ -377,7 +369,7 @@ enum TestResultsOutcome { Results(OpaqueLazyAssociatedTestResults), Error(String), Pending, - OutstandingRunners(Vec), + OutstandingRunners, } #[allow(clippy::type_complexity)] @@ -728,8 +720,8 @@ async fn run_test(server: Server, steps: Steps<'_>) { TestResultsOutcome::Results(results) } Pending => TestResultsOutcome::Pending, - RunInProgress { active_runners } => { - TestResultsOutcome::OutstandingRunners(active_runners) + RunInProgress { .. } => { + TestResultsOutcome::OutstandingRunners } Error(s) => TestResultsOutcome::Error(s), }; diff --git a/crates/abq_runners/generic_test_runner/src/lib.rs b/crates/abq_runners/generic_test_runner/src/lib.rs index 0fe366ec..84e783b7 100644 --- a/crates/abq_runners/generic_test_runner/src/lib.rs +++ b/crates/abq_runners/generic_test_runner/src/lib.rs @@ -1846,7 +1846,6 @@ mod test_abq_jest { fn write_leading_markers(s: &[u8]) -> String { String::from_utf8_lossy(s) .lines() - .into_iter() .map(|s| format!("|{s}")) .collect::>() .join("\n") diff --git a/crates/abq_test_support/native_runner_simulator/Cargo.toml b/crates/abq_test_support/native_runner_simulator/Cargo.toml index aeb2820b..49e828db 100644 --- a/crates/abq_test_support/native_runner_simulator/Cargo.toml +++ b/crates/abq_test_support/native_runner_simulator/Cargo.toml @@ -21,7 +21,19 @@ serde.workspace = true serde_derive.workspace = true serde_json.workspace = true -tokio.workspace = true +tokio = { version = "1.26.0", features = [ + "fs", + "io-util", + "io-std", + "net", + "rt", + "rt-multi-thread", + "macros", + "sync", + "time", + "process", +] } + tempfile.workspace = true abq_utils = { path = "../../abq_utils" } diff --git a/crates/abq_utils/src/net_async/tcp.rs b/crates/abq_utils/src/net_async/tcp.rs index 7c1a14ce..1c9f2f54 100644 --- a/crates/abq_utils/src/net_async/tcp.rs +++ b/crates/abq_utils/src/net_async/tcp.rs @@ -11,8 +11,6 @@ use tokio::net::ToSocketAddrs; use super::UnverifiedServerStream; use crate::auth::{ClientAuthStrategy, Role, ServerAuthStrategy}; -pub struct RawServerStream(tokio::net::TcpStream); - #[derive(Debug)] #[repr(transparent)] pub struct ClientStream(tokio::net::TcpStream); diff --git a/crates/abq_utils/src/net_async/tls.rs b/crates/abq_utils/src/net_async/tls.rs index cea8ee08..e5c6ab25 100644 --- a/crates/abq_utils/src/net_async/tls.rs +++ b/crates/abq_utils/src/net_async/tls.rs @@ -15,8 +15,6 @@ use tokio::net::ToSocketAddrs; use super::UnverifiedServerStream; use crate::auth::{ClientAuthStrategy, Role, ServerAuthStrategy}; -pub struct RawServerStream(tokio::net::TcpStream); - #[derive(Debug)] pub struct ServerStream(tokio_tls::server::TlsStream, Role); diff --git a/crates/abq_utils/src/net_protocol.rs b/crates/abq_utils/src/net_protocol.rs index 6386ead3..cc9f8de4 100644 --- a/crates/abq_utils/src/net_protocol.rs +++ b/crates/abq_utils/src/net_protocol.rs @@ -1427,9 +1427,9 @@ mod test { &[0u8, 0] as &[u8], &[0, 10], // Chop up the rest of the message - &[b'"', b'1', b'1'], - &[b'1', b'1'], - &[b'1', b'1', b'1', b'1', b'"'], + b"\"11", + b"11", + b"1111\"", ]; let mut async_reader = AsyncReader::default(); @@ -1511,8 +1511,8 @@ mod test { let mut splits = vec![]; let mut i = 0; for j in split_idxs { - splits.push(&msg[i..j as usize]); - i = j as usize; + splits.push(&msg[i..j]); + i = j; } assert_eq!(splits.iter().map(|l| l.len()).sum::(), msg_len); splits diff --git a/crates/abq_workers/src/negotiate.rs b/crates/abq_workers/src/negotiate.rs index 2690b16e..87360e12 100644 --- 
a/crates/abq_workers/src/negotiate.rs +++ b/crates/abq_workers/src/negotiate.rs @@ -25,7 +25,7 @@ use abq_utils::{ auth::User, error::{EntityfulError, ErrorEntity, ResultLocation}, exit::ExitCode, - here, log_entityful, net, net_async, + here, log_entityful, net_async, net_opt::ClientOptions, net_protocol::{ self, @@ -139,7 +139,7 @@ pub enum WorkersNegotiateError { } /// The worker pool side of the negotiation. -pub struct WorkersNegotiator(Box, WorkerContext); +pub struct WorkersNegotiator(); pub enum NegotiatedWorkers { /// No more workers were created, because there is no more work to be done. diff --git a/crates/abq_workers/src/workers.rs b/crates/abq_workers/src/workers.rs index d69dae9f..cc2336d9 100644 --- a/crates/abq_workers/src/workers.rs +++ b/crates/abq_workers/src/workers.rs @@ -799,15 +799,6 @@ async fn attempt_test_id_for_test_like_runner( vec![test] } } - #[cfg(feature = "test-test_ids")] - (R::EchoOnRetry(succeed_on), s) => { - if succeed_on == _attempt { - let result = echo::EchoWorker::run(echo::EchoWork { message: s }); - vec![result] - } else { - panic!("Failed to echo!"); - } - } (runner, test_id) => unreachable!( "Invalid runner/test_id combination: {:?} and {:?}", runner, test_id @@ -1124,7 +1115,7 @@ mod test { let run_id = RunId::unique(); let mut expected_results = HashMap::new(); - let tests = (0..num_echos).into_iter().map(|i| { + let tests = (0..num_echos).map(|i| { let echo_string = format!("echo {}", i); expected_results.insert(WorkId([i as _; 16]), vec![echo_string.clone()]); @@ -1229,139 +1220,6 @@ mod test { test_echo_n(proto, 2, 8).await; } - #[test] - #[cfg(feature = "test-test_ids")] - fn test_timeout() { - let (write_work, get_next_tests) = work_writer(); - let (results, results_handler) = results_collector(); - - let run_id = RunId::new(); - let manifest = ManifestMessage { - test_ids: vec![TestId::Echo("mona lisa".to_string())], - }; - - let (default_config, manifest_collector) = setup_pool( - TestLikeRunner::InduceTimeout, - run_id, - manifest, - get_next_tests, - results_handler, - ); - - let timeout = Duration::from_millis(1); - let config = WorkerPoolConfig { - work_timeout: timeout, - work_retries: 0, - ..default_config - }; - let mut pool = WorkerPool::new(config); - - for test_id in await_manifest_test_specs(manifest_collector) { - write_work(local_work(test_id, run_id, WorkId("id1".to_string()))); - } - - write_work(NextWork::EndOfWork); - - await_results(results, |results| { - let results = results.lock().unwrap(); - if results.is_empty() { - return false; - } - - results.get("id1").unwrap() == &WorkerResult::Timeout(timeout) - }); - - pool.shutdown(); - } - - #[test] - #[cfg(feature = "test-test_ids")] - fn test_panic_no_retries() { - let (write_work, get_next_tests) = work_writer(); - let (results, results_handler) = results_collector(); - - let run_id = RunId::new(); - let manifest = ManifestMessage { - test_ids: vec![TestId::Echo("".to_string())], - }; - - let (default_config, manifest_collector) = setup_pool( - TestLikeRunner::EchoOnRetry(10), - run_id, - manifest, - get_next_tests, - results_handler, - ); - - let config = WorkerPoolConfig { - work_retries: 0, - ..default_config - }; - let mut pool = WorkerPool::new(config); - - for test_id in await_manifest_test_specs(manifest_collector) { - write_work(local_work(test_id, run_id, WorkId("id1".to_string()))); - } - write_work(NextWork::EndOfWork); - - await_results(results, |results| { - let results = results.lock().unwrap(); - if results.is_empty() { - return false; - } - - 
results.get("id1").unwrap() == &WorkerResult::Panic("Failed to echo!".to_string()) - }); - - pool.shutdown(); - } - - #[test] - #[cfg(feature = "test-test_ids")] - fn test_panic_succeed_after_retry() { - let (write_work, get_next_tests) = work_writer(); - let (results, results_handler) = results_collector(); - - let run_id = RunId::new(); - let manifest = ManifestMessage { - test_ids: vec![TestId::Echo("okay".to_string())], - }; - - let (default_config, manifest_collector) = setup_pool( - TestLikeRunner::EchoOnRetry(2), - run_id, - manifest, - get_next_tests, - results_handler, - ); - - let config = WorkerPoolConfig { - work_retries: 1, - ..default_config - }; - let mut pool = WorkerPool::new(config); - - for test_id in await_manifest_test_specs(manifest_collector) { - write_work(local_work(test_id, run_id, WorkId("id1".to_string()))); - } - write_work(NextWork::EndOfWork); - - await_results(results, |results| { - let results = results.lock().unwrap(); - if results.is_empty() { - return false; - } - - results.get("id1").unwrap() - == &WorkerResult::Output(Output { - success: true, - message: "okay".to_string(), - }) - }); - - pool.shutdown(); - } - #[tokio::test] #[traced_test] async fn bad_message_doesnt_take_down_queue_negotiator_server() { diff --git a/crates/reporters/dot/src/lib.rs b/crates/reporters/dot/src/lib.rs index eed400f7..0225fa2c 100644 --- a/crates/reporters/dot/src/lib.rs +++ b/crates/reporters/dot/src/lib.rs @@ -62,7 +62,7 @@ impl Reporter for DotReporter { if self.num_results % DOT_REPORTER_LINE_LIMIT == 0 { // Print a newline - write(&mut self.buffer, &[b'\n'])?; + write(&mut self.buffer, b"\n")?; } // Make sure to flush the dot out to avoid buffering them! @@ -99,7 +99,7 @@ impl Reporter for DotReporter { fn after_all_results(&mut self) { if self.num_results % DOT_REPORTER_LINE_LIMIT != 0 { - let _ = write(&mut self.buffer, &[b'\n']); + let _ = write(&mut self.buffer, b"\n"); } } diff --git a/crates/reporters/progress/src/lib.rs b/crates/reporters/progress/src/lib.rs index 05744d33..d058b4d3 100644 --- a/crates/reporters/progress/src/lib.rs +++ b/crates/reporters/progress/src/lib.rs @@ -112,7 +112,7 @@ impl Reporter for ProgressReporter { if is_fail_like { if self.wrote_first_output { - output::write(&mut self.buffer, &[b'\n'])?; + output::write(&mut self.buffer, b"\n")?; } format_result_line(&mut self.buffer, test_result)?; self.wrote_first_output = true; @@ -358,59 +358,59 @@ mod test { > ABQ status > [0 seconds] 1 tests run, 1 passed, 0 failing - + > ABQ status > [0 seconds] 1 tests run, 1 passed, 0 failing - + > ABQ status > [0 seconds] 1 tests run, 1 passed, 0 failing - + > ABQ status > [0 seconds] 2 tests run, 1 passed, 1 failing - + > ABQ status > [0 seconds] 2 tests run, 1 passed, 1 failing - + > ABQ status > [0 seconds] 2 tests run, 1 passed, 1 failing - + > ABQ status > [0 seconds] 3 tests run, 2 passed, 1 failing - + > ABQ status > [0 seconds] 3 tests run, 2 passed, 1 failing - + > ABQ status > [0 seconds] 3 tests run, 2 passed, 1 failing - + > ABQ status > [0 seconds] 4 tests run, 2 passed, 2 failing - + > ABQ status > [0 seconds] 4 tests run, 2 passed, 2 failing - + > ABQ status > [0 seconds] 4 tests run, 2 passed, 2 failing - + > ABQ status > [0 seconds] 5 tests run, 3 passed, 2 failing - + > ABQ status > [0 seconds] 5 tests run, 3 passed, 2 failing - + "###); } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 68249258..bbf217f2 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.65.0" +channel 
= "1.81.0" profile = "default"