diff --git a/Cargo.lock b/Cargo.lock
index 0f77763543f..6726b0596fc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5450,6 +5450,7 @@ checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15"
 dependencies = [
  "bytes",
  "futures-core",
+ "futures-io",
  "futures-sink",
  "pin-project-lite",
  "tokio",
diff --git a/arrow_util/src/bitset.rs b/arrow_util/src/bitset.rs
index 94f02bcedb9..34f177915ef 100644
--- a/arrow_util/src/bitset.rs
+++ b/arrow_util/src/bitset.rs
@@ -281,7 +281,12 @@ mod tests {
     }
 
     fn iter_set_bools(bools: &[bool]) -> impl Iterator<Item = usize> + '_ {
-        bools.iter().enumerate().filter_map(|(x, y)| y.then(|| x))
+        bools
+            .iter()
+            .enumerate()
+            // Keep only the entries whose flag `y` is set, then return the index `x`
+            .filter(|&(_, y)| *y)
+            .map(|(x, _)| x)
     }
 
     #[test]
diff --git a/arrow_util/src/dictionary.rs b/arrow_util/src/dictionary.rs
index 285e1d5a462..1885deb793d 100644
--- a/arrow_util/src/dictionary.rs
+++ b/arrow_util/src/dictionary.rs
@@ -128,10 +128,7 @@ impl<K: AsPrimitive<usize> + FromPrimitive + Zero> StringDictionary<K> {
 }
 
 fn hash_str(hasher: &ahash::RandomState, value: &str) -> u64 {
-    use std::hash::{BuildHasher, Hash, Hasher};
-    let mut state = hasher.build_hasher();
-    value.hash(&mut state);
-    state.finish()
+    hasher.hash_one(value)
 }
 
 impl StringDictionary<i32> {
diff --git a/cache_system/src/addressable_heap.rs b/cache_system/src/addressable_heap.rs
index 64c2c623100..5ce84b6e41c 100644
--- a/cache_system/src/addressable_heap.rs
+++ b/cache_system/src/addressable_heap.rs
@@ -478,9 +478,12 @@ mod tests {
     }
 
     fn peek(&self) -> Option<(&u8, &String, &i8)> {
+        #[allow(clippy::map_identity)]
         self.inner
             .iter()
             .min_by_key(|(k, _v, o)| (o, k))
+            // This is a false positive, as the closure actually changes
+            // Option<&(u8, String, i8)> -> Option<(&u8, &String, &i8)>
             .map(|(k, v, o)| (k, v, o))
     }
 
diff --git a/client_util/src/connection.rs b/client_util/src/connection.rs
index 02cbe882d05..d6715022139 100644
--- a/client_util/src/connection.rs
+++ b/client_util/src/connection.rs
@@ -98,7 +98,7 @@ impl From<tonic::transport::Error> for Error {
         let details = source
             .source()
             .map(|e| format!(" ({e})"))
-            .unwrap_or_else(|| "".to_string());
+            .unwrap_or_default();
 
         Self::TransportError { source, details }
     }
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index c7b47225b89..a158e704497 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -1521,7 +1521,7 @@ impl TableSummary {
     pub fn total_count(&self) -> u64 {
         // Assumes that all columns have the same number of rows, so
        // pick the first one
-        let count = self.columns.get(0).map(|c| c.total_count()).unwrap_or(0);
+        let count = self.columns.first().map(|c| c.total_count()).unwrap_or(0);
 
         // Validate that the counts are consistent across columns
         for c in &self.columns {
diff --git a/generated_types/src/lib.rs b/generated_types/src/lib.rs
index 51d81223375..0bd88f222b2 100644
--- a/generated_types/src/lib.rs
+++ b/generated_types/src/lib.rs
@@ -2,7 +2,11 @@
 // crates because of all the generated code it contains that we don't have much
 // control over.
 #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)]
-#![allow(clippy::derive_partial_eq_without_eq, clippy::needless_borrow)]
+#![allow(
+    clippy::derive_partial_eq_without_eq,
+    clippy::needless_borrow,
+    clippy::needless_borrows_for_generic_args
+)]
 #![warn(unused_crate_dependencies)]
 
 // Workaround for "unused crate" lint false positives.
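The `arrow_util/src/dictionary.rs` hunk above collapses the manual build/hash/finish sequence into a single `hash_one` call. A minimal standalone sketch of why the two forms agree; it uses the std-library `RandomState` so it runs without extra dependencies, while the patch itself calls the equivalent method on `ahash::RandomState`:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

fn main() {
    let hasher = RandomState::new();
    let value = "cpu,host=a";

    // The long form removed by the patch: build a hasher state,
    // feed the value through Hash, then finalize it.
    let mut state = hasher.build_hasher();
    value.hash(&mut state);
    let long_form = state.finish();

    // The one-liner that replaces it; BuildHasher::hash_one
    // (stable since Rust 1.71) performs the same three steps.
    let short_form = hasher.hash_one(value);

    // Same hasher keys and same input, therefore the same hash.
    assert_eq!(long_form, short_form);
}
```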
diff --git a/import_export/Cargo.toml b/import_export/Cargo.toml
index 86f63460a3a..27b3cc62583 100644
--- a/import_export/Cargo.toml
+++ b/import_export/Cargo.toml
@@ -19,5 +19,5 @@ schema = { path = "../schema" }
 serde_json = "1.0.107"
 thiserror = "1.0.48"
 tokio = { version = "1.32" }
-tokio-util = { version = "0.7.9" }
+tokio-util = { version = "0.7.9", features = ["compat"] }
 workspace-hack = { version = "0.1", path = "../workspace-hack" }
diff --git a/import_export/src/file/export.rs b/import_export/src/file/export.rs
index a9be88f064a..f336477995b 100644
--- a/import_export/src/file/export.rs
+++ b/import_export/src/file/export.rs
@@ -71,8 +71,7 @@ impl RemoteExporter {
         // Export the metadata for the table. Since all
         // parquet_files are part of the same table, use the table_id
         // from the first parquet_file
-        let table_id = parquet_files
-            .get(0)
+        let table_id = parquet_files.first()
             .map(|parquet_file| parquet_file.table_id);
         if let Some(table_id) = table_id {
             self.export_table_metadata(&output_directory, table_id)
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs
index e3aa9f0bfb3..b68dd4dfa99 100644
--- a/influxdb3_server/src/lib.rs
+++ b/influxdb3_server/src/lib.rs
@@ -179,12 +179,8 @@ mod tests {
         let addr = get_free_port();
         let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
         let metrics = Arc::new(metric::Registry::new());
-        let common_state = crate::CommonServerState::new(
-            Arc::clone(&metrics),
-            None,
-            trace_header_parser,
-            addr.clone(),
-        );
+        let common_state =
+            crate::CommonServerState::new(Arc::clone(&metrics), None, trace_header_parser, addr);
         let catalog = Arc::new(influxdb3_write::catalog::Catalog::new());
         let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
         let parquet_store =
@@ -240,7 +236,7 @@ mod tests {
             "| a    | 1970-01-01T00:00:00.000000123 | 1   |",
             "+------+-------------------------------+-----+",
         ];
-        let actual: Vec<_> = body.split("\n").into_iter().collect();
+        let actual: Vec<_> = body.split('\n').collect();
         assert_eq!(
             expected, actual,
             "\n\nexpected:\n\n{:#?}\nactual:\n\n{:#?}\n\n",
             expected, actual
         );
@@ -251,9 +247,9 @@
     }
 
     pub(crate) async fn write_lp(
-        server: impl Into<String>,
-        database: impl Into<String>,
-        lp: impl Into<String>,
+        server: impl Into<String> + Send,
+        database: impl Into<String> + Send,
+        lp: impl Into<String> + Send,
         authorization: Option<&str>,
     ) -> Response<Body> {
         let server = server.into();
@@ -276,9 +272,9 @@
     }
 
     pub(crate) async fn query(
-        server: impl Into<String>,
-        database: impl Into<String>,
-        query: impl Into<String>,
+        server: impl Into<String> + Send,
+        database: impl Into<String> + Send,
+        query: impl Into<String> + Send,
         authorization: Option<&str>,
     ) -> Response<Body> {
         let client = Client::new();
diff --git a/influxrpc_parser/src/predicate.rs b/influxrpc_parser/src/predicate.rs
index 9cc8e31fd50..362f4dcb19c 100644
--- a/influxrpc_parser/src/predicate.rs
+++ b/influxrpc_parser/src/predicate.rs
@@ -98,7 +98,7 @@ fn build_node(expr: &Expr, strings_are_regex: bool) -> Result<RPCNode> {
         ),
         Expr::Cast { expr, data_type } => match data_type {
             sqlparser::ast::DataType::Custom(ident, _modifiers) => {
-                if let Some(Ident { value, .. }) = ident.0.get(0) {
+                if let Some(Ident { value, .. }) = ident.0.first() {
                     // See https://docs.influxdata.com/influxdb/v1.8/query_language/explore-data/#syntax
                     match value.as_str() {
                         "field" => {
diff --git a/ingester_query_grpc/src/lib.rs b/ingester_query_grpc/src/lib.rs
index 102e9d74d63..d4dc78f4c0a 100644
--- a/ingester_query_grpc/src/lib.rs
+++ b/ingester_query_grpc/src/lib.rs
@@ -13,7 +13,11 @@
     missing_debug_implementations,
     unused_crate_dependencies
 )]
-#![allow(clippy::derive_partial_eq_without_eq, clippy::needless_borrow)]
+#![allow(
+    clippy::derive_partial_eq_without_eq,
+    clippy::needless_borrow,
+    clippy::needless_borrows_for_generic_args
+)]
 
 // Workaround for "unused crate" lint false positives.
 use workspace_hack as _;
diff --git a/iox_data_generator/src/tag_set.rs b/iox_data_generator/src/tag_set.rs
index 3c2a29c7535..a92f4a40d32 100644
--- a/iox_data_generator/src/tag_set.rs
+++ b/iox_data_generator/src/tag_set.rs
@@ -341,7 +341,7 @@ impl GeneratedTagSets {
         let parent_has_ones = self
             .has_one_values
             .entry(parent_has_one_key.as_str().to_owned())
-            .or_insert_with(ParentToHasOnes::default);
+            .or_default();
 
         let has_one_values = self.values.get(has_one.as_str()).expect(
             "add_has_ones should never be called before the values collection is created",
@@ -354,10 +354,7 @@
                 ones_iter.next().unwrap()
             });
 
-            let has_one_map = parent_has_ones
-                .id_to_has_ones
-                .entry(parent.id)
-                .or_insert_with(BTreeMap::new);
+            let has_one_map = parent_has_ones.id_to_has_ones.entry(parent.id).or_default();
             has_one_map.insert(Arc::clone(&parent_has_one_key), Arc::clone(one_val));
         }
     }
@@ -414,7 +411,7 @@
             let child_vals = self
                 .child_values
                 .entry(child_values_key(belongs_to, &spec.name))
-                .or_insert_with(BTreeMap::new);
+                .or_default();
             child_vals.insert(parent.id, parent_owned);
         }
         self.values.insert(spec.name.to_string(), all_children);
diff --git a/iox_query/src/exec/seriesset/converter.rs b/iox_query/src/exec/seriesset/converter.rs
index e7c6383d903..2ad6a63fd6c 100644
--- a/iox_query/src/exec/seriesset/converter.rs
+++ b/iox_query/src/exec/seriesset/converter.rs
@@ -625,6 +625,7 @@ impl PartialEq for SortableSeries {
 impl Eq for SortableSeries {}
 
 impl PartialOrd for SortableSeries {
+    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         self.tag_vals.partial_cmp(&other.tag_vals)
     }
diff --git a/iox_query/src/logical_optimizer/handle_gapfill.rs b/iox_query/src/logical_optimizer/handle_gapfill.rs
index 84a60c111f9..291b88e986b 100644
--- a/iox_query/src/logical_optimizer/handle_gapfill.rs
+++ b/iox_query/src/logical_optimizer/handle_gapfill.rs
@@ -620,7 +620,7 @@ mod test {
    fn optimize(plan: &LogicalPlan) -> Result<Option<LogicalPlan>> {
         let optimizer = Optimizer::with_rules(vec![Arc::new(HandleGapFill)]);
         optimizer.optimize_recursively(
-            optimizer.rules.get(0).unwrap(),
+            optimizer.rules.first().unwrap(),
             plan,
             &OptimizerContext::new(),
         )
diff --git a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
index 92cf1209f0f..08e94e87dc6 100644
--- a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
+++ b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
@@ -89,7 +89,7 @@ impl PhysicalOptimizerRule for DedupSortOrder {
                 .iter()
                 .filter(|sort_key| {
                     match sort_key.get_index_of(col) {
-                        Some(idx) if idx == 0 => {
+                        Some(0) => {
                             // Column next in sort order from this chunk's PoV. This is good.
                             true
                         }
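The three `or_insert_with(...)` to `or_default()` conversions in `iox_data_generator/src/tag_set.rs` above (and the matching one in `iox_query/src/test.rs` below) are behavior-preserving: `Entry::or_default()` is shorthand for `or_insert_with(Default::default)`, so it covers `BTreeMap::new` and `ParentToHasOnes::default` alike. A small sketch of the pattern, with an illustrative map layout that is not taken from the patch:

```rust
use std::collections::BTreeMap;

fn main() {
    // Nested map: name -> (id -> value), loosely modeled on the generator code.
    let mut values: BTreeMap<String, BTreeMap<u64, String>> = BTreeMap::new();

    // Before: values.entry(key).or_insert_with(BTreeMap::new)
    // After:  values.entry(key).or_default()
    // Both insert an empty inner map only when the key is vacant and
    // return a mutable reference to the entry's value.
    let inner = values.entry("weather".to_string()).or_default();
    inner.insert(1, "sunny".to_string());

    assert_eq!(values["weather"][&1], "sunny");
}
```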
diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs
index 81212ca1ab3..8b11291a601 100644
--- a/iox_query/src/test.rs
+++ b/iox_query/src/test.rs
@@ -81,9 +81,7 @@ impl TestDatabase {
     /// Add a test chunk to the database
     pub fn add_chunk(&self, partition_key: &str, chunk: Arc<TestChunk>) -> &Self {
         let mut partitions = self.partitions.lock();
-        let chunks = partitions
-            .entry(partition_key.to_string())
-            .or_insert_with(BTreeMap::new);
+        let chunks = partitions.entry(partition_key.to_string()).or_default();
         chunks.insert(chunk.id(), chunk);
         self
     }
diff --git a/iox_query_influxql/src/plan/expr_type_evaluator.rs b/iox_query_influxql/src/plan/expr_type_evaluator.rs
index 7375f2923f6..be461df8f3f 100644
--- a/iox_query_influxql/src/plan/expr_type_evaluator.rs
+++ b/iox_query_influxql/src/plan/expr_type_evaluator.rs
@@ -294,8 +294,7 @@
             // These functions require a single numeric as input and return a float
             name @ ("sin" | "cos" | "tan" | "atan" | "exp" | "log" | "ln" | "log2" | "log10"
             | "sqrt") => {
-                match arg_types
-                    .get(0)
+                match arg_types.first()
                     .ok_or_else(|| error::map::query(format!("{name} expects 1 argument")))?
                 {
                     Some(
@@ -310,8 +309,7 @@
 
             // These functions require a single float as input and return a float
             name @ ("asin" | "acos") => {
-                match arg_types
-                    .get(0)
+                match arg_types.first()
                     .ok_or_else(|| error::map::query(format!("{name} expects 1 argument")))?
                 {
                     Some(VarRefDataType::Float) | None => Ok(Some(VarRefDataType::Float)),
@@ -324,7 +322,7 @@
 
             // These functions require two numeric arguments and return a float
             name @ ("atan2" | "pow") => {
-                let (Some(arg0), Some(arg1)) = (arg_types.get(0), arg_types.get(1)) else {
+                let (Some(arg0), Some(arg1)) = (arg_types.first(), arg_types.get(1)) else {
                     return error::query(format!("{name} expects 2 arguments"));
                 };
@@ -347,8 +345,7 @@
 
             // These functions return the same data type as their input
             name @ ("abs" | "floor" | "ceil" | "round") => {
-                match arg_types
-                    .get(0)
+                match arg_types.first()
                     .cloned()
                     .ok_or_else(|| error::map::query(format!("{name} expects 1 argument")))?
                 {
diff --git a/iox_query_influxql/src/plan/planner/select.rs b/iox_query_influxql/src/plan/planner/select.rs
index 24fb3726c05..bfafb455424 100644
--- a/iox_query_influxql/src/plan/planner/select.rs
+++ b/iox_query_influxql/src/plan/planner/select.rs
@@ -288,7 +288,7 @@
             ));
         }
         Ok(Self::First {
-            field_key: Self::identifier(call.args.get(0).unwrap())?,
+            field_key: Self::identifier(call.args.first().unwrap())?,
         })
     }
@@ -300,7 +300,7 @@
             ));
         }
         Ok(Self::Last {
-            field_key: Self::identifier(call.args.get(0).unwrap())?,
+            field_key: Self::identifier(call.args.first().unwrap())?,
         })
     }
@@ -312,7 +312,7 @@
             ));
         }
         Ok(Self::Max {
-            field_key: Self::identifier(call.args.get(0).unwrap())?,
+            field_key: Self::identifier(call.args.first().unwrap())?,
         })
     }
@@ -324,7 +324,7 @@
             ));
         }
         Ok(Self::Min {
-            field_key: Self::identifier(call.args.get(0).unwrap())?,
+            field_key: Self::identifier(call.args.first().unwrap())?,
         })
     }
@@ -336,7 +336,7 @@
             ));
         }
         Ok(Self::Percentile {
-            field_key: Self::identifier(call.args.get(0).unwrap())?,
+            field_key: Self::identifier(call.args.first().unwrap())?,
             n: Self::literal_num(call.args.get(1).unwrap())?,
         })
     }
@@ -349,7 +349,7 @@
             ));
         }
         Ok(Self::Sample {
-            field_key: Self::identifier(call.args.get(0).unwrap())?,
+            field_key: Self::identifier(call.args.first().unwrap())?,
             n: Self::literal_int(call.args.get(1).unwrap())?,
         })
     }
diff --git a/ioxd_common/src/server_type.rs b/ioxd_common/src/server_type.rs
index b8aa3085407..767dacd9ea4 100644
--- a/ioxd_common/src/server_type.rs
+++ b/ioxd_common/src/server_type.rs
@@ -30,7 +30,7 @@ impl From<tonic::transport::Error> for RpcError {
         let details = source
             .source()
             .map(|e| format!(" ({e})"))
-            .unwrap_or_else(|| "".to_string());
+            .unwrap_or_default();
 
         Self::TransportError { source, details }
     }
diff --git a/schema/src/sort.rs b/schema/src/sort.rs
index 06378a124fb..52a8ad2652b 100644
--- a/schema/src/sort.rs
+++ b/schema/src/sort.rs
@@ -404,8 +404,8 @@ pub fn adjust_sort_key_columns(
     let existing_columns_without_time = catalog_sort_key
         .iter()
         .map(|(col, _opts)| col)
-        .cloned()
-        .filter(|col| TIME_COLUMN_NAME != col.as_ref());
+        .filter(|&col| TIME_COLUMN_NAME != col.as_ref())
+        .cloned();
     let new_columns: Vec<_> = primary_key
         .iter()
         .filter(|col| !catalog_sort_key.contains(col))
diff --git a/service_grpc_flight/src/keep_alive.rs b/service_grpc_flight/src/keep_alive.rs
index 4d838e92d31..f8abdac22ef 100644
--- a/service_grpc_flight/src/keep_alive.rs
+++ b/service_grpc_flight/src/keep_alive.rs
@@ -375,7 +375,7 @@
         } else {
             s
         };
-        let s = panic_on_stream_timeout(s, Duration::from_millis(250));
-        s
+
+        (panic_on_stream_timeout(s, Duration::from_millis(250))) as _
     }
 }
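The `.get(0)` to `.first()` rewrites that dominate the hunks above (and the two `wal` test hunks below) follow clippy's `get_first` lint; both forms return `Option<&T>` and never panic, so each call site is a pure readability change. A quick standalone sketch:

```rust
fn main() {
    let columns = vec!["tag", "field", "time"];

    // Equivalent on slices and Vecs: both return Option<&T>.
    assert_eq!(columns.first(), columns.get(0));
    assert_eq!(columns.first(), Some(&"tag"));

    // Neither form panics on an empty collection.
    let empty: Vec<&str> = Vec::new();
    assert_eq!(empty.first(), None);
}
```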
diff --git a/wal/src/lib.rs b/wal/src/lib.rs
index 46e88d957c1..145d1cf0f5f 100644
--- a/wal/src/lib.rs
+++ b/wal/src/lib.rs
@@ -863,7 +863,7 @@
         assert_eq!(wal_entries.len(), 2);
         let write_op_entries = wal_entries.into_iter().flatten().collect::<Vec<_>>();
         assert_eq!(write_op_entries.len(), 3);
-        assert_matches!(write_op_entries.get(0), Some(got_op1) => {
+        assert_matches!(write_op_entries.first(), Some(got_op1) => {
             assert_op_shape(got_op1, &w1);
         });
         assert_matches!(write_op_entries.get(1), Some(got_op2) => {
@@ -916,7 +916,7 @@
         // error is thrown
         assert_matches!(decoder.next(), Some(Ok(batch)) => {
             assert_eq!(batch.len(), 1);
-            assert_op_shape(batch.get(0).unwrap(), &good_write);
+            assert_op_shape(batch.first().unwrap(), &good_write);
         });
         assert_matches!(
             decoder.next(),
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 51582ff7388..29c9fdd5194 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -83,7 +83,7 @@ strum = { version = "0.25", features = ["derive"] }
 thrift = { version = "0.17" }
 tokio = { version = "1", features = ["full", "tracing"] }
 tokio-stream = { version = "0.1", features = ["fs", "net"] }
-tokio-util = { version = "0.7", features = ["codec", "io"] }
+tokio-util = { version = "0.7", features = ["codec", "compat", "io"] }
 tonic = { version = "0.9", features = ["tls-webpki-roots"] }
 tower = { version = "0.4", features = ["balance", "buffer", "limit", "timeout", "util"] }
 tracing = { version = "0.1", features = ["log", "max_level_trace", "release_max_level_trace"] }
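The `compat` feature enabled for `tokio-util` in `import_export/Cargo.toml` and mirrored in `workspace-hack/Cargo.toml` turns on the `tokio_util::compat` module, which adapts between tokio's `AsyncRead`/`AsyncWrite` traits and the `futures-io` equivalents; that is also why `futures-io` appears as a new `tokio-util` dependency in the `Cargo.lock` hunk at the top. A hedged sketch of the adapter in use; the file-reading scenario is illustrative rather than taken from this diff, and it assumes the `tokio` (with `fs`, `rt`, and `macros` features), `tokio-util` (with `compat`), and `futures` crates:

```rust
use futures::io::AsyncReadExt; // the futures-io flavored read_to_end
use tokio_util::compat::TokioAsyncReadCompatExt;

#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
    // tokio::fs::File implements tokio::io::AsyncRead...
    let file = tokio::fs::File::open("Cargo.toml").await?;

    // ...and .compat() wraps it in an adapter that implements
    // futures::io::AsyncRead, so it can be passed to APIs written
    // against the futures traits instead of tokio's.
    let mut reader = file.compat();

    let mut buf = Vec::new();
    reader.read_to_end(&mut buf).await?;
    println!("read {} bytes", buf.len());
    Ok(())
}
```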