diff --git a/Cargo.lock b/Cargo.lock
index 438a0d67bcc..bb89be0b691 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -768,10 +768,11 @@ dependencies = [
 [[package]]
 name = "crossbeam-epoch"
 version = "0.9.3"
-source = "git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0#e0e083d062649484188b7337fe388fd12f2c8d94"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12"
 dependencies = [
  "cfg-if 1.0.0",
- "crossbeam-utils 0.8.3 (git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0)",
+ "crossbeam-utils 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static",
  "memoffset",
  "scopeguard",
@@ -780,11 +781,10 @@ dependencies = [
 [[package]]
 name = "crossbeam-epoch"
 version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12"
+source = "git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0#e0e083d062649484188b7337fe388fd12f2c8d94"
 dependencies = [
  "cfg-if 1.0.0",
- "crossbeam-utils 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.8.3 (git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0)",
  "lazy_static",
  "memoffset",
  "scopeguard",
@@ -825,7 +825,8 @@ dependencies = [
 [[package]]
 name = "crossbeam-utils"
 version = "0.8.3"
-source = "git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0#e0e083d062649484188b7337fe388fd12f2c8d94"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49"
 dependencies = [
  "autocfg",
  "cfg-if 1.0.0",
@@ -835,8 +836,7 @@ dependencies = [
 [[package]]
 name = "crossbeam-utils"
 version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49"
+source = "git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0#e0e083d062649484188b7337fe388fd12f2c8d94"
 dependencies = [
  "autocfg",
  "cfg-if 1.0.0",
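Note on the Cargo.lock hunks above: they do not change the dependency graph so much as re-sort it. `crossbeam-epoch` 0.9.3 and `crossbeam-utils` 0.8.3 each appear twice, once from crates.io and once from the `tikv/crossbeam` fork, and the paired hunks swap the order of the two entries. A registry entry is verified by a `checksum` of the published `.crate` file; a git entry has no checksum and instead pins the exact resolved commit in the URL fragment. Reassembled from the hunks themselves, the two forms look like this:

```toml
# The same name/version vendored twice; Cargo tells them apart by `source`.
# Registry packages carry a checksum:
[[package]]
name = "crossbeam-utils"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49"

# Git packages pin the resolved commit in the URL fragment instead:
[[package]]
name = "crossbeam-utils"
version = "0.8.3"
source = "git+https://github.com/tikv/crossbeam.git?branch=tikv-5.0#e0e083d062649484188b7337fe388fd12f2c8d94"
```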
diff --git a/cmd/tikv-ctl/src/main.rs b/cmd/tikv-ctl/src/main.rs
index 15ae3a45a51..0a840ae81d5 100644
--- a/cmd/tikv-ctl/src/main.rs
+++ b/cmd/tikv-ctl/src/main.rs
@@ -1891,7 +1891,7 @@ fn main() {
     // Bypass the ldb command to RocksDB.
     if let Some(cmd) = matches.subcommand_matches("ldb") {
-        run_ldb_command(&cmd, &cfg);
+        run_ldb_command(cmd, &cfg);
         return;
     }
diff --git a/components/backup/src/endpoint.rs b/components/backup/src/endpoint.rs
index 4d1f0488818..804975a4139 100644
--- a/components/backup/src/endpoint.rs
+++ b/components/backup/src/endpoint.rs
@@ -169,7 +169,7 @@ impl BackupRange {
             |key, lock| {
                 Lock::check_ts_conflict(
                     Cow::Borrowed(lock),
-                    &key,
+                    key,
                     backup_ts,
                     &Default::default(),
                 )
             }
@@ -507,8 +507,8 @@ impl Progress {
             }
         }
         if info.role == StateRole::Leader {
-            let ekey = get_min_end_key(end_key.as_ref(), &region);
-            let skey = get_max_start_key(start_key.as_ref(), &region);
+            let ekey = get_min_end_key(end_key.as_ref(), region);
+            let skey = get_max_start_key(start_key.as_ref(), region);
             assert!(!(skey == ekey && ekey.is_some()), "{:?} {:?}", skey, ekey);
             let leader = find_peer(region, store_id).unwrap().to_owned();
             let backup_range = BackupRange {
diff --git a/components/cdc/src/channel.rs b/components/cdc/src/channel.rs
index 3c0dde368d6..47810fdbb8e 100644
--- a/components/cdc/src/channel.rs
+++ b/components/cdc/src/channel.rs
@@ -281,6 +281,7 @@ where
 mod tests {
     use super::*;
 
+    use std::assert_matches::assert_matches;
     use std::sync::mpsc;
     use std::time::Duration;
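The cdc/channel.rs test hunk above pulls in `std::assert_matches::assert_matches`, which was nightly-only when this change landed (it needs `#![feature(assert_matches)]` at the crate root, presumably already enabled elsewhere in the crate; that line is not part of this diff). A minimal sketch of what the macro buys over a bare `assert!(matches!(..))`:

```rust
// Nightly-only sketch: requires the assert_matches feature gate.
#![feature(assert_matches)]
use std::assert_matches::assert_matches;

fn main() {
    let res: Result<u32, String> = Ok(42);
    // On failure this prints both the actual value and the expected
    // pattern, unlike assert!(matches!(res, ...)), which loses the value.
    assert_matches!(res, Ok(n) if n > 0);
}
```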
diff --git a/components/cdc/src/endpoint.rs b/components/cdc/src/endpoint.rs
index 517db7ffb72..759eb8cd024 100644
--- a/components/cdc/src/endpoint.rs
+++ b/components/cdc/src/endpoint.rs
@@ -904,7 +904,7 @@ impl, E: KvEngine> Endpoint {
         for (region_id, _) in regions {
             if let Some(region) = meta.regions.get(&region_id) {
                 if let Some((term, leader_id)) = meta.leaders.get(&region_id) {
-                    let leader_store_id = find_store_id(&region, *leader_id);
+                    let leader_store_id = find_store_id(region, *leader_id);
                     if leader_store_id.is_none() {
                         continue;
                     }
diff --git a/components/cdc/src/old_value.rs b/components/cdc/src/old_value.rs
index b0bec738636..75c0bb0c349 100644
--- a/components/cdc/src/old_value.rs
+++ b/components/cdc/src/old_value.rs
@@ -90,7 +90,7 @@ impl OldValueReader {
         let mut opts = ReadOptions::new();
         opts.set_fill_cache(false);
         self.snapshot
-            .get_cf_opt(opts, CF_DEFAULT, &key)
+            .get_cf_opt(opts, CF_DEFAULT, key)
             .unwrap()
             .map(|v| v.deref().to_vec())
     }
diff --git a/components/cdc/tests/mod.rs b/components/cdc/tests/mod.rs
index aa260d41ec8..0a9d2c368b4 100644
--- a/components/cdc/tests/mod.rs
+++ b/components/cdc/tests/mod.rs
@@ -155,7 +155,7 @@ impl TestSuiteBuilder {
         for (id, worker) in &mut endpoints {
             let sim = cluster.sim.wl();
             let raft_router = sim.get_server_router(*id);
-            let cdc_ob = obs.get(&id).unwrap().clone();
+            let cdc_ob = obs.get(id).unwrap().clone();
             let cm = sim.get_concurrency_manager(*id);
             let env = Arc::new(Environment::new(1));
             let mut cdc_endpoint = cdc::Endpoint::new(
diff --git a/components/cloud/aws/src/s3.rs b/components/cloud/aws/src/s3.rs
index d040dfb0836..6c8440d1cee 100644
--- a/components/cloud/aws/src/s3.rs
+++ b/components/cloud/aws/src/s3.rs
@@ -131,7 +131,7 @@ impl Config {
 impl BlobConfig for Config {
     fn name(&self) -> &'static str {
-        &STORAGE_NAME
+        STORAGE_NAME
     }
 
     fn url(&self) -> io::Result {
diff --git a/components/cloud/gcp/src/gcs.rs b/components/cloud/gcp/src/gcs.rs
index 135af1f875d..37d52eaf234 100644
--- a/components/cloud/gcp/src/gcs.rs
+++ b/components/cloud/gcp/src/gcs.rs
@@ -111,7 +111,7 @@ fn deserialize_service_account_info(
 impl BlobConfig for Config {
     fn name(&self) -> &'static str {
-        &STORAGE_NAME
+        STORAGE_NAME
     }
 
     fn url(&self) -> io::Result {
diff --git a/components/cloud/src/blob.rs b/components/cloud/src/blob.rs
index ec9beef057c..809ad5f67f7 100644
--- a/components/cloud/src/blob.rs
+++ b/components/cloud/src/blob.rs
@@ -136,7 +136,7 @@ impl BucketConf {
         let path = none_to_empty(self.prefix.clone());
         if let Some(ep) = &self.endpoint {
             let mut u =
-                url::Url::parse(&ep).map_err(|e| format!("invalid endpoint {}: {}", &ep, e))?;
+                url::Url::parse(ep).map_err(|e| format!("invalid endpoint {}: {}", &ep, e))?;
             u.set_path(&format!(
                 "{}/{}",
                 &self.bucket.trim_end_matches('/'),
diff --git a/components/concurrency_manager/benches/lock_table.rs b/components/concurrency_manager/benches/lock_table.rs
index e6561bf5ce0..9360b49346a 100644
--- a/components/concurrency_manager/benches/lock_table.rs
+++ b/components/concurrency_manager/benches/lock_table.rs
@@ -1,6 +1,7 @@
 // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
 
 #![feature(test)]
+#![feature(bench_black_box)]
 
 use concurrency_manager::ConcurrencyManager;
 use criterion::*;
@@ -58,7 +59,7 @@ fn bench_point_check(c: &mut Criterion) {
             thread_rng().fill_bytes(&mut buf[..]);
             let key = Key::from_raw(&buf);
             let _ = cm.read_key_check(&key, |l| {
-                Lock::check_ts_conflict(Cow::Borrowed(&l), &key, 1.into(), &ts_set)
+                Lock::check_ts_conflict(Cow::Borrowed(l), &key, 1.into(), &ts_set)
             });
         })
     });
@@ -84,7 +85,7 @@ fn bench_range_check(c: &mut Criterion) {
             let end_key = Key::from_raw(&[start + 25]);
             // The key range is roughly 1/10 the key space.
             let _ = cm.read_range_check(Some(&start_key), Some(&end_key), |key, l| {
-                Lock::check_ts_conflict(Cow::Borrowed(&l), &key, 1.into(), &ts_set)
+                Lock::check_ts_conflict(Cow::Borrowed(l), key, 1.into(), &ts_set)
             });
         })
    });
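lock_table.rs above gains `#![feature(bench_black_box)]` alongside the existing `#![feature(test)]`. On the nightly toolchains this code targeted, that gate unlocked `std::hint::black_box` (the function has since been stabilized); benchmarks use it to stop the optimizer from folding away the measured work. The same feature-gate housekeeping appears in the other direction later in this diff, where `components/raftstore/src/lib.rs` drops `#![feature(vecdeque_binary_search)]` because `VecDeque::binary_search` had been stabilized. A sketch of the idiom, independent of this bench:

```rust
// Sketch only: on current toolchains std::hint::black_box is stable and
// the feature attribute below is no longer needed.
#![feature(bench_black_box)]
use std::hint::black_box;

fn checksum(data: &[u8]) -> u64 {
    data.iter().map(|&b| u64::from(b)).sum()
}

fn main() {
    let data = vec![1u8; 1024];
    // black_box is an identity function the optimizer must treat as
    // opaque: the input counts as used and the output as observed, so
    // the checksum cannot be constant-folded or dead-code-eliminated.
    let sum = black_box(checksum(black_box(&data)));
    assert_eq!(sum, 1024);
}
```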
diff --git a/components/encryption/src/crypter.rs b/components/encryption/src/crypter.rs
index f07d80c3863..07f60691269 100644
--- a/components/encryption/src/crypter.rs
+++ b/components/encryption/src/crypter.rs
@@ -162,7 +162,7 @@ impl<'k> AesGcmCrypter<'k> {
             &self.key.0,
             Some(self.iv.as_slice()),
             &[], /* AAD */
-            &pt,
+            pt,
             &mut tag.0,
         )?;
         Ok((ciphertext, tag))
@@ -175,7 +175,7 @@ impl<'k> AesGcmCrypter<'k> {
             &self.key.0,
             Some(self.iv.as_slice()),
             &[], /* AAD */
-            &ct,
+            ct,
             &tag.0,
         )?;
         Ok(plaintext)
diff --git a/components/encryption/src/encrypted_file/mod.rs b/components/encryption/src/encrypted_file/mod.rs
index ccd078d97cb..ec3c6f04217 100644
--- a/components/encryption/src/encrypted_file/mod.rs
+++ b/components/encryption/src/encrypted_file/mod.rs
@@ -74,7 +74,7 @@ impl<'a> EncryptedFile<'a> {
         // Encrypt the content.
         let encrypted_content = master_key
-            .encrypt(&plaintext_content)?
+            .encrypt(plaintext_content)?
             .write_to_bytes()
             .unwrap();
         let header = Header::new(&encrypted_content, Version::V1);
diff --git a/components/encryption/src/io.rs b/components/encryption/src/io.rs
index 6fe2359987b..e67def753dc 100644
--- a/components/encryption/src/io.rs
+++ b/components/encryption/src/io.rs
@@ -106,7 +106,7 @@ impl CrypterReader {
         mode: Mode,
         iv: Option,
     ) -> Result<(CrypterReader, Iv)> {
-        crate::verify_encryption_config(method, &key)?;
+        crate::verify_encryption_config(method, key)?;
         let iv = iv.unwrap_or_else(Iv::new_ctr);
         let (cipher, crypter) = create_aes_ctr_crypter(method, key, mode, iv)?;
         let block_size = cipher.block_size();
@@ -178,7 +178,7 @@ impl EncrypterWriter {
         key: &[u8],
         iv: Iv,
     ) -> Result> {
-        crate::verify_encryption_config(method, &key)?;
+        crate::verify_encryption_config(method, key)?;
         let (cipher, crypter) = create_aes_ctr_crypter(method, key, Mode::Encrypt, iv)?;
         let block_size = cipher.block_size();
         Ok(EncrypterWriter {
diff --git a/components/engine_rocks/src/import.rs b/components/engine_rocks/src/import.rs
index aa50694884d..ef7d816197b 100644
--- a/components/engine_rocks/src/import.rs
+++ b/components/engine_rocks/src/import.rs
@@ -22,7 +22,7 @@ impl ImportExt for RocksEngine {
             // Prior to v5.2.0, TiKV use `write_global_seqno=true` for ingestion. For backward
             // compatibility, in case TiKV is retrying an ingestion job generated by older
             // version, it needs to reset the global seqno to 0.
-            set_external_sst_file_global_seq_no(&self.as_inner(), cf, file, 0)?;
+            set_external_sst_file_global_seq_no(self.as_inner(), cf, file, 0)?;
             f.sync_all()
                 .map_err(|e| format!("sync {}: {:?}", file, e))?;
             Ok(())
@@ -34,7 +34,7 @@ impl ImportExt for RocksEngine {
         // the manual memtable flush was taken.
         let _did_nonblocking_memtable_flush = self
             .as_inner()
-            .ingest_external_file_optimized(&cf, &opts.0, files)?;
+            .ingest_external_file_optimized(cf, &opts.0, files)?;
         Ok(())
     }
 }
diff --git a/components/engine_rocks/src/misc.rs b/components/engine_rocks/src/misc.rs
index cde1ad59f4e..1528601afe6 100644
--- a/components/engine_rocks/src/misc.rs
+++ b/components/engine_rocks/src/misc.rs
@@ -179,7 +179,7 @@ impl MiscExt for RocksEngine {
             }
             DeleteStrategy::DeleteByKey => {
                 for r in ranges {
-                    self.delete_all_in_range_cf_by_key(cf, &r)?;
+                    self.delete_all_in_range_cf_by_key(cf, r)?;
                 }
             }
             DeleteStrategy::DeleteByWriter { sst_path } => {
@@ -320,7 +320,7 @@ impl MiscExt for RocksEngine {
         let handle = util::get_cf_handle(self.as_inner(), cf)?;
         Ok(crate::util::get_cf_num_files_at_level(
             self.as_inner(),
-            &handle,
+            handle,
             level,
         ))
     }
@@ -329,7 +329,7 @@ impl MiscExt for RocksEngine {
         let handle = util::get_cf_handle(self.as_inner(), cf)?;
         Ok(crate::util::get_cf_num_immutable_mem_table(
             self.as_inner(),
-            &handle,
+            handle,
         ))
     }
 
@@ -337,7 +337,7 @@ impl MiscExt for RocksEngine {
         let handle = util::get_cf_handle(self.as_inner(), cf)?;
         Ok(crate::util::get_cf_compaction_pending_bytes(
             self.as_inner(),
-            &handle,
+            handle,
         ))
     }
diff --git a/components/engine_rocks/src/mvcc_properties.rs b/components/engine_rocks/src/mvcc_properties.rs
index e50b7ef99b4..91846e22235 100644
--- a/components/engine_rocks/src/mvcc_properties.rs
+++ b/components/engine_rocks/src/mvcc_properties.rs
@@ -55,7 +55,7 @@ impl MvccPropertiesExt for RocksEngine {
         start_key: &[u8],
         end_key: &[u8],
     ) -> Option {
-        let collection = match self.get_range_properties_cf(cf, &start_key, &end_key) {
+        let collection = match self.get_range_properties_cf(cf, start_key, end_key) {
             Ok(c) if !c.is_empty() => c,
             _ => return None,
         };
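Nearly every hunk in this file group is the same mechanical fix, clippy's `needless_borrow`: the argument is already a reference (`key: &[u8]`, `pt: &[u8]`, a `&CFHandle`, and so on), so writing `&key` builds a double reference that the compiler silently auto-derefs back down. A minimal reproduction of the before/after:

```rust
// `key` is already &[u8]; `&key` is &&[u8], which only compiles because
// of auto-deref. clippy::needless_borrow flags the extra layer.
fn lookup(key: &[u8]) -> usize {
    key.len()
}

fn main() {
    let key: &[u8] = b"zone";
    let _ = lookup(&key); // before: works, but borrows a reference
    let _ = lookup(key);  // after: what this diff rewrites it to
}
```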
diff --git a/components/engine_rocks/src/range_properties.rs b/components/engine_rocks/src/range_properties.rs
index 18d4c452a34..53d2faeabc4 100644
--- a/components/engine_rocks/src/range_properties.rs
+++ b/components/engine_rocks/src/range_properties.rs
@@ -24,7 +24,7 @@ impl RangePropertiesExt for RocksEngine {
         let start = &range.start_key;
         let end = &range.end_key;
         let (_, keys) =
-            get_range_entries_and_versions(self, CF_WRITE, &start, &end).unwrap_or_default();
+            get_range_entries_and_versions(self, CF_WRITE, start, end).unwrap_or_default();
         Ok(keys)
     }
 
@@ -65,8 +65,8 @@ impl RangePropertiesExt for RocksEngine {
             .join(", ");
         info!(
             "range contains too many keys";
-            "start" => log_wrappers::Value::key(&range.start_key),
-            "end" => log_wrappers::Value::key(&range.end_key),
+            "start" => log_wrappers::Value::key(range.start_key),
+            "end" => log_wrappers::Value::key(range.end_key),
             "total_keys" => total_keys,
             "memtable" => mem_keys,
             "ssts_keys" => ssts,
@@ -100,10 +100,10 @@ impl RangePropertiesExt for RocksEngine {
         let (_, mem_size) = box_try!(self.get_approximate_memtable_stats_cf(cfname, &range));
         total_size += mem_size;
 
-        let collection = box_try!(self.get_range_properties_cf(cfname, &start_key, &end_key));
+        let collection = box_try!(self.get_range_properties_cf(cfname, start_key, end_key));
         for (_, v) in collection.iter() {
             let props = box_try!(RangeProperties::decode(v.user_collected_properties()));
-            total_size += props.get_approximate_size_in_range(&start_key, &end_key);
+            total_size += props.get_approximate_size_in_range(start_key, end_key);
         }
 
         if large_threshold != 0 && total_size > large_threshold {
@@ -111,7 +111,7 @@ impl RangePropertiesExt for RocksEngine {
                 .iter()
                 .map(|(k, v)| {
                     let props = RangeProperties::decode(v.user_collected_properties()).unwrap();
-                    let size = props.get_approximate_size_in_range(&start_key, &end_key);
+                    let size = props.get_approximate_size_in_range(start_key, end_key);
                     format!(
                         "{}:{}",
                         Path::new(&*k)
@@ -125,8 +125,8 @@ impl RangePropertiesExt for RocksEngine {
                 .join(", ");
             info!(
                 "range size is too large";
-                "start" => log_wrappers::Value::key(&range.start_key),
-                "end" => log_wrappers::Value::key(&range.end_key),
+                "start" => log_wrappers::Value::key(range.start_key),
+                "end" => log_wrappers::Value::key(range.end_key),
                 "total_size" => total_size,
                 "memtable" => mem_size,
                 "ssts_size" => ssts,
@@ -168,7 +168,7 @@ impl RangePropertiesExt for RocksEngine {
     ) -> Result>> {
         let start_key = &range.start_key;
         let end_key = &range.end_key;
-        let collection = box_try!(self.get_range_properties_cf(cfname, &start_key, &end_key));
+        let collection = box_try!(self.get_range_properties_cf(cfname, start_key, end_key));
         let mut keys = vec![];
         for (_, v) in collection.iter() {
diff --git a/components/engine_rocks/src/raw_util.rs b/components/engine_rocks/src/raw_util.rs
index 790a91f702a..db0b1ef39a2 100644
--- a/components/engine_rocks/src/raw_util.rs
+++ b/components/engine_rocks/src/raw_util.rs
@@ -56,7 +56,7 @@ fn adjust_dynamic_level_bytes(
     cf_descs: &[CColumnFamilyDescriptor],
     cf_options: &mut CFOptions<'_>,
 ) {
-    if let Some(ref cf_desc) = cf_descs
+    if let Some(cf_desc) = cf_descs
         .iter()
         .find(|cf_desc| cf_desc.name() == cf_options.cf)
     {
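The raw_util.rs hunk above is the pattern-side variant of the same cleanup: `cf_descs.iter().find(..)` already yields an `Option<&CColumnFamilyDescriptor>`, so binding it with `Some(ref cf_desc)` produces a double reference; dropping the `ref` is enough. The same fix recurs later in worker/read.rs and in the tidb_query_datatype scalar/vector code. In miniature:

```rust
fn main() {
    let names = vec!["default".to_string(), "write".to_string()];

    // Before: `find` over an iterator of &String returns Option<&String>,
    // so `ref x` binds x as &&String.
    if let Some(ref x) = names.iter().find(|n| n.as_str() == "write") {
        println!("found {}", x);
    }

    // After: x is a plain &String, which is all the body needs.
    if let Some(x) = names.iter().find(|n| n.as_str() == "write") {
        println!("found {}", x);
    }
}
```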
diff --git a/components/engine_rocks/src/rocks_metrics.rs b/components/engine_rocks/src/rocks_metrics.rs
index 0014d09c40d..34e80243a43 100644
--- a/components/engine_rocks/src/rocks_metrics.rs
+++ b/components/engine_rocks/src/rocks_metrics.rs
@@ -1624,7 +1624,7 @@ mod tests {
         }
 
         let shared_block_cache = false;
-        flush_engine_properties(&engine.as_inner(), "kv", shared_block_cache);
+        flush_engine_properties(engine.as_inner(), "kv", shared_block_cache);
         let handle = engine.as_inner().cf_handle("default").unwrap();
         let info = engine
             .as_inner()
diff --git a/components/engine_rocks/src/ttl_properties.rs b/components/engine_rocks/src/ttl_properties.rs
index cc9b8138a67..7c93e815248 100644
--- a/components/engine_rocks/src/ttl_properties.rs
+++ b/components/engine_rocks/src/ttl_properties.rs
@@ -72,7 +72,7 @@ impl TablePropertiesCollector for TtlPropertiesCollector {
             return;
         }
 
-        let expire_ts = match get_expire_ts(&value) {
+        let expire_ts = match get_expire_ts(value) {
            Ok(ts) => ts,
            Err(e) => {
                error!("failed to get expire ts";
diff --git a/components/engine_rocks/src/write_batch.rs b/components/engine_rocks/src/write_batch.rs
index d04e03d42df..f649d77351b 100644
--- a/components/engine_rocks/src/write_batch.rs
+++ b/components/engine_rocks/src/write_batch.rs
@@ -23,11 +23,11 @@ impl WriteBatchExt for RocksEngine {
     }
 
     fn write_batch(&self) -> Self::WriteBatch {
-        Self::WriteBatch::new(Arc::clone(&self.as_inner()))
+        Self::WriteBatch::new(Arc::clone(self.as_inner()))
     }
 
     fn write_batch_with_cap(&self, cap: usize) -> Self::WriteBatch {
-        Self::WriteBatch::with_capacity(Arc::clone(&self.as_inner()), cap)
+        Self::WriteBatch::with_capacity(Arc::clone(self.as_inner()), cap)
     }
 }
diff --git a/components/engine_traits_tests/src/ctor.rs b/components/engine_traits_tests/src/ctor.rs
index c156c31c502..f835741896a 100644
--- a/components/engine_traits_tests/src/ctor.rs
+++ b/components/engine_traits_tests/src/ctor.rs
@@ -69,7 +69,7 @@ fn new_engine_readonly_dir() {
     fs::set_permissions(&path, perms).unwrap();
 
     let path = path.to_str().unwrap();
-    let err = KvTestEngine::new_engine(&path, None, ALL_CFS, None);
+    let err = KvTestEngine::new_engine(path, None, ALL_CFS, None);
     assert!(err.is_err());
 }
 
@@ -93,7 +93,7 @@ fn new_engine_opt_readonly_dir() {
         .iter()
         .map(|cf| CFOptions::new(cf, ColumnFamilyOptions::new()))
         .collect();
-    let err = KvTestEngine::new_engine_opt(&path, db_opts, cf_opts);
+    let err = KvTestEngine::new_engine_opt(path, db_opts, cf_opts);
     assert!(err.is_err());
 }
diff --git a/components/external_storage/export/src/export.rs b/components/external_storage/export/src/export.rs
index ade9fd123e5..496ce5c9c80 100644
--- a/components/external_storage/export/src/export.rs
+++ b/components/external_storage/export/src/export.rs
@@ -163,9 +163,9 @@ fn create_backend_inner(backend: &Backend) -> io::Result
             blob_store(GCSStorage::from_input(config.clone())?),
         Backend::CloudDynamic(dyn_backend) => match dyn_backend.provider_name.as_str() {
             #[cfg(feature = "cloud-aws")]
-            "aws" | "s3" => blob_store(S3Storage::from_cloud_dynamic(&dyn_backend)?),
+            "aws" | "s3" => blob_store(S3Storage::from_cloud_dynamic(dyn_backend)?),
             #[cfg(feature = "cloud-gcp")]
-            "gcp" | "gcs" => blob_store(GCSStorage::from_cloud_dynamic(&dyn_backend)?),
+            "gcp" | "gcs" => blob_store(GCSStorage::from_cloud_dynamic(dyn_backend)?),
             _ => {
                 return Err(bad_backend(Backend::CloudDynamic(dyn_backend.clone())));
             }
@@ -337,7 +337,7 @@ impl ExternalStorage for EncryptedExternalStorage {
             block_on_external_io(read_external_storage_into_file(
                 &mut input,
                 file_writer,
-                &speed_limiter,
+                speed_limiter,
                 expected_length,
                 min_read_speed,
             ))
diff --git a/components/external_storage/src/local.rs b/components/external_storage/src/local.rs
index 2780f5eabd7..c74054f3c99 100644
--- a/components/external_storage/src/local.rs
+++ b/components/external_storage/src/local.rs
@@ -55,11 +55,11 @@ const STORAGE_NAME: &str = "local";
 
 impl ExternalStorage for LocalStorage {
     fn name(&self) -> &'static str {
-        &STORAGE_NAME
+        STORAGE_NAME
     }
 
     fn url(&self) -> io::Result {
-        Ok(url_for(&self.base.as_path()))
+        Ok(url_for(self.base.as_path()))
     }
 
     fn write(
diff --git a/components/external_storage/src/noop.rs b/components/external_storage/src/noop.rs
index 101d97d6fd1..33bdf1b3c14 100644
--- a/components/external_storage/src/noop.rs
+++ b/components/external_storage/src/noop.rs
@@ -24,7 +24,7 @@ const STORAGE_NAME: &str = "noop";
 
 impl ExternalStorage for NoopStorage {
     fn name(&self) -> &'static str {
-        &STORAGE_NAME
+        STORAGE_NAME
     }
 
     fn url(&self) -> io::Result {
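The `name()` hunks in s3.rs, gcs.rs, local.rs and noop.rs above are the constant flavor of the same lint: `STORAGE_NAME` is declared as `const STORAGE_NAME: &str`, i.e. already a `&'static str`, so `&STORAGE_NAME` returns a reference to the reference and leans on deref coercion to type-check. Reduced to one file:

```rust
const STORAGE_NAME: &str = "local";

// &STORAGE_NAME would be a &&'static str that coerces back down;
// returning the constant directly says what is meant.
fn name() -> &'static str {
    STORAGE_NAME
}

fn main() {
    assert_eq!(name(), "local");
}
```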
diff --git a/components/online_config/online_config_derive/src/lib.rs b/components/online_config/online_config_derive/src/lib.rs
index d5c1741bc5c..483d7682c21 100644
--- a/components/online_config/online_config_derive/src/lib.rs
+++ b/components/online_config/online_config_derive/src/lib.rs
@@ -44,7 +44,7 @@ fn generate_token(ast: DeriveInput) -> std::result::Result {
     let get_encoder_fn = get_encoder(&encoder_name, &encoder_lt);
     let typed_fn = typed(&fields, &creat_name)?;
     let encoder_struct = encoder(
-        &name,
+        name,
         &creat_name,
         &encoder_name,
         &encoder_lt,
@@ -272,7 +272,7 @@ fn typed(fields: &Punctuated, creat_name: &Ident) -> Result
 Result<(bool, bool, bool)> {
     let (mut skip, mut hidden, mut submodule) = (false, false, false);
     for attr in attrs {
-        if !is_attr("online_config", &attr) {
+        if !is_attr("online_config", attr) {
             continue;
         }
         match attr.parse_args::()? {
diff --git a/components/pd_client/src/client.rs b/components/pd_client/src/client.rs
index 91f3b0e22ca..f4edbfacca3 100644
--- a/components/pd_client/src/client.rs
+++ b/components/pd_client/src/client.rs
@@ -66,7 +66,7 @@ impl RpcClient {
         // -1 means the max.
         let retries = match cfg.retry_max_count {
             -1 => std::isize::MAX,
-            v => v.checked_add(1).unwrap_or(std::isize::MAX),
+            v => v.saturating_add(1),
         };
         let monitor = Arc::new(
             yatp::Builder::new(thd_name!("pdmonitor"))
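The pd_client change above is one of the few hunks here that is more than borrow shuffling. Both spellings clamp an overflowing increment to `isize::MAX`, but `saturating_add` states that intent directly instead of routing through `checked_add`'s `Option`. The behavior, isolated (names assumed from the hunk):

```rust
// Mirrors the retry_max_count logic: -1 means unlimited retries,
// anything else is count + 1, saturating instead of overflowing.
fn retries(retry_max_count: isize) -> isize {
    match retry_max_count {
        -1 => isize::MAX,
        v => v.saturating_add(1), // was: v.checked_add(1).unwrap_or(isize::MAX)
    }
}

fn main() {
    assert_eq!(retries(-1), isize::MAX);
    assert_eq!(retries(3), 4);
    assert_eq!(retries(isize::MAX), isize::MAX); // saturates, no panic
}
```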
diff --git a/components/raftstore/src/coprocessor/dispatcher.rs b/components/raftstore/src/coprocessor/dispatcher.rs
index 5b9f406a9c3..c2885f486ea 100644
--- a/components/raftstore/src/coprocessor/dispatcher.rs
+++ b/components/raftstore/src/coprocessor/dispatcher.rs
@@ -505,7 +505,7 @@ impl CoprocessorHost {
         }
         for batch in &cmd_batches {
             for cmd in &batch.cmds {
-                self.post_apply(&batch.region, &cmd);
+                self.post_apply(&batch.region, cmd);
             }
         }
         for observer in &self.registry.cmd_observers {
diff --git a/components/raftstore/src/coprocessor/region_info_accessor.rs b/components/raftstore/src/coprocessor/region_info_accessor.rs
index c7e3335d22e..11995b59543 100644
--- a/components/raftstore/src/coprocessor/region_info_accessor.rs
+++ b/components/raftstore/src/coprocessor/region_info_accessor.rs
@@ -98,7 +98,7 @@ impl Display for RegionInfoQuery {
         match self {
             RegionInfoQuery::RaftStoreEvent(e) => write!(f, "RaftStoreEvent({:?})", e),
             RegionInfoQuery::SeekRegion { from, .. } => {
-                write!(f, "SeekRegion(from: {})", log_wrappers::Value::key(&from))
+                write!(f, "SeekRegion(from: {})", log_wrappers::Value::key(from))
             }
             RegionInfoQuery::FindRegionById { region_id, .. } => {
                 write!(f, "FindRegionById(region_id: {})", region_id)
@@ -693,14 +693,14 @@ mod tests {
         assert!(c.region_ranges.is_empty());
 
         for region in regions {
-            must_create_region(c, &region, StateRole::Follower);
+            must_create_region(c, region, StateRole::Follower);
         }
 
         let expected_regions: Vec<_> = regions
             .iter()
             .map(|r| (r.clone(), StateRole::Follower))
             .collect();
-        check_collection(&c, &expected_regions);
+        check_collection(c, &expected_regions);
     }
 
     fn must_create_region(c: &mut RegionCollector, region: &Region, role: StateRole) {
diff --git a/components/raftstore/src/coprocessor/split_check/size.rs b/components/raftstore/src/coprocessor/split_check/size.rs
index f658687c06a..08b901c874c 100644
--- a/components/raftstore/src/coprocessor/split_check/size.rs
+++ b/components/raftstore/src/coprocessor/split_check/size.rs
@@ -130,7 +130,7 @@ where
         let region_id = region.get_id();
         let region_size = match get_region_approximate_size(
             engine,
-            &region,
+            region,
             host.cfg.region_max_size.0 * host.cfg.batch_split_limit,
         ) {
             Ok(size) => size,
@@ -606,17 +606,20 @@ pub mod tests {
             engine.flush_cf(data_cf, true).unwrap();
         }
         let region = make_region(1, vec![], vec![]);
-        let split_keys = get_approximate_split_keys(&engine, &region, 0)
-            .unwrap()
-            .into_iter()
-            .map(|k| {
-                Key::from_encoded_slice(keys::origin_key(&k))
-                    .into_raw()
-                    .unwrap()
-            })
-            .collect::<Vec<Vec<u8>>>();
-        assert_eq!(split_keys.is_empty(), true);
+        assert_eq!(
+            get_approximate_split_keys(&engine, &region, 0)
+                .unwrap()
+                .into_iter()
+                .map(|k| {
+                    Key::from_encoded_slice(keys::origin_key(&k))
+                        .into_raw()
+                        .unwrap()
+                })
+                .next()
+                .is_none(),
+            true
+        );
 
         for i in 4..5 {
             let k = format!("key_{:03}", i).into_bytes();
             let k = keys::data_key(Key::from_raw(&k).as_encoded());
diff --git a/components/raftstore/src/coprocessor/split_check/table.rs b/components/raftstore/src/coprocessor/split_check/table.rs
index 9da785b8bc0..e24258ead63 100644
--- a/components/raftstore/src/coprocessor/split_check/table.rs
+++ b/components/raftstore/src/coprocessor/split_check/table.rs
@@ -163,8 +163,8 @@ where
             }
             _ => panic!(
                 "start_key {} and end_key {} out of order",
-                log_wrappers::Value::key(&encoded_start_key),
-                log_wrappers::Value::key(&encoded_end_key)
+                log_wrappers::Value::key(encoded_start_key),
+                log_wrappers::Value::key(encoded_end_key)
             ),
         }
         host.add_checker(Box::new(Checker {
diff --git a/components/raftstore/src/coprocessor/split_observer.rs b/components/raftstore/src/coprocessor/split_observer.rs
index 8fad7acb790..45566722e27 100644
--- a/components/raftstore/src/coprocessor/split_observer.rs
+++ b/components/raftstore/src/coprocessor/split_observer.rs
@@ -35,13 +35,13 @@ fn is_valid_split_key(key: &[u8], index: usize, region: &Region) -> bool {
         return false;
     }
 
-    if let Err(Error::KeyNotInRegion(..)) = util::check_key_in_region_exclusive(&key, region) {
+    if let Err(Error::KeyNotInRegion(..)) = util::check_key_in_region_exclusive(key, region) {
         warn!(
             "skip invalid split key: key is not in region";
-            "key" => log_wrappers::Value::key(&key),
+            "key" => log_wrappers::Value::key(key),
             "region_id" => region.get_id(),
-            "start_key" => log_wrappers::Value::key(&region.get_start_key()),
-            "end_key" => log_wrappers::Value::key(&region.get_end_key()),
+            "start_key" => log_wrappers::Value::key(region.get_start_key()),
+            "end_key" => log_wrappers::Value::key(region.get_end_key()),
             "index" => index,
         );
         return false;
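The size.rs test rewrite above looks noisy but is another single lint, most likely clippy's `needless_collect`: the old code materialized every split key into a `Vec` only to ask whether the vector was empty; the new code asks the iterator directly via `.next().is_none()`. The shape of the change, stripped of the TiKV types:

```rust
fn main() {
    let keys: Vec<Vec<u8>> = Vec::new();

    // Before: allocate a whole Vec just to test emptiness.
    let decoded: Vec<Vec<u8>> = keys.iter().cloned().collect();
    assert_eq!(decoded.is_empty(), true);

    // After: probe the iterator; no intermediate allocation.
    assert_eq!(keys.iter().cloned().next().is_none(), true);
}
```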
diff --git a/components/raftstore/src/lib.rs b/components/raftstore/src/lib.rs
index d78614288fa..638b3aee09f 100644
--- a/components/raftstore/src/lib.rs
+++ b/components/raftstore/src/lib.rs
@@ -6,7 +6,6 @@
 #![feature(div_duration)]
 #![feature(min_specialization)]
 #![feature(box_patterns)]
-#![feature(vecdeque_binary_search)]
 
 #[cfg(test)]
 extern crate test;
diff --git a/components/raftstore/src/store/fsm/apply.rs b/components/raftstore/src/store/fsm/apply.rs
index d862c569a26..9236ad4e987 100644
--- a/components/raftstore/src/store/fsm/apply.rs
+++ b/components/raftstore/src/store/fsm/apply.rs
@@ -1207,7 +1207,7 @@ where
         ctx.exec_ctx = Some(self.new_ctx(index, term));
         ctx.kv_wb_mut().set_save_point();
         let mut origin_epoch = None;
-        let (resp, exec_result) = match self.exec_raft_cmd(ctx, &req) {
+        let (resp, exec_result) = match self.exec_raft_cmd(ctx, req) {
             Ok(a) => {
                 ctx.kv_wb_mut().pop_save_point().unwrap();
                 if req.has_admin_request() {
@@ -1523,7 +1523,7 @@ where
                     "{} failed to write ({}, {}) to cf {}: {:?}",
                     self.tag,
                     log_wrappers::Value::key(&key),
-                    log_wrappers::Value::value(&value),
+                    log_wrappers::Value::value(value),
                     cf,
                     e
                 )
@@ -1534,7 +1534,7 @@ where
                 "{} failed to write ({}, {}): {:?}",
                 self.tag,
                 log_wrappers::Value::key(&key),
-                log_wrappers::Value::value(&value),
+                log_wrappers::Value::value(value),
                 e
             );
         });
diff --git a/components/raftstore/src/store/fsm/peer.rs b/components/raftstore/src/store/fsm/peer.rs
index 4d677a20f28..bedb6267a9f 100644
--- a/components/raftstore/src/store/fsm/peer.rs
+++ b/components/raftstore/src/store/fsm/peer.rs
@@ -583,7 +583,7 @@ where
             PeerMsg::UpdateReplicationMode => self.on_update_replication_mode(),
             PeerMsg::Destroy(peer_id) => {
                 if self.fsm.peer.peer_id() == peer_id {
-                    match self.fsm.peer.maybe_destroy(&self.ctx) {
+                    match self.fsm.peer.maybe_destroy(self.ctx) {
                         None => self.ctx.raft_metrics.message_dropped.applying_snap += 1,
                         Some(job) => {
                             self.handle_destroy_peer(job);
@@ -977,7 +977,7 @@ where
             if StateRole::Leader == ss.raft_state {
                 self.fsm.missing_ticks = 0;
                 self.register_split_region_check_tick();
-                self.fsm.peer.heartbeat_pd(&self.ctx);
+                self.fsm.peer.heartbeat_pd(self.ctx);
                 self.register_pd_heartbeat_tick();
             }
         }
@@ -1337,7 +1337,7 @@ where
             Either::Right(v) => v,
         };
 
-        if util::is_vote_msg(&msg.get_message())
+        if util::is_vote_msg(msg.get_message())
             || msg.get_message().get_msg_type() == MessageType::MsgTimeoutNow
         {
             if self.fsm.hibernate_state.group_state() != GroupState::Chaos {
@@ -1541,7 +1541,7 @@ where
         if util::is_epoch_stale(from_epoch, self_epoch)
             && util::find_peer(self.fsm.peer.region(), from_store_id).is_none()
         {
-            self.ctx.handle_stale_msg(&msg, self_epoch.clone(), None);
+            self.ctx.handle_stale_msg(msg, self_epoch.clone(), None);
             return true;
         }
 
@@ -1558,7 +1558,7 @@ where
                 true
             }
             cmp::Ordering::Greater => {
-                match self.fsm.peer.maybe_destroy(&self.ctx) {
+                match self.fsm.peer.maybe_destroy(self.ctx) {
                     Some(job) => {
                         info!(
                             "target peer id is larger, destroying self";
@@ -2640,7 +2640,7 @@ where
             }
         };
 
-        let sibling_peer = util::find_peer(&sibling_region, self.store_id()).unwrap();
+        let sibling_peer = util::find_peer(sibling_region, self.store_id()).unwrap();
         let mut request = new_admin_request(sibling_region.get_id(), sibling_peer.clone());
         request
             .mut_header()
@@ -3043,7 +3043,7 @@ where
         // If the merge succeed, all source peers are impossible in apply snapshot state
         // and must be initialized.
        // So `maybe_destroy` must succeed here.
-        let job = self.fsm.peer.maybe_destroy(&self.ctx).unwrap();
+        let job = self.fsm.peer.maybe_destroy(self.ctx).unwrap();
         self.handle_destroy_peer(job);
     }
 
@@ -3082,7 +3082,7 @@ where
         // Remove its source peers' metadata
         for r in &apply_result.destroyed_regions {
-            let prev = meta.region_ranges.remove(&enc_end_key(&r));
+            let prev = meta.region_ranges.remove(&enc_end_key(r));
             assert_eq!(prev, Some(r.get_id()));
             assert!(meta.regions.remove(&r.get_id()).is_some());
             meta.readers.remove(&r.get_id());
diff --git a/components/raftstore/src/store/fsm/store.rs b/components/raftstore/src/store/fsm/store.rs
index 404fbfc09aa..a12cd8aae62 100644
--- a/components/raftstore/src/store/fsm/store.rs
+++ b/components/raftstore/src/store/fsm/store.rs
@@ -1420,7 +1420,7 @@ pub fn create_raft_batch_system(
     cfg: &Config,
 ) -> (RaftRouter, RaftBatchSystem) {
     let (store_tx, store_fsm) = StoreFsm::new(cfg);
-    let (apply_router, apply_system) = create_apply_batch_system(&cfg);
+    let (apply_router, apply_system) = create_apply_batch_system(cfg);
     let (router, system) =
         batch_system::create_system(&cfg.store_batch_system, store_tx, store_fsm);
     let raft_router = RaftRouter { router };
@@ -1699,7 +1699,7 @@ impl<'a, EK: KvEngine, ER: RaftEngine, T: Transport> StoreFsmDelegate<'a, EK, ER
             pending_create_peers.insert(region_id, (msg.get_to_peer().get_id(), false));
         }
 
-        let res = self.maybe_create_peer_internal(region_id, &msg, is_local_first);
+        let res = self.maybe_create_peer_internal(region_id, msg, is_local_first);
         // If failed, i.e. Err or Ok(false), remove this peer data from `pending_create_peers`.
         if res.as_ref().map_or(true, |b| !*b) && is_local_first {
             let mut pending_create_peers = self.ctx.pending_create_peers.lock().unwrap();
@@ -1755,7 +1755,7 @@ impl<'a, EK: KvEngine, ER: RaftEngine, T: Transport> StoreFsmDelegate<'a, EK, ER
             Excluded(data_key(msg.get_start_key())),
             Unbounded::>,
         )) {
-            let exist_region = &meta.regions[&id];
+            let exist_region = &meta.regions[id];
             if enc_start_key(exist_region) >= data_end_key(msg.get_end_key()) {
                 break;
             }
diff --git a/components/raftstore/src/store/msg.rs b/components/raftstore/src/store/msg.rs
index 49373dbc37d..29f7cb96e88 100644
--- a/components/raftstore/src/store/msg.rs
+++ b/components/raftstore/src/store/msg.rs
@@ -363,7 +363,7 @@ impl fmt::Debug for CasualMessage {
                 fmt,
                 "ComputeHashResult [index: {}, context: {}, hash: {}]",
                 index,
-                log_wrappers::Value::key(&context),
+                log_wrappers::Value::key(context),
                 escape(hash)
             ),
             CasualMessage::SplitRegion {
diff --git a/components/raftstore/src/store/peer.rs b/components/raftstore/src/store/peer.rs
index 5f0de36ee43..12eeba6284e 100644
--- a/components/raftstore/src/store/peer.rs
+++ b/components/raftstore/src/store/peer.rs
@@ -3213,10 +3213,7 @@ where
                 self.term()
             ));
         }
-        if let Some(index) = self
-            .cmd_epoch_checker
-            .propose_check_epoch(&req, self.term())
-        {
+        if let Some(index) = self.cmd_epoch_checker.propose_check_epoch(req, self.term()) {
             return Ok(Either::Right(index));
         }
diff --git a/components/raftstore/src/store/region_snapshot.rs b/components/raftstore/src/store/region_snapshot.rs
index 732ce004950..576595a2b61 100644
--- a/components/raftstore/src/store/region_snapshot.rs
+++ b/components/raftstore/src/store/region_snapshot.rs
@@ -231,14 +231,14 @@ where
             set_panic_mark();
             panic!(
                 "failed to get value of key {} in region {}: {:?}",
-                log_wrappers::Value::key(&key),
+                log_wrappers::Value::key(key),
                 self.region.get_id(),
                 e,
             );
         } else {
             error!(
                 "failed to get value of key in cf";
-                "key" => log_wrappers::Value::key(&key),
+                "key" => log_wrappers::Value::key(key),
                 "region" => self.region.get_id(),
                 "cf" => cf,
                 "error" => ?e,
diff --git a/components/raftstore/src/store/snap.rs b/components/raftstore/src/store/snap.rs
index d091b5be48f..2aeeacbe7ab 100644
--- a/components/raftstore/src/store/snap.rs
+++ b/components/raftstore/src/store/snap.rs
@@ -232,7 +232,7 @@ fn calc_checksum_and_size(
     let (checksum, size) = if let Some(mgr) = encryption_key_manager {
         // Crc32 and file size need to be calculated based on decrypted contents.
         let file_name = path.to_str().unwrap();
-        let mut r = snap_io::get_decrypter_reader(file_name, &mgr)?;
+        let mut r = snap_io::get_decrypter_reader(file_name, mgr)?;
         calc_crc32_and_size(&mut r)?
     } else {
         (calc_crc32(path)?, get_file_size(path)?)
@@ -1800,7 +1800,7 @@ pub mod tests {
             .prefix("test-snap-file-db-src")
             .tempdir()
             .unwrap();
-        let db = get_db(&src_db_dir.path(), db_opt.clone(), None).unwrap();
+        let db = get_db(src_db_dir.path(), db_opt.clone(), None).unwrap();
         let snapshot = db.snapshot();
 
         let src_dir = Builder::new()
@@ -1917,7 +1917,7 @@ pub mod tests {
             .prefix("test-snap-validation-db")
             .tempdir()
             .unwrap();
-        let db = get_db(&db_dir.path(), None, None).unwrap();
+        let db = get_db(db_dir.path(), None, None).unwrap();
         let snapshot = db.snapshot();
 
         let dir = Builder::new()
@@ -2084,7 +2084,7 @@ pub mod tests {
             .prefix("test-snap-corruption-db")
             .tempdir()
             .unwrap();
-        let db: KvTestEngine = open_test_db(&db_dir.path(), None, None).unwrap();
+        let db: KvTestEngine = open_test_db(db_dir.path(), None, None).unwrap();
         let snapshot = db.snapshot();
 
         let dir = Builder::new()
@@ -2150,7 +2150,7 @@ pub mod tests {
             .prefix("test-snap-corruption-dst-db")
             .tempdir()
             .unwrap();
-        let dst_db: KvTestEngine = open_test_empty_db(&dst_db_dir.path(), None, None).unwrap();
+        let dst_db: KvTestEngine = open_test_empty_db(dst_db_dir.path(), None, None).unwrap();
         let options = ApplyOptions {
             db: dst_db,
             region,
@@ -2173,7 +2173,7 @@ pub mod tests {
             .prefix("test-snapshot-corruption-meta-db")
             .tempdir()
             .unwrap();
-        let db: KvTestEngine = open_test_db(&db_dir.path(), None, None).unwrap();
+        let db: KvTestEngine = open_test_db(db_dir.path(), None, None).unwrap();
         let snapshot = db.snapshot();
 
         let dir = Builder::new()
@@ -2271,7 +2271,7 @@ pub mod tests {
            .prefix("test-snap-mgr-delete-temp-files-v2-db")
            .tempdir()
            .unwrap();
-        let db: KvTestEngine = open_test_db(&db_dir.path(), None, None).unwrap();
+        let db: KvTestEngine = open_test_db(db_dir.path(), None, None).unwrap();
         let snapshot = db.snapshot();
         let key1 = SnapKey::new(1, 1, 1);
         let mgr_core = create_manager_core(&path);
@@ -2351,7 +2351,7 @@ pub mod tests {
            .prefix("test-snap-deletion-on-registry-src-db")
            .tempdir()
            .unwrap();
-        let db: KvTestEngine = open_test_db(&src_db_dir.path(), None, None).unwrap();
+        let db: KvTestEngine = open_test_db(src_db_dir.path(), None, None).unwrap();
         let snapshot = db.snapshot();
 
         let key = SnapKey::new(1, 1, 1);
@@ -2500,7 +2500,7 @@ pub mod tests {
            .prefix("test_snap_temp_file_delete_kv")
            .tempdir()
            .unwrap();
-        let engine = open_test_db(&kv_temp_dir.path(), None, None).unwrap();
+        let engine = open_test_db(kv_temp_dir.path(), None, None).unwrap();
         let sst_path = src_mgr.get_temp_path_for_ingest();
         let mut writer = ::SstWriterBuilder::new()
             .set_db(&engine)
@@ -2519,7 +2519,7 @@ pub mod tests {
 
     #[test]
     fn test_build_with_encryption() {
-        let (_enc_dir, key_path, dict_path) = create_enc_dir(&"test_build_with_encryption_enc");
+        let (_enc_dir, key_path, dict_path) = create_enc_dir("test_build_with_encryption_enc");
         let enc_cfg = EncryptionConfig {
             data_encryption_method: EncryptionMethod::Aes128Ctr,
             master_key: MasterKeyConfig::File {
diff --git a/components/raftstore/src/store/snap/io.rs b/components/raftstore/src/store/snap/io.rs
index bd71c3bc431..71d7f780b15 100644
--- a/components/raftstore/src/store/snap/io.rs
+++ b/components/raftstore/src/store/snap/io.rs
@@ -165,7 +165,7 @@ where
     let mut wb = db.write_batch();
     let mut write_to_db = |batch: &mut Vec<(Vec<u8>, Vec<u8>)>| -> Result<(), EngineError> {
-        batch.iter().try_for_each(|(k, v)| wb.put_cf(cf, &k, &v))?;
+        batch.iter().try_for_each(|(k, v)| wb.put_cf(cf, k, v))?;
         wb.write()?;
         wb.clear();
         callback(batch);
@@ -211,7 +211,7 @@ where
     E: KvEngine,
 {
     let builder = E::SstWriterBuilder::new()
-        .set_db(&engine)
+        .set_db(engine)
         .set_cf(cf)
         .set_compression_type(Some(SstCompressionType::Zstd));
     let writer = box_try!(builder.build(path));
@@ -264,21 +264,21 @@ mod tests {
         for db_creater in db_creaters {
             for db_opt in vec![None, Some(gen_db_options_with_encryption())] {
                 let dir = Builder::new().prefix("test-snap-cf-db").tempdir().unwrap();
-                let db: KvTestEngine = db_creater(&dir.path(), db_opt.clone(), None).unwrap();
+                let db: KvTestEngine = db_creater(dir.path(), db_opt.clone(), None).unwrap();
                 // Collect keys via the key_callback into a collection.
                 let mut applied_keys: HashMap<_, Vec<_>> = HashMap::new();
                 let dir1 = Builder::new()
                     .prefix("test-snap-cf-db-apply")
                     .tempdir()
                     .unwrap();
-                let db1: KvTestEngine = open_test_empty_db(&dir1.path(), db_opt, None).unwrap();
+                let db1: KvTestEngine = open_test_empty_db(dir1.path(), db_opt, None).unwrap();
 
                 let snap = db.snapshot();
                 for cf in SNAPSHOT_CFS {
                     let snap_cf_dir = Builder::new().prefix("test-snap-cf").tempdir().unwrap();
                     let plain_file_path = snap_cf_dir.path().join("plain");
                     let stats = build_plain_cf_file::(
-                        &plain_file_path.to_str().unwrap(),
+                        plain_file_path.to_str().unwrap(),
                         None,
                         &snap,
                         cf,
@@ -296,7 +296,7 @@ mod tests {
                     let detector = TestStaleDetector {};
                     apply_plain_cf_file(
-                        &plain_file_path.to_str().unwrap(),
+                        plain_file_path.to_str().unwrap(),
                         None,
                         &detector,
                         &db1,
@@ -343,12 +343,12 @@ mod tests {
         for db_creater in db_creaters {
             for db_opt in vec![None, Some(gen_db_options_with_encryption())] {
                 let dir = Builder::new().prefix("test-snap-cf-db").tempdir().unwrap();
-                let db = db_creater(&dir.path(), db_opt.clone(), None).unwrap();
+                let db = db_creater(dir.path(), db_opt.clone(), None).unwrap();
 
                 let snap_cf_dir = Builder::new().prefix("test-snap-cf").tempdir().unwrap();
                 let sst_file_path = snap_cf_dir.path().join("sst");
                 let stats = build_sst_cf_file::(
-                    &sst_file_path.to_str().unwrap(),
+                    sst_file_path.to_str().unwrap(),
                     &db,
                     &db.snapshot(),
                     CF_DEFAULT,
@@ -369,8 +369,8 @@ mod tests {
                     .prefix("test-snap-cf-db-apply")
                     .tempdir()
                     .unwrap();
-                let db1: KvTestEngine = open_test_empty_db(&dir1.path(), db_opt, None).unwrap();
-                apply_sst_cf_file(&sst_file_path.to_str().unwrap(), &db1, CF_DEFAULT).unwrap();
+                let db1: KvTestEngine = open_test_empty_db(dir1.path(), db_opt, None).unwrap();
+                apply_sst_cf_file(sst_file_path.to_str().unwrap(), &db1, CF_DEFAULT).unwrap();
                 assert_eq_db(&db, &db1);
             }
         }
diff --git a/components/raftstore/src/store/worker/pd.rs b/components/raftstore/src/store/worker/pd.rs
index 91244d434cc..ab3ce341438 100644
--- a/components/raftstore/src/store/worker/pd.rs
+++ b/components/raftstore/src/store/worker/pd.rs
@@ -255,7 +255,7 @@ where
                 f,
                 "ask split region {} with key {}",
                 region.get_id(),
-                log_wrappers::Value::key(&split_key),
+                log_wrappers::Value::key(split_key),
             ),
             Task::AutoSplit { ref split_infos } => {
                 write!(f, "auto split split regions, num is {}", split_infos.len(),)
diff --git a/components/raftstore/src/store/worker/read.rs b/components/raftstore/src/store/worker/read.rs
index 5043d20a5c1..aa11ebb1339 100644
--- a/components/raftstore/src/store/worker/read.rs
+++ b/components/raftstore/src/store/worker/read.rs
@@ -603,7 +603,7 @@ where
             Ok(None) => self.redirect(RaftCommand::new(req, cb)),
             Err(e) => {
                 let mut response = cmd_resp::new_error(e);
-                if let Some(ref delegate) = self.delegates.get(&req.get_header().get_region_id()) {
+                if let Some(delegate) = self.delegates.get(&req.get_header().get_region_id()) {
                     cmd_resp::bind_term(&mut response, delegate.term);
                 }
                 cb.invoke_read(ReadResponse {
diff --git a/components/raftstore/src/store/worker/region.rs b/components/raftstore/src/store/worker/region.rs
index 3ff4f930e54..703957963da 100644
--- a/components/raftstore/src/store/worker/region.rs
+++ b/components/raftstore/src/store/worker/region.rs
@@ -99,8 +99,8 @@ impl Display for Task {
                 f,
                 "Destroy {} [{}, {})",
                 region_id,
-                log_wrappers::Value::key(&start_key),
-                log_wrappers::Value::key(&end_key)
+                log_wrappers::Value::key(start_key),
+                log_wrappers::Value::key(end_key)
             ),
         }
     }
@@ -186,12 +186,12 @@ impl PendingDeleteRanges {
     ///
     /// Before an insert is called, it must call drain_overlap_ranges to clean the overlapping range.
     fn insert(&mut self, region_id: u64, start_key: &[u8], end_key: &[u8], stale_sequence: u64) {
-        if !self.find_overlap_ranges(&start_key, &end_key).is_empty() {
+        if !self.find_overlap_ranges(start_key, end_key).is_empty() {
             panic!(
                 "[region {}] register deleting data in [{}, {}) failed due to overlap",
                 region_id,
-                log_wrappers::Value::key(&start_key),
-                log_wrappers::Value::key(&end_key),
+                log_wrappers::Value::key(start_key),
+                log_wrappers::Value::key(end_key),
             );
         }
         let info = StalePeerInfo {
@@ -436,13 +436,13 @@ where
     /// Cleans up the data within the range.
     fn cleanup_range(&self, ranges: &[Range]) -> Result<()> {
         self.engine
-            .delete_all_in_range(DeleteStrategy::DeleteFiles, &ranges)
+            .delete_all_in_range(DeleteStrategy::DeleteFiles, ranges)
             .unwrap_or_else(|e| {
                 error!("failed to delete files in range"; "err" => %e);
             });
         self.delete_all_in_range(ranges)?;
         self.engine
-            .delete_all_in_range(DeleteStrategy::DeleteBlobs, &ranges)
+            .delete_all_in_range(DeleteStrategy::DeleteBlobs, ranges)
             .unwrap_or_else(|e| {
                 error!("failed to delete files in range"; "err" => %e);
             });
diff --git a/components/raftstore/src/store/worker/split_controller.rs b/components/raftstore/src/store/worker/split_controller.rs
index 60952bf55fc..520b40a64b9 100644
--- a/components/raftstore/src/store/worker/split_controller.rs
+++ b/components/raftstore/src/store/worker/split_controller.rs
@@ -56,7 +56,7 @@ where
     let mut pre_sum = vec![];
     let mut sum = 0;
     for item in iter {
-        sum += read(&item);
+        sum += read(item);
         pre_sum.push(sum);
     }
     pre_sum
@@ -208,7 +208,7 @@ impl Recorder {
         let mut samples: Vec = self.convert(sampled_key_ranges);
         for key_ranges in &self.key_ranges {
             for key_range in key_ranges {
-                Recorder::sample(&mut samples, &key_range);
+                Recorder::sample(&mut samples, key_range);
             }
         }
         Recorder::split_key(
diff --git a/components/resolved_ts/src/endpoint.rs b/components/resolved_ts/src/endpoint.rs
index cffae0238ba..5b57df59f31 100644
--- a/components/resolved_ts/src/endpoint.rs
+++ b/components/resolved_ts/src/endpoint.rs
@@ -474,7 +474,7 @@ where
         let mut min_ts = TimeStamp::max();
         for region_id in regions.iter() {
-            if let Some(observe_region) = self.regions.get_mut(&region_id) {
+            if let Some(observe_region) = self.regions.get_mut(region_id) {
                 if let ResolverStatus::Ready = observe_region.resolver_status {
                     let resolved_ts = observe_region.resolver.resolve(ts);
                     if resolved_ts < min_ts {
diff --git a/components/server/src/raft_engine_switch.rs b/components/server/src/raft_engine_switch.rs
index 8e57bd4db1f..db2064ee11f 100644
--- a/components/server/src/raft_engine_switch.rs
+++ b/components/server/src/raft_engine_switch.rs
@@ -97,7 +97,7 @@ pub fn check_and_dump_raft_db(
     let mut raft_db_opts = config_raftdb.build_opt();
     raft_db_opts.set_env(env.clone());
     let raft_db_cf_opts = config_raftdb.build_cf_opts(&None);
-    let db = engine_rocks::raw_util::new_engine_opt(&raftdb_path, raft_db_opts, raft_db_cf_opts)
+    let db = engine_rocks::raw_util::new_engine_opt(raftdb_path, raft_db_opts, raft_db_cf_opts)
         .unwrap_or_else(|s| fatal!("failed to create origin raft db: {}", s));
     let src_engine = RocksEngine::from_db(Arc::new(db));
@@ -176,12 +176,12 @@ fn run_dump_raftdb_worker(
             match suffix {
                 keys::RAFT_LOG_SUFFIX => {
                     let mut entry = Entry::default();
-                    entry.merge_from_bytes(&value)?;
+                    entry.merge_from_bytes(value)?;
                     entries.push(entry);
                 }
                 keys::RAFT_STATE_SUFFIX => {
                     let mut state = RaftLocalState::default();
-                    state.merge_from_bytes(&value)?;
+                    state.merge_from_bytes(value)?;
                     batch.put_raft_state(region_id, &state).unwrap();
                     // Assume that we always scan entry first and raft state at the end.
                    batch
diff --git a/components/server/src/setup.rs b/components/server/src/setup.rs
index 8eacc0ab9b8..547f1cd8daa 100644
--- a/components/server/src/setup.rs
+++ b/components/server/src/setup.rs
@@ -58,7 +58,7 @@ fn make_engine_log_path(path: &str, sub_path: &str, filename: &str) -> String {
     config::ensure_dir_exist(path).unwrap_or_else(|e| {
         fatal!("failed to create engine log dir: {}", e);
     });
-    config::canonicalize_log_dir(&path, filename).unwrap_or_else(|e| {
+    config::canonicalize_log_dir(path, filename).unwrap_or_else(|e| {
         fatal!("failed to canonicalize engine log dir {:?}: {}", path, e);
     })
 }
@@ -283,7 +283,7 @@ pub fn validate_and_persist_config(config: &mut TiKvConfig, persist: bool) {
     }
 
     if persist {
-        if let Err(e) = persist_config(&config) {
+        if let Err(e) = persist_config(config) {
             fatal!("persist critical config failed: {}", e);
         }
     }
diff --git a/components/sst_importer/src/errors.rs b/components/sst_importer/src/errors.rs
index 54d4482ec4b..1eafc0f3d24 100644
--- a/components/sst_importer/src/errors.rs
+++ b/components/sst_importer/src/errors.rs
@@ -88,7 +88,8 @@ pub enum Error {
     #[error(
         "{what} has wrong prefix: key {} does not start with {}",
-        log_wrappers::Value::key(&key), log_wrappers::Value::key(&prefix)
+        log_wrappers::Value::key(key),
+        log_wrappers::Value::key(prefix)
     )]
     WrongKeyPrefix {
         what: &'static str,
diff --git a/components/sst_importer/src/import_file.rs b/components/sst_importer/src/import_file.rs
index 659a270de8f..7cf5c6da0fc 100644
--- a/components/sst_importer/src/import_file.rs
+++ b/components/sst_importer/src/import_file.rs
@@ -275,7 +275,7 @@ impl ImportDir {
         let path = self.join(meta)?;
         let path_str = path.save.to_str().unwrap();
         let env = get_env(key_manager, get_io_rate_limiter())?;
-        let sst_reader = RocksSstReader::open_with_env(&path_str, Some(env))?;
+        let sst_reader = RocksSstReader::open_with_env(path_str, Some(env))?;
         sst_reader.verify_checksum()?;
         // TODO: check the length and crc32 of ingested file.
         let meta_info = sst_reader.sst_meta_info(meta.to_owned());
diff --git a/components/sst_importer/src/sst_importer.rs b/components/sst_importer/src/sst_importer.rs
index 3b6721315c1..208b6c16920 100644
--- a/components/sst_importer/src/sst_importer.rs
+++ b/components/sst_importer/src/sst_importer.rs
@@ -351,7 +351,7 @@ impl SSTImporter {
         while iter.valid()? {
             let old_key = keys::origin_key(iter.key());
-            if is_after_end_bound(&old_key, &range_end) {
+            if is_after_end_bound(old_key, &range_end) {
                 break;
             }
             if !old_key.starts_with(old_prefix) {
@@ -429,7 +429,7 @@ impl SSTImporter {
         default_meta.set_cf_name(CF_DEFAULT.to_owned());
         let default_path = self.dir.join(&default_meta)?;
         let default = E::SstWriterBuilder::new()
-            .set_db(&db)
+            .set_db(db)
             .set_cf(CF_DEFAULT)
             .set_compression_type(self.compression_types.get(CF_DEFAULT).copied())
             .build(default_path.temp.to_str().unwrap())
@@ -439,7 +439,7 @@ impl SSTImporter {
         write_meta.set_cf_name(CF_WRITE.to_owned());
         let write_path = self.dir.join(&write_meta)?;
         let write = E::SstWriterBuilder::new()
-            .set_db(&db)
+            .set_db(db)
             .set_cf(CF_WRITE)
             .set_compression_type(self.compression_types.get(CF_WRITE).copied())
             .build(write_path.temp.to_str().unwrap())
@@ -464,7 +464,7 @@ impl SSTImporter {
         meta.set_cf_name(CF_DEFAULT.to_owned());
         let default_path = self.dir.join(&meta)?;
         let default = E::SstWriterBuilder::new()
-            .set_db(&db)
+            .set_db(db)
             .set_cf(CF_DEFAULT)
             .build(default_path.temp.to_str().unwrap())
             .unwrap();
diff --git a/components/test_backup/src/lib.rs b/components/test_backup/src/lib.rs
index c34c492b7f2..684558fdb3c 100644
--- a/components/test_backup/src/lib.rs
+++ b/components/test_backup/src/lib.rs
@@ -76,8 +76,8 @@ impl TestSuite {
             let sim = cluster.sim.rl();
             let backup_endpoint = backup::Endpoint::new(
                 *id,
-                sim.storages[&id].clone(),
-                sim.region_info_accessors[&id].clone(),
+                sim.storages[id].clone(),
+                sim.region_info_accessors[id].clone(),
                 engines.kv.as_inner().clone(),
                 BackupConfig { num_threads: 4,
diff --git a/components/test_coprocessor/src/fixture.rs b/components/test_coprocessor/src/fixture.rs
index 51d33946576..3f5d4e7139f 100644
--- a/components/test_coprocessor/src/fixture.rs
+++ b/components/test_coprocessor/src/fixture.rs
@@ -73,7 +73,7 @@ pub fn init_data_with_details(
     store.begin();
     for &(id, name, count) in vals {
         store
-            .insert_into(&tbl)
+            .insert_into(tbl)
             .set(&tbl["id"], Datum::I64(id))
             .set(&tbl["name"], name.map(str::as_bytes).into())
             .set(&tbl["count"], Datum::I64(count))
diff --git a/components/test_raftstore/src/cluster.rs b/components/test_raftstore/src/cluster.rs
index c0617fb6b4e..ba61cda4dcb 100644
--- a/components/test_raftstore/src/cluster.rs
+++ b/components/test_raftstore/src/cluster.rs
@@ -336,11 +336,11 @@ impl Cluster {
     }
 
     pub fn get_engine(&self, node_id: u64) -> Arc {
-        Arc::clone(&self.engines[&node_id].kv.as_inner())
+        Arc::clone(self.engines[&node_id].kv.as_inner())
     }
 
     pub fn get_raft_engine(&self, node_id: u64) -> Arc {
-        Arc::clone(&self.engines[&node_id].raft.as_inner())
+        Arc::clone(self.engines[&node_id].raft.as_inner())
     }
 
     pub fn get_all_engines(&self, node_id: u64) -> Engines {
@@ -603,11 +603,11 @@ impl Cluster {
         for (&id, engines) in &self.engines {
             let peer = new_peer(id, id);
             region.mut_peers().push(peer.clone());
-            bootstrap_store(&engines, self.id(), id).unwrap();
+            bootstrap_store(engines, self.id(), id).unwrap();
         }
 
         for engines in self.engines.values() {
-            prepare_bootstrap_cluster(&engines, &region)?;
+            prepare_bootstrap_cluster(engines, &region)?;
         }
 
         self.bootstrap_cluster(region);
@@ -627,7 +627,7 @@ impl Cluster {
         }
 
         for (&id, engines) in &self.engines {
-            bootstrap_store(&engines, self.id(), id).unwrap();
+            bootstrap_store(engines, self.id(), id).unwrap();
         }
 
         let node_id = 1;
diff --git a/components/test_raftstore/src/pd.rs b/components/test_raftstore/src/pd.rs
index 8876cb14c52..af635ce4222 100644
--- a/components/test_raftstore/src/pd.rs
+++ b/components/test_raftstore/src/pd.rs
@@ -134,7 +134,7 @@ impl Operator {
         match *self {
             Operator::AddPeer { ref peer, .. } => {
                 if let Either::Left(ref peer) = *peer {
-                    let conf_change_type = if is_learner(&peer) {
+                    let conf_change_type = if is_learner(peer) {
                         ConfChangeType::AddLearnerNode
                     } else {
                         ConfChangeType::AddNode
@@ -184,7 +184,7 @@ impl Operator {
             } => {
                 let mut cps = Vec::with_capacity(to_add_peers.len() + remove_peers.len());
                 for peer in to_add_peers.iter() {
-                    let conf_change_type = if is_learner(&peer) {
+                    let conf_change_type = if is_learner(peer) {
                         ConfChangeType::AddLearnerNode
                     } else {
                         ConfChangeType::AddNode
diff --git a/components/test_storage/src/assert_storage.rs b/components/test_storage/src/assert_storage.rs
index 4dd849d9d70..da38ba881a2 100644
--- a/components/test_storage/src/assert_storage.rs
+++ b/components/test_storage/src/assert_storage.rs
@@ -260,7 +260,7 @@ impl AssertionStorage {
     pub fn batch_get_command_ok(&self, keys: &[&[u8]], ts: u64, expect: Vec<&[u8]>) {
         let result: Vec<Option<Vec<u8>>> = self
             .store
-            .batch_get_command(self.ctx.clone(), &keys, ts)
+            .batch_get_command(self.ctx.clone(), keys, ts)
             .unwrap()
             .into_iter()
             .collect();
@@ -634,12 +634,12 @@ impl AssertionStorage {
         let start_key = if start_key.is_empty() {
             None
         } else {
-            Some(Key::from_raw(&start_key))
+            Some(Key::from_raw(start_key))
         };
         let end_key = if end_key.is_empty() {
             None
         } else {
-            Some(Key::from_raw(&end_key))
+            Some(Key::from_raw(end_key))
         };
 
         assert_eq!(
diff --git a/components/tidb_query_aggr/src/impl_max_min.rs b/components/tidb_query_aggr/src/impl_max_min.rs
index 13d94a59c7f..0070224a7d9 100644
--- a/components/tidb_query_aggr/src/impl_max_min.rs
+++ b/components/tidb_query_aggr/src/impl_max_min.rs
@@ -184,7 +184,7 @@ where
             return Ok(());
         }
 
-        if C::sort_compare(&self.extremum.as_ref().unwrap(), &value.as_ref().unwrap())? == E::ORD {
+        if C::sort_compare(self.extremum.as_ref().unwrap(), value.as_ref().unwrap())? == E::ORD {
             self.extremum = value.map(|x| x.into_owned_value());
         }
         Ok(())
@@ -257,11 +257,7 @@ where
         if value.is_some()
             && (self.extremum.is_none()
-                || extreme_ref
-                    .unwrap()
-                    .as_str()?
-                    .cmp(&value.unwrap().as_str()?)
-                    == E::ORD)
+                || extreme_ref.unwrap().as_str()?.cmp(value.unwrap().as_str()?)
+                    == E::ORD)
         {
             self.extremum = value.map(|x| x.into_owned_value());
         }
@@ -912,7 +908,7 @@ mod tests {
                 &mut ctx,
                 &src_schema,
                 columns,
-                &logical_rows,
+                logical_rows,
                 logical_rows.len(),
             )
             .unwrap();
@@ -929,7 +925,7 @@ mod tests {
                 &mut ctx,
                 &src_schema,
                 columns,
-                &logical_rows,
+                logical_rows,
                 logical_rows.len(),
             )
             .unwrap();
diff --git a/components/tidb_query_aggr/src/parser.rs b/components/tidb_query_aggr/src/parser.rs
index c90cdfa51b0..e76095988da 100644
--- a/components/tidb_query_aggr/src/parser.rs
+++ b/components/tidb_query_aggr/src/parser.rs
@@ -44,7 +44,7 @@ pub trait AggrDefinitionParser {
         let child = aggr_def.take_children().into_iter().next().unwrap();
         let exp = RpnExpressionBuilder::build_from_expr_tree(child, ctx, src_schema.len())?;
 
-        Self::parse_rpn(&self, aggr_def, exp, ctx, src_schema, out_schema, out_exp)
+        Self::parse_rpn(self, aggr_def, exp, ctx, src_schema, out_schema, out_exp)
     }
 
     #[inline]
diff --git a/components/tidb_query_codegen/src/rpn_function.rs b/components/tidb_query_codegen/src/rpn_function.rs
index ebedc07763c..7a34723f5a7 100644
--- a/components/tidb_query_codegen/src/rpn_function.rs
+++ b/components/tidb_query_codegen/src/rpn_function.rs
@@ -925,7 +925,7 @@ impl VargsRpnFn {
         }
 
         let fn_arg = item_fn.sig.inputs.iter().nth(attr.captures.len()).unwrap();
-        let arg_type = Self::get_args_type(&attr, &fn_arg)?;
+        let arg_type = Self::get_args_type(&attr, fn_arg)?;
         let arg_type_anonymous = arg_type.eval_type.get_type_with_lifetime(quote! { '_ });
 
         let ret_type = if attr.writer {
@@ -1343,7 +1343,7 @@ impl NormalRpnFn {
             .skip(attr.captures.len())
             .take(take_cnt);
         for fn_arg in fn_args {
-            let arg_type = Self::get_arg_type(&attr, &fn_arg)?;
+            let arg_type = Self::get_arg_type(&attr, fn_arg)?;
             arg_types.push(arg_type.eval_type.get_type_with_lifetime(quote! { 'arg_ }));
             arg_types_anonymous.push(arg_type.eval_type.get_type_with_lifetime(quote! { '_ }));
             arg_types_no_ref.push(arg_type.eval_type.get_type_with_lifetime(quote! {}));
diff --git a/components/tidb_query_datatype/src/codec/chunk/column.rs b/components/tidb_query_datatype/src/codec/chunk/column.rs
index 21ee6c096c4..8809bafff47 100644
--- a/components/tidb_query_datatype/src/codec/chunk/column.rs
+++ b/components/tidb_query_datatype/src/codec/chunk/column.rs
@@ -195,7 +195,7 @@ impl Column {
                     col.append_null();
                 }
                 Some(val) => {
-                    col.append_decimal(&val)?;
+                    col.append_decimal(val)?;
                 }
             }
         }
@@ -207,7 +207,7 @@ impl Column {
                     col.append_null();
                 }
                 Some(val) => {
-                    col.append_bytes(&val)?;
+                    col.append_bytes(val)?;
                 }
             }
         }
diff --git a/components/tidb_query_datatype/src/codec/collation/mod.rs b/components/tidb_query_datatype/src/codec/collation/mod.rs
index 430112bf916..d1559e56178 100644
--- a/components/tidb_query_datatype/src/codec/collation/mod.rs
+++ b/components/tidb_query_datatype/src/codec/collation/mod.rs
@@ -156,7 +156,7 @@ where
 {
     #[inline]
     fn eq(&self, other: &Self) -> bool {
-        C::sort_compare(&self.inner.as_ref(), &other.inner.as_ref()).unwrap()
+        C::sort_compare(self.inner.as_ref(), other.inner.as_ref()).unwrap()
             == std::cmp::Ordering::Equal
     }
 }
@@ -169,7 +169,7 @@ where
 {
     #[inline]
     fn partial_cmp(&self, other: &Self) -> Option {
-        C::sort_compare(&self.inner.as_ref(), &other.inner.as_ref()).ok()
+        C::sort_compare(self.inner.as_ref(), other.inner.as_ref()).ok()
     }
 }
 
@@ -179,7 +179,7 @@ where
 {
     #[inline]
     fn cmp(&self, other: &Self) -> Ordering {
-        C::sort_compare(&self.inner.as_ref(), &other.inner.as_ref()).unwrap()
+        C::sort_compare(self.inner.as_ref(), other.inner.as_ref()).unwrap()
     }
 }
diff --git a/components/tidb_query_datatype/src/codec/convert.rs b/components/tidb_query_datatype/src/codec/convert.rs
index f9f1f35a8a2..f03af093041 100644
--- a/components/tidb_query_datatype/src/codec/convert.rs
+++ b/components/tidb_query_datatype/src/codec/convert.rs
@@ -979,7 +979,7 @@ fn exp_float_str_to_int_str<'a>(
     let int_cnt: i64;
     match dot_idx {
         None => {
-            digits.extend_from_slice(&valid_float[..e_idx].as_bytes());
+            digits.extend_from_slice(valid_float[..e_idx].as_bytes());
             // if digits.len() > i64::MAX,
             // then the input str has at least 9223372036854775808 chars,
             // which make the str >= 8388608.0 TB,
             int_cnt = digits.len() as i64;
         }
         Some(dot_idx) => {
-            digits.extend_from_slice(&valid_float[..dot_idx].as_bytes());
+            digits.extend_from_slice(valid_float[..dot_idx].as_bytes());
             int_cnt = digits.len() as i64;
-            digits.extend_from_slice(&valid_float[(dot_idx + 1)..e_idx].as_bytes());
+            digits.extend_from_slice(valid_float[(dot_idx + 1)..e_idx].as_bytes());
         }
     }
     // make `digits` immutable
diff --git a/components/tidb_query_datatype/src/codec/data_type/chunked_vec_enum.rs b/components/tidb_query_datatype/src/codec/data_type/chunked_vec_enum.rs
index e9f5c697ecd..c27068b789f 100644
--- a/components/tidb_query_datatype/src/codec/data_type/chunked_vec_enum.rs
+++ b/components/tidb_query_datatype/src/codec/data_type/chunked_vec_enum.rs
@@ -135,7 +135,7 @@ impl<'a> ChunkRef<'a, EnumRef<'a>> for &'a ChunkedVecEnum {
     }
 
     fn get_bit_vec(self) -> &'a BitVec {
-        &self.values.get_bit_vec()
+        self.values.get_bit_vec()
     }
 
     #[inline]
diff --git a/components/tidb_query_datatype/src/codec/data_type/mod.rs b/components/tidb_query_datatype/src/codec/data_type/mod.rs
index 2191a4595ba..340e3d61d00 100644
--- a/components/tidb_query_datatype/src/codec/data_type/mod.rs
+++ b/components/tidb_query_datatype/src/codec/data_type/mod.rs
@@ -407,7 +407,7 @@ impl<'a, T: Evaluable + EvaluableRet> EvaluableRef<'a> for &'a T {
#[inline] fn from_owned_value(value: &'a T) -> Self { - &value + value } } diff --git a/components/tidb_query_datatype/src/codec/data_type/scalar.rs b/components/tidb_query_datatype/src/codec/data_type/scalar.rs index f5418584da2..08c1501e664 100644 --- a/components/tidb_query_datatype/src/codec/data_type/scalar.rs +++ b/components/tidb_query_datatype/src/codec/data_type/scalar.rs @@ -259,7 +259,7 @@ impl<'a> ScalarValueRef<'a> { None => { output.write_evaluable_datum_null()?; } - Some(ref val) => { + Some(val) => { output.write_evaluable_datum_bytes(val)?; } } @@ -343,7 +343,7 @@ impl<'a> ScalarValueRef<'a> { TT = [Real, Decimal, DateTime, Duration, Json, Enum], match (self, other) { (ScalarValueRef::TT(v1), ScalarValueRef::TT(v2)) => v1.cmp(v2), - (ScalarValueRef::Int(v1), ScalarValueRef::Int(v2)) => compare_int(&v1.cloned(), &v2.cloned(), &field_type), + (ScalarValueRef::Int(v1), ScalarValueRef::Int(v2)) => compare_int(&v1.cloned(), &v2.cloned(), field_type), (ScalarValueRef::Bytes(None), ScalarValueRef::Bytes(None)) => Ordering::Equal, (ScalarValueRef::Bytes(Some(_)), ScalarValueRef::Bytes(None)) => Ordering::Greater, (ScalarValueRef::Bytes(None), ScalarValueRef::Bytes(Some(_))) => Ordering::Less, diff --git a/components/tidb_query_datatype/src/codec/data_type/vector.rs b/components/tidb_query_datatype/src/codec/data_type/vector.rs index c23ea40ded5..21b5c01b085 100644 --- a/components/tidb_query_datatype/src/codec/data_type/vector.rs +++ b/components/tidb_query_datatype/src/codec/data_type/vector.rs @@ -331,8 +331,8 @@ impl VectorValue { None => { output.write_evaluable_datum_null()?; } - Some(ref val) => { - output.write_evaluable_datum_bytes(*val)?; + Some(val) => { + output.write_evaluable_datum_bytes(val)?; } } Ok(()) @@ -403,7 +403,7 @@ impl VectorValue { None => { output.write_evaluable_datum_null()?; } - Some(ref val) => { + Some(val) => { let sort_key = match_template_collator! { TT, match field_type.collation()? { Collation::TT => TT::sort_key(val)? 
diff --git a/components/tidb_query_datatype/src/codec/mysql/decimal.rs b/components/tidb_query_datatype/src/codec/mysql/decimal.rs index 047a2eeb592..e8874fe439f 100644 --- a/components/tidb_query_datatype/src/codec/mysql/decimal.rs +++ b/components/tidb_query_datatype/src/codec/mysql/decimal.rs @@ -2397,7 +2397,7 @@ mod tests { use std::cmp::Ordering; use std::collections::hash_map::DefaultHasher; use std::f64::EPSILON; - use std::iter::repeat; + use std::sync::Arc; #[test] @@ -3199,9 +3199,9 @@ mod tests { #[test] fn test_add() { - let a = "2".to_owned() + &repeat('1').take(71).collect::(); - let b: String = repeat('8').take(81).collect(); - let c = "8888888890".to_owned() + &repeat('9').take(71).collect::(); + let a = "2".to_owned() + &"1".repeat(71); + let b: String = "8".repeat(81); + let c = "8888888890".to_owned() + &"9".repeat(71); let cases = vec![ ( ".00012345000098765", @@ -3294,8 +3294,8 @@ mod tests { #[test] fn test_mul() { - let a = "1".to_owned() + &repeat('0').take(60).collect::(); - let b = "1".to_owned() + &repeat("0").take(60).collect::(); + let a = "1".to_owned() + &"0".repeat(60); + let b = "1".to_owned() + &"0".repeat(60); let cases = vec![ ("12", "10", Res::Ok("120")), ("0", "-1.1", Res::Ok("0")), diff --git a/components/tidb_query_datatype/src/codec/mysql/enums.rs b/components/tidb_query_datatype/src/codec/mysql/enums.rs index 6f9cf48d44e..02689b5c9d9 100644 --- a/components/tidb_query_datatype/src/codec/mysql/enums.rs +++ b/components/tidb_query_datatype/src/codec/mysql/enums.rs @@ -162,7 +162,7 @@ impl<'a> PartialEq for EnumRef<'a> { impl<'a> Ord for EnumRef<'a> { fn cmp(&self, other: &Self) -> Ordering { - self.value.cmp(&other.value) + self.value.cmp(other.value) } } diff --git a/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs b/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs index ad89dd937d8..db5efd6b922 100644 --- a/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs +++ b/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs @@ -241,7 +241,7 @@ pub trait JsonDecoder: NumberDecoder { } JsonType::String => { let value = self.bytes(); - let (str_len, len_len) = NumberCodec::try_decode_var_u64(&value)?; + let (str_len, len_len) = NumberCodec::try_decode_var_u64(value)?; self.read_bytes(str_len as usize + len_len)? 
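Note: the decimal.rs test hunks above swap iterator-based string building for str::repeat. A minimal standalone sketch of the equivalence (illustrative only, not part of the patch):

fn main() {
    // Old style: build a String by repeating a char through an iterator chain.
    let old: String = std::iter::repeat('8').take(81).collect();
    // New style: `str::repeat` allocates the repeated string directly.
    let new: String = "8".repeat(81);
    assert_eq!(old, new);
}

Besides being shorter, `str::repeat` can size the allocation up front instead of growing it as the iterator is consumed.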
diff --git a/components/tidb_query_datatype/src/codec/mysql/enums.rs b/components/tidb_query_datatype/src/codec/mysql/enums.rs
index 6f9cf48d44e..02689b5c9d9 100644
--- a/components/tidb_query_datatype/src/codec/mysql/enums.rs
+++ b/components/tidb_query_datatype/src/codec/mysql/enums.rs
@@ -162,7 +162,7 @@ impl<'a> PartialEq for EnumRef<'a> {

 impl<'a> Ord for EnumRef<'a> {
     fn cmp(&self, other: &Self) -> Ordering {
-        self.value.cmp(&other.value)
+        self.value.cmp(other.value)
     }
 }
diff --git a/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs b/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs
index ad89dd937d8..db5efd6b922 100644
--- a/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs
+++ b/components/tidb_query_datatype/src/codec/mysql/json/jcodec.rs
@@ -241,7 +241,7 @@ pub trait JsonDecoder: NumberDecoder {
             }
             JsonType::String => {
                 let value = self.bytes();
-                let (str_len, len_len) = NumberCodec::try_decode_var_u64(&value)?;
+                let (str_len, len_len) = NumberCodec::try_decode_var_u64(value)?;
                 self.read_bytes(str_len as usize + len_len)?
             }
             JsonType::I64 | JsonType::U64 | JsonType::Double => self.read_bytes(NUMBER_LEN)?,
diff --git a/components/tidb_query_datatype/src/codec/mysql/json/json_depth.rs b/components/tidb_query_datatype/src/codec/mysql/json/json_depth.rs
index 3ae6e041466..92683ed335c 100644
--- a/components/tidb_query_datatype/src/codec/mysql/json/json_depth.rs
+++ b/components/tidb_query_datatype/src/codec/mysql/json/json_depth.rs
@@ -6,7 +6,7 @@ use super::{JsonRef, JsonType};
 impl<'a> JsonRef<'a> {
     /// Returns maximum depth of JSON document
     pub fn depth(&self) -> Result {
-        depth_json(&self)
+        depth_json(self)
     }
 }
diff --git a/components/tidb_query_datatype/src/codec/mysql/json/json_keys.rs b/components/tidb_query_datatype/src/codec/mysql/json/json_keys.rs
index 25190b59937..2f03353b0f0 100644
--- a/components/tidb_query_datatype/src/codec/mysql/json/json_keys.rs
+++ b/components/tidb_query_datatype/src/codec/mysql/json/json_keys.rs
@@ -30,7 +30,7 @@ impl<'a> JsonRef<'a> {
                 None => Ok(None),
             }
         } else {
-            json_keys(&self)
+            json_keys(self)
         }
     }
 }
diff --git a/components/tidb_query_datatype/src/codec/mysql/json/json_modify.rs b/components/tidb_query_datatype/src/codec/mysql/json/json_modify.rs
index a7d5d35819e..159ca66530f 100644
--- a/components/tidb_query_datatype/src/codec/mysql/json/json_modify.rs
+++ b/components/tidb_query_datatype/src/codec/mysql/json/json_modify.rs
@@ -47,9 +47,9 @@ impl<'a> JsonRef<'a> {
         for (expr, value) in path_expr_list.iter().zip(values.into_iter()) {
             let modifier = BinaryModifier::new(res.as_ref());
             res = match mt {
-                ModifyType::Insert => modifier.insert(&expr, value)?,
-                ModifyType::Replace => modifier.replace(&expr, value)?,
-                ModifyType::Set => modifier.set(&expr, value)?,
+                ModifyType::Insert => modifier.insert(expr, value)?,
+                ModifyType::Replace => modifier.replace(expr, value)?,
+                ModifyType::Set => modifier.set(expr, value)?,
             };
         }
         Ok(res)
diff --git a/components/tidb_query_datatype/src/codec/mysql/json/mod.rs b/components/tidb_query_datatype/src/codec/mysql/json/mod.rs
index e79d4b2d89f..760d6413590 100644
--- a/components/tidb_query_datatype/src/codec/mysql/json/mod.rs
+++ b/components/tidb_query_datatype/src/codec/mysql/json/mod.rs
@@ -145,7 +145,7 @@ impl<'a> JsonRef<'a> {
     /// Returns the underlying value slice
     pub fn value(&self) -> &'a [u8] {
-        &self.value
+        self.value
     }

     // Returns the JSON value as u64
diff --git a/components/tidb_query_datatype/src/codec/row/v2/compat_v1.rs b/components/tidb_query_datatype/src/codec/row/v2/compat_v1.rs
index 5c1b246cfad..d601c2d4cb6 100644
--- a/components/tidb_query_datatype/src/codec/row/v2/compat_v1.rs
+++ b/components/tidb_query_datatype/src/codec/row/v2/compat_v1.rs
@@ -145,7 +145,7 @@ mod tests {
     fn encode_to_v1_compatible(mut ctx: &mut EvalContext, col: &Column) -> Vec<u8> {
         let mut buf_v2 = vec![];
-        buf_v2.write_value(&mut ctx, &col).unwrap();
+        buf_v2.write_value(&mut ctx, col).unwrap();
         let mut buf_v1 = vec![];
         buf_v1.write_v2_as_datum(&buf_v2, col.ft()).unwrap();
         buf_v1
diff --git a/components/tidb_query_datatype/src/codec/table.rs b/components/tidb_query_datatype/src/codec/table.rs
index f7c899b68c8..adf2f60b6af 100644
--- a/components/tidb_query_datatype/src/codec/table.rs
+++ b/components/tidb_query_datatype/src/codec/table.rs
@@ -814,16 +814,16 @@ mod tests {
     #[test]
     fn test_check_key_type() {
         let record_key = encode_row_key(TABLE_ID, 1);
-        assert!(check_key_type(&record_key.as_slice(), RECORD_PREFIX_SEP).is_ok());
-        assert!(check_key_type(&record_key.as_slice(), INDEX_PREFIX_SEP).is_err());
+        assert!(check_key_type(record_key.as_slice(), RECORD_PREFIX_SEP).is_ok());
+        assert!(check_key_type(record_key.as_slice(), INDEX_PREFIX_SEP).is_err());

         let (_, index_key) =
             generate_index_data_for_test(TABLE_ID, INDEX_ID, 1, &Datum::I64(1), true);
-        assert!(check_key_type(&index_key.as_slice(), RECORD_PREFIX_SEP).is_err());
-        assert!(check_key_type(&index_key.as_slice(), INDEX_PREFIX_SEP).is_ok());
+        assert!(check_key_type(index_key.as_slice(), RECORD_PREFIX_SEP).is_err());
+        assert!(check_key_type(index_key.as_slice(), INDEX_PREFIX_SEP).is_ok());

         let too_small_key = vec![0];
-        assert!(check_key_type(&too_small_key.as_slice(), RECORD_PREFIX_SEP).is_err());
-        assert!(check_key_type(&too_small_key.as_slice(), INDEX_PREFIX_SEP).is_err());
+        assert!(check_key_type(too_small_key.as_slice(), RECORD_PREFIX_SEP).is_err());
+        assert!(check_key_type(too_small_key.as_slice(), INDEX_PREFIX_SEP).is_err());
     }
 }
diff --git a/components/tidb_query_executors/src/index_scan_executor.rs b/components/tidb_query_executors/src/index_scan_executor.rs
index 3614410a1ce..45759d03eba 100644
--- a/components/tidb_query_executors/src/index_scan_executor.rs
+++ b/components/tidb_query_executors/src/index_scan_executor.rs
@@ -94,7 +94,7 @@ impl BatchIndexScanExecutor {
         let schema: Vec<_> = columns_info
             .iter()
-            .map(|ci| field_type_from_column_info(&ci))
+            .map(|ci| field_type_from_column_info(ci))
             .collect();

         let columns_id_without_handle: Vec<_> = columns_info
diff --git a/components/tidb_query_executors/src/lib.rs b/components/tidb_query_executors/src/lib.rs
index 68f04c7bbb6..1ca2a7e2771 100644
--- a/components/tidb_query_executors/src/lib.rs
+++ b/components/tidb_query_executors/src/lib.rs
@@ -10,8 +10,8 @@
 #![allow(incomplete_features)]
 #![feature(proc_macro_hygiene)]
 #![feature(specialization)]
-#![feature(const_fn)]
 #![feature(const_fn_fn_ptr_basics)]
+#![feature(const_fn_trait_bound)]
 #![feature(const_mut_refs)]

 #[macro_use(box_try, warn)]
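Note: the feature hunk above drops the umbrella `const_fn` gate, which newer nightlies split into granular gates; a `const fn` carrying a trait bound now needs `const_fn_trait_bound` instead. A hedged sketch of the kind of code that wants the new gate on the pinned nightly (illustrative, not taken from this patch; compiles without the attribute on later stable releases):

// Required on nightly-2021-07-28 for trait bounds on `const fn`.
#![feature(const_fn_trait_bound)]

const fn first<T: Copy>(pair: (T, T)) -> T {
    // The `T: Copy` bound is what triggers the feature gate.
    pair.0
}

fn main() {
    const X: i32 = first((1, 2));
    assert_eq!(X, 1);
}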
diff --git a/components/tidb_query_executors/src/runner.rs b/components/tidb_query_executors/src/runner.rs
index bdda997ba99..7e962afa68e 100644
--- a/components/tidb_query_executors/src/runner.rs
+++ b/components/tidb_query_executors/src/runner.rs
@@ -77,30 +77,30 @@ impl BatchExecutorsRunner<()> {
             match ed.get_tp() {
                 ExecType::TypeTableScan => {
                     let descriptor = ed.get_tbl_scan();
-                    BatchTableScanExecutor::check_supported(&descriptor)
+                    BatchTableScanExecutor::check_supported(descriptor)
                         .map_err(|e| other_err!("BatchTableScanExecutor: {}", e))?;
                 }
                 ExecType::TypeIndexScan => {
                     let descriptor = ed.get_idx_scan();
-                    BatchIndexScanExecutor::check_supported(&descriptor)
+                    BatchIndexScanExecutor::check_supported(descriptor)
                         .map_err(|e| other_err!("BatchIndexScanExecutor: {}", e))?;
                 }
                 ExecType::TypeSelection => {
                     let descriptor = ed.get_selection();
-                    BatchSelectionExecutor::check_supported(&descriptor)
+                    BatchSelectionExecutor::check_supported(descriptor)
                         .map_err(|e| other_err!("BatchSelectionExecutor: {}", e))?;
                 }
                 ExecType::TypeAggregation | ExecType::TypeStreamAgg
                     if ed.get_aggregation().get_group_by().is_empty() =>
                 {
                     let descriptor = ed.get_aggregation();
-                    BatchSimpleAggregationExecutor::check_supported(&descriptor)
+                    BatchSimpleAggregationExecutor::check_supported(descriptor)
                         .map_err(|e| other_err!("BatchSimpleAggregationExecutor: {}", e))?;
                 }
                 ExecType::TypeAggregation => {
                     let descriptor = ed.get_aggregation();
-                    if BatchFastHashAggregationExecutor::check_supported(&descriptor).is_err() {
-                        BatchSlowHashAggregationExecutor::check_supported(&descriptor)
+                    if BatchFastHashAggregationExecutor::check_supported(descriptor).is_err() {
+                        BatchSlowHashAggregationExecutor::check_supported(descriptor)
                             .map_err(|e| other_err!("BatchSlowHashAggregationExecutor: {}", e))?;
                     }
                 }
@@ -108,13 +108,13 @@ impl BatchExecutorsRunner<()> {
                 // Note: We won't check whether the source of stream aggregation is in order.
                 // It is undefined behavior if the source is unordered.
                 let descriptor = ed.get_aggregation();
-                BatchStreamAggregationExecutor::check_supported(&descriptor)
+                BatchStreamAggregationExecutor::check_supported(descriptor)
                     .map_err(|e| other_err!("BatchStreamAggregationExecutor: {}", e))?;
             }
             ExecType::TypeLimit => {}
             ExecType::TypeTopN => {
                 let descriptor = ed.get_top_n();
-                BatchTopNExecutor::check_supported(&descriptor)
+                BatchTopNExecutor::check_supported(descriptor)
                     .map_err(|e| other_err!("BatchTopNExecutor: {}", e))?;
             }
             ExecType::TypeJoin => {
@@ -247,8 +247,7 @@ pub fn build_executors(
             )
         }
         ExecType::TypeAggregation => {
-            if BatchFastHashAggregationExecutor::check_supported(&ed.get_aggregation()).is_ok()
-            {
+            if BatchFastHashAggregationExecutor::check_supported(ed.get_aggregation()).is_ok() {
                 EXECUTOR_COUNT_METRICS.batch_fast_hash_aggr.inc();

                 Box::new(
diff --git a/components/tidb_query_executors/src/table_scan_executor.rs b/components/tidb_query_executors/src/table_scan_executor.rs
index d4a78a470f4..4cef26ac9a4 100644
--- a/components/tidb_query_executors/src/table_scan_executor.rs
+++ b/components/tidb_query_executors/src/table_scan_executor.rs
@@ -202,8 +202,8 @@ impl TableScanExecutorImpl {
                 // but will output a log anyway.
                 warn!(
                     "Ignored duplicated row datum in table scan";
-                    "key" => log_wrappers::Value::key(&key),
-                    "value" => log_wrappers::Value::value(&value),
+                    "key" => log_wrappers::Value::key(key),
+                    "value" => log_wrappers::Value::value(value),
                     "dup_column_id" => column_id,
                 );
             }
diff --git a/components/tidb_query_expr/src/impl_cast.rs b/components/tidb_query_expr/src/impl_cast.rs
index 67686b0e884..23dd20c48f2 100644
--- a/components/tidb_query_expr/src/impl_cast.rs
+++ b/components/tidb_query_expr/src/impl_cast.rs
@@ -6102,7 +6102,7 @@ mod tests {
             } else {
                 max_val_str = "838:59:59";
             }
-            let max_expect = Duration::parse(&mut ctx, &max_val_str, fsp);
+            let max_expect = Duration::parse(&mut ctx, max_val_str, fsp);
             let log = format!(
                 "func_name: {}, input: {}, output: {:?}, output_warn: {:?}, expect: {:?}",
                 func_name,
diff --git a/components/tidb_query_expr/src/impl_compare.rs b/components/tidb_query_expr/src/impl_compare.rs
index c6d0e5e05ed..babe4819a22 100644
--- a/components/tidb_query_expr/src/impl_compare.rs
+++ b/components/tidb_query_expr/src/impl_compare.rs
@@ -106,7 +106,7 @@ impl Comparer for UintIntComparer {
             let ordering = if *rhs < 0 || *lhs as u64 > std::i64::MAX as u64 {
                 Ordering::Greater
             } else {
-                lhs.cmp(&rhs)
+                lhs.cmp(rhs)
             };
             Some(F::compare_order(ordering) as i64)
         }
@@ -130,7 +130,7 @@ impl Comparer for IntUintComparer {
             let ordering = if *lhs < 0 || *rhs as u64 > std::i64::MAX as u64 {
                 Ordering::Less
             } else {
-                lhs.cmp(&rhs)
+                lhs.cmp(rhs)
             };
             Some(F::compare_order(ordering) as i64)
         }
@@ -353,7 +353,7 @@ pub fn greatest_time(ctx: &mut EvalContext, args: &[Option]) -> Result
                     .map(|_| Ok(None))?;
             }
         };
-        match Time::parse_datetime(ctx, &s, Time::parse_fsp(&s), true) {
+        match Time::parse_datetime(ctx, s, Time::parse_fsp(s), true) {
             Ok(t) => greatest = max(greatest, Some(t)),
             Err(_) => {
                 return ctx
@@ -392,7 +392,7 @@ pub fn least_time(mut ctx: &mut EvalContext, args: &[Option]) -> Resul
                     .map(|_| Ok(None))?;
             }
         };
-        match Time::parse_datetime(ctx, &s, Time::parse_fsp(&s), true) {
+        match Time::parse_datetime(ctx, s, Time::parse_fsp(s), true) {
             Ok(t) => least = min(least, Some(t)),
             Err(_) => {
                 return ctx
diff --git a/components/tidb_query_expr/src/impl_compare_in.rs b/components/tidb_query_expr/src/impl_compare_in.rs
index af498f7b6dd..ab6845711b3 100644
--- a/components/tidb_query_expr/src/impl_compare_in.rs
+++ b/components/tidb_query_expr/src/impl_compare_in.rs
@@ -821,7 +821,7 @@ mod tests {
             black_box(&mut ctx),
             black_box(schema),
             black_box(&mut columns),
-            black_box(&logical_rows),
+            black_box(logical_rows),
            black_box(1024),
         );
         assert!(result.is_ok());
diff --git a/components/tidb_query_expr/src/impl_json.rs b/components/tidb_query_expr/src/impl_json.rs
index 9da5cfceea7..ac5eb9539ad 100644
--- a/components/tidb_query_expr/src/impl_json.rs
+++ b/components/tidb_query_expr/src/impl_json.rs
@@ -211,7 +211,7 @@ fn json_unquote(arg: BytesRef) -> Result<Option<Bytes>> {
     let first_char = tmp_str.chars().next();
     let last_char = tmp_str.chars().last();
     if tmp_str.len() >= 2 && first_char == Some('"') && last_char == Some('"') {
-        let _: IgnoredAny = serde_json::from_str(&tmp_str)?;
+        let _: IgnoredAny = serde_json::from_str(tmp_str)?;
     }
     Ok(Some(Bytes::from(self::unquote_string(tmp_str)?)))
 }
@@ -227,7 +227,7 @@ fn valid_paths(expr: &tipb::Expr) -> Result<()> {
     let children = expr.get_children();
     super::function::validate_expr_return_type(&children[0], EvalType::Json)?;
     for child in children.iter().skip(1) {
-        super::function::validate_expr_return_type(&child, EvalType::Bytes)?;
+        super::function::validate_expr_return_type(child, EvalType::Bytes)?;
     }
     Ok(())
 }
@@ -319,10 +319,10 @@ fn parse_json_path_list(args: &[ScalarValueRef]) -> Result
 ) -> Result> {
     let json_path = match path {
         None => return Ok(None),
-        Some(p) => std::str::from_utf8(&p).map_err(tidb_query_datatype::codec::Error::from),
+        Some(p) => std::str::from_utf8(p).map_err(tidb_query_datatype::codec::Error::from),
     }?;

-    Ok(Some(parse_json_path_expr(&json_path)?))
+    Ok(Some(parse_json_path_expr(json_path)?))
 }

 #[cfg(test)]
@@ -549,13 +549,11 @@ mod tests {
         ];

         for (vargs, expected) in cases {
-            let vargs = vargs
+            let mut new_vargs: Vec = vec![];
+            for (key, value) in vargs
                 .into_iter()
                 .map(|(key, value)| (Bytes::from(key), value.map(|s| Json::from_str(s).unwrap())))
-                .collect::<Vec<_>>();
-
-            let mut new_vargs: Vec = vec![];
-            for (key, value) in vargs.into_iter() {
+            {
                 new_vargs.push(ScalarValue::from(key));
                 new_vargs.push(ScalarValue::from(value));
             }
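Note: the impl_json.rs test rewrite above drops an intermediate `collect::<Vec<_>>()` and consumes the mapped iterator directly, the pattern clippy's needless_collect lint suggests. A small sketch of the idea with hypothetical data (illustrative only):

fn main() {
    let pairs = vec![("a", 1), ("b", 2)];
    let mut flat: Vec<String> = vec![];
    // Iterate the mapped iterator directly instead of collecting it
    // into a temporary Vec first.
    for (key, value) in pairs.into_iter().map(|(k, v)| (k.to_owned(), v * 10)) {
        flat.push(key);
        flat.push(value.to_string());
    }
    assert_eq!(flat, ["a", "10", "b", "20"]);
}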
diff --git a/components/tidb_query_expr/src/impl_math.rs b/components/tidb_query_expr/src/impl_math.rs
index 96275667e74..4cddd5a4a37 100644
--- a/components/tidb_query_expr/src/impl_math.rs
+++ b/components/tidb_query_expr/src/impl_math.rs
@@ -24,7 +24,7 @@ pub fn pi() -> Result> {
 #[rpn_fn]
 #[inline]
 pub fn crc32(arg: BytesRef) -> Result<Option<i64>> {
-    Ok(Some(i64::from(file_system::calc_crc32_bytes(&arg))))
+    Ok(Some(i64::from(file_system::calc_crc32_bytes(arg))))
 }

 #[inline]
diff --git a/components/tidb_query_expr/src/impl_string.rs b/components/tidb_query_expr/src/impl_string.rs
index d985ef9ec7f..a75fe056d81 100644
--- a/components/tidb_query_expr/src/impl_string.rs
+++ b/components/tidb_query_expr/src/impl_string.rs
@@ -117,7 +117,7 @@ pub fn locate_2_args_utf8(substr: BytesRef, s: BytesRef) -> Result<
     let offset = if C::IS_CASE_INSENSITIVE {
         find_str(&s.to_lowercase(), &substr.to_lowercase())
     } else {
-        find_str(&s, &substr)
+        find_str(s, substr)
     };
     Ok(Some(offset.map_or(0, |i| 1 + i as i64)))
 }
@@ -146,7 +146,7 @@ pub fn locate_3_args_utf8(
     let offset = if C::IS_CASE_INSENSITIVE {
         find_str(&s[start..].to_lowercase(), &substr.to_lowercase())
     } else {
-        find_str(&s[start..], &substr)
+        find_str(&s[start..], substr)
     };
     Ok(Some(offset.map_or(0, |i| pos + i as i64)))
 }
@@ -260,14 +260,14 @@ pub fn lpad(arg: BytesRef, len: &Int, pad: BytesRef, writer: BytesWriter) -> Res
             // Write full pads
             let num_pads = (target_len - arg.len()) / pad.len();
             for _ in 0..num_pads {
-                writer.partial_write(&pad);
+                writer.partial_write(pad);
             }

             // Write last incomplete pad (might be none)
             let last_pad_len = (target_len - arg.len()) % pad.len();
             writer.partial_write(&pad[..last_pad_len]);

-            writer.partial_write(&arg);
+            writer.partial_write(arg);
             Ok(writer.finish())
         }
     }
@@ -323,12 +323,12 @@ pub fn rpad(arg: BytesRef, len: &Int, pad: BytesRef, writer: BytesWriter) -> Res
         }
         Some(target_len) => {
             let mut writer = writer.begin();
-            writer.partial_write(&arg);
+            writer.partial_write(arg);

             // Write full pads
             let num_pads = (target_len - arg.len()) / pad.len();
             for _ in 0..num_pads {
-                writer.partial_write(&pad);
+                writer.partial_write(pad);
             }

             // Write last incomplete pad (might be none)
@@ -497,7 +497,7 @@ pub fn insert(
     }
     let mut ret = Vec::with_capacity(newstr.len() + s.len());
     ret.extend_from_slice(&s[0..upos - 1]);
-    ret.extend_from_slice(&newstr);
+    ret.extend_from_slice(newstr);
     ret.extend_from_slice(&s[upos + ulen - 1..]);
     Ok(writer.write(Some(ret)))
 }
@@ -671,7 +671,7 @@ fn elt_validator(expr: &tipb::Expr) -> Result<()> {
     assert!(children.len() >= 2);
     super::function::validate_expr_return_type(&children[0], EvalType::Int)?;
     for child in children.iter().skip(1) {
-        super::function::validate_expr_return_type(&child, EvalType::Bytes)?;
+        super::function::validate_expr_return_type(child, EvalType::Bytes)?;
     }
     Ok(())
 }
@@ -711,7 +711,7 @@ pub fn substring_index(
     let mut remaining_pattern_count = count.abs();
     let mut bound = 0;
     while remaining_pattern_count > 0 {
-        if let Some(offset) = finder(&remaining, delim) {
+        if let Some(offset) = finder(remaining, delim) {
             if count > 0 {
                 bound += offset + delim.len();
                 remaining = &s[bound..];
@@ -750,7 +750,7 @@ pub fn strcmp(left: BytesRef, right: BytesRef) -> Result
 ) -> Result> {
-    Ok(twoway::find_bytes(&s, &substr)
+    Ok(twoway::find_bytes(s, substr)
         .map(|i| 1 + i as i64)
         .or(Some(0)))
 }
diff --git a/components/tidb_query_expr/src/impl_time.rs b/components/tidb_query_expr/src/impl_time.rs
index 01959305855..75b6437ac76 100644
--- a/components/tidb_query_expr/src/impl_time.rs
+++ b/components/tidb_query_expr/src/impl_time.rs
@@ -323,7 +323,7 @@ pub fn add_datetime_and_string(
     arg0: &DateTime,
     arg1: BytesRef,
 ) -> Result> {
-    let arg1 = std::str::from_utf8(&arg1).map_err(Error::Encoding)?;
+    let arg1 = std::str::from_utf8(arg1).map_err(Error::Encoding)?;
     let arg1 = match Duration::parse(ctx, arg1, MAX_FSP) {
         Ok(arg) => arg,
         Err(_) => return Ok(None),
@@ -350,7 +350,7 @@ pub fn add_date_and_string(
     arg0: &DateTime,
     arg1: BytesRef,
 ) -> Result> {
-    let arg1 = std::str::from_utf8(&arg1).map_err(Error::Encoding)?;
+    let arg1 = std::str::from_utf8(arg1).map_err(Error::Encoding)?;
     let arg1 = match Duration::parse(ctx, arg1, MAX_FSP) {
         Ok(arg) => arg,
         Err(_) => return Ok(None),
@@ -423,7 +423,7 @@ pub fn sub_datetime_and_string(
     datetime: &DateTime,
     duration_str: BytesRef,
 ) -> Result> {
-    let duration_str = std::str::from_utf8(&duration_str).map_err(Error::Encoding)?;
+    let duration_str = std::str::from_utf8(duration_str).map_err(Error::Encoding)?;
     let duration = match Duration::parse(ctx, duration_str, MAX_FSP) {
         Ok(duration) => duration,
         Err(_) => return Ok(None),
@@ -450,7 +450,7 @@ pub fn sub_duration_and_string(
     arg1: &Duration,
     arg2: BytesRef,
 ) -> Result> {
-    let arg2 = std::str::from_utf8(&arg2).map_err(Error::Encoding)?;
+    let arg2 = std::str::from_utf8(arg2).map_err(Error::Encoding)?;
     let arg2 = match Duration::parse(ctx, arg2, MAX_FSP) {
         Ok(arg) => arg,
         Err(_) => return Ok(None),
@@ -741,7 +741,7 @@ pub fn add_duration_and_string(
     arg1: &Duration,
     arg2: BytesRef,
 ) -> Result> {
-    let arg2 = std::str::from_utf8(&arg2).map_err(Error::Encoding)?;
+    let arg2 = std::str::from_utf8(arg2).map_err(Error::Encoding)?;
     let arg2 = match Duration::parse(ctx, arg2, MAX_FSP) {
         Ok(arg) => arg,
         Err(_) => return Ok(None),
diff --git a/components/tidb_query_expr/src/lib.rs b/components/tidb_query_expr/src/lib.rs
index e4014a15c80..d861e48d001 100644
--- a/components/tidb_query_expr/src/lib.rs
+++ b/components/tidb_query_expr/src/lib.rs
@@ -10,10 +10,9 @@
 #![allow(incomplete_features)]
 #![feature(proc_macro_hygiene)]
 #![feature(specialization)]
-#![feature(const_fn)]
 #![feature(test)]
-#![feature(int_error_matching)]
 #![feature(const_fn_fn_ptr_basics)]
+#![feature(const_fn_trait_bound)]
 #![feature(const_mut_refs)]

 #[macro_use(box_err, box_try, try_opt)]
diff --git a/components/tidb_query_expr/src/types/expr_eval.rs b/components/tidb_query_expr/src/types/expr_eval.rs
index 38d2ad6458a..43b84730dd4 100644
--- a/components/tidb_query_expr/src/types/expr_eval.rs
+++ b/components/tidb_query_expr/src/types/expr_eval.rs
@@ -36,7 +36,7 @@ impl<'a> RpnStackNodeVectorValue<'a> {
     /// Gets a reference to the inner physical vector value.
     pub fn as_ref(&self) -> &VectorValue {
         match self {
-            RpnStackNodeVectorValue::Generated { physical_value, .. } => &physical_value,
+            RpnStackNodeVectorValue::Generated { physical_value, .. } => physical_value,
             RpnStackNodeVectorValue::Ref { physical_value, .. } => *physical_value,
         }
     }
@@ -99,7 +99,7 @@ impl<'a> RpnStackNode<'a> {
     pub fn vector_value(&self) -> Option<&RpnStackNodeVectorValue<'_>> {
         match self {
             RpnStackNode::Scalar { .. } => None,
-            RpnStackNode::Vector { value, .. } => Some(&value),
+            RpnStackNode::Vector { value, .. } => Some(value),
         }
     }
@@ -224,7 +224,7 @@ impl RpnExpression {
                 assert_eq!(input_logical_rows.len(), output_rows);
                 stack.push(RpnStackNode::Vector {
                     value: RpnStackNodeVectorValue::Ref {
-                        physical_value: &decoded_physical_column,
+                        physical_value: decoded_physical_column,
                         logical_rows: input_logical_rows,
                     },
                     field_type,
diff --git a/components/tikv_kv/src/btree_engine.rs b/components/tikv_kv/src/btree_engine.rs
index 07f748ec9dd..3b385c630ed 100644
--- a/components/tikv_kv/src/btree_engine.rs
+++ b/components/tikv_kv/src/btree_engine.rs
@@ -94,7 +94,7 @@ impl Engine for BTreeEngine {
         if batch.modifies.is_empty() {
             return Err(EngineError::from(EngineErrorInner::EmptyRequest));
         }
-        cb((CbContext::new(), write_modifies(&self, batch.modifies)));
+        cb((CbContext::new(), write_modifies(self, batch.modifies)));
         Ok(())
     }
@@ -105,7 +105,7 @@ impl Engine for BTreeEngine {
         _ctx: SnapContext<'_>,
         cb: EngineCallback,
     ) -> EngineResult<()> {
-        cb((CbContext::new(), Ok(BTreeEngineSnapshot::new(&self))));
+        cb((CbContext::new(), Ok(BTreeEngineSnapshot::new(self))));
         Ok(())
     }
 }
diff --git a/components/tikv_util/src/config.rs b/components/tikv_util/src/config.rs
index a48d4b83c4c..95b805c6e48 100644
--- a/components/tikv_util/src/config.rs
+++ b/components/tikv_util/src/config.rs
@@ -1255,7 +1255,7 @@ impl TomlWriter {
     pub fn write_change(&mut self, src: String, mut change: HashMap) {
         for line in src.lines() {
-            match TomlLine::parse(&line) {
+            match TomlLine::parse(line) {
                 TomlLine::Table(keys) => {
                     self.write_current_table(&mut change);
                     self.write(line.as_bytes());
diff --git a/components/tikv_util/src/future.rs b/components/tikv_util/src/future.rs
index fae8f47bbfa..f9ee7712269 100644
--- a/components/tikv_util/src/future.rs
+++ b/components/tikv_util/src/future.rs
@@ -83,7 +83,7 @@ impl ArcWake for BatchCommandsWaker {
     fn wake_by_ref(arc_self: &Arc) {
         let mut future_slot = arc_self.0.lock().unwrap();
         if let Some(mut future) = future_slot.take() {
-            let waker = task::waker_ref(&arc_self);
+            let waker = task::waker_ref(arc_self);
             let cx = &mut Context::from_waker(&*waker);
             match future.as_mut().poll(cx) {
                 Poll::Pending => {
diff --git a/components/tikv_util/src/metrics/threads_linux.rs b/components/tikv_util/src/metrics/threads_linux.rs
index 12740a62174..f93dc07849b 100644
--- a/components/tikv_util/src/metrics/threads_linux.rs
+++ b/components/tikv_util/src/metrics/threads_linux.rs
@@ -314,7 +314,7 @@ fn collect_metrics_by_name(
     let mut new_map: HashMap = HashMap::default();
     for (tid, name) in names {
         let new_value = new_map.entry(name.to_string()).or_insert(0);
-        if let Some(value) = values.get(&tid) {
+        if let Some(value) = values.get(tid) {
             *new_value += *value as u64;
         }
     }
diff --git a/components/tikv_util/src/sys/cgroup.rs b/components/tikv_util/src/sys/cgroup.rs
index 59dc8436397..429e6d8c90f 100644
--- a/components/tikv_util/src/sys/cgroup.rs
+++ b/components/tikv_util/src/sys/cgroup.rs
@@ -188,7 +188,7 @@ mod tests {
 4:memory:/kubepods/burstable/poda2ebe2cd-64c7-11ea-8799-eeeeeeeeeeee/a026c487f1168b7f5442444ac8e35161dfcde87c175ef27d9a806270e267a575
 5:cpuacct,cpu:/kubepods/burstable/poda2ebe2cd-64c7-11ea-8799-eeeeeeeeeeee/a026c487f1168b7f5442444ac8e35161dfcde87c175ef27d9a806270e267a575
 "#;
-        let cgroups = parse_proc_cgroup_v1(&content);
+        let cgroups = parse_proc_cgroup_v1(content);
         assert_eq!(
             cgroups.get("memory").unwrap(),
             "/kubepods/burstable/poda2ebe2cd-64c7-11ea-8799-eeeeeeeeeeee/a026c487f1168b7f5442444ac8e35161dfcde87c175ef27d9a806270e267a575"
@@ -203,7 +203,7 @@ mod tests {
     #[test]
     fn test_parse_proc_cgroup_v2() {
         let content = "0::/test-all";
-        let cgroups = parse_proc_cgroup_v2(&content);
+        let cgroups = parse_proc_cgroup_v2(content);
         assert_eq!(cgroups.get("").unwrap(), "/test-all");
     }
diff --git a/fuzz/cli.rs b/fuzz/cli.rs
index cc49f6a32b7..26418b84031 100644
--- a/fuzz/cli.rs
+++ b/fuzz/cli.rs
@@ -121,7 +121,7 @@ fn write_fuzz_target_source_file(fuzzer: Fuzzer, target: &str) -> Result<()> {
         target_file_path.display()
     ))?;

-    let source = template.replace("__FUZZ_CLI_TARGET__", &target).replace(
+    let source = template.replace("__FUZZ_CLI_TARGET__", target).replace(
         "__FUZZ_GENERATE_COMMENT__",
         "NOTE: AUTO GENERATED FROM `template.rs`",
     );
@@ -318,7 +318,7 @@ fn run_libfuzzer(target: &str) -> Result<()> {
     asan_options.push_str(" detect_odr_violation=0");

     let fuzzer_bin = Command::new("cargo")
-        .args(&["run", "--target", &target_platform, "--bin", target, "--"])
+        .args(&["run", "--target", target_platform, "--bin", target, "--"])
         .arg(&corpus_dir)
         .arg(&seed_dir)
         .env("RUSTFLAGS", &rust_flags)
diff --git a/rust-toolchain b/rust-toolchain
index 414d33e9a55..08c09fbcd18 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2021-04-15
+nightly-2021-07-28
diff --git a/scripts/clippy b/scripts/clippy
index e80fda1d83a..d5b9f4f40fd 100755
--- a/scripts/clippy
+++ b/scripts/clippy
@@ -29,9 +29,12 @@ CLIPPY_LINTS=(-A clippy::module_inception \
               -A clippy::new_ret_no_self \
               -A clippy::unnecessary_sort_by \
               -A clippy::unnecessary_wraps \
+              -A clippy::bool_assert_comparison \
+              -A clippy::self_named_constructor \
+              -A clippy::enum_variant_names \
               -W clippy::dbg_macro \
               -W clippy::todo)

-cargo clippy --workspace --no-default-features \
+cargo clippy --workspace --fix --allow-staged --no-default-features \
     --exclude fuzz-targets --exclude fuzzer-honggfuzz --exclude fuzzer-afl --exclude fuzzer-libfuzzer \
     --features "${TIKV_ENABLE_FEATURES}" "$@" -- "${CLIPPY_LINTS[@]}"
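Note: most mechanical hunks in this patch silence clippy::needless_borrow, which the bumped toolchain above now reports: when a binding is already a reference (or auto-derefs to the parameter type), taking `&` again is redundant. A minimal sketch of the lint with hypothetical names (illustrative only):

fn take_slice(s: &[u8]) -> usize {
    s.len()
}

fn main() {
    let v = vec![1u8, 2, 3];
    let r: &Vec<u8> = &v;
    // `r` already deref-coerces to &[u8]; writing `take_slice(&r)` would
    // borrow a reference and trip clippy::needless_borrow on this toolchain.
    assert_eq!(take_slice(r), 3);
}

The `--fix --allow-staged` flags added to the clippy script let cargo apply exactly these machine-suggested removals in bulk.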
diff --git a/src/config.rs b/src/config.rs
index 0070dcfe9ff..dc34a7bfbfc 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1502,7 +1502,7 @@ impl ConfigManager for DBConfigManger {
             let cf_name = &cf_name[..(cf_name.len() - 2)];
             if let Some(v) = cf_change.remove("block_cache_size") {
                 // currently we can't modify block_cache_size via set_options_cf
-                self.set_block_cache_size(&cf_name, v.into())?;
+                self.set_block_cache_size(cf_name, v.into())?;
             }
             if let Some(ConfigValue::Module(titan_change)) = cf_change.remove("titan") {
                 for (name, value) in titan_change {
@@ -1512,7 +1512,7 @@ impl ConfigManager for DBConfigManger {
             if !cf_change.is_empty() {
                 let cf_change = config_value_to_string(cf_change.into_iter().collect());
                 let cf_change_slice = config_to_slice(&cf_change);
-                self.set_cf_config(&cf_name, &cf_change_slice)?;
+                self.set_cf_config(cf_name, &cf_change_slice)?;
             }
         }
     }
@@ -2982,8 +2982,12 @@ fn to_config_change(change: HashMap) -> CfgResult
     }
     let mut res = HashMap::new();
     for (name, value) in change {
-        let fields: Vec<_> = name.as_str().split('.').collect();
-        let fields: Vec<_> = fields.into_iter().map(|s| s.to_owned()).rev().collect();
+        let fields: Vec<_> = name
+            .as_str()
+            .split('.')
+            .map(|s| s.to_owned())
+            .rev()
+            .collect();
         helper(fields, &mut res, &TIKVCONFIG_TYPED, value)?;
     }
     Ok(res)
@@ -3045,8 +3049,12 @@ fn to_toml_encode(change: HashMap) -> CfgResult
-        let fields: Vec<_> = name.as_str().split('.').collect();
-        let fields: Vec<_> = fields.into_iter().map(|s| s.to_owned()).rev().collect();
+        let fields: Vec<_> = name
+            .as_str()
+            .split('.')
+            .map(|s| s.to_owned())
+            .rev()
+            .collect();
         if helper(fields, &TIKVCONFIG_TYPED)? {
             dst.insert(name.replace("_", "-"), format!("\"{}\"", value));
         } else {
diff --git a/src/coprocessor_v2/endpoint.rs b/src/coprocessor_v2/endpoint.rs
index f3ea659412f..d126e24f386 100644
--- a/src/coprocessor_v2/endpoint.rs
+++ b/src/coprocessor_v2/endpoint.rs
@@ -87,7 +87,7 @@ impl Endpoint {
             .map_err(|e| CoprocessorError::Other(format!("{}", e)))?;

         let plugin_version = plugin.version();
-        if !version_req.matches(&plugin_version) {
+        if !version_req.matches(plugin_version) {
             return Err(CoprocessorError::Other(format!(
                 "The plugin '{}' with version '{}' does not satisfy the version constraint '{}'",
                 plugin.name(),
diff --git a/src/import/duplicate_detect.rs b/src/import/duplicate_detect.rs
index 0b283d94efd..ab6366ccd2c 100644
--- a/src/import/duplicate_detect.rs
+++ b/src/import/duplicate_detect.rs
@@ -127,7 +127,7 @@ impl DuplicateDetector {
                     end_commit_ts,
                 )?);
             }
-            duplicate_pairs.push(self.make_kv_pair(&current_key, write_value, commit_ts)?);
+            duplicate_pairs.push(self.make_kv_pair(current_key, write_value, commit_ts)?);
         }
         if commit_ts <= self.min_commit_ts {
             self.skip_all_version(&start_key)?;
diff --git a/src/import/sst_service.rs b/src/import/sst_service.rs
index b2a941d0f2b..903daf8ebaa 100644
--- a/src/import/sst_service.rs
+++ b/src/import/sst_service.rs
@@ -196,7 +196,7 @@ where
         // must execute after geting a snapshot from raftstore to make sure that the
         // current leader has applied to current term.
         for sst in ssts.iter() {
-            if !importer.exist(&sst) {
+            if !importer.exist(sst) {
                 warn!(
                     "sst [{:?}] not exist. we may retry an operation that has already succeeded",
                     sst
diff --git a/src/lib.rs b/src/lib.rs
index dbffc28cd34..ce37df5a0dc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -22,12 +22,10 @@
 #![feature(cell_update)]
 #![feature(proc_macro_hygiene)]
 #![feature(min_specialization)]
-#![feature(const_fn)]
 #![feature(box_patterns)]
 #![feature(shrink_to)]
 #![feature(drain_filter)]
 #![feature(negative_impls)]
-#![feature(num_as_ne_bytes)]

 #[macro_use(fail_point)]
 extern crate fail;
diff --git a/src/server/debug.rs b/src/server/debug.rs
index 9f5e693f3eb..bd5987f73c5 100644
--- a/src/server/debug.rs
+++ b/src/server/debug.rs
@@ -491,7 +491,7 @@ impl Debugger {
         let check_value = |value: &[u8]| -> Result<()> {
             let mut local_state = RegionLocalState::default();
-            box_try!(local_state.merge_from_bytes(&value));
+            box_try!(local_state.merge_from_bytes(value));

             match local_state.get_state() {
                 PeerState::Tombstone | PeerState::Applying => return Ok(()),
@@ -537,7 +537,7 @@ impl Debugger {
         while box_try!(iter.valid()) {
             let (key, value) = (iter.key(), iter.value());
-            if let Ok((region_id, suffix)) = keys::decode_region_meta_key(&key) {
+            if let Ok((region_id, suffix)) = keys::decode_region_meta_key(key) {
                 if suffix != keys::REGION_STATE_SUFFIX {
                     box_try!(iter.next());
                     continue;
@@ -777,7 +777,7 @@ impl Debugger {
 fn dump_mvcc_properties(db: &Arc, start: &[u8], end: &[u8]) -> Result> {
     let mut num_entries = 0; // number of Rocksdb K/V entries.
-    let collection = box_try!(db.c().get_range_properties_cf(CF_WRITE, &start, &end));
+    let collection = box_try!(db.c().get_range_properties_cf(CF_WRITE, start, end));
     let num_files = collection.len();

     let mut mvcc_properties = MvccProperties::new();
@@ -831,7 +831,7 @@ fn recover_mvcc_for_range(
     read_only: bool,
     thread_index: usize,
 ) -> Result<()> {
-    let mut mvcc_checker = box_try!(MvccChecker::new(Arc::clone(&db), start_key, end_key));
+    let mut mvcc_checker = box_try!(MvccChecker::new(Arc::clone(db), start_key, end_key));
     mvcc_checker.thread_index = thread_index;

     let wb_limit: usize = 10240;
diff --git a/src/server/gc_worker/compaction_filter.rs b/src/server/gc_worker/compaction_filter.rs
index cf672b3094b..ee8604fb359 100644
--- a/src/server/gc_worker/compaction_filter.rs
+++ b/src/server/gc_worker/compaction_filter.rs
@@ -536,7 +536,7 @@ impl Drop for WriteCompactionFilter {
         #[cfg(any(test, feature = "failpoints"))]
         for callback in &self.callbacks_on_drop {
-            callback(&self);
+            callback(self);
         }
     }
 }
diff --git a/src/server/gc_worker/gc_worker.rs b/src/server/gc_worker/gc_worker.rs
index bc3d79851d0..b1303cd19ac 100644
--- a/src/server/gc_worker/gc_worker.rs
+++ b/src/server/gc_worker/gc_worker.rs
@@ -142,8 +142,8 @@ where
                 ..
             } => f
                 .debug_struct("Gc")
-                .field("start_key", &log_wrappers::Value::key(&start_key))
-                .field("end_key", &log_wrappers::Value::key(&end_key))
+                .field("start_key", &log_wrappers::Value::key(start_key))
+                .field("end_key", &log_wrappers::Value::key(end_key))
                 .field("safe_point", safe_point)
                 .finish(),
             GcTask::GcKeys { .. } => f.debug_struct("GcKeys").finish(),
@@ -220,7 +220,7 @@ where
     fn need_gc(&self, start_key: &[u8], end_key: &[u8], safe_point: TimeStamp) -> bool {
         let props = match self
             .engine
-            .get_mvcc_properties_cf(CF_WRITE, safe_point, &start_key, &end_key)
+            .get_mvcc_properties_cf(CF_WRITE, safe_point, start_key, end_key)
         {
             Some(c) => c,
             None => return true,
@@ -353,7 +353,7 @@ where
         let mut gc_info = GcInfo::default();
         let mut next_gc_key = keys.next();
         while let Some(ref key) = next_gc_key {
-            if let Err(e) = self.gc_key(safe_point, &key, &mut gc_info, &mut txn, &mut reader) {
+            if let Err(e) = self.gc_key(safe_point, key, &mut gc_info, &mut txn, &mut reader) {
                 error!(?e; "GC meets failure"; "key" => %key,);
                 // Switch to the next key if meets failure.
                 gc_info.is_completed = true;
diff --git a/src/server/gc_worker/mod.rs b/src/server/gc_worker/mod.rs
index f814851b473..3410282fd82 100644
--- a/src/server/gc_worker/mod.rs
+++ b/src/server/gc_worker/mod.rs
@@ -72,7 +72,7 @@ mod tests {
             .c()
             .get_mvcc_properties_cf(CF_WRITE, safe_point, &start, &end);
         if let Some(props) = props.as_ref() {
-            assert_eq!(check_need_gc(safe_point, 1.0, &props), need_gc);
+            assert_eq!(check_need_gc(safe_point, 1.0, props), need_gc);
         }
         props
     }
@@ -91,7 +91,7 @@ mod tests {
     fn test_without_properties(path: &str, region: &Region) {
         let db = open_db(path, false);
-        let mut engine = RegionEngine::new(&db, &region);
+        let mut engine = RegionEngine::new(&db, region);

         // Put 2 keys.
         engine.put(&[1], 1, 1);
@@ -109,7 +109,7 @@ mod tests {
     fn test_with_properties(path: &str, region: &Region) {
         let db = open_db(path, true);
-        let mut engine = RegionEngine::new(&db, &region);
+        let mut engine = RegionEngine::new(&db, region);

         // Put 2 keys.
         engine.put(&[2], 3, 3);
diff --git a/src/server/node.rs b/src/server/node.rs
index 2689ce79abe..c48c277e1ec 100644
--- a/src/server/node.rs
+++ b/src/server/node.rs
@@ -286,7 +286,7 @@ where
         );

         let region = initial_region(store_id, region_id, peer_id);
-        store::prepare_bootstrap_cluster(&engines, &region)?;
+        store::prepare_bootstrap_cluster(engines, &region)?;
         Ok(region)
     }
@@ -321,16 +321,16 @@ where
                 fail_point!("node_after_bootstrap_cluster", |_| Err(box_err!(
                     "injected error: node_after_bootstrap_cluster"
                 )));
-                store::clear_prepare_bootstrap_key(&engines)?;
+                store::clear_prepare_bootstrap_key(engines)?;
                 return Ok(());
             }
             Err(PdError::ClusterBootstrapped(_)) => match self.pd_client.get_region(b"") {
                 Ok(region) => {
                     if region == first_region {
-                        store::clear_prepare_bootstrap_key(&engines)?;
+                        store::clear_prepare_bootstrap_key(engines)?;
                     } else {
                         info!("cluster is already bootstrapped"; "cluster_id" => self.cluster_id);
-                        store::clear_prepare_bootstrap_cluster(&engines, region_id)?;
+                        store::clear_prepare_bootstrap_cluster(engines, region_id)?;
                     }
                     return Ok(());
                 }
diff --git a/src/server/raft_client.rs b/src/server/raft_client.rs
index f01251c30ac..e8c78dc918d 100644
--- a/src/server/raft_client.rs
+++ b/src/server/raft_client.rs
@@ -849,7 +849,7 @@ where
         if self.last_hash.0 == 0 || msg.region_id != self.last_hash.0 {
             self.last_hash = (
                 msg.region_id,
-                seahash::hash(msg.region_id.as_ne_bytes())
+                seahash::hash(&msg.region_id.to_ne_bytes())
                     % self.builder.cfg.grpc_raft_conn_num as u64,
             );
         };
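Note: with the unstable `num_as_ne_bytes` feature removed in the src/lib.rs hunk above, the raft_client change switches to the stable `to_ne_bytes`, which returns an owned `[u8; 8]` rather than a borrowed slice; hashing a reference to that array is equivalent. A standalone sketch of the stable pattern (the region id value and the seahash crate version are assumptions, not taken from the patch):

// Assumes a dependency like `seahash = "4"` in Cargo.toml.
fn main() {
    let region_id: u64 = 42; // hypothetical region id
    // `to_ne_bytes` copies the integer into a native-endian [u8; 8],
    // so we hash a reference to that temporary array.
    let bucket = seahash::hash(&region_id.to_ne_bytes()) % 4;
    assert!(bucket < 4);
}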
diff --git a/src/server/ttl/ttl_checker.rs b/src/server/ttl/ttl_checker.rs
index 8043794e628..5a97b361e5d 100644
--- a/src/server/ttl/ttl_checker.rs
+++ b/src/server/ttl/ttl_checker.rs
@@ -154,8 +154,8 @@ pub fn check_ttl_and_compact_files(
         Err(e) => {
             error!(
                 "get range ttl properties failed";
-                "range_start" => log_wrappers::Value::key(&start_key),
-                "range_end" => log_wrappers::Value::key(&end_key),
+                "range_start" => log_wrappers::Value::key(start_key),
+                "range_end" => log_wrappers::Value::key(end_key),
                 "err" => %e,
             );
             TTL_CHECKER_ACTIONS_COUNTER_VEC
@@ -189,8 +189,8 @@ pub fn check_ttl_and_compact_files(
         if let Err(e) = engine.compact_files_cf(CF_DEFAULT, vec![file], None, 0, exclude_l0) {
             error!(
                 "execute ttl compact files failed";
-                "range_start" => log_wrappers::Value::key(&start_key),
-                "range_end" => log_wrappers::Value::key(&end_key),
+                "range_start" => log_wrappers::Value::key(start_key),
+                "range_end" => log_wrappers::Value::key(end_key),
                 "err" => %e,
             );
             TTL_CHECKER_ACTIONS_COUNTER_VEC
diff --git a/src/server/ttl/ttl_compaction_filter.rs b/src/server/ttl/ttl_compaction_filter.rs
index 0703ea631ae..9d1e9dccc94 100644
--- a/src/server/ttl/ttl_compaction_filter.rs
+++ b/src/server/ttl/ttl_compaction_filter.rs
@@ -60,7 +60,7 @@ impl CompactionFilter for TTLCompactionFilter {
             return CompactionFilterDecision::Keep;
         }

-        let expire_ts = get_expire_ts(&value).unwrap_or_else(|_| {
+        let expire_ts = get_expire_ts(value).unwrap_or_else(|_| {
             TTL_CHECKER_ACTIONS_COUNTER_VEC
                 .with_label_values(&["ts_error"])
                 .inc();
diff --git a/src/storage/mod.rs b/src/storage/mod.rs
index 57824a8cd6e..310daf87234 100644
--- a/src/storage/mod.rs
+++ b/src/storage/mod.rs
@@ -706,7 +706,7 @@ impl Storage {
                         .read_range_check(Some(&start_key), end_key.as_ref(), |key, lock| {
                             Lock::check_ts_conflict(
                                 Cow::Borrowed(lock),
-                                &key,
+                                key,
                                 start_ts,
                                 &bypass_locks,
                             )
@@ -1186,12 +1186,13 @@ impl Storage {
             let store = RawStore::new(snapshot, enable_ttl);
             {
                 let begin_instant = Instant::now_coarse();
-                let keys: Vec<Key> = keys.into_iter().map(Key::from_encoded).collect();
+
                 let cf = Self::rawkv_cf(&cf)?;
                 // no scan_count for this kind of op.
                 let mut stats = Statistics::default();
                 let result: Vec<Result<KvPair>> = keys
                     .into_iter()
+                    .map(Key::from_encoded)
                     .map(|k| {
                         let v = store.raw_get_key_value(cf, &k, &mut stats);
                         (k, v)
@@ -1541,7 +1542,7 @@ impl Storage {
                 let pairs: Vec> = if reverse_scan {
                     store
                         .reverse_raw_scan(
-                            &cf,
+                            cf,
                             &start_key,
                             end_key.as_ref(),
                             each_limit,
@@ -1552,7 +1553,7 @@
                 } else {
                     store
                         .forward_raw_scan(
-                            &cf,
+                            cf,
                             &start_key,
                             end_key.as_ref(),
                             each_limit,
@@ -1802,8 +1803,8 @@ fn prepare_snap_ctx<'a>(
         let begin_instant = Instant::now();
         for key in keys.clone() {
             concurrency_manager
-                .read_key_check(&key, |lock| {
-                    Lock::check_ts_conflict(Cow::Borrowed(lock), &key, start_ts, bypass_locks)
+                .read_key_check(key, |lock| {
+                    Lock::check_ts_conflict(Cow::Borrowed(lock), key, start_ts, bypass_locks)
                 })
                 .map_err(|e| {
                     CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
@@ -6252,7 +6253,7 @@ mod tests {
             .unwrap();
        let res = consumer.take_data();
         assert!(res[0].is_ok());
-        let key_error = extract_key_error(&res[1].as_ref().unwrap_err());
+        let key_error = extract_key_error(res[1].as_ref().unwrap_err());
         assert_eq!(key_error.get_locked().get_key(), b"key");
     }
diff --git a/src/storage/mvcc/consistency_check.rs b/src/storage/mvcc/consistency_check.rs
index 49f6a360196..087a24febb3 100644
--- a/src/storage/mvcc/consistency_check.rs
+++ b/src/storage/mvcc/consistency_check.rs
@@ -236,7 +236,7 @@ impl MvccInfoObserver for MvccInfoCollector {
             return Ok(false);
         }

-        let write = box_try!(WriteRef::parse(&value));
+        let write = box_try!(WriteRef::parse(value));
         let mut write_info = MvccWrite::default();
         match write.write_type {
             WriteType::Put => write_info.set_type(Op::Put),
@@ -246,7 +246,7 @@
         }
         write_info.set_start_ts(write.start_ts.into_inner());
         write_info.set_commit_ts(commit_ts.into_inner());
-        if let Some(ref value) = write.short_value {
+        if let Some(value) = write.short_value {
             write_info.set_short_value(value.to_vec());
         }
@@ -373,7 +373,7 @@ impl MvccInfoObserver for MvccChecksum {
             return Ok(true);
         }

-        let write = box_try!(WriteRef::parse(&value));
+        let write = box_try!(WriteRef::parse(value));
         let start_ts = write.start_ts.into_inner();
         self.digest.update(key);
diff --git a/src/storage/mvcc/reader/reader.rs b/src/storage/mvcc/reader/reader.rs
index 49e65d0e495..f266a91a715 100644
--- a/src/storage/mvcc/reader/reader.rs
+++ b/src/storage/mvcc/reader/reader.rs
@@ -45,7 +45,7 @@ impl SnapshotReader {
     pub fn key_exist(&mut self, key: &Key, ts: TimeStamp) -> Result {
         Ok(self
             .reader
-            .get_write(&key, ts, Some(self.start_ts))?
+            .get_write(key, ts, Some(self.start_ts))?
             .is_some())
     }
@@ -377,7 +377,7 @@ impl MvccReader {
         self.create_lock_cursor()?;
         let cursor = self.lock_cursor.as_mut().unwrap();
         let ok = match start {
-            Some(ref x) => cursor.seek(x, &mut self.statistics.lock)?,
+            Some(x) => cursor.seek(x, &mut self.statistics.lock)?,
             None => cursor.seek_to_first(&mut self.statistics.lock),
         };
         if !ok {
@@ -545,7 +545,7 @@ pub mod tests {
     impl RegionEngine {
         pub fn new(db: &Arc, region: &Region) -> RegionEngine {
             RegionEngine {
-                db: Arc::clone(&db),
+                db: Arc::clone(db),
                 region: region.clone(),
             }
         }
@@ -878,7 +878,7 @@ pub mod tests {
         let path = dir.path().to_str().unwrap();
         let region = make_region(1, vec![0], vec![]);
-        let db = open_db(&path, true);
+        let db = open_db(path, true);
         let mut engine = RegionEngine::new(&db, &region);

         let key1 = &[1];
diff --git a/src/storage/raw/store.rs b/src/storage/raw/store.rs
index 0c37b4c8430..539134f250a 100644
--- a/src/storage/raw/store.rs
+++ b/src/storage/raw/store.rs
@@ -162,7 +162,7 @@ impl<'a, S: Snapshot> RawStoreInner {
     ) -> Result>> {
         let mut cursor = Cursor::new(self.snapshot.iter_cf(cf, option)?, ScanMode::Forward, false);
         let statistics = statistics.mut_cf_statistics(cf);
-        if !cursor.seek(&start_key, statistics)? {
+        if !cursor.seek(start_key, statistics)? {
             return Ok(vec![]);
         }
         let mut pairs = vec![];
@@ -210,7 +210,7 @@ impl<'a, S: Snapshot> RawStoreInner {
             false,
         );
         let statistics = statistics.mut_cf_statistics(cf);
-        if !cursor.reverse_seek(&start_key, statistics)? {
+        if !cursor.reverse_seek(start_key, statistics)? {
             return Ok(vec![]);
         }
         let mut pairs = vec![];
diff --git a/src/storage/txn/actions/acquire_pessimistic_lock.rs b/src/storage/txn/actions/acquire_pessimistic_lock.rs
index c946d271b84..739a00dd8a3 100644
--- a/src/storage/txn/actions/acquire_pessimistic_lock.rs
+++ b/src/storage/txn/actions/acquire_pessimistic_lock.rs
@@ -63,7 +63,7 @@ pub fn acquire_pessimistic_lock(
             None => OldValue::None,
         })
     } else {
-        reader.get_old_value(&key, for_update_ts, prev_write_loaded, prev_write)
+        reader.get_old_value(key, for_update_ts, prev_write_loaded, prev_write)
     }
 }
diff --git a/src/storage/txn/actions/check_data_constraint.rs b/src/storage/txn/actions/check_data_constraint.rs
index d314a85739e..341f9a65d33 100644
--- a/src/storage/txn/actions/check_data_constraint.rs
+++ b/src/storage/txn/actions/check_data_constraint.rs
@@ -25,7 +25,7 @@ pub(crate) fn check_data_constraint(
     // The current key exists under any of the following conditions:
     // 1.The current write type is `PUT`
     // 2.The current write type is `Rollback` or `Lock`, and the key have an older version.
-    if write.write_type == WriteType::Put || reader.key_exist(&key, write_commit_ts.prev())? {
+    if write.write_type == WriteType::Put || reader.key_exist(key, write_commit_ts.prev())? {
         return Err(ErrorInner::AlreadyExist { key: key.to_raw()? }.into());
     }
     Ok(())
diff --git a/src/storage/txn/actions/tests.rs b/src/storage/txn/actions/tests.rs
index ba85b9656cc..b25bf5491fc 100644
--- a/src/storage/txn/actions/tests.rs
+++ b/src/storage/txn/actions/tests.rs
@@ -58,7 +58,7 @@ pub fn must_prewrite_put_impl(
             is_retry_request,
         },
         mutation,
-        &secondary_keys,
+        secondary_keys,
         is_pessimistic_lock,
     )
     .unwrap();
diff --git a/src/storage/txn/store.rs b/src/storage/txn/store.rs
index 4ef4d81b927..383f9cebc96 100644
--- a/src/storage/txn/store.rs
+++ b/src/storage/txn/store.rs
@@ -445,7 +445,7 @@ impl SnapshotStore {
                 REQUEST_EXCEED_BOUND.inc();
                 return Err(Error::from(ErrorInner::InvalidReqRange {
                     start: Some(l.as_encoded().clone()),
-                    end: upper_bound.as_ref().map(|ref b| b.as_encoded().clone()),
+                    end: upper_bound.as_ref().map(|b| b.as_encoded().clone()),
                     lower_bound: Some(b.to_vec()),
                     upper_bound: self.snapshot.upper_bound().map(|b| b.to_vec()),
                 }));
@@ -457,7 +457,7 @@
             if !b.is_empty() && (u.as_encoded().as_slice() > b || u.as_encoded().is_empty()) {
                 REQUEST_EXCEED_BOUND.inc();
                 return Err(Error::from(ErrorInner::InvalidReqRange {
-                    start: lower_bound.as_ref().map(|ref b| b.as_encoded().clone()),
+                    start: lower_bound.as_ref().map(|b| b.as_encoded().clone()),
                     end: Some(u.as_encoded().clone()),
                     lower_bound: self.snapshot.lower_bound().map(|b| b.to_vec()),
                     upper_bound: Some(b.to_vec()),
diff --git a/tests/benches/coprocessor_executors/util/fixture.rs b/tests/benches/coprocessor_executors/util/fixture.rs
index cd3fde92a9f..5588adbc672 100644
--- a/tests/benches/coprocessor_executors/util/fixture.rs
+++ b/tests/benches/coprocessor_executors/util/fixture.rs
@@ -232,7 +232,7 @@ impl FixtureBuilder {
         let mut store = Store::new();
         for row_index in 0..self.rows {
             store.begin();
-            let mut si = store.insert_into(&table);
+            let mut si = store.insert_into(table);
             for col_index in 0..columns.len() {
                 si = si.set(
                     &table[columns[col_index]],
diff --git a/tests/benches/hierarchy/engine/mod.rs b/tests/benches/hierarchy/engine/mod.rs
index c6f3d20f6f9..b9d87ade42a 100644
--- a/tests/benches/hierarchy/engine/mod.rs
+++ b/tests/benches/hierarchy/engine/mod.rs
@@ -23,7 +23,7 @@ fn bench_engine_put>(
             )
             .generate(DEFAULT_ITERATIONS)
             .iter()
-            .map(|(key, value)| (Key::from_raw(&key), value.clone()))
+            .map(|(key, value)| (Key::from_raw(key), value.clone()))
             .collect();
             (test_kvs, &ctx)
         },
@@ -61,7 +61,7 @@ fn bench_engine_get>(
     )
     .generate(DEFAULT_ITERATIONS)
     .iter()
-    .map(|(key, _)| Key::from_raw(&key))
+    .map(|(key, _)| Key::from_raw(key))
    .collect();

     bencher.iter_batched(
diff --git a/tests/benches/hierarchy/mvcc/mod.rs b/tests/benches/hierarchy/mvcc/mod.rs
index c8d77e04043..839204ef92f 100644
--- a/tests/benches/hierarchy/mvcc/mod.rs
+++ b/tests/benches/hierarchy/mvcc/mod.rs
@@ -51,7 +51,7 @@ where
                 &mut txn,
                 &mut reader,
                 &txn_props,
-                Mutation::Put((Key::from_raw(&k), v.clone())),
+                Mutation::Put((Key::from_raw(k), v.clone())),
                 &None,
                 false,
             )
@@ -59,7 +59,7 @@ where
     }
     let write_data = WriteData::from_modifies(txn.into_modifies());
     let _ = engine.async_write(&ctx, write_data, Box::new(move |(..)| {}));
-    let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(&k)).collect();
+    let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(k)).collect();
     let snapshot = engine.snapshot(Default::default()).unwrap();
     (snapshot, keys)
 }
@@ -76,7 +76,7 @@ fn mvcc_prewrite>(b: &mut Bencher, config: &Bench
             )
             .generate(DEFAULT_ITERATIONS)
             .iter()
-            .map(|(k, v)| (Mutation::Put((Key::from_raw(&k), v.clone())), k.clone()))
+            .map(|(k, v)| (Mutation::Put((Key::from_raw(k), v.clone())), k.clone()))
             .collect();
             let snapshot = engine.snapshot(Default::default()).unwrap();
             (mutations, snapshot)
@@ -107,7 +107,7 @@ fn mvcc_commit>(b: &mut Bencher, config: &BenchCo
     let engine = config.engine_factory.build();
     let cm = ConcurrencyManager::new(1.into());
     b.iter_batched(
-        || setup_prewrite(&engine, &config, 1),
+        || setup_prewrite(&engine, config, 1),
         |(snapshot, keys)| {
             for key in keys {
                 let mut txn = mvcc::MvccTxn::new(1.into(), cm.clone());
@@ -126,7 +126,7 @@ fn mvcc_rollback_prewrote>(
     let engine = config.engine_factory.build();
     let cm = ConcurrencyManager::new(1.into());
     b.iter_batched(
-        || setup_prewrite(&engine, &config, 1),
+        || setup_prewrite(&engine, config, 1),
         |(snapshot, keys)| {
             for key in keys {
                 let mut txn = mvcc::MvccTxn::new(1.into(), cm.clone());
@@ -152,7 +152,7 @@ fn mvcc_rollback_conflict>(
     let engine = config.engine_factory.build();
     let cm = ConcurrencyManager::new(1.into());
     b.iter_batched(
-        || setup_prewrite(&engine, &config, 2),
+        || setup_prewrite(&engine, config, 2),
         |(snapshot, keys)| {
             for key in keys {
                 let mut txn = mvcc::MvccTxn::new(1.into(), cm.clone());
@@ -185,7 +185,7 @@ fn mvcc_rollback_non_prewrote>(
                 DEFAULT_KV_GENERATOR_SEED,
             )
             .generate(DEFAULT_ITERATIONS);
-            let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(&k)).collect();
+            let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(k)).collect();
             let snapshot = engine.snapshot(Default::default()).unwrap();
             (snapshot, keys)
         },
@@ -216,7 +216,7 @@ fn mvcc_reader_load_lock>(b: &mut Bencher, config
     )
     .generate(DEFAULT_ITERATIONS)
     .iter()
-    .map(|(k, _)| Key::from_raw(&k))
+    .map(|(k, _)| Key::from_raw(k))
     .collect();

     b.iter_batched(
@@ -227,7 +227,7 @@
         |(snapshot, test_kvs)| {
             for key in test_kvs {
                 let mut reader = MvccReader::new(snapshot.clone(), None, true);
-                black_box(reader.load_lock(&key).unwrap());
+                black_box(reader.load_lock(key).unwrap());
             }
         },
         BatchSize::SmallInput,
@@ -249,14 +249,14 @@ fn mvcc_reader_seek_write>(
             )
             .generate(DEFAULT_ITERATIONS)
             .iter()
-            .map(|(k, _)| Key::from_raw(&k))
+            .map(|(k, _)| Key::from_raw(k))
             .collect();
             (snapshot, test_keys)
         },
         |(snapshot, test_keys)| {
             for key in &test_keys {
                 let mut reader = MvccReader::new(snapshot.clone(), None, true);
-                black_box(reader.seek_write(&key, TimeStamp::max()).unwrap());
+                black_box(reader.seek_write(key, TimeStamp::max()).unwrap());
             }
         },
         BatchSize::SmallInput,
diff --git a/tests/benches/hierarchy/storage/mod.rs b/tests/benches/hierarchy/storage/mod.rs
index 920496b4a46..8a1644fc45c 100644
--- a/tests/benches/hierarchy/storage/mod.rs
+++ b/tests/benches/hierarchy/storage/mod.rs
@@ -45,7 +45,7 @@ fn storage_prewrite>(b: &mut Bencher, config: &Be
             .map(|(k, v)| {
                 (
                     Context::default(),
-                    vec![Mutation::Put((Key::from_raw(&k), v.clone()))],
+                    vec![Mutation::Put((Key::from_raw(k), v.clone()))],
                     k.clone(),
                 )
             })
@@ -73,7 +73,7 @@ fn storage_commit>(b: &mut Bencher, config: &Benc
                 store
                     .prewrite(
                         Context::default(),
-                        vec![Mutation::Put((Key::from_raw(&k), v.clone()))],
+                        vec![Mutation::Put((Key::from_raw(k), v.clone()))],
                         k.clone(),
                         1,
                     )
diff --git a/tests/benches/hierarchy/txn/mod.rs b/tests/benches/hierarchy/txn/mod.rs
index dac48fb352c..c8e5ca64dc2 100644
--- a/tests/benches/hierarchy/txn/mod.rs
+++ b/tests/benches/hierarchy/txn/mod.rs
@@ -47,7 +47,7 @@ where
                 &mut txn,
                 &mut reader,
                 &txn_props,
-                Mutation::Put((Key::from_raw(&k), v.clone())),
+                Mutation::Put((Key::from_raw(k), v.clone())),
                 &None,
                 false,
             )
@@ -55,7 +55,7 @@ where
     }
     let write_data = WriteData::from_modifies(txn.into_modifies());
     let _ = engine.write(&ctx, write_data);
-    let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(&k)).collect();
+    let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(k)).collect();
     keys
 }
@@ -69,7 +69,7 @@ fn txn_prewrite>(b: &mut Bencher, config: &BenchC
             KvGenerator::new(config.key_length, config.value_length)
                 .generate(DEFAULT_ITERATIONS)
                 .iter()
-                .map(|(k, v)| (Mutation::Put((Key::from_raw(&k), v.clone())), k.clone()))
+                .map(|(k, v)| (Mutation::Put((Key::from_raw(k), v.clone())), k.clone()))
                 .collect();
             mutations
         },
@@ -103,7 +103,7 @@ fn txn_commit>(b: &mut Bencher, config: &BenchCon
     let ctx = Context::default();
     let cm = ConcurrencyManager::new(1.into());
     b.iter_batched(
-        || setup_prewrite(&engine, &config, 1),
+        || setup_prewrite(&engine, config, 1),
         |keys| {
             for key in keys {
                 let snapshot = engine.snapshot(Default::default()).unwrap();
@@ -123,7 +123,7 @@ fn txn_rollback_prewrote>(b: &mut Bencher, config
     let ctx = Context::default();
     let cm = ConcurrencyManager::new(1.into());
     b.iter_batched(
-        || setup_prewrite(&engine, &config, 1),
+        || setup_prewrite(&engine, config, 1),
         |keys| {
             for key in keys {
                 let snapshot = engine.snapshot(Default::default()).unwrap();
@@ -143,7 +143,7 @@ fn txn_rollback_conflict>(b: &mut Bencher, config
     let ctx = Context::default();
     let cm = ConcurrencyManager::new(1.into());
     b.iter_batched(
-        || setup_prewrite(&engine, &config, 2),
+        || setup_prewrite(&engine, config, 2),
         |keys| {
             for key in keys {
                 let snapshot = engine.snapshot(Default::default()).unwrap();
@@ -169,7 +169,7 @@ fn txn_rollback_non_prewrote>(
         || {
             let kvs = KvGenerator::new(config.key_length, config.value_length)
                 .generate(DEFAULT_ITERATIONS);
-            let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(&k)).collect();
+            let keys: Vec<Key> = kvs.iter().map(|(k, _)| Key::from_raw(k)).collect();
             keys
         },
         |keys| {
diff --git a/tests/failpoints/cases/test_gc_worker.rs b/tests/failpoints/cases/test_gc_worker.rs
index c63ef706130..4b3dbf59bbb 100644
--- a/tests/failpoints/cases/test_gc_worker.rs
+++ b/tests/failpoints/cases/test_gc_worker.rs
@@ -249,7 +249,7 @@ fn test_collect_applying_locks() {
     // Write 1 lock.
     must_kv_prewrite(
-        &store_1_client,
+        store_1_client,
         ctx,
         vec![new_mutation(Op::Put, b"k1", b"v")],
         b"k1".to_vec(),