diff --git a/Cargo.toml b/Cargo.toml index ce9f66c..3218442 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "caches" -version = "0.2.9" +version = "0.3.0" authors = ["Al Liu <scygliu1@gmail.com>"] description = "This is a Rust implementation for popular caches (support no_std)." homepage = "https://github.com/al8n/caches-rs" @@ -42,18 +42,18 @@ nightly = ["rand/nightly"] [dependencies] bitvec = { version = "1", default-features = false } -cfg-if = "1.0.0" -hashbrown = { version = "0.14", optional = true } -libm = {version = "0.2.8", optional = true} -rand = {version = "0.8", optional = true} +cfg-if = "1" +hashbrown = { version = "0.15", optional = true } +libm = { version = "0.2", optional = true } +rand = { version = "0.8", optional = true } [dev-dependencies] -scoped_threadpool = "0.1.*" -stats_alloc = "0.1.*" -criterion = "0.5.1" -fnv = "1.0.7" -rand = "0.8.4" -rustc-hash = "1.0" +scoped_threadpool = "0.1" +stats_alloc = "0.1" +criterion = "0.5" +fnv = "1" +rand = "0.8" +rustc-hash = "1" cascara = "0.1.0" [package.metadata.docs.rs] diff --git a/README.md b/README.md index b4cbe01..f8a600d 100644 --- a/README.md +++ b/README.md @@ -30,15 +30,19 @@ The MSRV for this crate is 1.55.0. 
- `TinyLFU`, `SampledLFU`, and `WTinyLFUCache` ## Installation + - std + ```toml [dependencies] - caches = "0.2" + caches = "0.3" ``` + - no_std + ```toml [dependencies] - caches = {version: "0.2", default-features = false } + caches = { version = "0.3", default-features = false, features = ["libm", "hashbrown"] } ``` ## Usages diff --git a/benches/wtinylfu_cache.rs b/benches/wtinylfu_cache.rs index 652a38e..fc004be 100644 --- a/benches/wtinylfu_cache.rs +++ b/benches/wtinylfu_cache.rs @@ -59,11 +59,17 @@ fn bench_wtinylfu_cache_fx_hasher(c: &mut Criterion) { .collect(), ); - let builder = WTinyLFUCacheBuilder::<u64, DefaultKeyHasher<u64>, BuildHasherDefault<FxHasher>, BuildHasherDefault<FxHasher>, BuildHasherDefault<FxHasher>>::new(82, 6488, 1622, 8192) - .set_window_hasher(BuildHasherDefault::<FxHasher>::default()) - .set_protected_hasher(BuildHasherDefault::<FxHasher>::default()) - .set_probationary_hasher(BuildHasherDefault::<FxHasher>::default()) - .set_key_hasher(DefaultKeyHasher::default()); + let builder = WTinyLFUCacheBuilder::< + u64, + DefaultKeyHasher<u64>, + BuildHasherDefault<FxHasher>, + BuildHasherDefault<FxHasher>, + BuildHasherDefault<FxHasher>, + >::new(82, 6488, 1622, 8192) + .set_window_hasher(BuildHasherDefault::<FxHasher>::default()) + .set_protected_hasher(BuildHasherDefault::<FxHasher>::default()) + .set_probationary_hasher(BuildHasherDefault::<FxHasher>::default()) + .set_key_hasher(DefaultKeyHasher::default()); let l = WTinyLFUCache::from_builder(builder).unwrap(); (l, nums) }, @@ -100,11 +106,17 @@ fn bench_wtinylfu_cache_fnv_hasher(c: &mut Criterion) { }) .collect(), ); - let builder = WTinyLFUCacheBuilder::<u64, DefaultKeyHasher<u64>, BuildHasherDefault<fnv::FnvHasher>, BuildHasherDefault<fnv::FnvHasher>, BuildHasherDefault<fnv::FnvHasher>>::new(82, 6488, 1622, 8192) - .set_key_hasher(DefaultKeyHasher::default()) - .set_window_hasher(FnvBuildHasher::default()) - .set_protected_hasher(FnvBuildHasher::default()) - 
.set_probationary_hasher(FnvBuildHasher::default()); + let builder = WTinyLFUCacheBuilder::< + u64, + DefaultKeyHasher<u64>, + BuildHasherDefault<fnv::FnvHasher>, + BuildHasherDefault<fnv::FnvHasher>, + BuildHasherDefault<fnv::FnvHasher>, + >::new(82, 6488, 1622, 8192) + .set_key_hasher(DefaultKeyHasher::default()) + .set_window_hasher(FnvBuildHasher::default()) + .set_protected_hasher(FnvBuildHasher::default()) + .set_probationary_hasher(FnvBuildHasher::default()); let l = WTinyLFUCache::from_builder(builder).unwrap(); (l, nums) }, diff --git a/src/lfu/sampled.rs b/src/lfu/sampled.rs index 823e255..8ed70a5 100644 --- a/src/lfu/sampled.rs +++ b/src/lfu/sampled.rs @@ -180,9 +180,8 @@ impl<K: Hash + Eq, KH: KeyHasher<K>, S: BuildHasher> SampledLFU<K, KH, S> { /// Remove an entry from SampledLFU by hashed key #[inline] pub fn remove_hashed_key(&mut self, kh: u64) -> Option<i64> { - self.key_costs.remove(&kh).map(|cost| { + self.key_costs.remove(&kh).inspect(|&cost| { self.used -= cost; - cost }) } diff --git a/src/lfu/tinylfu.rs b/src/lfu/tinylfu.rs index 8b96e21..ed90e44 100644 --- a/src/lfu/tinylfu.rs +++ b/src/lfu/tinylfu.rs @@ -176,9 +176,9 @@ impl<K: Hash + Eq, KH: KeyHasher<K>> TinyLFU<K, KH> { /// # Details /// Explanation from [TinyLFU: A Highly Efficient Cache Admission Policy §3.4.2]: /// - When querying items, we use both the Doorkeeper and the main structures. - /// That is, if the item is included in the Doorkeeper, - /// TinyLFU estimates the frequency of this item as its estimation in the main structure plus 1. - /// Otherwise, TinyLFU returns just the estimation from the main structure. + /// That is, if the item is included in the Doorkeeper, + /// TinyLFU estimates the frequency of this item as its estimation in the main structure plus 1. + /// Otherwise, TinyLFU returns just the estimation from the main structure. 
/// /// [TinyLFU: A Highly Efficient Cache Admission Policy §3.4.2]: https://arxiv.org/pdf/1512.00727.pdf pub fn estimate<Q>(&self, key: &Q) -> u64 @@ -199,9 +199,9 @@ impl<K: Hash + Eq, KH: KeyHasher<K>> TinyLFU<K, KH> { /// # Details /// Explanation from [TinyLFU: A Highly Efficient Cache Admission Policy §3.4.2]: /// - When querying items, we use both the Doorkeeper and the main structures. - /// That is, if the item is included in the Doorkeeper, - /// TinyLFU estimates the frequency of this item as its estimation in the main structure plus 1. - /// Otherwise, TinyLFU returns just the estimation from the main structure. + /// That is, if the item is included in the Doorkeeper, + /// TinyLFU estimates the frequency of this item as its estimation in the main structure plus 1. + /// Otherwise, TinyLFU returns just the estimation from the main structure. /// /// [TinyLFU: A Highly Efficient Cache Admission Policy §3.4.2]: https://arxiv.org/pdf/1512.00727.pdf pub fn estimate_hashed_key(&self, kh: u64) -> u64 { diff --git a/src/lfu/wtinylfu.rs b/src/lfu/wtinylfu.rs index aebb831..6518729 100644 --- a/src/lfu/wtinylfu.rs +++ b/src/lfu/wtinylfu.rs @@ -456,22 +456,22 @@ impl<K: Hash + Eq, V, KH: KeyHasher<K>, FH: BuildHasher, RH: BuildHasher, WH: Bu WTinyLFUCacheBuilder::default() } - /// + /// Returns the window cache len pub fn window_cache_len(&self) -> usize { self.lru.len() } - /// + /// Returns the window cache cap pub fn window_cache_cap(&self) -> usize { self.lru.cap() } - /// + /// Returns the main cache len pub fn main_cache_len(&self) -> usize { self.slru.len() } - /// + /// Returns the main cache cap pub fn main_cache_cap(&self) -> usize { self.slru.cap() } diff --git a/src/lib.rs b/src/lib.rs index 5f84d5a..517826e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,11 +53,11 @@ //! //! ## Acknowledgments //! - The implementation of `RawLRU` is highly inspired by -//! [Jerome Froelich's LRU implementation](https://github.com/jeromefroe/lru-rs) -//! 
and [`std::collections`] library of Rust. +//! [Jerome Froelich's LRU implementation](https://github.com/jeromefroe/lru-rs) +//! and [`std::collections`] library of Rust. //! //! - Thanks for [HashiCorp's golang-lru](https://github.com/hashicorp/golang-lru) -//! providing the amazing Go implementation. +//! providing the amazing Go implementation. //! //! - Ramakrishna's paper: [Caching strategies to improve disk system performance] //! @@ -90,7 +90,11 @@ #![cfg_attr(docsrs, allow(unused_attributes))] #![deny(missing_docs)] #![allow(unused_doc_comments)] -#![allow(clippy::blocks_in_conditions, clippy::enum_variant_names)] +#![allow( + clippy::blocks_in_conditions, + clippy::enum_variant_names, + clippy::missing_transmute_annotations +)] extern crate alloc; @@ -212,13 +216,13 @@ pub trait OnEvictCallback { /// `PutResult` is returned when try to put a entry in cache. /// /// - **`PutResult::Put`** means that the key is not in cache previously, and the cache has enough -/// capacity, no evict happens. +/// capacity, no evict happens. /// /// - **`PutResult::Update`** means that the key already exists in the cache, -/// and this operation updates the key's value and the inner is the old value. +/// and this operation updates the key's value and the inner is the old value. /// /// - **`PutResult::Evicted`** means that the the key is not in cache previously, -/// but the cache is full, so the evict happens. The inner is the evicted entry `(Key, Value)`. +/// but the cache is full, so the evict happens. The inner is the evicted entry `(Key, Value)`. /// /// - **`PutResult::EvictedAndUpdate`** is only possible to be returned by [`TwoQueueCache`] and [`AdaptiveCache`]. For more information, please see the related examples of [`TwoQueueCache`] and [`AdaptiveCache`] /// diff --git a/src/lru.rs b/src/lru.rs index 3882e10..7b69ff5 100644 --- a/src/lru.rs +++ b/src/lru.rs @@ -13,14 +13,14 @@ //! - [`SegmentedCache`] is a fixed size Segmented LRU cache. //! //! 
- [`AdaptiveCache`] is a fixed size Adaptive Replacement Cache (ARC). -//! ARC is an enhancement over the standard LRU cache in that tracks both -//! frequency and recency of use. This avoids a burst in access to new -//! entries from evicting the frequently used older entries. +//! ARC is an enhancement over the standard LRU cache in that it tracks both +//! frequency and recency of use. This avoids a burst in access to new +//! entries from evicting the frequently used older entries. //! //! //! - [`TwoQueueCache`] is a fixed size 2Q cache. 2Q is an enhancement -//! over the standard LRU cache in that it tracks both frequently -//! and recently used entries separately. +//! over the standard LRU cache in that it tracks both frequently +//! and recently used entries separately. //! //! ## Trade-Off //! In theory, [`AdaptiveCache`] and [`TwoQueueCache`] add some additional @@ -86,11 +86,11 @@ //! //! ## Acknowledgments //! - The implementation of `RawLRU` is highly inspired by -//! [Jerome Froelich's LRU implementation](https://github.com/jeromefroe/lru-rs) -//! and [`std::collections`] library of Rust. +//! [Jerome Froelich's LRU implementation](https://github.com/jeromefroe/lru-rs) +//! and [`std::collections`] library of Rust. //! //! - Thanks for [HashiCorp's golang-lru](https://github.com/hashicorp/golang-lru) -//! providing the amazing Go implementation. +//! providing the amazing Go implementation. //! //! - Ramakrishna's paper: [Caching strategies to improve disk system performance] //! 
diff --git a/src/lru/raw.rs b/src/lru/raw.rs index 42a713d..9ec5551 100644 --- a/src/lru/raw.rs +++ b/src/lru/raw.rs @@ -33,7 +33,6 @@ use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem; use core::ptr::{self, NonNull}; -use core::usize; use crate::cache_api::ResizableCache; use crate::lru::CacheError; @@ -2141,7 +2140,7 @@ mod tests { #[test] #[cfg(feature = "hashbrown")] fn test_with_hasher() { - use hashbrown::hash_map::DefaultHashBuilder; + use hashbrown::DefaultHashBuilder; let s = DefaultHashBuilder::default(); let mut cache = RawLRU::with_hasher(16, s).unwrap(); diff --git a/src/lru/two_queue.rs b/src/lru/two_queue.rs index 9d1aefa..69e1de8 100644 --- a/src/lru/two_queue.rs +++ b/src/lru/two_queue.rs @@ -390,7 +390,7 @@ impl<K: Hash + Eq, V, RH: BuildHasher, FH: BuildHasher, GH: BuildHasher> Cache<K /// /// # Note /// - [`TwoQueueCache`] guarantees that the size of the recent LRU plus the size of the freq LRU - /// is less or equal to the [`TwoQueueCache`]'s size. + /// is less or equal to the [`TwoQueueCache`]'s size. /// - The ghost LRU has its own size. /// /// # Example