diff --git a/Cargo.toml b/Cargo.toml index fdf4a21..d078751 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,6 @@ authors = ["Thomas Korrison "] license = "MIT OR Apache-2.0" description = "High-performance cache primitives with pluggable eviction policies (LRU, LFU, FIFO, 2Q, Clock-PRO, S3-FIFO) and optional metrics." homepage = "https://oxidizelabs.github.io/cachekit/" -documentation = "https://docs.rs/cachekit" repository = "https://github.com/OxidizeLabs/cachekit" readme = "README.md" keywords = ["cache", "lru", "lfu", "eviction", "s3-fifo"] diff --git a/examples/basic_fifo.rs b/examples/basic_fifo.rs index 3afdf98..e531959 100644 --- a/examples/basic_fifo.rs +++ b/examples/basic_fifo.rs @@ -1,4 +1,4 @@ -use cachekit::prelude::FifoCache; +use cachekit::policy::fifo::FifoCache; use cachekit::traits::CoreCache; fn main() { diff --git a/src/lib.rs b/src/lib.rs index 4edf959..ad32135 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -36,7 +36,7 @@ //! │ │ //! │ traits Trait hierarchy (ReadOnlyCache → CoreCache → …) │ //! │ builder Unified CacheBuilder + Cache wrapper │ -//! │ policy 17 eviction policies behind feature flags │ +//! │ policy 18 eviction policies behind feature flags │ //! │ ds Arena, ring buffer, intrusive list, ghost list, … │ //! │ store Storage backends (HashMap, slab, weighted) │ //! │ metrics Hit/miss counters and snapshots (feature-gated) │ @@ -162,6 +162,11 @@ //! return [`ConfigError`](error::ConfigError) for invalid parameters. Debug-only //! invariant checks produce [`InvariantError`](error::InvariantError). //! +//! # Release Notes +//! +//! See the [changelog](https://github.com/OxidizeLabs/cachekit/blob/main/CHANGELOG.md) +//! for a summary of changes in each published version. +//! //! # Choosing a Policy //! //! 
```text diff --git a/src/policy/fifo.rs b/src/policy/fifo.rs index f80221e..6bde42b 100644 --- a/src/policy/fifo.rs +++ b/src/policy/fifo.rs @@ -736,7 +736,9 @@ where } #[cfg(feature = "concurrency")] -impl ConcurrentCache for ConcurrentFifoCache +// SAFETY: ConcurrentFifoCache uses parking_lot RwLock internally for all +// shared-state access, so concurrent operations are correctly synchronised. +unsafe impl ConcurrentCache for ConcurrentFifoCache where K: Clone + Eq + Hash + Send + Sync, V: Send + Sync, @@ -859,16 +861,14 @@ where None } - fn pop_oldest_batch(&mut self, count: usize) -> Vec<(K, V)> { - let mut result = Vec::with_capacity(count.min(self.len())); + fn pop_oldest_batch_into(&mut self, count: usize, out: &mut Vec<(K, V)>) { + out.reserve(count.min(self.len())); for _ in 0..count { - if let Some(entry) = self.pop_oldest() { - result.push(entry); - } else { - break; + match self.pop_oldest() { + Some(entry) => out.push(entry), + None => break, } } - result } fn age_rank(&self, key: &K) -> Option { @@ -1513,8 +1513,8 @@ mod tests { partial_cache.insert("d", 4); // Remove 2 items - partial_cache.pop_oldest(); // Remove "a" - partial_cache.pop_oldest(); // Remove "b" + let _ = partial_cache.pop_oldest(); // Remove "a" + let _ = partial_cache.pop_oldest(); // Remove "b" assert_eq!(partial_cache.len(), 2); // Add 2 new items diff --git a/src/policy/lfu.rs b/src/policy/lfu.rs index 9c37cee..319067e 100644 --- a/src/policy/lfu.rs +++ b/src/policy/lfu.rs @@ -2579,10 +2579,10 @@ mod tests { assert_eq!(cache.len(), 2); // Test pop_lfu operations - cache.pop_lfu(); + let _ = cache.pop_lfu(); assert_eq!(cache.len(), 1); - cache.pop_lfu(); + let _ = cache.pop_lfu(); assert_eq!(cache.len(), 0); // Test pop_lfu on empty cache doesn't change length @@ -2642,7 +2642,7 @@ mod tests { cache.remove(&format!("key{}", capacity - 1)); assert_eq!(cache.capacity(), capacity); - cache.pop_lfu(); + let _ = cache.pop_lfu(); assert_eq!(cache.capacity(), capacity); cache.clear(); 
@@ -2673,7 +2673,7 @@ mod tests { cache.remove(&format!("key{}", i % 10)); }, 3 => { - cache.pop_lfu(); + let _ = cache.pop_lfu(); }, _ => unreachable!(), } @@ -3113,7 +3113,7 @@ mod tests { cache.remove(&format!("temp{}", i - 1)); }, 3 => { - cache.pop_lfu(); + let _ = cache.pop_lfu(); }, 4 => { cache.increment_frequency(&"key2".to_string()); @@ -3140,7 +3140,7 @@ mod tests { // Test invariants after pop_lfu operations while cache.len() > 0 { - cache.pop_lfu(); + let _ = cache.pop_lfu(); verify_invariants(&mut cache); } diff --git a/src/policy/lru.rs b/src/policy/lru.rs index 82f3461..d9bb965 100644 --- a/src/policy/lru.rs +++ b/src/policy/lru.rs @@ -2155,8 +2155,8 @@ mod tests { assert_eq!(cache.len(), 5); // Simulate capacity reduction to 3 by removing 2 LRU items - cache.pop_lru(); // Remove 1 - cache.pop_lru(); // Remove 2 + let _ = cache.pop_lru(); // Remove 1 + let _ = cache.pop_lru(); // Remove 2 assert_eq!(cache.len(), 3); // Remaining items should be 3, 4, 5 @@ -3175,12 +3175,12 @@ mod tests { assert_eq!(*new_tail_key, 2); // Pop again - cache.pop_lru(); + let _ = cache.pop_lru(); let (final_tail_key, _) = cache.peek_lru().unwrap(); assert_eq!(*final_tail_key, 3); // Pop last item - cache.pop_lru(); + let _ = cache.pop_lru(); assert!(cache.peek_lru().is_none()); assert_eq!(cache.len(), 0); } @@ -4258,7 +4258,7 @@ mod tests { cache.insert(6, Arc::new(600)); verify_ranks(&cache); - cache.pop_lru(); + let _ = cache.pop_lru(); verify_ranks(&cache); } @@ -6165,7 +6165,7 @@ mod tests { cache.insert(3, Arc::new(3)); // Pop LRU - cache.pop_lru(); // Removes 1 + let _ = cache.pop_lru(); // Removes 1 // Check recency rank of remaining items to force traversal assert!(cache.recency_rank(&2).is_some()); @@ -6231,7 +6231,7 @@ mod tests { cache.get(&2); // Remove head (LRU) - cache.pop_lru(); // Should remove 0 (LRU) + let _ = cache.pop_lru(); // Should remove 0 (LRU) // Remove tail (MRU) cache.remove(&2); // 2 was MRU diff --git a/src/policy/s3_fifo.rs 
b/src/policy/s3_fifo.rs index c4f899b..eacc0db 100644 --- a/src/policy/s3_fifo.rs +++ b/src/policy/s3_fifo.rs @@ -1547,7 +1547,9 @@ where } #[cfg(feature = "concurrency")] -impl ConcurrentCache for ConcurrentS3FifoCache +// SAFETY: ConcurrentS3FifoCache uses parking_lot RwLock internally for all +// shared-state access, so concurrent operations are correctly synchronised. +unsafe impl ConcurrentCache for ConcurrentS3FifoCache where K: Clone + Eq + Hash + Send + Sync, V: Send + Sync, diff --git a/src/prelude.rs b/src/prelude.rs index 5f14a97..168535e 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -1,19 +1,22 @@ -pub use crate::ds::{ - ClockRing, FixedHistory, FrequencyBucketEntryMeta, FrequencyBuckets, FrequencyBucketsHandle, - GhostList, IntrusiveList, KeyInterner, LazyMinHeap, ShardSelector, SlotArena, SlotId, -}; +//! Convenience re-exports for the most common cachekit types. +//! +//! Import everything with: +//! +//! ``` +//! use cachekit::prelude::*; +//! ``` +//! +//! This gives you the core traits ([`CoreCache`], [`ReadOnlyCache`], +//! [`MutableCache`]), the policy-specific traits, and the [`CacheBuilder`] +//! entry point. Internal data structures and concrete policy types are +//! available from their respective modules ([`ds`](crate::ds), +//! [`policy`](crate::policy)). 
-#[cfg(feature = "concurrency")] -pub use crate::ds::{ - ConcurrentClockRing, ConcurrentIntrusiveList, ConcurrentSlotArena, - ShardedFrequencyBucketEntryMeta, ShardedFrequencyBuckets, ShardedSlotArena, ShardedSlotId, +pub use crate::builder::{Cache, CacheBuilder, CachePolicy}; +pub use crate::traits::{ + ConcurrentCache, CoreCache, FifoCacheTrait, LfuCacheTrait, LruCacheTrait, LrukCacheTrait, + MutableCache, ReadOnlyCache, }; + #[cfg(feature = "metrics")] pub use crate::metrics::snapshot::CacheMetricsSnapshot; -#[cfg(feature = "policy-fifo")] -pub use crate::policy::fifo::FifoCache; -pub use crate::traits::{ - AsyncCacheFuture, CacheConfig, CacheFactory, CacheTier, CacheTierManager, ConcurrentCache, - CoreCache, FifoCacheTrait, LfuCacheTrait, LruCacheTrait, LrukCacheTrait, MutableCache, - ReadOnlyCache, -}; diff --git a/src/traits.rs b/src/traits.rs index cc9765d..1cbd241 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -92,7 +92,7 @@ //! | `LfuCacheTrait` | `MutableCache` | LFU-specific with frequency tracking | //! | `LrukCacheTrait` | `MutableCache` | LRU-K with K-distance tracking | //! | - | - | - | -//! | `ConcurrentCache` | `Send + Sync` | Marker for thread-safe caches | +//! | `ConcurrentCache` | `Send + Sync` | Safety marker for thread-safe caches | //! | `CacheTierManager` | - | Multi-tier cache management | //! | `CacheFactory` | - | Cache instance creation | //! | `AsyncCacheFuture` | `Send + Sync` | Future async operation support | @@ -134,9 +134,9 @@ //! //! ```text //! ┌─────────────────────────────────────────────────────────────────────────┐ -//! │ ConcurrentCache │ +//! │ unsafe trait ConcurrentCache │ //! │ │ -//! │ Marker trait: Send + Sync │ +//! │ Safety marker: Send + Sync │ //! │ Purpose: Guarantee thread-safe cache implementations │ //! │ Usage: fn use_cache + ConcurrentCache>(c: &C) │ //! └─────────────────────────────────────────────────────────────────────────┘ @@ -153,10 +153,10 @@ //! //! ## Example Usage //! -//! ```rust,ignore -//! 
use crate::storage::disk::async_disk::cache::cache_traits::{ -//! ReadOnlyCache, ReadOnlyLruCache, ReadOnlyFifoCache, -//! CoreCache, MutableCache, FifoCacheTrait, LruCacheTrait, LfuCacheTrait, +//! ``` +//! use cachekit::traits::{ +//! ReadOnlyCache, CoreCache, MutableCache, +//! FifoCacheTrait, LruCacheTrait, LfuCacheTrait, //! }; //! //! // Read-only inspection - no side effects, works with shared references @@ -167,11 +167,6 @@ //! (len, cap, utilization) //! } //! -//! // Policy-specific read-only inspection -//! fn inspect_lru_order>>(cache: &C) -> Option { -//! cache.peek_lru().map(|(k, _)| *k) -//! } -//! //! // Function accepting any cache //! fn warm_cache>>(cache: &mut C, data: &[(u64, Vec)]) { //! for (key, value) in data { @@ -197,27 +192,14 @@ //! // LRU-specific function //! fn touch_hot_keys>>(cache: &mut C, keys: &[u64]) { //! for key in keys { -//! cache.touch(key); // Mark as recently used without retrieving +//! cache.touch(key); //! } //! } //! //! // LFU-specific function with frequency-based prioritization //! fn boost_key_priority>>(cache: &mut C, key: &u64) { -//! // Increment frequency without accessing value //! cache.increment_frequency(key); //! } -//! -//! // Thread-safe cache usage -//! use std::sync::{Arc, RwLock}; -//! use crate::storage::disk::async_disk::cache::lru::ConcurrentLruCache; -//! -//! let shared_cache = Arc::new(ConcurrentLruCache::>::new(1000)); -//! -//! // Safe to use from multiple threads -//! let cache_clone = shared_cache.clone(); -//! std::thread::spawn(move || { -//! cache_clone.insert(42, vec![1, 2, 3]); -//! }); //! ``` //! //! ## Thread Safety @@ -479,9 +461,10 @@ pub trait MutableCache: CoreCache { /// ``` fn remove(&mut self, key: &K) -> Option; - /// Removes multiple keys efficiently. + /// Removes multiple keys, appending results to the provided buffer. /// - /// Returns a vector of `Option` in the same order as the input keys. + /// Results are appended in the same order as the input keys. 
Callers + /// can reuse the buffer across calls to avoid repeated allocation. /// The default implementation loops over [`remove`](Self::remove). /// /// # Example @@ -495,12 +478,25 @@ pub trait MutableCache: CoreCache { /// cache.insert(2, "two"); /// cache.insert(3, "three"); /// - /// let removed = cache.remove_batch(&[1, 99, 3]); - /// assert_eq!(removed, vec![Some("one"), None, Some("three")]); + /// let mut results = Vec::new(); + /// cache.remove_batch_into(&[1, 99, 3], &mut results); + /// assert_eq!(results, vec![Some("one"), None, Some("three")]); /// assert_eq!(cache.len(), 1); /// ``` + fn remove_batch_into(&mut self, keys: &[K], out: &mut Vec>) { + out.reserve(keys.len()); + out.extend(keys.iter().map(|k| self.remove(k))); + } + + /// Removes multiple keys, returning results in a new `Vec`. + /// + /// Convenience wrapper around [`remove_batch_into`](Self::remove_batch_into). + /// Prefer `remove_batch_into` when reusing a buffer across calls. + #[must_use] fn remove_batch(&mut self, keys: &[K]) -> Vec> { - keys.iter().map(|k| self.remove(k)).collect() + let mut out = Vec::with_capacity(keys.len()); + self.remove_batch_into(keys, &mut out); + out } } @@ -558,6 +554,7 @@ pub trait FifoCacheTrait: CoreCache { /// assert_eq!(cache.pop_oldest(), Some((2, "second"))); /// assert_eq!(cache.pop_oldest(), None); /// ``` + #[must_use] fn pop_oldest(&mut self) -> Option<(K, V)>; /// Peeks at the oldest entry without removing it. @@ -580,9 +577,10 @@ pub trait FifoCacheTrait: CoreCache { /// ``` fn peek_oldest(&self) -> Option<(&K, &V)>; - /// Removes multiple oldest entries efficiently. + /// Removes up to `count` oldest entries, appending them to the provided buffer. /// - /// Returns up to `count` entries in FIFO order (oldest first). + /// Entries are appended in FIFO order (oldest first). Callers can reuse + /// the buffer across calls to avoid repeated allocation. /// The default implementation calls [`pop_oldest`](Self::pop_oldest) in a loop. 
/// /// # Example @@ -596,12 +594,30 @@ /// cache.insert(2, "b"); /// cache.insert(3, "c"); /// - /// let batch = cache.pop_oldest_batch(2); + /// let mut batch = Vec::new(); + /// cache.pop_oldest_batch_into(2, &mut batch); /// assert_eq!(batch, vec![(1, "a"), (2, "b")]); /// assert_eq!(cache.len(), 1); /// ``` + fn pop_oldest_batch_into(&mut self, count: usize, out: &mut Vec<(K, V)>) { + out.reserve(count.min(self.len())); + for _ in 0..count { + match self.pop_oldest() { + Some(entry) => out.push(entry), + None => break, + } + } + } + + /// Removes up to `count` oldest entries, returning them in a new `Vec`. + /// + /// Convenience wrapper around [`pop_oldest_batch_into`](Self::pop_oldest_batch_into). + /// Prefer `pop_oldest_batch_into` when reusing a buffer across calls. + #[must_use] fn pop_oldest_batch(&mut self, count: usize) -> Vec<(K, V)> { - (0..count).filter_map(|_| self.pop_oldest()).collect() + let mut out = Vec::with_capacity(count.min(self.len())); + self.pop_oldest_batch_into(count, &mut out); + out } /// Gets the age rank of a key (0 = oldest, higher = newer). @@ -677,6 +693,7 @@ /// let (key, _) = cache.pop_lru().unwrap(); /// assert_eq!(key, 1); // First inserted, not accessed since /// ``` + #[must_use] fn pop_lru(&mut self) -> Option<(K, V)>; /// Peeks at the LRU entry without removing it. @@ -813,6 +830,7 @@ /// let (key, _) = cache.pop_lfu().unwrap(); /// assert_eq!(key, 1); /// ``` + #[must_use] fn pop_lfu(&mut self) -> Option<(K, V)>; /// Peeks at the LFU entry without removing it. @@ -966,6 +984,7 @@ /// let (key, _) = cache.pop_lru_k().unwrap(); /// assert_eq!(key, 1); /// ``` + #[must_use] fn pop_lru_k(&mut self) -> Option<(K, V)>; /// Peeks at the LRU-K entry without removing it. @@ -1127,6 +1146,12 @@ 
This trait extends /// `Send + Sync` and can be used as a bound to require concurrent access. /// +/// # Safety +/// +/// Implementing this trait asserts that the cache handles internal +/// synchronization correctly. An incorrect implementation may lead to +/// data races when the cache is shared across threads. +/// /// # Example /// /// ``` @@ -1162,7 +1187,7 @@ pub trait LrukCacheTrait: MutableCache { /// guard.insert(1, "value".to_string()); /// }); /// ``` -pub trait ConcurrentCache: Send + Sync {} +pub unsafe trait ConcurrentCache: Send + Sync {} /// High-level cache tier management. /// @@ -1241,6 +1266,7 @@ pub trait CacheTierManager { /// /// let tier = CacheTier::Hot; /// assert_eq!(tier, CacheTier::Hot); +/// assert_eq!(tier.to_string(), "Hot"); /// /// // Tiers can be compared and hashed /// use std::collections::HashSet; @@ -1251,6 +1277,7 @@ pub trait CacheTierManager { /// assert_eq!(tiers.len(), 3); /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] pub enum CacheTier { /// Hot tier: frequently accessed data (LRU-managed). /// @@ -1268,6 +1295,16 @@ pub enum CacheTier { Cold, } +impl std::fmt::Display for CacheTier { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Hot => f.write_str("Hot"), + Self::Warm => f.write_str("Warm"), + Self::Cold => f.write_str("Cold"), + } + } +} + /// Factory trait for creating cache instances. 
/// /// Provides a standard interface for cache construction, allowing generic @@ -1287,18 +1324,18 @@ pub enum CacheTier { /// impl CacheFactory for LruFactory { /// type Cache = LruCache; /// -/// fn create(capacity: usize) -> Self::Cache { +/// fn new(capacity: usize) -> Self::Cache { /// LruCache::new(capacity) /// } /// -/// fn create_with_config(config: CacheConfig) -> Self::Cache { +/// fn with_config(config: CacheConfig) -> Self::Cache { /// LruCache::new(config.capacity) /// } /// } /// /// // Generic function using factory /// fn build_cache>() -> F::Cache { -/// F::create(100) +/// F::new(100) /// } /// ``` pub trait CacheFactory { @@ -1306,15 +1343,15 @@ pub trait CacheFactory { type Cache: CoreCache; /// Creates a new cache instance with the specified capacity. - fn create(capacity: usize) -> Self::Cache; + fn new(capacity: usize) -> Self::Cache; /// Creates a cache with custom configuration. - fn create_with_config(config: CacheConfig) -> Self::Cache; + fn with_config(config: CacheConfig) -> Self::Cache; } /// Configuration for cache creation. /// -/// Used with [`CacheFactory::create_with_config`] to customize cache behavior. +/// Used with [`CacheFactory::with_config`] to customize cache behavior. /// /// # Fields /// @@ -1335,17 +1372,14 @@ pub trait CacheFactory { /// assert_eq!(config.capacity, 1000); /// assert!(!config.enable_stats); /// -/// // Custom configuration -/// let config = CacheConfig { -/// capacity: 5000, -/// enable_stats: true, -/// ..Default::default() -/// }; +/// // Custom configuration via builder methods +/// let config = CacheConfig::new(5000).with_stats(true); /// assert_eq!(config.capacity, 5000); /// assert!(config.enable_stats); /// assert!(config.prealloc_memory); // from default /// ``` -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[non_exhaustive] pub struct CacheConfig { /// Maximum number of entries the cache can hold. 
pub capacity: usize, @@ -1369,14 +1403,64 @@ pub struct CacheConfig { pub thread_safe: bool, } -impl Default for CacheConfig { - /// Creates a default configuration. +impl CacheConfig { + /// Creates a new configuration with the given capacity and default options. + /// + /// # Example + /// + /// ``` + /// use cachekit::traits::CacheConfig; + /// + /// let config = CacheConfig::new(500); + /// assert_eq!(config.capacity, 500); + /// assert!(!config.enable_stats); + /// ``` + pub fn new(capacity: usize) -> Self { + Self { + capacity, + ..Default::default() + } + } + + /// Enables or disables hit/miss statistics tracking. + pub fn with_stats(mut self, enable: bool) -> Self { + self.enable_stats = enable; + self + } + + /// Enables or disables upfront memory pre-allocation. + pub fn with_prealloc(mut self, prealloc: bool) -> Self { + self.prealloc_memory = prealloc; + self + } + + /// Enables or disables internal thread-safe synchronization. + pub fn with_thread_safe(mut self, thread_safe: bool) -> Self { + self.thread_safe = thread_safe; + self + } + + /// Validates the configuration, returning an error if any parameter is invalid. 
+ /// + /// # Example + /// + /// ``` + /// use cachekit::traits::CacheConfig; /// - /// Defaults: - /// - `capacity`: 1000 - /// - `enable_stats`: false - /// - `prealloc_memory`: true - /// - `thread_safe`: false + /// assert!(CacheConfig::new(100).validate().is_ok()); + /// assert!(CacheConfig::new(0).validate().is_err()); + /// ``` + pub fn validate(&self) -> Result<(), crate::error::ConfigError> { + if self.capacity == 0 { + return Err(crate::error::ConfigError::new( + "capacity must be greater than 0", + )); + } + Ok(()) + } +} + +impl Default for CacheConfig { fn default() -> Self { Self { capacity: 1000, diff --git a/tests/fifo_concurrency.rs b/tests/fifo_concurrency.rs index d3525d9..41940c8 100644 --- a/tests/fifo_concurrency.rs +++ b/tests/fifo_concurrency.rs @@ -7,7 +7,7 @@ use std::thread; use std::time::{Duration, Instant}; mod thread_safe_wrapper { - use cachekit::prelude::FifoCache; + use cachekit::policy::fifo::FifoCache; use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; use super::*; @@ -567,7 +567,7 @@ mod thread_safe_wrapper { // Stress Testing mod stress_testing { - use cachekit::prelude::FifoCache; + use cachekit::policy::fifo::FifoCache; use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; use super::*; diff --git a/tests/lfu_concurrency.rs b/tests/lfu_concurrency.rs index 3e72d6c..519bade 100644 --- a/tests/lfu_concurrency.rs +++ b/tests/lfu_concurrency.rs @@ -379,7 +379,7 @@ mod thread_safety { if i % 2 == 0 { cache_clone.lock().unwrap().peek_lfu(); } else { - cache_clone.lock().unwrap().pop_lfu(); + let _ = cache_clone.lock().unwrap().pop_lfu(); } counts_clone.lock().unwrap().4 += 1; },