diff --git a/CHANGELOG.md b/CHANGELOG.md index 687f41c..c5b28f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Breaking +- Replace `ReadOnlyCache`, `CoreCache`, `MutableCache`, `FifoCacheTrait`, `LruCacheTrait`, `LfuCacheTrait`, `LrukCacheTrait` with a single unified `Cache` trait. +- Add five optional capability traits: `EvictingCache`, `VictimInspectable`, `RecencyTracking`, `FrequencyTracking`, `HistoryTracking`. +- Policy-specific methods (`pop_oldest`, `pop_lru`, `pop_lfu`, `pop_lru_k`, `age_rank`, etc.) are now inherent methods, not trait methods. +- Rename `builder::Cache` to `builder::DynCache` to avoid collision with the new `traits::Cache` trait. +- Remove `CacheTierManager` and `CacheTier` traits (no existing implementations). +- All 18 policies now implement `Cache` with universal `peek` and `remove` support. + ## [0.6.0] - 2026-03-31 ### Breaking diff --git a/README.md b/README.md index 9f42f85..43f7361 100644 --- a/README.md +++ b/README.md @@ -218,7 +218,7 @@ For advanced use cases requiring policy-specific operations, use the underlying ```rust use std::sync::Arc; use cachekit::policy::lru::LruCore; -use cachekit::traits::{CoreCache, LruCacheTrait}; +use cachekit::traits::Cache; fn main() { // LRU with policy-specific operations diff --git a/bench-support/src/metrics.rs b/bench-support/src/metrics.rs index 8d96bfd..471c0e7 100644 --- a/bench-support/src/metrics.rs +++ b/bench-support/src/metrics.rs @@ -9,7 +9,7 @@ use std::time::{Duration, Instant}; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; use rand::SeedableRng; use crate::workload::WorkloadSpec; @@ -279,7 +279,7 @@ pub fn run_benchmark( value_for_key: F, ) -> BenchmarkResult where - C: CoreCache, + C: Cache, F: Fn(u64) -> V, { let mut generator = config.workload.generator(); @@ -386,7 +386,7 @@ pub fn measure_scan_resistance( value_for_key: F, ) -> 
ScanResistanceResult where - C: CoreCache, + C: Cache, F: Fn(u64) -> V, { let warmup_ops = capacity * 2; @@ -496,7 +496,7 @@ pub fn measure_adaptation_speed( value_for_key: F, ) -> AdaptationResult where - C: CoreCache, + C: Cache, F: Fn(u64) -> V, { let warmup_ops = capacity * 2; diff --git a/bench-support/src/operation.rs b/bench-support/src/operation.rs index 1570a7a..f5cbbbe 100644 --- a/bench-support/src/operation.rs +++ b/bench-support/src/operation.rs @@ -5,7 +5,7 @@ use std::sync::Arc; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; use rand::rngs::SmallRng; use rand::{RngExt, SeedableRng}; @@ -87,7 +87,7 @@ pub fn run_operations( value_for_key: F, ) -> OpCounts where - C: CoreCache>, + C: Cache>, F: Fn(u64) -> Arc, M: OpModel, { @@ -110,7 +110,7 @@ fn apply_op( counts: &mut OpCounts, op: Operation, ) where - C: CoreCache>, + C: Cache>, F: Fn(u64) -> Arc, M: OpModel, { diff --git a/bench-support/src/workload.rs b/bench-support/src/workload.rs index 2634b58..0787850 100644 --- a/bench-support/src/workload.rs +++ b/bench-support/src/workload.rs @@ -6,7 +6,7 @@ use std::sync::Arc; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; use rand::rngs::SmallRng; use rand::{RngExt, SeedableRng}; use rand_distr::{Distribution, Exp, Pareto as ParetoDistr, Zipf}; @@ -495,7 +495,7 @@ pub fn run_hit_rate( value_for_key: F, ) -> HitRate where - C: CoreCache>, + C: Cache>, F: Fn(u64) -> Arc, { let mut op_model = ReadThrough::new(1.0, 0); diff --git a/benches/README.md b/benches/README.md index 15c053d..76811a5 100644 --- a/benches/README.md +++ b/benches/README.md @@ -176,7 +176,7 @@ To add a new workload: ### Adding new policies fails - Check `for_each_policy!` macro syntax -- Verify policy implements `CoreCache` trait +- Verify policy implements `Cache` trait - Ensure type parameters match (some policies use `Arc`, others use `V`) ## See Also diff --git a/benches/comparison.rs b/benches/comparison.rs index b4ee313..69b5162 100644 --- 
a/benches/comparison.rs +++ b/benches/comparison.rs @@ -18,7 +18,7 @@ use cachekit::policy::lru::LruCore; use cachekit::policy::lru_k::LrukCache; use cachekit::policy::s3_fifo::S3FifoCache; use cachekit::policy::two_q::TwoQCore; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; const CAPACITY: usize = 4096; const OPS: u64 = 100_000; diff --git a/benches/ops.rs b/benches/ops.rs index 358db44..63528a4 100644 --- a/benches/ops.rs +++ b/benches/ops.rs @@ -18,7 +18,7 @@ use cachekit::policy::lru::LruCore; use cachekit::policy::lru_k::LrukCache; use cachekit::policy::s3_fifo::S3FifoCache; use cachekit::policy::two_q::TwoQCore; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; use criterion::{Criterion, Throughput, criterion_group, criterion_main}; const CAPACITY: usize = 16_384; diff --git a/benches/policy/lfu.rs b/benches/policy/lfu.rs index 45adfc4..9750c8f 100644 --- a/benches/policy/lfu.rs +++ b/benches/policy/lfu.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use cachekit::ds::FrequencyBucketsHandle; use cachekit::policy::lfu::LfuCache; -use cachekit::traits::{CoreCache, LfuCacheTrait}; +use cachekit::traits::Cache; use criterion::{BatchSize, Criterion, Throughput, criterion_group, criterion_main}; // ============================================================================ diff --git a/benches/policy/lru.rs b/benches/policy/lru.rs index 2694ea7..20880f8 100644 --- a/benches/policy/lru.rs +++ b/benches/policy/lru.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use cachekit::policy::lru::LruCore; -use cachekit::traits::{CoreCache, LruCacheTrait}; +use cachekit::traits::Cache; use criterion::{BatchSize, Criterion, Throughput, criterion_group, criterion_main}; // ============================================================================ diff --git a/benches/policy/lru_k.rs b/benches/policy/lru_k.rs index c16e0f3..7aa4ade 100644 --- a/benches/policy/lru_k.rs +++ b/benches/policy/lru_k.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use 
cachekit::policy::lru_k::LrukCache; -use cachekit::traits::{CoreCache, LrukCacheTrait}; +use cachekit::traits::Cache; use criterion::{BatchSize, Criterion, Throughput, criterion_group, criterion_main}; // ============================================================================ diff --git a/benches/policy/s3_fifo.rs b/benches/policy/s3_fifo.rs index ee7bc19..ba3b28b 100644 --- a/benches/policy/s3_fifo.rs +++ b/benches/policy/s3_fifo.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use cachekit::policy::s3_fifo::S3FifoCache; #[allow(unused_imports)] -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; use criterion::{BatchSize, Criterion, Throughput, criterion_group, criterion_main}; // ============================================================================ diff --git a/benches/reports.rs b/benches/reports.rs index 20d2ab8..11926b0 100644 --- a/benches/reports.rs +++ b/benches/reports.rs @@ -8,7 +8,7 @@ use bench_support::for_each_policy; use std::sync::Arc; -use cachekit::traits::{CoreCache, ReadOnlyCache}; +use cachekit::traits::Cache; use common::metrics::{ BenchmarkConfig, PolicyComparison, estimate_entry_overhead, measure_adaptation_speed, measure_scan_resistance, run_benchmark, standard_workload_suite, @@ -70,7 +70,7 @@ fn main() { // Helper functions // ============================================================================ -fn run_workload>>( +fn run_workload>>( cache: &mut C, workload_case: &common::registry::WorkloadCase, ) -> f64 { diff --git a/examples/basic_fifo.rs b/examples/basic_fifo.rs index e531959..8a7e1dd 100644 --- a/examples/basic_fifo.rs +++ b/examples/basic_fifo.rs @@ -1,5 +1,5 @@ use cachekit::policy::fifo::FifoCache; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; fn main() { // Create a FIFO cache with a capacity of 100 entries diff --git a/examples/basic_heap_lfu.rs b/examples/basic_heap_lfu.rs index 825ffca..facf7d6 100644 --- a/examples/basic_heap_lfu.rs +++ b/examples/basic_heap_lfu.rs @@ -1,7 +1,7 @@ use 
std::sync::Arc; use cachekit::policy::heap_lfu::HeapLfuCache; -use cachekit::traits::CoreCache; +use cachekit::traits::Cache; fn main() { let mut cache: HeapLfuCache<&str, String> = HeapLfuCache::new(2); diff --git a/examples/basic_lfu.rs b/examples/basic_lfu.rs index 9338d35..13280bb 100644 --- a/examples/basic_lfu.rs +++ b/examples/basic_lfu.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use cachekit::policy::lfu::LfuCache; -use cachekit::traits::{CoreCache, ReadOnlyCache}; +use cachekit::traits::Cache; fn main() { let mut cache: LfuCache<&str, String> = LfuCache::new(2); diff --git a/examples/basic_lru.rs b/examples/basic_lru.rs index 66bfc1a..a70a87a 100644 --- a/examples/basic_lru.rs +++ b/examples/basic_lru.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use cachekit::policy::lru::LruCore; -use cachekit::traits::{CoreCache, ReadOnlyCache}; +use cachekit::traits::Cache; fn main() { let mut cache: LruCore = LruCore::new(2); diff --git a/examples/basic_lru_k.rs b/examples/basic_lru_k.rs index fd3d35e..dfe550e 100644 --- a/examples/basic_lru_k.rs +++ b/examples/basic_lru_k.rs @@ -1,5 +1,5 @@ use cachekit::policy::lru_k::LrukCache; -use cachekit::traits::{CoreCache, ReadOnlyCache}; +use cachekit::traits::Cache; fn main() { let mut cache: LrukCache<&str, i32> = LrukCache::with_k(2, 2); diff --git a/examples/basic_nru.rs b/examples/basic_nru.rs index 02ca694..b0d6ecd 100644 --- a/examples/basic_nru.rs +++ b/examples/basic_nru.rs @@ -6,7 +6,7 @@ //! 
Run with: cargo run --example basic_nru use cachekit::policy::nru::NruCache; -use cachekit::traits::{CoreCache, ReadOnlyCache}; +use cachekit::traits::Cache; fn main() { println!("=== NRU (Not Recently Used) Cache Example ===\n"); diff --git a/examples/dhat_profile.rs b/examples/dhat_profile.rs index 41c16b5..501783c 100644 --- a/examples/dhat_profile.rs +++ b/examples/dhat_profile.rs @@ -14,7 +14,7 @@ use cachekit::policy::lfu::LfuCache; use cachekit::policy::lru::LruCore; use cachekit::policy::lru_k::LrukCache; use cachekit::policy::two_q::TwoQCore; -use cachekit::traits::{CoreCache, ReadOnlyCache}; +use cachekit::traits::Cache; /// Simple XorShift64 RNG for deterministic workloads. struct XorShift64 { @@ -42,7 +42,7 @@ impl XorShift64 { } /// Run a hotset workload: 90% of accesses hit 10% of keys. -fn hotset_workload>>( +fn hotset_workload>>( cache: &mut C, operations: usize, universe: u64, @@ -67,7 +67,7 @@ fn hotset_workload>>( } /// Run a scan workload: sequential access pattern. -fn scan_workload>>(cache: &mut C, operations: usize, universe: u64) { +fn scan_workload>>(cache: &mut C, operations: usize, universe: u64) { for i in 0..operations { let key = (i as u64) % universe; if cache.get(&key).is_none() { @@ -77,7 +77,7 @@ fn scan_workload>>(cache: &mut C, operations: usize, } /// Run eviction churn: insert more items than capacity. -fn eviction_churn>>(cache: &mut C, operations: usize) { +fn eviction_churn>>(cache: &mut C, operations: usize) { for i in 0..operations { let _ = cache.insert(i as u64, Arc::new(i as u64)); } diff --git a/src/builder.rs b/src/builder.rs index aa6ca9b..6a4336f 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -33,10 +33,12 @@ //! │ └─── CachePolicy::Nru ─────► NruCache │ //! │ │ //! │ ▼ │ -//! │ Cache (unified wrapper) │ +//! │ DynCache (unified wrapper) │ //! │ ┌─────────────────────────────────────────────────────────────────────┐ │ //! │ │ .insert(key, value) → Option │ │ //! │ │ .get(&key) → Option<&V> │ │ +//! 
│ │ .peek(&key) → Option<&V> │ │ +//! │ │ .remove(&key) → Option │ │ //! │ │ .contains(&key) → bool │ │ //! │ │ .len() / .is_empty() → usize / bool │ │ //! │ │ .capacity() → usize │ │ @@ -141,7 +143,7 @@ use crate::policy::s3_fifo::S3FifoCache; use crate::policy::slru::SlruCore; #[cfg(feature = "policy-two-q")] use crate::policy::two_q::TwoQCore; -use crate::traits::{CoreCache, ReadOnlyCache}; +use crate::traits::Cache as CacheTrait; /// Available cache eviction policies. /// @@ -415,7 +417,7 @@ pub enum CachePolicy { /// cache.clear(); /// assert!(cache.is_empty()); /// ``` -pub struct Cache +pub struct DynCache where K: Copy + Eq + Hash + Ord, V: Clone + Debug, @@ -464,7 +466,7 @@ where Nru(NruCache), } -impl Cache +impl DynCache where K: Copy + Eq + Hash + Ord, V: Clone + Debug, @@ -490,7 +492,7 @@ where pub fn insert(&mut self, key: K, value: V) -> Option { match &mut self.inner { #[cfg(feature = "policy-fifo")] - CacheInner::Fifo(fifo) => CoreCache::insert(fifo, key, value), + CacheInner::Fifo(fifo) => fifo.insert(key, value), #[cfg(feature = "policy-lru")] CacheInner::Lru(lru) => { let arc_value = Arc::new(value); @@ -500,7 +502,7 @@ where #[cfg(feature = "policy-fast-lru")] CacheInner::FastLru(fast_lru) => fast_lru.insert(key, value), #[cfg(feature = "policy-lru-k")] - CacheInner::LruK(lruk) => CoreCache::insert(lruk, key, value), + CacheInner::LruK(lruk) => lruk.insert(key, value), #[cfg(feature = "policy-lfu")] CacheInner::Lfu(lfu) => { let arc_value = Arc::new(value); @@ -515,27 +517,27 @@ where .map(|arc| Arc::try_unwrap(arc).unwrap_or_else(|arc| (*arc).clone())) }, #[cfg(feature = "policy-two-q")] - CacheInner::TwoQ(twoq) => CoreCache::insert(twoq, key, value), + CacheInner::TwoQ(twoq) => twoq.insert(key, value), #[cfg(feature = "policy-s3-fifo")] - CacheInner::S3Fifo(s3fifo) => CoreCache::insert(s3fifo, key, value), + CacheInner::S3Fifo(s3fifo) => s3fifo.insert(key, value), #[cfg(feature = "policy-arc")] - CacheInner::Arc(arc) => CoreCache::insert(arc, 
key, value), + CacheInner::Arc(arc) => arc.insert(key, value), #[cfg(feature = "policy-lifo")] - CacheInner::Lifo(lifo) => CoreCache::insert(lifo, key, value), + CacheInner::Lifo(lifo) => lifo.insert(key, value), #[cfg(feature = "policy-mfu")] - CacheInner::Mfu(mfu) => CoreCache::insert(mfu, key, value), + CacheInner::Mfu(mfu) => mfu.insert(key, value), #[cfg(feature = "policy-mru")] - CacheInner::Mru(mru) => CoreCache::insert(mru, key, value), + CacheInner::Mru(mru) => mru.insert(key, value), #[cfg(feature = "policy-random")] - CacheInner::Random(random) => CoreCache::insert(random, key, value), + CacheInner::Random(random) => random.insert(key, value), #[cfg(feature = "policy-slru")] - CacheInner::Slru(slru) => CoreCache::insert(slru, key, value), + CacheInner::Slru(slru) => slru.insert(key, value), #[cfg(feature = "policy-clock")] - CacheInner::Clock(clock) => CoreCache::insert(clock, key, value), + CacheInner::Clock(clock) => clock.insert(key, value), #[cfg(feature = "policy-clock-pro")] - CacheInner::ClockPro(clock_pro) => CoreCache::insert(clock_pro, key, value), + CacheInner::ClockPro(clock_pro) => clock_pro.insert(key, value), #[cfg(feature = "policy-nru")] - CacheInner::Nru(nru) => CoreCache::insert(nru, key, value), + CacheInner::Nru(nru) => nru.insert(key, value), } } @@ -664,13 +666,13 @@ where pub fn len(&self) -> usize { match &self.inner { #[cfg(feature = "policy-fifo")] - CacheInner::Fifo(fifo) => >::len(fifo), + CacheInner::Fifo(fifo) => fifo.len(), #[cfg(feature = "policy-lru")] CacheInner::Lru(lru) => lru.len(), #[cfg(feature = "policy-fast-lru")] CacheInner::FastLru(fast_lru) => fast_lru.len(), #[cfg(feature = "policy-lru-k")] - CacheInner::LruK(lruk) => >::len(lruk), + CacheInner::LruK(lruk) => lruk.len(), #[cfg(feature = "policy-lfu")] CacheInner::Lfu(lfu) => lfu.len(), #[cfg(feature = "policy-heap-lfu")] @@ -682,21 +684,21 @@ where #[cfg(feature = "policy-arc")] CacheInner::Arc(arc) => arc.len(), #[cfg(feature = "policy-lifo")] - 
CacheInner::Lifo(lifo) => >::len(lifo), + CacheInner::Lifo(lifo) => lifo.len(), #[cfg(feature = "policy-mfu")] CacheInner::Mfu(mfu) => mfu.len(), #[cfg(feature = "policy-mru")] CacheInner::Mru(mru) => mru.len(), #[cfg(feature = "policy-random")] - CacheInner::Random(random) => >::len(random), + CacheInner::Random(random) => random.len(), #[cfg(feature = "policy-slru")] CacheInner::Slru(slru) => slru.len(), #[cfg(feature = "policy-clock")] - CacheInner::Clock(clock) => >::len(clock), + CacheInner::Clock(clock) => clock.len(), #[cfg(feature = "policy-clock-pro")] - CacheInner::ClockPro(clock_pro) => >::len(clock_pro), + CacheInner::ClockPro(clock_pro) => clock_pro.len(), #[cfg(feature = "policy-nru")] - CacheInner::Nru(nru) => >::len(nru), + CacheInner::Nru(nru) => nru.len(), } } @@ -730,13 +732,13 @@ where pub fn capacity(&self) -> usize { match &self.inner { #[cfg(feature = "policy-fifo")] - CacheInner::Fifo(fifo) => >::capacity(fifo), + CacheInner::Fifo(fifo) => fifo.capacity(), #[cfg(feature = "policy-lru")] CacheInner::Lru(lru) => lru.capacity(), #[cfg(feature = "policy-fast-lru")] CacheInner::FastLru(fast_lru) => fast_lru.capacity(), #[cfg(feature = "policy-lru-k")] - CacheInner::LruK(lruk) => >::capacity(lruk), + CacheInner::LruK(lruk) => lruk.capacity(), #[cfg(feature = "policy-lfu")] CacheInner::Lfu(lfu) => lfu.capacity(), #[cfg(feature = "policy-heap-lfu")] @@ -748,21 +750,21 @@ where #[cfg(feature = "policy-arc")] CacheInner::Arc(arc) => arc.capacity(), #[cfg(feature = "policy-lifo")] - CacheInner::Lifo(lifo) => >::capacity(lifo), + CacheInner::Lifo(lifo) => lifo.capacity(), #[cfg(feature = "policy-mfu")] CacheInner::Mfu(mfu) => mfu.capacity(), #[cfg(feature = "policy-mru")] CacheInner::Mru(mru) => mru.capacity(), #[cfg(feature = "policy-random")] - CacheInner::Random(random) => >::capacity(random), + CacheInner::Random(random) => random.capacity(), #[cfg(feature = "policy-slru")] CacheInner::Slru(slru) => slru.capacity(), #[cfg(feature = "policy-clock")] 
- CacheInner::Clock(clock) => >::capacity(clock), + CacheInner::Clock(clock) => clock.capacity(), #[cfg(feature = "policy-clock-pro")] - CacheInner::ClockPro(clock_pro) => >::capacity(clock_pro), + CacheInner::ClockPro(clock_pro) => clock_pro.capacity(), #[cfg(feature = "policy-nru")] - CacheInner::Nru(nru) => >::capacity(nru), + CacheInner::Nru(nru) => nru.capacity(), } } @@ -820,9 +822,97 @@ where CacheInner::Nru(nru) => nru.clear(), } } + + /// Side-effect-free lookup by key. + /// + /// Does not update access patterns, eviction order, or any internal state. + /// Use [`get`](Self::get) if you need a policy-tracked read. + pub fn peek(&self, key: &K) -> Option<&V> { + match &self.inner { + #[cfg(feature = "policy-fifo")] + CacheInner::Fifo(fifo) => CacheTrait::peek(fifo, key), + #[cfg(feature = "policy-lru")] + CacheInner::Lru(lru) => CacheTrait::peek(lru, key).map(|arc| arc.as_ref()), + #[cfg(feature = "policy-fast-lru")] + CacheInner::FastLru(fast_lru) => fast_lru.peek(key), + #[cfg(feature = "policy-lru-k")] + CacheInner::LruK(lruk) => CacheTrait::peek(lruk, key), + #[cfg(feature = "policy-lfu")] + CacheInner::Lfu(lfu) => CacheTrait::peek(lfu, key).map(|arc| arc.as_ref()), + #[cfg(feature = "policy-heap-lfu")] + CacheInner::HeapLfu(heap_lfu) => { + CacheTrait::peek(heap_lfu, key).map(|arc| arc.as_ref()) + }, + #[cfg(feature = "policy-two-q")] + CacheInner::TwoQ(twoq) => CacheTrait::peek(twoq, key), + #[cfg(feature = "policy-s3-fifo")] + CacheInner::S3Fifo(s3fifo) => s3fifo.peek(key), + #[cfg(feature = "policy-arc")] + CacheInner::Arc(arc) => CacheTrait::peek(arc, key), + #[cfg(feature = "policy-lifo")] + CacheInner::Lifo(lifo) => lifo.peek(key), + #[cfg(feature = "policy-mfu")] + CacheInner::Mfu(mfu) => CacheTrait::peek(mfu, key), + #[cfg(feature = "policy-mru")] + CacheInner::Mru(mru) => CacheTrait::peek(mru, key), + #[cfg(feature = "policy-random")] + CacheInner::Random(random) => random.peek(key), + #[cfg(feature = "policy-slru")] + CacheInner::Slru(slru) 
=> slru.peek(key), + #[cfg(feature = "policy-clock")] + CacheInner::Clock(clock) => CacheTrait::peek(clock, key), + #[cfg(feature = "policy-clock-pro")] + CacheInner::ClockPro(clock_pro) => CacheTrait::peek(clock_pro, key), + #[cfg(feature = "policy-nru")] + CacheInner::Nru(nru) => CacheTrait::peek(nru, key), + } + } + + /// Removes a specific key-value pair, returning the value if it existed. + pub fn remove(&mut self, key: &K) -> Option { + match &mut self.inner { + #[cfg(feature = "policy-fifo")] + CacheInner::Fifo(fifo) => CacheTrait::remove(fifo, key), + #[cfg(feature = "policy-lru")] + CacheInner::Lru(lru) => CacheTrait::remove(lru, key) + .map(|arc| Arc::try_unwrap(arc).unwrap_or_else(|arc| (*arc).clone())), + #[cfg(feature = "policy-fast-lru")] + CacheInner::FastLru(fast_lru) => fast_lru.remove(key), + #[cfg(feature = "policy-lru-k")] + CacheInner::LruK(lruk) => CacheTrait::remove(lruk, key), + #[cfg(feature = "policy-lfu")] + CacheInner::Lfu(lfu) => CacheTrait::remove(lfu, key) + .map(|arc| Arc::try_unwrap(arc).unwrap_or_else(|arc| (*arc).clone())), + #[cfg(feature = "policy-heap-lfu")] + CacheInner::HeapLfu(heap_lfu) => CacheTrait::remove(heap_lfu, key) + .map(|arc| Arc::try_unwrap(arc).unwrap_or_else(|arc| (*arc).clone())), + #[cfg(feature = "policy-two-q")] + CacheInner::TwoQ(twoq) => CacheTrait::remove(twoq, key), + #[cfg(feature = "policy-s3-fifo")] + CacheInner::S3Fifo(s3fifo) => s3fifo.remove(key), + #[cfg(feature = "policy-arc")] + CacheInner::Arc(arc) => CacheTrait::remove(arc, key), + #[cfg(feature = "policy-lifo")] + CacheInner::Lifo(lifo) => CacheTrait::remove(lifo, key), + #[cfg(feature = "policy-mfu")] + CacheInner::Mfu(mfu) => mfu.remove(key), + #[cfg(feature = "policy-mru")] + CacheInner::Mru(mru) => CacheTrait::remove(mru, key), + #[cfg(feature = "policy-random")] + CacheInner::Random(random) => random.remove(key), + #[cfg(feature = "policy-slru")] + CacheInner::Slru(slru) => CacheTrait::remove(slru, key), + #[cfg(feature = 
"policy-clock")] + CacheInner::Clock(clock) => CacheTrait::remove(clock, key), + #[cfg(feature = "policy-clock-pro")] + CacheInner::ClockPro(clock_pro) => CacheTrait::remove(clock_pro, key), + #[cfg(feature = "policy-nru")] + CacheInner::Nru(nru) => CacheTrait::remove(nru, key), + } + } } -impl fmt::Debug for Cache +impl fmt::Debug for DynCache where K: Copy + Eq + Hash + Ord, V: Clone + Debug, @@ -864,7 +954,7 @@ where #[cfg(feature = "policy-nru")] CacheInner::Nru(_) => "Nru", }; - f.debug_struct("Cache") + f.debug_struct("DynCache") .field("policy", &policy) .field("len", &self.len()) .field("capacity", &self.capacity()) @@ -933,7 +1023,7 @@ impl CacheBuilder { /// // 2Q with 25% probation /// let cache = CacheBuilder::new(100).build::(CachePolicy::TwoQ { probation_frac: 0.25 }); /// ``` - pub fn build(self, policy: CachePolicy) -> Cache + pub fn build(self, policy: CachePolicy) -> DynCache where K: Copy + Eq + Hash + Ord, V: Clone + Debug, @@ -993,7 +1083,7 @@ impl CacheBuilder { CachePolicy::Nru => CacheInner::Nru(NruCache::new(self.capacity)), }; - Cache { inner } + DynCache { inner } } fn validate_policy(&self, policy: &CachePolicy) { @@ -1139,7 +1229,7 @@ mod tests { let mut cache = CacheBuilder::new(10).build::(CachePolicy::Lru); cache.insert(1, "one".to_string()); let debug = format!("{:?}", cache); - assert!(debug.contains("Cache")); + assert!(debug.contains("DynCache")); assert!(debug.contains("Lru")); assert!(debug.contains("len: 1")); } @@ -1167,7 +1257,7 @@ mod tests { }); } - // Cache is Send+Sync only when policy-fast-lru is disabled: + // DynCache is Send+Sync only when policy-fast-lru is disabled: // FastLru uses NonNull for single-threaded performance, which is !Send + !Sync. 
#[cfg(all(feature = "policy-lru", not(feature = "policy-fast-lru")))] #[allow(dead_code)] @@ -1175,8 +1265,8 @@ mod tests { fn assert_send() {} fn assert_sync() {} fn check() { - assert_send::>(); - assert_sync::>(); + assert_send::>(); + assert_sync::>(); } }; diff --git a/src/lib.rs b/src/lib.rs index ad32135..eb19423 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,7 +20,7 @@ //! //! ``` //! use cachekit::policy::lru_k::LrukCache; -//! use cachekit::traits::{CoreCache, LrukCacheTrait}; +//! use cachekit::traits::Cache; //! //! let mut cache = LrukCache::with_k(1000, 2); //! cache.insert(42, "value"); @@ -34,8 +34,8 @@ //! ┌──────────────────────────────────────────────────────────────────────┐ //! │ cachekit │ //! │ │ -//! │ traits Trait hierarchy (ReadOnlyCache → CoreCache → …) │ -//! │ builder Unified CacheBuilder + Cache wrapper │ +//! │ traits Cache trait + capability traits │ +//! │ builder CacheBuilder + DynCache runtime wrapper │ //! │ policy 18 eviction policies behind feature flags │ //! │ ds Arena, ring buffer, intrusive list, ghost list, … │ //! │ store Storage backends (HashMap, slab, weighted) │ @@ -51,26 +51,26 @@ //! //! # Trait Hierarchy //! -//! All caches implement [`traits::CoreCache`], which extends -//! [`traits::ReadOnlyCache`]. Policy-specific behaviour is expressed through -//! additional traits: +//! All 18 caches implement [`traits::Cache`], which provides the full +//! CRUD surface (`contains`, `len`, `capacity`, `peek`, `get`, `insert`, +//! `remove`, `clear`). Optional capabilities are expressed as extension +//! traits: //! //! | Trait | Extends | Purpose | //! |---|---|---| -//! | [`ReadOnlyCache`](traits::ReadOnlyCache) | — | `contains`, `len`, `capacity` (no side effects) | -//! | [`CoreCache`](traits::CoreCache) | `ReadOnlyCache` | `insert`, `get`, `clear` | -//! | [`MutableCache`](traits::MutableCache) | `CoreCache` | `remove` (not available on FIFO) | -//! 
| [`FifoCacheTrait`](traits::FifoCacheTrait) | `CoreCache` | `pop_oldest`, `age_rank` | -//! | [`LruCacheTrait`](traits::LruCacheTrait) | `MutableCache` | `pop_lru`, `touch`, `recency_rank` | -//! | [`LfuCacheTrait`](traits::LfuCacheTrait) | `MutableCache` | `pop_lfu`, `frequency` | -//! | [`LrukCacheTrait`](traits::LrukCacheTrait) | `MutableCache` | `pop_lru_k`, `k_distance` | +//! | [`Cache`](traits::Cache) | — | Core CRUD: `peek`, `get`, `insert`, `remove`, `clear`, `len`, `capacity` | +//! | [`EvictingCache`](traits::EvictingCache) | `Cache` | `evict_one` — explicit single-item eviction | +//! | [`VictimInspectable`](traits::VictimInspectable) | `Cache` | `peek_victim` — inspect next eviction candidate | +//! | [`RecencyTracking`](traits::RecencyTracking) | `Cache` | `touch`, `recency_rank` | +//! | [`FrequencyTracking`](traits::FrequencyTracking) | `Cache` | `frequency` | +//! | [`HistoryTracking`](traits::HistoryTracking) | `Cache` | `access_count`, `k_distance`, `access_history`, `k_value` | //! //! Write generic code against the trait you need: //! //! ``` -//! use cachekit::traits::{CoreCache, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! -//! fn utilization>(cache: &C) -> f64 { +//! fn utilization>(cache: &C) -> f64 { //! cache.len() as f64 / cache.capacity() as f64 //! } //! ``` diff --git a/src/policy/arc.rs b/src/policy/arc.rs index 6f10e17..086fc93 100644 --- a/src/policy/arc.rs +++ b/src/policy/arc.rs @@ -121,7 +121,7 @@ //! //! ``` //! use cachekit::policy::arc::ArcCore; -//! use cachekit::traits::{CoreCache, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! //! // Create ARC cache with 100 entry capacity //! 
let mut cache = ArcCore::new(100); @@ -173,8 +173,7 @@ use crate::metrics::metrics_impl::ArcMetrics; use crate::metrics::snapshot::ArcMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{ArcMetricsRecorder, CoreMetricsRecorder, MetricsSnapshotProvider}; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, MutableCache}; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::hash::Hash; use std::iter::FusedIterator; @@ -223,7 +222,7 @@ struct Node { /// /// ``` /// use cachekit::policy::arc::ArcCore; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// // 100 capacity ARC cache /// let mut cache = ArcCore::new(100); @@ -291,8 +290,8 @@ where { } -// SAFETY: Shared references (`&ArcCore`) only expose `ReadOnlyCache` methods -// (`contains`, `len`, `capacity`), none of which dereference the internal +// SAFETY: Shared references (`&ArcCore`) only expose `Cache` read-only methods +// (`contains`, `len`, `capacity`, `peek`), none of which dereference the internal // `NonNull` pointers through interior mutability. Sharing `&ArcCore` across // threads is safe when K and V are Sync. 
unsafe impl Sync for ArcCore @@ -330,7 +329,7 @@ impl ArcCore { /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ArcCore::new(10); /// cache.insert("a", 1); @@ -358,7 +357,7 @@ impl ArcCore { /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = ArcCore::new(10); /// cache.insert("a", 1); @@ -397,7 +396,7 @@ where /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::ReadOnlyCache; + /// use cachekit::traits::Cache; /// /// // 100 capacity ARC cache /// let cache: ArcCore = ArcCore::new(100); @@ -596,7 +595,7 @@ where /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ArcCore::new(100); /// cache.insert("key", "value"); @@ -612,7 +611,7 @@ where /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ArcCore::new(100); /// cache.insert("key", "value"); @@ -629,7 +628,7 @@ where /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = ArcCore::new(2); /// cache.insert("a", 1); @@ -647,7 +646,7 @@ where /// /// ``` /// use cachekit::policy::arc::ArcCore; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = ArcCore::new(2); /// cache.insert("a", 1); @@ -824,7 +823,7 @@ where } } -impl ReadOnlyCache for ArcCore +impl Cache for ArcCore where K: Clone + Eq + Hash, { @@ -839,12 +838,13 @@ where fn capacity(&self) -> usize { self.capacity } -} -impl CoreCache for ArcCore -where - K: Clone + Eq + Hash, -{ + fn peek(&self, key: &K) -> Option<&V> { + self.map + .get(key) + 
.map(|&node_ptr| unsafe { &(*node_ptr.as_ptr()).value }) + } + fn get(&mut self, key: &K) -> Option<&V> { let node_ptr = match self.map.get(key) { Some(&ptr) => ptr, @@ -1009,6 +1009,17 @@ where None } + fn remove(&mut self, key: &K) -> Option { + let node_ptr = self.map.remove(key)?; + + self.detach(node_ptr); + + unsafe { + let node = Box::from_raw(node_ptr.as_ptr()); + Some(node.value) + } + } + fn clear(&mut self) { #[cfg(feature = "metrics")] self.metrics.record_clear(); @@ -1027,22 +1038,6 @@ where } } -impl MutableCache for ArcCore -where - K: Clone + Eq + Hash, -{ - fn remove(&mut self, key: &K) -> Option { - let node_ptr = self.map.remove(key)?; - - self.detach(node_ptr); - - unsafe { - let node = Box::from_raw(node_ptr.as_ptr()); - Some(node.value) - } - } -} - impl Drop for ArcCore { fn drop(&mut self) { self.drop_all_nodes(); diff --git a/src/policy/car.rs b/src/policy/car.rs index f318c47..470010a 100644 --- a/src/policy/car.rs +++ b/src/policy/car.rs @@ -65,7 +65,7 @@ //! //! ``` //! use cachekit::policy::car::CarCore; -//! use cachekit::traits::{CoreCache, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! //! let mut cache = CarCore::new(100); //! 
cache.insert("key1", "value1"); @@ -102,8 +102,7 @@ use crate::metrics::metrics_impl::CarMetrics; use crate::metrics::snapshot::CarMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CarMetricsRecorder, CoreMetricsRecorder, MetricsSnapshotProvider}; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, MutableCache}; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::hash::Hash; use std::iter::FusedIterator; @@ -152,7 +151,7 @@ impl Clone for SlotPayload { /// /// ``` /// use cachekit::policy::car::CarCore; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// let mut cache = CarCore::new(100); /// cache.insert("key1", "value1"); @@ -234,7 +233,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use cachekit::traits::ReadOnlyCache; + /// use cachekit::traits::Cache; /// /// let cache: CarCore = CarCore::new(100); /// assert_eq!(cache.capacity(), 100); @@ -500,7 +499,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = CarCore::new(100); /// cache.insert("key", "value"); @@ -516,7 +515,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = CarCore::new(3); /// cache.insert("a", 1); @@ -539,7 +538,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = CarCore::new(2); /// cache.insert("a", 1); @@ -557,7 +556,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let cache: CarCore = CarCore::new(10); /// assert_eq!(cache.ghost_frequent_len(), 0); @@ -574,7 +573,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use 
cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = CarCore::new(10); /// cache.insert("a", 1); @@ -598,7 +597,7 @@ where /// /// ``` /// use cachekit::policy::car::CarCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = CarCore::new(10); /// cache.insert("a", 1); @@ -812,7 +811,7 @@ where } } -impl ReadOnlyCache for CarCore +impl Cache for CarCore where K: Clone + Eq + Hash, { @@ -827,12 +826,12 @@ where fn capacity(&self) -> usize { self.capacity } -} -impl CoreCache for CarCore -where - K: Clone + Eq + Hash, -{ + fn peek(&self, key: &K) -> Option<&V> { + let &idx = self.index.get(key)?; + self.slots[idx].as_ref().map(|s| &s.value) + } + fn get(&mut self, key: &K) -> Option<&V> { let &idx = match self.index.get(key) { Some(idx) => { @@ -912,6 +911,20 @@ where None } + fn remove(&mut self, key: &K) -> Option { + let idx = self.index.remove(key)?; + let list = self.ring_kind[idx]; + self.unlink(idx); + match list { + Ring::Recent => self.recent_len -= 1, + Ring::Frequent => self.frequent_len -= 1, + } + let slot = self.slots[idx].take()?; + self.referenced[idx] = false; + self.free.push(idx); + Some(slot.value) + } + fn clear(&mut self) { #[cfg(feature = "metrics")] self.metrics.record_clear(); @@ -935,25 +948,6 @@ where } } -impl MutableCache for CarCore -where - K: Clone + Eq + Hash, -{ - fn remove(&mut self, key: &K) -> Option { - let idx = self.index.remove(key)?; - let list = self.ring_kind[idx]; - self.unlink(idx); - match list { - Ring::Recent => self.recent_len -= 1, - Ring::Frequent => self.frequent_len -= 1, - } - let slot = self.slots[idx].take()?; - self.referenced[idx] = false; - self.free.push(idx); - Some(slot.value) - } -} - #[cfg(feature = "metrics")] impl CarCore where diff --git a/src/policy/clock.rs b/src/policy/clock.rs index 0a17119..5d75029 100644 --- a/src/policy/clock.rs +++ b/src/policy/clock.rs @@ -85,7 +85,7 @@ //! //! ``` //! 
use cachekit::policy::clock::ClockCache; -//! use cachekit::traits::{CoreCache, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! //! let mut cache = ClockCache::new(100); //! @@ -111,7 +111,7 @@ use std::hash::Hash; use crate::ds::ClockRing; use crate::ds::clock_ring::{IntoIter, Iter, IterMut}; -use crate::traits::{CoreCache, MutableCache, ReadOnlyCache}; +use crate::traits::{Cache, EvictingCache}; #[cfg(feature = "metrics")] use crate::metrics::metrics_impl::ClockMetrics; @@ -134,7 +134,7 @@ use crate::metrics::traits::MetricsSnapshotProvider; /// /// ``` /// use cachekit::policy::clock::ClockCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(100); /// @@ -157,14 +157,14 @@ where /// Creates a new Clock cache with the specified capacity. /// /// A capacity of `0` is valid and produces a cache that accepts no entries; - /// all [`insert`](CoreCache::insert) calls will return `None` and the + /// all [`insert`](Cache::insert) calls will return `None` and the /// value is silently dropped. 
/// /// # Example /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: ClockCache = ClockCache::new(100); /// assert_eq!(cache.capacity(), 100); @@ -185,7 +185,7 @@ where /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(10); /// assert!(cache.is_empty()); @@ -206,7 +206,7 @@ where /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(10); /// cache.insert("a", 1); @@ -228,7 +228,7 @@ where /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(10); /// cache.insert("a", 1); @@ -253,7 +253,7 @@ where /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(10); /// cache.insert("a", 1); @@ -272,7 +272,7 @@ where /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(10); /// cache.insert("a", 1); @@ -286,7 +286,7 @@ where } } -impl ReadOnlyCache for ClockCache +impl Cache for ClockCache where K: Clone + Eq + Hash, { @@ -301,31 +301,25 @@ where fn capacity(&self) -> usize { self.ring.capacity() } -} -impl CoreCache for ClockCache -where - K: Clone + Eq + Hash, -{ - /// Inserts a key-value pair into the cache. - /// - /// If the key exists, updates the value and sets the reference bit. - /// If at capacity, evicts using the clock algorithm. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = ClockCache::new(2); - /// cache.insert("a", 1); - /// cache.insert("b", 2); - /// - /// // Update existing - /// let old = cache.insert("a", 10); - /// assert_eq!(old, Some(1)); - /// ``` + fn peek(&self, key: &K) -> Option<&V> { + self.ring.peek(key) + } + + #[inline] + fn get(&mut self, key: &K) -> Option<&V> { + let result = self.ring.get(key); + #[cfg(feature = "metrics")] + if result.is_some() { + self.metrics.get_calls += 1; + self.metrics.get_hits += 1; + } else { + self.metrics.get_calls += 1; + self.metrics.get_misses += 1; + } + result + } + #[inline] fn insert(&mut self, key: K, value: V) -> Option { #[cfg(feature = "metrics")] @@ -373,51 +367,11 @@ where None } - /// Gets a reference to the value for a key. - /// - /// Sets the reference bit on access (O(1) - no list operations!). - /// - /// # Example - /// - /// ``` - /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = ClockCache::new(10); - /// cache.insert("key", 42); - /// - /// // Access sets reference bit - this entry gets "second chance" - /// assert_eq!(cache.get(&"key"), Some(&42)); - /// ``` #[inline] - fn get(&mut self, key: &K) -> Option<&V> { - let result = self.ring.get(key); - #[cfg(feature = "metrics")] - if result.is_some() { - self.metrics.get_calls += 1; - self.metrics.get_hits += 1; - } else { - self.metrics.get_calls += 1; - self.metrics.get_misses += 1; - } - result + fn remove(&mut self, key: &K) -> Option { + self.ring.remove(key) } - /// Clears all entries from the cache. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = ClockCache::new(10); - /// cache.insert("a", 1); - /// cache.insert("b", 2); - /// - /// cache.clear(); - /// assert!(cache.is_empty()); - /// ``` fn clear(&mut self) { self.ring.clear(); #[cfg(feature = "metrics")] @@ -428,28 +382,12 @@ where } } -impl MutableCache for ClockCache +impl EvictingCache for ClockCache where K: Clone + Eq + Hash, { - /// Removes a key from the cache. - /// - /// # Example - /// - /// ``` - /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; - /// - /// let mut cache = ClockCache::new(10); - /// cache.insert("key", 42); - /// - /// let removed = cache.remove(&"key"); - /// assert_eq!(removed, Some(42)); - /// assert!(!cache.contains(&"key")); - /// ``` - #[inline] - fn remove(&mut self, key: &K) -> Option { - self.ring.remove(key) + fn evict_one(&mut self) -> Option<(K, V)> { + self.ring.pop_victim() } } @@ -464,7 +402,7 @@ where /// /// ``` /// use cachekit::policy::clock::ClockCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockCache::new(10); /// cache.insert("a", 1); @@ -581,7 +519,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::traits::MutableCache; + use crate::traits::Cache; #[allow(dead_code)] const _: () = { diff --git a/src/policy/clock_pro.rs b/src/policy/clock_pro.rs index f520a86..a6c7837 100644 --- a/src/policy/clock_pro.rs +++ b/src/policy/clock_pro.rs @@ -96,7 +96,7 @@ //! //! ``` //! use cachekit::policy::clock_pro::ClockProCache; -//! use cachekit::traits::{CoreCache, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! //! let mut cache: ClockProCache = ClockProCache::new(100); //! 
@@ -125,8 +125,7 @@ use crate::metrics::snapshot::ClockProMetricsSnapshot; use crate::metrics::traits::{ ClockProMetricsRecorder, CoreMetricsRecorder, MetricsSnapshotProvider, }; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, MutableCache}; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::hash::Hash; @@ -160,13 +159,13 @@ struct GhostEntry { /// (candidates for eviction) pages, plus tracking ghost entries for recently /// evicted cold pages. /// -/// Implements [`CoreCache`], [`ReadOnlyCache`], and [`MutableCache`]. +/// Implements [`Cache`]. /// /// # Example /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(100); /// cache.insert("key", 42); @@ -213,7 +212,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: ClockProCache = ClockProCache::new(100); /// assert_eq!(cache.capacity(), 100); @@ -235,7 +234,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::ReadOnlyCache; + /// use cachekit::traits::Cache; /// /// let cache: ClockProCache = ClockProCache::with_ghost_capacity(100, 200); /// assert_eq!(cache.capacity(), 100); @@ -273,7 +272,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(10); /// assert!(cache.is_empty()); @@ -294,7 +293,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(10); /// cache.insert("a", 1); @@ -311,7 +310,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use 
cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(10); /// cache.insert("a", 1); @@ -328,7 +327,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(2); /// cache.insert("a", 1); @@ -480,35 +479,30 @@ where } } -impl ReadOnlyCache for ClockProCache +impl Cache for ClockProCache where K: Clone + Eq + Hash, { - /// Returns `true` if the cache contains the key. - /// - /// Does not affect the reference bit or page status. #[inline] fn contains(&self, key: &K) -> bool { self.index.contains_key(key) } - /// Returns the number of resident entries in the cache. #[inline] fn len(&self) -> usize { self.len } - /// Returns the maximum capacity of the cache. #[inline] fn capacity(&self) -> usize { self.capacity } -} -impl CoreCache for ClockProCache -where - K: Clone + Eq + Hash, -{ + fn peek(&self, key: &K) -> Option<&V> { + let &slot = self.index.get(key)?; + self.entries[slot].as_ref().map(|e| &e.value) + } + /// Inserts a key-value pair into the cache. 
/// /// New entries start as cold unless the key was recently evicted (ghost hit), @@ -518,7 +512,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(2); /// cache.insert("a", 1); // Inserted as cold @@ -591,7 +585,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(10); /// cache.insert("key", 42); @@ -625,7 +619,7 @@ where /// /// ``` /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = ClockProCache::new(10); /// cache.insert("a", 1); @@ -654,27 +648,7 @@ where self.ghost_hand = 0; self.target_hot_ratio = 0.5; } -} -impl MutableCache for ClockProCache -where - K: Clone + Eq + Hash, -{ - /// Removes a key from the cache. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::clock_pro::ClockProCache; - /// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; - /// - /// let mut cache = ClockProCache::new(10); - /// cache.insert("key", 42); - /// - /// let removed = cache.remove(&"key"); - /// assert_eq!(removed, Some(42)); - /// assert!(!cache.contains(&"key")); - /// ``` #[inline] fn remove(&mut self, key: &K) -> Option { let slot = self.index.remove(key)?; @@ -776,7 +750,6 @@ where #[cfg(test)] mod tests { use super::*; - use crate::traits::MutableCache; #[test] fn test_basic_operations() { diff --git a/src/policy/fast_lru.rs b/src/policy/fast_lru.rs index ad0d927..c921211 100644 --- a/src/policy/fast_lru.rs +++ b/src/policy/fast_lru.rs @@ -28,6 +28,8 @@ use std::hash::Hash; use std::mem; use std::ptr::NonNull; +use crate::traits::{Cache, EvictingCache, RecencyTracking, VictimInspectable}; + #[cfg(feature = "metrics")] use crate::metrics::metrics_impl::LruMetrics; #[cfg(feature = "metrics")] @@ -376,6 +378,36 @@ where } } + /// Returns the recency rank (0 = most recent) for a key. + /// + /// Walks the linked list from head (MRU) to tail counting position. + /// Returns `None` if the key is not found. 
+ pub fn recency_rank(&self, key: &K) -> Option { + #[cfg(feature = "metrics")] + (&self.metrics).record_recency_rank_call(); + + if !self.map.contains_key(key) { + return None; + } + + let mut rank = 0usize; + let mut current = self.head; + while let Some(node_ptr) = current { + #[cfg(feature = "metrics")] + (&self.metrics).record_recency_rank_scan_step(); + + let node = unsafe { &*node_ptr.as_ptr() }; + if &node.key == key { + #[cfg(feature = "metrics")] + (&self.metrics).record_recency_rank_found(); + return Some(rank); + } + rank += 1; + current = node.next; + } + None + } + // ========================================================================= // Internal linked-list operations // ========================================================================= @@ -461,6 +493,84 @@ where } } +impl Cache for FastLru +where + K: Eq + Hash + Clone, +{ + #[inline] + fn contains(&self, key: &K) -> bool { + FastLru::contains(self, key) + } + + #[inline] + fn len(&self) -> usize { + FastLru::len(self) + } + + #[inline] + fn capacity(&self) -> usize { + FastLru::capacity(self) + } + + #[inline] + fn peek(&self, key: &K) -> Option<&V> { + FastLru::peek(self, key) + } + + #[inline] + fn get(&mut self, key: &K) -> Option<&V> { + FastLru::get(self, key) + } + + #[inline] + fn insert(&mut self, key: K, value: V) -> Option { + FastLru::insert(self, key, value) + } + + #[inline] + fn remove(&mut self, key: &K) -> Option { + FastLru::remove(self, key) + } + + fn clear(&mut self) { + FastLru::clear(self); + } +} + +impl EvictingCache for FastLru +where + K: Eq + Hash + Clone, +{ + #[inline] + fn evict_one(&mut self) -> Option<(K, V)> { + self.pop_lru() + } +} + +impl VictimInspectable for FastLru +where + K: Eq + Hash + Clone, +{ + #[inline] + fn peek_victim(&self) -> Option<(&K, &V)> { + self.peek_lru() + } +} + +impl RecencyTracking for FastLru +where + K: Eq + Hash + Clone, +{ + #[inline] + fn touch(&mut self, key: &K) -> bool { + FastLru::touch(self, key) + } + + fn 
recency_rank(&self, key: &K) -> Option { + FastLru::recency_rank(self, key) + } +} + impl Drop for FastLru { fn drop(&mut self) { // Free all nodes diff --git a/src/policy/fifo.rs b/src/policy/fifo.rs index f0f6c2e..285a724 100644 --- a/src/policy/fifo.rs +++ b/src/policy/fifo.rs @@ -127,7 +127,7 @@ //! | `insertion_order` | `VecDeque` | Tracks insertion order | //! | `capacity` | `usize` | Maximum entries | //! -//! ## Core Operations (CoreCache) +//! ## Core Operations (Cache) //! //! | Method | Complexity | Description | //! |------------------|------------|------------------------------------------| @@ -142,7 +142,7 @@ //! //! \* Amortized, may skip stale entries during eviction //! -//! ## FIFO-Specific Operations (FifoCacheTrait) +//! ## FIFO-Specific Operations //! //! | Method | Complexity | Description | //! |-----------------------|------------|--------------------------------------| @@ -193,7 +193,7 @@ //! //! ``` //! use cachekit::policy::fifo::FifoCache; -//! use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! //! let mut cache: FifoCache = FifoCache::new(100); //! @@ -254,12 +254,11 @@ use std::hash::Hash; #[cfg(feature = "concurrency")] use std::sync::Arc; -use crate::prelude::ReadOnlyCache; use crate::store::hashmap::HashMapStore; use crate::store::traits::{StoreCore, StoreMut}; #[cfg(feature = "concurrency")] use crate::traits::ConcurrentCache; -use crate::traits::{CoreCache, FifoCacheTrait}; +use crate::traits::{Cache, EvictingCache, VictimInspectable}; #[cfg(feature = "concurrency")] use parking_lot::RwLock; @@ -352,7 +351,7 @@ where /// /// ``` /// use cachekit::policy::fifo::FifoCache; - /// use cachekit::traits::ReadOnlyCache; + /// use cachekit::traits::Cache; /// /// let cache: FifoCache = FifoCache::new(100); /// assert_eq!(cache.capacity(), 100); @@ -392,14 +391,14 @@ where /// Returns the internal queue length (may include stale entries). 
/// - /// This count may exceed [`len`](ReadOnlyCache::len) when stale + /// This count may exceed [`len`](Cache::len) when stale /// entries are present. Primarily useful for diagnostics. /// /// # Example /// /// ``` /// use cachekit::policy::fifo::FifoCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = FifoCache::new(10); /// cache.insert("a", 1); @@ -418,7 +417,7 @@ where /// /// ``` /// use cachekit::policy::fifo::FifoCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = FifoCache::new(10); /// cache.insert("a", 1); @@ -443,7 +442,7 @@ where /// /// ``` /// use cachekit::policy::fifo::FifoCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = FifoCache::new(10); /// cache.insert("a", 1); @@ -550,7 +549,7 @@ where /// Returns a clone of the value for the given key. /// - /// Because the underlying [`CoreCache::get`] takes `&mut self` (some + /// Because the underlying [`Cache::get`] takes `&mut self` (some /// policies update access metadata), this acquires a **write lock**. /// For FIFO, where `get` is side-effect-free, prefer [`peek`](Self::peek) /// which only takes a read lock. 
@@ -785,9 +784,9 @@ where { } -impl ReadOnlyCache for FifoCache +impl Cache for FifoCache where - K: Eq + Hash, + K: Clone + Eq + Hash, { fn contains(&self, key: &K) -> bool { self.inner.store.contains(key) @@ -800,12 +799,26 @@ where fn capacity(&self) -> usize { self.inner.store.capacity() } -} -impl CoreCache for FifoCache -where - K: Clone + Eq + Hash, -{ + fn peek(&self, key: &K) -> Option<&V> { + self.inner.store.peek(key) + } + + fn get(&mut self, key: &K) -> Option<&V> { + match self.inner.store.get(key) { + Some(value) => { + #[cfg(feature = "metrics")] + self.inner.metrics.record_get_hit(); + Some(value) + }, + None => { + #[cfg(feature = "metrics")] + self.inner.metrics.record_get_miss(); + None + }, + } + } + fn insert(&mut self, key: K, value: V) -> Option { #[cfg(feature = "metrics")] self.inner.metrics.record_insert_call(); @@ -841,19 +854,8 @@ where None } - fn get(&mut self, key: &K) -> Option<&V> { - match self.inner.store.get(key) { - Some(value) => { - #[cfg(feature = "metrics")] - self.inner.metrics.record_get_hit(); - Some(value) - }, - None => { - #[cfg(feature = "metrics")] - self.inner.metrics.record_get_miss(); - None - }, - } + fn remove(&mut self, key: &K) -> Option { + self.inner.store.remove(key) } fn clear(&mut self) { @@ -865,11 +867,16 @@ where } } -impl FifoCacheTrait for FifoCache +// FIFO-specific inherent methods (eviction order inspection and manipulation). +impl FifoCache where K: Clone + Eq + Hash, { - fn pop_oldest(&mut self) -> Option<(K, V)> { + /// Removes and returns the oldest entry. + /// + /// Skips stale entries (keys present in the queue but not the store). + /// Returns `None` if the cache is empty. + pub fn pop_oldest(&mut self) -> Option<(K, V)> { #[cfg(feature = "metrics")] self.inner.metrics.record_pop_oldest_call(); @@ -887,7 +894,10 @@ where None } - fn peek_oldest(&self) -> Option<(&K, &V)> { + /// Peeks at the oldest entry without removing it. + /// + /// Scans past stale entries. 
Returns `None` if the cache is empty. + pub fn peek_oldest(&self) -> Option<(&K, &V)> { #[cfg(feature = "metrics")] (&self.inner.metrics).record_peek_oldest_call(); @@ -901,7 +911,8 @@ where None } - fn pop_oldest_batch_into(&mut self, count: usize, out: &mut Vec<(K, V)>) { + /// Removes up to `count` oldest entries into the provided buffer. + pub fn pop_oldest_batch_into(&mut self, count: usize, out: &mut Vec<(K, V)>) { out.reserve(count.min(self.len())); for _ in 0..count { match self.pop_oldest() { @@ -911,7 +922,17 @@ where } } - fn age_rank(&self, key: &K) -> Option { + /// Removes up to `count` oldest entries. + pub fn pop_oldest_batch(&mut self, count: usize) -> Vec<(K, V)> { + let mut out = Vec::new(); + self.pop_oldest_batch_into(count, &mut out); + out + } + + /// Returns the age rank of a key (0 = oldest). + /// + /// Returns `None` if the key is not in the cache. + pub fn age_rank(&self, key: &K) -> Option { #[cfg(feature = "metrics")] (&self.inner.metrics).record_age_rank_call(); @@ -933,6 +954,24 @@ where } } +impl EvictingCache for FifoCache +where + K: Clone + Eq + Hash, +{ + fn evict_one(&mut self) -> Option<(K, V)> { + self.pop_oldest() + } +} + +impl VictimInspectable for FifoCache +where + K: Clone + Eq + Hash, +{ + fn peek_victim(&self) -> Option<(&K, &V)> { + self.peek_oldest() + } +} + #[cfg(feature = "metrics")] impl FifoCache where @@ -944,7 +983,7 @@ where /// /// ``` /// use cachekit::policy::fifo::FifoCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = FifoCache::new(10); /// cache.insert("key", 42); @@ -1034,7 +1073,7 @@ mod tests { use std::collections::HashSet; use crate::policy::fifo::FifoCache; - use crate::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; + use crate::traits::Cache; // C-SEND-SYNC: Verify auto-trait derivation is not accidentally broken. 
fn _assert_send() {} diff --git a/src/policy/heap_lfu.rs b/src/policy/heap_lfu.rs index f398739..aea07f5 100644 --- a/src/policy/heap_lfu.rs +++ b/src/policy/heap_lfu.rs @@ -182,7 +182,7 @@ //! ```rust,ignore //! use cachekit::policy::heap_lfu::HeapLfuCache; //! use std::sync::Arc; -//! use cachekit::traits::{CoreCache, MutableCache, LfuCacheTrait}; +//! use cachekit::traits::{Cache, EvictingCache, FrequencyTracking}; //! //! // Create cache //! let mut cache: HeapLfuCache = HeapLfuCache::new(100); @@ -235,19 +235,18 @@ //! ## Implementation Notes //! //! - **Stale entries**: Accumulate in heap, cleaned lazily during -//! [`pop_lfu()`](LfuCacheTrait::pop_lfu) +//! [`pop_lfu()`](HeapLfuCache::pop_lfu) //! - **Bounded rebuilds**: Heap is rebuilt when size exceeds //! `MAX_HEAP_FACTOR × live_entries` -//! - **[`peek_lfu()`](LfuCacheTrait::peek_lfu)**: Falls back to O(n) scan +//! - **[`peek_lfu()`](HeapLfuCache::peek_lfu)**: Falls back to O(n) scan //! (avoiding heap borrow issues) //! - **Memory overhead**: ~3× standard LFU due to three data structures //! - **[`Reverse`] wrapper**: Converts max-heap to //! min-heap for LFU semantics -use crate::prelude::ReadOnlyCache; use crate::store::hashmap::HashMapStore; use crate::store::traits::{StoreCore, StoreMut}; -use crate::traits::{CoreCache, LfuCacheTrait, MutableCache}; +use crate::traits::{Cache, EvictingCache, FrequencyTracking, VictimInspectable}; use std::cmp::Reverse; use std::collections::{BinaryHeap, HashMap}; use std::hash::Hash; @@ -266,8 +265,8 @@ use crate::metrics::traits::{ /// /// Uses a binary min-heap for efficient least-frequently-used item /// identification. Values are stored as [`Arc`] to avoid cloning on -/// eviction. Implements [`CoreCache`], [`MutableCache`], and -/// [`LfuCacheTrait`]. +/// eviction. Implements [`Cache`], [`EvictingCache`], +/// [`VictimInspectable`], and [`FrequencyTracking`]. 
/// /// # Type Parameters /// @@ -278,7 +277,7 @@ use crate::metrics::traits::{ /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; -/// use cachekit::traits::{CoreCache, LfuCacheTrait}; +/// use cachekit::traits::{Cache, FrequencyTracking}; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache = HeapLfuCache::new(3); @@ -303,7 +302,7 @@ use crate::metrics::traits::{ /// # Stale Entry Handling /// /// The heap may contain stale entries with outdated frequencies. These are -/// lazily cleaned during [`pop_lfu()`](LfuCacheTrait::pop_lfu) operations. +/// lazily cleaned during [`pop_lfu()`](HeapLfuCache::pop_lfu) operations. /// Periodic heap rebuilds bound memory growth. #[derive(Debug)] pub struct HeapLfuCache { @@ -383,7 +382,7 @@ where /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(10); @@ -403,7 +402,7 @@ where /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(10); @@ -425,7 +424,7 @@ where /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(10); @@ -445,7 +444,7 @@ where /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(10); @@ -478,7 +477,7 @@ where /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, 
i32> = HeapLfuCache::new(10); @@ -572,71 +571,122 @@ where None } } -} -impl ReadOnlyCache> for HeapLfuCache -where - K: Clone + Eq + Hash + Ord, -{ - fn contains(&self, key: &K) -> bool { - self.store.contains(key) + /// Removes and returns the least frequently used entry. + pub fn pop_lfu(&mut self) -> Option<(K, Arc)> { + #[cfg(feature = "metrics")] + self.metrics.record_pop_lfu_call(); + + let (lfu_key, _freq) = self.pop_lfu_internal()?; + + let value = self.store.remove(&lfu_key)?; + self.frequencies.remove(&lfu_key); + self.store.record_eviction(); + + #[cfg(feature = "metrics")] + self.metrics.record_pop_lfu_found(); + + Some((lfu_key, value)) } - fn len(&self) -> usize { - self.store.len() + /// Peeks at the least frequently used entry without removing it. + pub fn peek_lfu(&self) -> Option<(&K, &Arc)> { + #[cfg(feature = "metrics")] + (&self.metrics).record_peek_lfu_call(); + + if self.frequencies.is_empty() { + return None; + } + + let min_freq = *self.frequencies.values().min()?; + + for (key, &freq) in &self.frequencies { + if freq == min_freq { + let result = self.store.peek(key).map(|value| (key, value)); + + #[cfg(feature = "metrics")] + if result.is_some() { + (&self.metrics).record_peek_lfu_found(); + } + + return result; + } + } + + None } - fn capacity(&self) -> usize { - self.store.capacity() + /// Manually increments the frequency of a key. + pub fn increment_frequency(&mut self, key: &K) -> Option { + if let Some(freq) = self.frequencies.get_mut(key) { + *freq += 1; + let new_freq = *freq; + self.add_to_heap(key, new_freq); + Some(new_freq) + } else { + None + } + } + + /// Resets the frequency of a key to 1. + pub fn reset_frequency(&mut self, key: &K) -> Option { + if let Some(freq) = self.frequencies.get_mut(key) { + let old_freq = *freq; + *freq = 1; + self.add_to_heap(key, 1); + Some(old_freq) + } else { + None + } } } -/// [`CoreCache`] operations for heap-based LFU. 
-/// -/// # Insert behaviour -/// -/// When the key already exists, the value is replaced and the previous value -/// is returned. When the key is new and the cache is at capacity, the least -/// frequently used entry is evicted first. -/// -/// If the underlying store rejects a new-key insertion (store full after -/// eviction), `insert` returns `None` *without* adding the entry. This is -/// indistinguishable from a successful new-key insert via the return value -/// alone; use [`len`](HeapLfuCache::len) to confirm the entry was stored. +/// [`Cache`] implementation for heap-based LFU. /// /// # Example /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; -/// use cachekit::traits::CoreCache; +/// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(3); /// -/// // Insert items /// cache.insert("a", Arc::new(1)); /// cache.insert("b", Arc::new(2)); /// -/// // Get returns reference /// assert_eq!(**cache.get(&"a").unwrap(), 1); /// -/// // Contains check /// assert!(cache.contains(&"a")); /// assert!(!cache.contains(&"z")); /// -/// // Length and capacity /// assert_eq!(cache.len(), 2); /// assert_eq!(cache.capacity(), 3); /// ``` -impl CoreCache> for HeapLfuCache +impl Cache> for HeapLfuCache where K: Eq + Hash + Clone + Ord, { + fn contains(&self, key: &K) -> bool { + self.store.contains(key) + } + + fn len(&self) -> usize { + self.store.len() + } + + fn capacity(&self) -> usize { + self.store.capacity() + } + + fn peek(&self, key: &K) -> Option<&Arc> { + self.store.peek(key) + } + fn insert(&mut self, key: K, value: Arc) -> Option> { #[cfg(feature = "metrics")] self.metrics.record_insert_call(); - // If key already exists, just update the value (don't change frequency) if self.store.contains(&key) { #[cfg(feature = "metrics")] self.metrics.record_insert_update(); @@ -644,7 +694,6 @@ where return self.store.try_insert(key, value).ok().flatten(); } - // Evict if at capacity #[cfg(feature 
= "metrics")] if self.store.len() >= self.store.capacity() { self.metrics.record_evict_call(); @@ -660,7 +709,6 @@ where #[cfg(feature = "metrics")] self.metrics.record_insert_new(); - // Insert new item with frequency 1 if self.store.try_insert(key.clone(), value).is_err() { return None; } @@ -675,13 +723,11 @@ where #[cfg(feature = "metrics")] self.metrics.record_get_hit(); - // Increment frequency let new_freq = self.frequencies.get_mut(key).map(|f| { *f += 1; *f })?; - // Add new frequency entry to heap (old entry becomes stale) self.add_to_heap(key, new_freq); self.store.get(key) @@ -693,153 +739,47 @@ where } } - fn clear(&mut self) { - HeapLfuCache::clear(self); - } -} - -/// [`MutableCache`] operations for heap-based LFU. -/// -/// # Example -/// -/// ``` -/// use cachekit::policy::heap_lfu::HeapLfuCache; -/// use cachekit::traits::{CoreCache, MutableCache}; -/// use std::sync::Arc; -/// -/// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(10); -/// cache.insert("key", Arc::new(42)); -/// -/// let removed = cache.remove(&"key"); -/// assert_eq!(*removed.unwrap(), 42); -/// assert!(!cache.contains(&"key")); -/// ``` -impl MutableCache> for HeapLfuCache -where - K: Eq + Hash + Clone + Ord, -{ fn remove(&mut self, key: &K) -> Option> { - // Remove from store and frequencies maps let value = self.store.remove(key); let had_frequency = self.frequencies.remove(key).is_some(); - // Note: We don't remove from heap immediately (lazy removal) - // Stale entries will be filtered out during pop_lfu operations - if value.is_some() || had_frequency { self.maybe_rebuild_heap(); } value } + + fn clear(&mut self) { + HeapLfuCache::clear(self); + } } -/// [`LfuCacheTrait`] operations for heap-based cache. 
-/// -/// # Example -/// -/// ``` -/// use cachekit::policy::heap_lfu::HeapLfuCache; -/// use cachekit::traits::{CoreCache, LfuCacheTrait}; -/// use std::sync::Arc; -/// -/// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(3); -/// cache.insert("a", Arc::new(1)); -/// cache.insert("b", Arc::new(2)); -/// cache.get(&"a"); // freq: 1 → 2 -/// -/// // Check frequencies -/// assert_eq!(cache.frequency(&"a"), Some(2)); -/// assert_eq!(cache.frequency(&"b"), Some(1)); -/// -/// // Peek at LFU victim (O(n) scan) -/// let (key, _) = cache.peek_lfu().unwrap(); -/// assert_eq!(*key, "b"); // lowest frequency -/// -/// // Pop LFU (O(log n) amortized) -/// let (key, value) = cache.pop_lfu().unwrap(); -/// assert_eq!(key, "b"); -/// assert_eq!(*value, 2); -/// -/// // Manual frequency control -/// cache.insert("c", Arc::new(3)); -/// cache.increment_frequency(&"c"); // freq: 1 → 2 -/// cache.reset_frequency(&"a"); // freq: 2 → 1 -/// assert_eq!(cache.frequency(&"a"), Some(1)); -/// ``` -impl LfuCacheTrait> for HeapLfuCache +impl EvictingCache> for HeapLfuCache where K: Eq + Hash + Clone + Ord, { - fn pop_lfu(&mut self) -> Option<(K, Arc)> { - #[cfg(feature = "metrics")] - self.metrics.record_pop_lfu_call(); - - // Find the key with minimum frequency (handling stale entries) - let (lfu_key, _freq) = self.pop_lfu_internal()?; - - // Remove from all data structures - let value = self.store.remove(&lfu_key)?; - self.frequencies.remove(&lfu_key); - self.store.record_eviction(); - - #[cfg(feature = "metrics")] - self.metrics.record_pop_lfu_found(); - - Some((lfu_key, value)) + fn evict_one(&mut self) -> Option<(K, Arc)> { + self.pop_lfu() } +} - fn peek_lfu(&self) -> Option<(&K, &Arc)> { - #[cfg(feature = "metrics")] - (&self.metrics).record_peek_lfu_call(); - - if self.frequencies.is_empty() { - return None; - } - - let min_freq = *self.frequencies.values().min()?; - - for (key, &freq) in &self.frequencies { - if freq == min_freq { - let result = 
self.store.peek(key).map(|value| (key, value)); - - #[cfg(feature = "metrics")] - if result.is_some() { - (&self.metrics).record_peek_lfu_found(); - } - - return result; - } - } - - None +impl VictimInspectable> for HeapLfuCache +where + K: Eq + Hash + Clone + Ord, +{ + fn peek_victim(&self) -> Option<(&K, &Arc)> { + self.peek_lfu() } +} +impl FrequencyTracking> for HeapLfuCache +where + K: Eq + Hash + Clone + Ord, +{ fn frequency(&self, key: &K) -> Option { HeapLfuCache::frequency(self, key) } - - fn increment_frequency(&mut self, key: &K) -> Option { - if let Some(freq) = self.frequencies.get_mut(key) { - *freq += 1; - let new_freq = *freq; - self.add_to_heap(key, new_freq); - Some(new_freq) - } else { - None - } - } - - fn reset_frequency(&mut self, key: &K) -> Option { - if let Some(freq) = self.frequencies.get_mut(key) { - let old_freq = *freq; - *freq = 1; - self.add_to_heap(key, 1); - Some(old_freq) - } else { - None - } - } } #[cfg(feature = "metrics")] @@ -853,7 +793,7 @@ where /// /// ``` /// use cachekit::policy::heap_lfu::HeapLfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: HeapLfuCache<&str, i32> = HeapLfuCache::new(10); diff --git a/src/policy/lfu.rs b/src/policy/lfu.rs index 53bc54c..4c27f47 100644 --- a/src/policy/lfu.rs +++ b/src/policy/lfu.rs @@ -149,7 +149,7 @@ //! | `Entry` | SlotArena entry with key + freq + bucket links | //! | `Bucket` | Per-frequency list with head/tail SlotId | //! -//! ## Core Operations (CoreCache + MutableCache) +//! ## Core Operations (Cache) //! //! | Method | Complexity | Description | //! |------------------|------------|--------------------------------------------| @@ -162,7 +162,7 @@ //! | `capacity()` | O(1) | Maximum capacity | //! | `clear()` | O(n) | Remove all entries | //! -//! ## LFU-Specific Operations (LfuCacheTrait) +//! ## LFU-Specific Operations (capability traits) //! //! | Method | Complexity | Description | //! 
|--------------------------|------------|-----------------------------------| @@ -221,7 +221,7 @@ //! use crate::storage::disk::async_disk::cache::lfu::LfuCache; //! use std::sync::Arc; //! use crate::storage::disk::async_disk::cache::cache_traits::{ -//! CoreCache, MutableCache, LfuCacheTrait, +//! Cache, EvictingCache, FrequencyTracking, VictimInspectable, //! }; //! //! // Create cache @@ -268,7 +268,7 @@ //! ```rust,ignore //! use crate::ds::KeyInterner; //! use crate::policy::lfu::LfuHandleCache; -//! use crate::traits::{CoreCache, LfuCacheTrait}; +//! use cachekit::traits::{Cache, FrequencyTracking}; //! use std::sync::Arc; //! //! let mut interner = KeyInterner::new(); @@ -322,10 +322,9 @@ use crate::metrics::snapshot::LfuMetricsSnapshot; use crate::metrics::traits::{ CoreMetricsRecorder, LfuMetricsReadRecorder, LfuMetricsRecorder, MetricsSnapshotProvider, }; -use crate::prelude::ReadOnlyCache; use crate::store::hashmap::HashMapStore; use crate::store::traits::{StoreCore, StoreMut}; -use crate::traits::{CoreCache, LfuCacheTrait, MutableCache}; +use crate::traits::{Cache, EvictingCache, FrequencyTracking, VictimInspectable}; /// LFU (Least Frequently Used) Cache. 
/// @@ -341,7 +340,7 @@ use crate::traits::{CoreCache, LfuCacheTrait, MutableCache}; /// /// ``` /// use cachekit::policy::lfu::LfuCache; -/// use cachekit::traits::{CoreCache, LfuCacheTrait, ReadOnlyCache}; +/// use cachekit::traits::{Cache, FrequencyTracking}; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(3); @@ -385,7 +384,7 @@ pub struct LfuCache { /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; -/// use cachekit::traits::{CoreCache, LfuCacheTrait}; +/// use cachekit::traits::{Cache, FrequencyTracking}; /// use std::sync::Arc; /// /// // Using u64 handles (e.g., from a KeyInterner) @@ -423,7 +422,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: LfuCache = LfuCache::new(100); /// assert_eq!(cache.capacity(), 100); @@ -453,7 +452,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// // Expect many distinct frequencies (long-running cache) /// let cache: LfuCache = LfuCache::with_bucket_hint(100, 64); @@ -476,7 +475,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(10); @@ -509,7 +508,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(10); @@ -539,7 +538,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; + /// use cachekit::traits::{Cache, FrequencyTracking}; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(10); @@ 
-571,7 +570,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(10); @@ -599,6 +598,77 @@ where let value = self.store.remove(&key)?; Some((key, value)) } + + /// Removes and returns the least frequently used entry. + pub fn pop_lfu(&mut self) -> Option<(K, Arc)> { + #[cfg(feature = "metrics")] + self.metrics.record_pop_lfu_call(); + + let result = self.evict_min_freq(); + + #[cfg(feature = "metrics")] + if result.is_some() { + self.metrics.record_pop_lfu_found(); + } + + result + } + + /// Peeks at the least frequently used entry without removing it. + pub fn peek_lfu(&self) -> Option<(&K, &Arc)> { + #[cfg(feature = "metrics")] + (&self.metrics).record_peek_lfu_call(); + + let (key, _freq) = self.buckets.peek_min()?; + let value = self.store.peek(key)?; + + #[cfg(feature = "metrics")] + (&self.metrics).record_peek_lfu_found(); + + Some((key, value)) + } + + /// Returns the access frequency for a key. + pub fn frequency(&self, key: &K) -> Option { + #[cfg(feature = "metrics")] + (&self.metrics).record_frequency_call(); + + let result = self.buckets.frequency(key); + + #[cfg(feature = "metrics")] + if result.is_some() { + (&self.metrics).record_frequency_found(); + } + + result + } + + /// Resets the frequency of a key to 1. + pub fn reset_frequency(&mut self, key: &K) -> Option { + #[cfg(feature = "metrics")] + self.metrics.record_reset_frequency_call(); + + let previous_freq = self.buckets.remove(key)?; + self.buckets.insert(key.clone()); + + #[cfg(feature = "metrics")] + self.metrics.record_reset_frequency_found(); + + Some(previous_freq) + } + + /// Manually increments the frequency of a key. 
+ pub fn increment_frequency(&mut self, key: &K) -> Option { + #[cfg(feature = "metrics")] + self.metrics.record_increment_frequency_call(); + + let new_freq = self.buckets.touch(key)?; + + #[cfg(feature = "metrics")] + self.metrics.record_increment_frequency_found(); + + Some(new_freq) + } } impl LfuHandleCache @@ -611,7 +681,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: LfuHandleCache = LfuHandleCache::new(100); /// assert_eq!(cache.capacity(), 100); @@ -636,7 +706,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: LfuHandleCache = LfuHandleCache::with_bucket_hint(100, 64); /// assert_eq!(cache.capacity(), 100); @@ -656,7 +726,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuHandleCache = LfuHandleCache::new(10); @@ -687,7 +757,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuHandleCache = LfuHandleCache::new(10); @@ -716,7 +786,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; + /// use cachekit::traits::{Cache, FrequencyTracking}; /// use std::sync::Arc; /// /// let mut cache: LfuHandleCache = LfuHandleCache::new(10); @@ -747,7 +817,7 @@ where /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuHandleCache = LfuHandleCache::new(10); @@ -775,59 +845,124 @@ where let value = 
self.store.remove(&handle)?; Some((handle, value)) } -} -impl ReadOnlyCache> for LfuCache -where - K: Clone + Eq + Hash, -{ - fn contains(&self, key: &K) -> bool { - self.store.contains(key) + /// Removes and returns the least frequently used entry. + pub fn pop_lfu(&mut self) -> Option<(H, Arc)> { + #[cfg(feature = "metrics")] + self.metrics.record_pop_lfu_call(); + + let result = self.evict_min_freq(); + + #[cfg(feature = "metrics")] + if result.is_some() { + self.metrics.record_pop_lfu_found(); + } + + result } - fn len(&self) -> usize { - self.store.len() + /// Peeks at the least frequently used entry without removing it. + pub fn peek_lfu(&self) -> Option<(&H, &Arc)> { + #[cfg(feature = "metrics")] + (&self.metrics).record_peek_lfu_call(); + + let (handle, _freq) = self.buckets.peek_min_ref()?; + let value = self.store.peek(handle)?; + + #[cfg(feature = "metrics")] + (&self.metrics).record_peek_lfu_found(); + + Some((handle, value)) } - fn capacity(&self) -> usize { - self.store.capacity() + /// Returns the access frequency for a handle. + pub fn frequency(&self, handle: &H) -> Option { + #[cfg(feature = "metrics")] + (&self.metrics).record_frequency_call(); + + let result = self.buckets.frequency(handle); + + #[cfg(feature = "metrics")] + if result.is_some() { + (&self.metrics).record_frequency_found(); + } + + result + } + + /// Resets the frequency of a handle to 1. + pub fn reset_frequency(&mut self, handle: &H) -> Option { + #[cfg(feature = "metrics")] + self.metrics.record_reset_frequency_call(); + + let previous_freq = self.buckets.remove(handle)?; + self.buckets.insert(*handle); + + #[cfg(feature = "metrics")] + self.metrics.record_reset_frequency_found(); + + Some(previous_freq) + } + + /// Manually increments the frequency of a handle. 
+ pub fn increment_frequency(&mut self, handle: &H) -> Option { + #[cfg(feature = "metrics")] + self.metrics.record_increment_frequency_call(); + + let new_freq = self.buckets.touch(handle)?; + + #[cfg(feature = "metrics")] + self.metrics.record_increment_frequency_found(); + + Some(new_freq) } } -/// Core cache operations for LFU. +/// [`Cache`] implementation for LFU. /// /// # Example /// /// ``` /// use cachekit::policy::lfu::LfuCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(3); /// -/// // Insert items /// cache.insert("a", Arc::new(1)); /// cache.insert("b", Arc::new(2)); /// -/// // Get returns reference /// assert_eq!(**cache.get(&"a").unwrap(), 1); /// -/// // Check existence /// assert!(cache.contains(&"a")); /// assert!(!cache.contains(&"z")); /// -/// // Length and capacity /// assert_eq!(cache.len(), 2); /// assert_eq!(cache.capacity(), 3); /// -/// // Clear /// cache.clear(); /// assert_eq!(cache.len(), 0); /// ``` -impl CoreCache> for LfuCache +impl Cache> for LfuCache where K: Eq + Hash + Clone, { + fn contains(&self, key: &K) -> bool { + self.store.contains(key) + } + + fn len(&self) -> usize { + self.store.len() + } + + fn capacity(&self) -> usize { + self.store.capacity() + } + + fn peek(&self, key: &K) -> Option<&Arc> { + self.store.peek(key) + } + fn insert(&mut self, key: K, value: Arc) -> Option> { #[cfg(feature = "metrics")] self.metrics.record_insert_call(); @@ -882,6 +1017,11 @@ where self.store.get(key) } + fn remove(&mut self, key: &K) -> Option> { + let _ = self.buckets.remove(key)?; + self.store.remove(key) + } + fn clear(&mut self) { #[cfg(feature = "metrics")] self.metrics.record_clear(); @@ -890,30 +1030,13 @@ where } } -impl ReadOnlyCache> for LfuHandleCache -where - H: Copy + Eq + Hash, -{ - fn contains(&self, handle: &H) -> bool { - self.store.contains(handle) - } - - fn len(&self) -> usize { - 
self.store.len() - } - - fn capacity(&self) -> usize { - self.store.capacity() - } -} - -/// Core cache operations for handle-based LFU. +/// [`Cache`] implementation for handle-based LFU. /// /// # Example /// /// ``` /// use cachekit::policy::lfu::LfuHandleCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuHandleCache = LfuHandleCache::new(3); @@ -925,10 +1048,26 @@ where /// assert!(cache.contains(&1u64)); /// assert_eq!(cache.len(), 2); /// ``` -impl CoreCache> for LfuHandleCache +impl Cache> for LfuHandleCache where H: Eq + Hash + Copy, { + fn contains(&self, handle: &H) -> bool { + self.store.contains(handle) + } + + fn len(&self) -> usize { + self.store.len() + } + + fn capacity(&self) -> usize { + self.store.capacity() + } + + fn peek(&self, handle: &H) -> Option<&Arc> { + self.store.peek(handle) + } + fn insert(&mut self, handle: H, value: Arc) -> Option> { #[cfg(feature = "metrics")] self.metrics.record_insert_call(); @@ -982,6 +1121,11 @@ where self.store.get(handle) } + fn remove(&mut self, handle: &H) -> Option> { + let _ = self.buckets.remove(handle)?; + self.store.remove(handle) + } + fn clear(&mut self) { #[cfg(feature = "metrics")] self.metrics.record_clear(); @@ -990,247 +1134,57 @@ where } } -/// Mutable cache operations for LFU. 
-/// -/// # Example -/// -/// ``` -/// use cachekit::policy::lfu::LfuCache; -/// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; -/// use std::sync::Arc; -/// -/// let mut cache: LfuCache<&str, i32> = LfuCache::new(10); -/// cache.insert("key", Arc::new(42)); -/// -/// let removed = cache.remove(&"key"); -/// assert_eq!(*removed.unwrap(), 42); -/// assert!(!cache.contains(&"key")); -/// ``` -impl MutableCache> for LfuCache +impl EvictingCache> for LfuCache where K: Eq + Hash + Clone, { - fn remove(&mut self, key: &K) -> Option> { - let _ = self.buckets.remove(key)?; - self.store.remove(key) + fn evict_one(&mut self) -> Option<(K, Arc)> { + self.pop_lfu() } } -/// Mutable cache operations for handle-based LFU. -/// -/// # Example -/// -/// ``` -/// use cachekit::policy::lfu::LfuHandleCache; -/// use cachekit::traits::{CoreCache, MutableCache}; -/// use std::sync::Arc; -/// -/// let mut cache: LfuHandleCache = LfuHandleCache::new(10); -/// cache.insert(1u64, Arc::new(42)); -/// -/// let removed = cache.remove(&1u64); -/// assert_eq!(*removed.unwrap(), 42); -/// ``` -impl MutableCache> for LfuHandleCache +impl VictimInspectable> for LfuCache where - H: Eq + Hash + Copy, + K: Eq + Hash + Clone, { - fn remove(&mut self, handle: &H) -> Option> { - let _ = self.buckets.remove(handle)?; - self.store.remove(handle) + fn peek_victim(&self) -> Option<(&K, &Arc)> { + self.peek_lfu() } } -/// LFU-specific operations. 
-/// -/// # Example -/// -/// ``` -/// use cachekit::policy::lfu::LfuCache; -/// use cachekit::traits::{CoreCache, LfuCacheTrait}; -/// use std::sync::Arc; -/// -/// let mut cache: LfuCache<&str, i32> = LfuCache::new(3); -/// cache.insert("a", Arc::new(1)); -/// cache.insert("b", Arc::new(2)); -/// cache.get(&"a"); // freq: 1 → 2 -/// -/// // Check frequencies -/// assert_eq!(cache.frequency(&"a"), Some(2)); -/// assert_eq!(cache.frequency(&"b"), Some(1)); -/// -/// // Peek at LFU victim -/// let (key, _) = cache.peek_lfu().unwrap(); -/// assert_eq!(*key, "b"); // lowest frequency -/// -/// // Manual frequency control -/// cache.increment_frequency(&"b"); // freq: 1 → 2 -/// cache.reset_frequency(&"a"); // freq: 2 → 1 -/// -/// // Pop LFU -/// let (key, value) = cache.pop_lfu().unwrap(); -/// assert_eq!(key, "a"); // now has lowest freq -/// ``` -impl LfuCacheTrait> for LfuCache +impl FrequencyTracking> for LfuCache where K: Eq + Hash + Clone, { - fn pop_lfu(&mut self) -> Option<(K, Arc)> { - #[cfg(feature = "metrics")] - self.metrics.record_pop_lfu_call(); - - let result = self.evict_min_freq(); - - #[cfg(feature = "metrics")] - if result.is_some() { - self.metrics.record_pop_lfu_found(); - } - - result - } - - fn peek_lfu(&self) -> Option<(&K, &Arc)> { - #[cfg(feature = "metrics")] - (&self.metrics).record_peek_lfu_call(); - - let (key, _freq) = self.buckets.peek_min()?; - let value = self.store.peek(key)?; - - #[cfg(feature = "metrics")] - (&self.metrics).record_peek_lfu_found(); - - Some((key, value)) - } - fn frequency(&self, key: &K) -> Option { - #[cfg(feature = "metrics")] - (&self.metrics).record_frequency_call(); - - let result = self.buckets.frequency(key); - - #[cfg(feature = "metrics")] - if result.is_some() { - (&self.metrics).record_frequency_found(); - } - - result - } - - fn reset_frequency(&mut self, key: &K) -> Option { - #[cfg(feature = "metrics")] - self.metrics.record_reset_frequency_call(); - - let previous_freq = self.buckets.remove(key)?; - 
self.buckets.insert(key.clone()); - - #[cfg(feature = "metrics")] - self.metrics.record_reset_frequency_found(); - - Some(previous_freq) - } - - fn increment_frequency(&mut self, key: &K) -> Option { - #[cfg(feature = "metrics")] - self.metrics.record_increment_frequency_call(); - - let new_freq = self.buckets.touch(key)?; - - #[cfg(feature = "metrics")] - self.metrics.record_increment_frequency_found(); - - Some(new_freq) + LfuCache::frequency(self, key) } } -/// LFU-specific operations for handle-based cache. -/// -/// # Example -/// -/// ``` -/// use cachekit::policy::lfu::LfuHandleCache; -/// use cachekit::traits::{CoreCache, LfuCacheTrait}; -/// use std::sync::Arc; -/// -/// let mut cache: LfuHandleCache = LfuHandleCache::new(3); -/// cache.insert(1u64, Arc::new(100)); -/// cache.insert(2u64, Arc::new(200)); -/// cache.get(&1u64); // freq: 1 → 2 -/// -/// assert_eq!(cache.frequency(&1u64), Some(2)); -/// assert_eq!(cache.frequency(&2u64), Some(1)); -/// -/// // Peek at LFU victim -/// let (handle, _) = cache.peek_lfu().unwrap(); -/// assert_eq!(*handle, 2u64); -/// ``` -impl LfuCacheTrait> for LfuHandleCache +impl EvictingCache> for LfuHandleCache where H: Eq + Hash + Copy, { - fn pop_lfu(&mut self) -> Option<(H, Arc)> { - #[cfg(feature = "metrics")] - self.metrics.record_pop_lfu_call(); - - let result = self.evict_min_freq(); - - #[cfg(feature = "metrics")] - if result.is_some() { - self.metrics.record_pop_lfu_found(); - } - - result + fn evict_one(&mut self) -> Option<(H, Arc)> { + self.pop_lfu() } +} - fn peek_lfu(&self) -> Option<(&H, &Arc)> { - #[cfg(feature = "metrics")] - (&self.metrics).record_peek_lfu_call(); - - let (handle, _freq) = self.buckets.peek_min_ref()?; - let value = self.store.peek(handle)?; - - #[cfg(feature = "metrics")] - (&self.metrics).record_peek_lfu_found(); - - Some((handle, value)) +impl VictimInspectable> for LfuHandleCache +where + H: Eq + Hash + Copy, +{ + fn peek_victim(&self) -> Option<(&H, &Arc)> { + self.peek_lfu() } +} 
+impl FrequencyTracking> for LfuHandleCache +where + H: Eq + Hash + Copy, +{ fn frequency(&self, handle: &H) -> Option { - #[cfg(feature = "metrics")] - (&self.metrics).record_frequency_call(); - - let result = self.buckets.frequency(handle); - - #[cfg(feature = "metrics")] - if result.is_some() { - (&self.metrics).record_frequency_found(); - } - - result - } - - fn reset_frequency(&mut self, handle: &H) -> Option { - #[cfg(feature = "metrics")] - self.metrics.record_reset_frequency_call(); - - let previous_freq = self.buckets.remove(handle)?; - self.buckets.insert(*handle); - - #[cfg(feature = "metrics")] - self.metrics.record_reset_frequency_found(); - - Some(previous_freq) - } - - fn increment_frequency(&mut self, handle: &H) -> Option { - #[cfg(feature = "metrics")] - self.metrics.record_increment_frequency_call(); - - let new_freq = self.buckets.touch(handle)?; - - #[cfg(feature = "metrics")] - self.metrics.record_increment_frequency_found(); - - Some(new_freq) + LfuHandleCache::frequency(self, handle) } } @@ -1249,7 +1203,7 @@ where /// /// ```ignore /// use cachekit::policy::lfu::LfuCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LfuCache<&str, i32> = LfuCache::new(100); @@ -1467,7 +1421,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::traits::CoreCache; + use crate::traits::Cache; // Basic LFU Behavior Tests mod basic_behavior { diff --git a/src/policy/lifo.rs b/src/policy/lifo.rs index 0bea816..dc6f6da 100644 --- a/src/policy/lifo.rs +++ b/src/policy/lifo.rs @@ -160,8 +160,7 @@ use crate::metrics::metrics_impl::CoreOnlyMetrics; use crate::metrics::snapshot::CoreOnlyMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CoreMetricsRecorder, MetricsSnapshotProvider}; -use crate::prelude::ReadOnlyCache; -use crate::traits::CoreCache; +use crate::traits::{Cache, EvictingCache, VictimInspectable}; use rustc_hash::FxHashMap; use 
std::hash::Hash; @@ -586,7 +585,7 @@ impl std::fmt::Debug for LifoCore { } } -impl ReadOnlyCache for LifoCore +impl Cache for LifoCore where K: Clone + Eq + Hash, { @@ -604,32 +603,10 @@ where fn capacity(&self) -> usize { self.capacity } -} -/// Implementation of the [`CoreCache`] trait for LIFO. -/// -/// Allows `LifoCore` to be used through the unified cache interface. -/// -/// # Example -/// -/// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; -/// use cachekit::policy::lifo::LifoCore; -/// -/// let mut cache: LifoCore<&str, i32> = LifoCore::new(100); -/// -/// // Use via CoreCache trait -/// assert_eq!(cache.insert("key", 42), None); -/// assert_eq!(cache.get(&"key"), Some(&42)); -/// assert!(cache.contains(&"key")); -/// ``` -impl CoreCache for LifoCore -where - K: Clone + Eq + Hash, -{ #[inline] - fn insert(&mut self, key: K, value: V) -> Option { - LifoCore::insert(self, key, value) + fn peek(&self, key: &K) -> Option<&V> { + self.map.get(key) } #[inline] @@ -637,11 +614,44 @@ where LifoCore::get(self, key) } + #[inline] + fn insert(&mut self, key: K, value: V) -> Option { + LifoCore::insert(self, key, value) + } + + fn remove(&mut self, key: &K) -> Option { + let value = self.map.remove(key)?; + if let Some(pos) = self.stack.iter().position(|k| k == key) { + self.stack.remove(pos); + } + #[cfg(debug_assertions)] + self.validate_invariants(); + Some(value) + } + fn clear(&mut self) { LifoCore::clear(self); } } +impl EvictingCache for LifoCore +where + K: Clone + Eq + Hash, +{ + fn evict_one(&mut self) -> Option<(K, V)> { + self.pop_newest() + } +} + +impl VictimInspectable for LifoCore +where + K: Clone + Eq + Hash, +{ + fn peek_victim(&self) -> Option<(&K, &V)> { + self.peek_newest() + } +} + #[cfg(feature = "metrics")] impl LifoCore where diff --git a/src/policy/lru.rs b/src/policy/lru.rs index cf80ff0..6eabfdf 100644 --- a/src/policy/lru.rs +++ b/src/policy/lru.rs @@ -118,7 +118,7 @@ //! • No raw pointers — all links are SlotId indices //! 
``` //! -//! ## LruCore Methods (CoreCache + MutableCache + LruCacheTrait) +//! ## LruCore Methods (Cache + capability traits) //! //! | Method | Complexity | Description | //! |------------------|------------|-------------------------------------------| @@ -224,7 +224,7 @@ //! //! ``` //! use cachekit::policy::lru::LruCore; -//! use cachekit::traits::{CoreCache, LruCacheTrait, ReadOnlyCache}; +//! use cachekit::traits::{Cache, RecencyTracking}; //! use std::sync::Arc; //! //! let mut cache: LruCore = LruCore::new(100); @@ -306,8 +306,7 @@ use crate::metrics::snapshot::LruMetricsSnapshot; use crate::metrics::traits::{ CoreMetricsRecorder, LruMetricsReadRecorder, LruMetricsRecorder, MetricsSnapshotProvider, }; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, LruCacheTrait, MutableCache}; +use crate::traits::{Cache, EvictingCache, RecencyTracking, VictimInspectable}; /// Node in the LRU linked list, stored in a `SlotArena`. /// @@ -481,7 +480,7 @@ where } } -impl ReadOnlyCache> for LruCore +impl Cache> for LruCore where K: Copy + Eq + Hash, { @@ -499,12 +498,36 @@ where fn capacity(&self) -> usize { self.capacity } -} -impl CoreCache> for LruCore -where - K: Copy + Eq + Hash, -{ + #[inline] + fn peek(&self, key: &K) -> Option<&Arc> { + let &id = self.map.get(key)?; + self.arena.get(id).map(|node| &node.value) + } + + #[inline] + fn get(&mut self, key: &K) -> Option<&Arc> { + let &id = match self.map.get(key) { + Some(id) => id, + None => { + #[cfg(feature = "metrics")] + self.metrics.record_get_miss(); + return None; + }, + }; + + #[cfg(feature = "metrics")] + self.metrics.record_get_hit(); + + self.detach(id); + self.attach_front(id); + + #[cfg(debug_assertions)] + self.validate_invariants(); + + self.arena.get(id).map(|node| &node.value) + } + #[inline] fn insert(&mut self, key: K, value: Arc) -> Option> { #[cfg(feature = "metrics")] @@ -561,26 +584,16 @@ where } #[inline] - fn get(&mut self, key: &K) -> Option<&Arc> { - let &id = match 
self.map.get(key) { - Some(id) => id, - None => { - #[cfg(feature = "metrics")] - self.metrics.record_get_miss(); - return None; - }, - }; - - #[cfg(feature = "metrics")] - self.metrics.record_get_hit(); + fn remove(&mut self, key: &K) -> Option> { + let id = self.map.remove(key)?; self.detach(id); - self.attach_front(id); + let node = self.arena.remove(id).expect("remove: stale SlotId"); #[cfg(debug_assertions)] self.validate_invariants(); - self.arena.get(id).map(|node| &node.value) + Some(node.value) } fn clear(&mut self) { @@ -600,23 +613,23 @@ impl LruCore where K: Copy + Eq + Hash, { - /// Zero-copy peek: read-only lookup without LRU update. + /// Returns an `Arc` clone without updating LRU order. /// - /// Returns `Arc` clone for zero-copy sharing. Unlike [`get`](CoreCache::get), - /// this does not move the item to the MRU position. + /// This inherent method clones the Arc for zero-copy sharing across + /// threads. Calling `cache.peek(key)` resolves here (inherent methods + /// shadow trait methods); trait-generic code uses [`Cache::peek`]. 
/// /// # Example /// /// ``` /// use cachekit::policy::lru::LruCore; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LruCore = LruCore::new(3); /// cache.insert(1, Arc::new("first".to_string())); /// cache.insert(2, Arc::new("second".to_string())); /// - /// // Peek doesn't affect LRU order /// let value = cache.peek(&1); /// assert_eq!(*value.unwrap(), "first"); /// @@ -638,32 +651,10 @@ where } None } -} - -impl MutableCache> for LruCore -where - K: Copy + Eq + Hash, -{ - #[inline] - fn remove(&mut self, key: &K) -> Option> { - let id = self.map.remove(key)?; - - self.detach(id); - let node = self.arena.remove(id).expect("remove: stale SlotId"); - - #[cfg(debug_assertions)] - self.validate_invariants(); - - Some(node.value) - } -} -impl LruCacheTrait> for LruCore -where - K: Copy + Eq + Hash, -{ + /// Removes and returns the least recently used entry. #[inline] - fn pop_lru(&mut self) -> Option<(K, Arc)> { + pub fn pop_lru(&mut self) -> Option<(K, Arc)> { #[cfg(feature = "metrics")] self.metrics.record_pop_lru_call(); @@ -679,8 +670,9 @@ where Some((key, value)) } + /// Peeks at the least recently used entry without removing it. #[inline] - fn peek_lru(&self) -> Option<(&K, &Arc)> { + pub fn peek_lru(&self) -> Option<(&K, &Arc)> { #[cfg(feature = "metrics")] (&self.metrics).record_peek_lru_call(); @@ -693,8 +685,11 @@ where Some((&node.key, &node.value)) } + /// Moves a key to MRU position without returning its value. + /// + /// Returns `true` if the key was found and touched. #[inline] - fn touch(&mut self, key: &K) -> bool { + pub fn touch(&mut self, key: &K) -> bool { #[cfg(feature = "metrics")] self.metrics.record_touch_call(); @@ -714,7 +709,8 @@ where } } - fn recency_rank(&self, key: &K) -> Option { + /// Returns the recency rank (0 = most recent) for a key. 
+ pub fn recency_rank(&self, key: &K) -> Option { #[cfg(feature = "metrics")] (&self.metrics).record_recency_rank_call(); @@ -738,6 +734,40 @@ where } } +impl EvictingCache> for LruCore +where + K: Copy + Eq + Hash, +{ + #[inline] + fn evict_one(&mut self) -> Option<(K, Arc)> { + self.pop_lru() + } +} + +impl VictimInspectable> for LruCore +where + K: Copy + Eq + Hash, +{ + #[inline] + fn peek_victim(&self) -> Option<(&K, &Arc)> { + self.peek_lru() + } +} + +impl RecencyTracking> for LruCore +where + K: Copy + Eq + Hash, +{ + #[inline] + fn touch(&mut self, key: &K) -> bool { + LruCore::touch(self, key) + } + + fn recency_rank(&self, key: &K) -> Option { + LruCore::recency_rank(self, key) + } +} + #[cfg(feature = "metrics")] impl LruCore where @@ -933,7 +963,7 @@ where /// /// ``` /// use cachekit::policy::lru::LruCore; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// use std::sync::Arc; /// /// let mut cache: LruCore = LruCore::new(3); @@ -1402,7 +1432,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::traits::CoreCache; + use crate::traits::Cache; // ============================================== // CORRECTNESS TESTS MODULE diff --git a/src/policy/lru_k.rs b/src/policy/lru_k.rs index 9aa64dc..6c3f60e 100644 --- a/src/policy/lru_k.rs +++ b/src/policy/lru_k.rs @@ -116,7 +116,7 @@ //! | `store` | Stores key -> `Arc` ownership | //! | `k` | Number of accesses to track (default: 2) | //! -//! ## Core Operations ([`CoreCache`] + [`MutableCache`] + [`LrukCacheTrait`]) +//! ## Core Operations ([`Cache`] + capability traits) //! //! | Method | Complexity | Description | //! |---------------------|------------|------------------------------------------| @@ -130,7 +130,7 @@ //! | `capacity()` | O(1) | Maximum capacity | //! | `clear()` | O(N) | Remove all entries | //! -//! ## LRU-K Specific Operations ([`LrukCacheTrait`]) +//! ## LRU-K Specific Operations (capability traits) //! //! | Method | Complexity | Description | //! 
|----------------------|------------|-----------------------------------------| @@ -187,7 +187,7 @@ //! //! ``` //! use cachekit::policy::lru_k::LrukCache; -//! use cachekit::traits::{CoreCache, LrukCacheTrait}; +//! use cachekit::traits::{Cache, HistoryTracking}; //! //! // Create LRU-2 cache (default K=2) //! let _cache: LrukCache = LrukCache::new(100); @@ -274,8 +274,9 @@ use crate::metrics::traits::{ CoreMetricsRecorder, LruKMetricsReadRecorder, LruKMetricsRecorder, LruMetricsRecorder, MetricsSnapshotProvider, }; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, LrukCacheTrait, MutableCache}; +use crate::traits::{ + Cache, EvictingCache, FrequencyTracking, HistoryTracking, RecencyTracking, VictimInspectable, +}; #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum Segment { @@ -315,7 +316,7 @@ struct Node { /// /// ``` /// use cachekit::policy::lru_k::LrukCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// // Create LRU-2 cache (default K=2) /// let mut cache: LrukCache = LrukCache::new(100); @@ -336,7 +337,7 @@ struct Node { /// /// ``` /// use cachekit::policy::lru_k::LrukCache; -/// use cachekit::traits::{CoreCache, LrukCacheTrait}; +/// use cachekit::traits::Cache; /// /// let mut cache: LrukCache = LrukCache::with_k(3, 2); /// @@ -431,7 +432,7 @@ where /// /// ``` /// use cachekit::policy::lru_k::LrukCache; - /// use cachekit::traits::{CoreCache, LrukCacheTrait, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: LrukCache = LrukCache::new(100); /// @@ -461,7 +462,7 @@ where /// /// ``` /// use cachekit::policy::lru_k::LrukCache; - /// use cachekit::traits::{CoreCache, LrukCacheTrait, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// // LRU-3: requires 3 accesses to be considered "hot" /// let cache: LrukCache = LrukCache::with_k(100, 3); @@ -625,61 +626,56 @@ where } } -impl ReadOnlyCache for LrukCache -where - K: Clone + Eq + Hash, - V: Clone, -{ - #[inline] - 
fn contains(&self, key: &K) -> bool { - self.map.contains_key(key) - } - - #[inline] - fn len(&self) -> usize { - self.map.len() - } - - #[inline] - fn capacity(&self) -> usize { - self.capacity - } -} - -/// [`CoreCache`] implementation for LRU-K. +/// [`Cache`] implementation for LRU-K. /// /// # Example /// /// ``` /// use cachekit::policy::lru_k::LrukCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// let mut cache: LrukCache = LrukCache::new(3); /// -/// // Insert items /// cache.insert(1, "one".to_string()); /// cache.insert(2, "two".to_string()); /// -/// // Get with access history update /// assert_eq!(cache.get(&1).map(|s| s.as_str()), Some("one")); /// -/// // Contains check /// assert!(cache.contains(&1)); /// assert!(!cache.contains(&999)); /// -/// // Length and capacity /// assert_eq!(cache.len(), 2); /// assert_eq!(cache.capacity(), 3); /// -/// // Clear all entries /// cache.clear(); /// assert_eq!(cache.len(), 0); /// ``` -impl CoreCache for LrukCache +impl Cache for LrukCache where K: Eq + Hash + Clone, V: Clone, { + #[inline] + fn contains(&self, key: &K) -> bool { + self.map.contains_key(key) + } + + #[inline] + fn len(&self) -> usize { + self.map.len() + } + + #[inline] + fn capacity(&self) -> usize { + self.capacity + } + + #[inline] + fn peek(&self, key: &K) -> Option<&V> { + let &node_ptr = self.map.get(key)?; + unsafe { Some((*node_ptr.as_ptr()).value.as_ref()) } + } + #[inline] fn insert(&mut self, key: K, value: V) -> Option { #[cfg(feature = "metrics")] @@ -694,7 +690,6 @@ where #[cfg(feature = "metrics")] self.metrics.record_insert_update(); - // Update value and record access let old_value = unsafe { let node = &mut *node_ptr.as_ptr(); let old = std::mem::replace(&mut node.value, Arc::new(value)); @@ -764,42 +759,6 @@ where unsafe { Some((*node_ptr.as_ptr()).value.as_ref()) } } - fn clear(&mut self) { - #[cfg(feature = "metrics")] - self.metrics.record_clear(); - - // Free all nodes - 
while self.pop_cold_tail_inner().is_some() {} - while self.pop_hot_tail_inner().is_some() {} - self.map.clear(); - self.tick = 0; - } -} - -/// [`MutableCache`] implementation for LRU-K. -/// -/// # Example -/// -/// ``` -/// use cachekit::policy::lru_k::LrukCache; -/// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; -/// -/// let mut cache: LrukCache = LrukCache::new(10); -/// cache.insert(1, "value".to_string()); -/// -/// // Remove an entry -/// let removed = cache.remove(&1); -/// assert_eq!(removed.as_deref(), Some("value")); -/// assert!(!cache.contains(&1)); -/// -/// // Remove non-existent key -/// assert!(cache.remove(&999).is_none()); -/// ``` -impl MutableCache for LrukCache -where - K: Eq + Hash + Clone, - V: Clone, -{ #[inline] fn remove(&mut self, key: &K) -> Option { let node_ptr = self.map.remove(key)?; @@ -809,55 +768,27 @@ where Some((*node.value).clone()) } + + fn clear(&mut self) { + #[cfg(feature = "metrics")] + self.metrics.record_clear(); + + while self.pop_cold_tail_inner().is_some() {} + while self.pop_hot_tail_inner().is_some() {} + self.map.clear(); + self.tick = 0; + } } -/// [`LrukCacheTrait`] implementation for LRU-K. -/// -/// Provides eviction, access history inspection, and K-distance queries. 
-/// -/// # Example -/// -/// ``` -/// use cachekit::policy::lru_k::LrukCache; -/// use cachekit::traits::{CoreCache, LrukCacheTrait}; -/// -/// let mut cache: LrukCache = LrukCache::with_k(10, 2); -/// -/// // Insert and access items -/// cache.insert(1, "one"); -/// cache.get(&1); // Now has 2 accesses -/// -/// cache.insert(2, "two"); // Only 1 access -/// -/// // Check access counts -/// assert_eq!(cache.access_count(&1), Some(2)); -/// assert_eq!(cache.access_count(&2), Some(1)); -/// -/// // K-distance is only available for items with >= K accesses -/// assert!(cache.k_distance(&1).is_some()); // Has 2 accesses -/// assert!(cache.k_distance(&2).is_none()); // Only 1 access -/// -/// // Peek at eviction victim (item 2 has < K accesses) -/// let (key, _) = cache.peek_lru_k().unwrap(); -/// assert_eq!(*key, 2); -/// -/// // Pop eviction victim -/// let (key, value) = cache.pop_lru_k().unwrap(); -/// assert_eq!(key, 2); -/// assert_eq!(value, "two"); -/// ``` -impl LrukCacheTrait for LrukCache +/// LRU-K specific inherent methods. +impl LrukCache where K: Eq + Hash + Clone, V: Clone, { /// Removes and returns the LRU-K eviction victim. - /// - /// Eviction priority: - /// 1. Items with fewer than K accesses (evicts oldest first) - /// 2. Items with K+ accesses (evicts oldest K-distance first) #[inline] - fn pop_lru_k(&mut self) -> Option<(K, V)> { + pub fn pop_lru_k(&mut self) -> Option<(K, V)> { #[cfg(feature = "metrics")] self.metrics.record_pop_lru_k_call(); @@ -870,11 +801,8 @@ where } /// Peeks at the LRU-K eviction victim without removing it. - /// - /// Returns references to the key and value that would be evicted - /// by the next `pop_lru_k()` call. #[inline] - fn peek_lru_k(&self) -> Option<(&K, &V)> { + pub fn peek_lru_k(&self) -> Option<(&K, &V)> { #[cfg(feature = "metrics")] (&self.metrics).record_peek_lru_k_call(); @@ -891,37 +819,29 @@ where /// Returns the K value used by this cache. 
#[inline] - fn k_value(&self) -> usize { + pub fn k_value(&self) -> usize { self.k } /// Returns the access history for a key (most recent first). - /// - /// The history is capped at K entries. Timestamps are monotonic - /// logical ticks, not wall-clock time. - fn access_history(&self, key: &K) -> Option> { + pub fn access_history(&self, key: &K) -> Option> { let node_ptr = self.map.get(key)?; unsafe { let node = node_ptr.as_ref(); - Some(node.history.iter().rev().copied().collect()) // Most recent first + Some(node.history.iter().rev().copied().collect()) } } /// Returns the number of accesses recorded for a key. - /// - /// The count is capped at K (the history size limit). #[inline] - fn access_count(&self, key: &K) -> Option { + pub fn access_count(&self, key: &K) -> Option { let node_ptr = self.map.get(key)?; unsafe { Some(node_ptr.as_ref().history.len()) } } /// Returns the K-distance for a key. - /// - /// K-distance is the timestamp of the K-th most recent access. - /// Only available for items with at least K accesses. #[inline] - fn k_distance(&self, key: &K) -> Option { + pub fn k_distance(&self, key: &K) -> Option { #[cfg(feature = "metrics")] (&self.metrics).record_k_distance_call(); @@ -943,11 +863,8 @@ where } /// Updates the access time for a key without retrieving its value. - /// - /// Useful for scenarios like pinned pages where you want to update - /// access history without reading the value. #[inline] - fn touch(&mut self, key: &K) -> bool { + pub fn touch(&mut self, key: &K) -> bool { #[cfg(feature = "metrics")] self.metrics.record_touch_call(); @@ -966,9 +883,8 @@ where /// Returns the eviction priority rank for a key. /// - /// Rank 0 means the key would be evicted first. Higher ranks mean - /// the key is safer from eviction. - fn k_distance_rank(&self, key: &K) -> Option { + /// Rank 0 means the key would be evicted first. 
+ pub fn k_distance_rank(&self, key: &K) -> Option { #[cfg(feature = "metrics")] (&self.metrics).record_k_distance_rank_call(); @@ -986,28 +902,21 @@ where let num_accesses = history.len(); if num_accesses < self.k { - // Items with fewer than K accesses use their earliest access time let earliest = history.front().copied().unwrap_or(u64::MAX); - items_with_distances.push((false, earliest)); // false = not full K accesses + items_with_distances.push((false, earliest)); } else { - // Items with K or more accesses use their K-distance let k_distance = history.front().copied().unwrap_or(u64::MAX); - items_with_distances.push((true, k_distance)); // true = has full K accesses + items_with_distances.push((true, k_distance)); } } - // Sort by priority: items with fewer than K accesses first (by earliest access), - // then items with K+ accesses (by K-distance) - items_with_distances.sort_by(|a, b| { - match (a.0, b.0) { - (false, false) => a.1.cmp(&b.1), // Both have < K accesses, sort by earliest - (true, true) => a.1.cmp(&b.1), // Both have >= K accesses, sort by K-distance - (false, true) => std::cmp::Ordering::Less, // < K accesses comes first - (true, false) => std::cmp::Ordering::Greater, // >= K accesses comes second - } + items_with_distances.sort_by(|a, b| match (a.0, b.0) { + (false, false) => a.1.cmp(&b.1), + (true, true) => a.1.cmp(&b.1), + (false, true) => std::cmp::Ordering::Less, + (true, false) => std::cmp::Ordering::Greater, }); - // Find the rank of the target key let target_node = self.map.get(key)?; let target_history = unsafe { &target_node.as_ref().history }; let target_num_accesses = target_history.len(); @@ -1027,6 +936,99 @@ where } } +impl EvictingCache for LrukCache +where + K: Eq + Hash + Clone, + V: Clone, +{ + fn evict_one(&mut self) -> Option<(K, V)> { + self.pop_lru_k() + } +} + +impl VictimInspectable for LrukCache +where + K: Eq + Hash + Clone, + V: Clone, +{ + fn peek_victim(&self) -> Option<(&K, &V)> { + self.peek_lru_k() + } +} + 
+impl RecencyTracking for LrukCache +where + K: Eq + Hash + Clone, + V: Clone, +{ + fn touch(&mut self, key: &K) -> bool { + LrukCache::touch(self, key) + } + + fn recency_rank(&self, key: &K) -> Option { + if !self.map.contains_key(key) { + return None; + } + + let target_ptr = *self.map.get(key)?; + let mut rank = 0; + + // Walk hot list head (MRU) to tail + let mut current = self.hot_head; + while let Some(ptr) = current { + if ptr == target_ptr { + return Some(rank); + } + rank += 1; + current = unsafe { ptr.as_ref().next }; + } + + // Walk cold list head to tail + let mut current = self.cold_head; + while let Some(ptr) = current { + if ptr == target_ptr { + return Some(rank); + } + rank += 1; + current = unsafe { ptr.as_ref().next }; + } + + None + } +} + +impl FrequencyTracking for LrukCache +where + K: Eq + Hash + Clone, + V: Clone, +{ + fn frequency(&self, key: &K) -> Option { + self.access_count(key).map(|c| c as u64) + } +} + +impl HistoryTracking for LrukCache +where + K: Eq + Hash + Clone, + V: Clone, +{ + fn access_count(&self, key: &K) -> Option { + LrukCache::access_count(self, key) + } + + fn k_distance(&self, key: &K) -> Option { + LrukCache::k_distance(self, key) + } + + fn access_history(&self, key: &K) -> Option> { + LrukCache::access_history(self, key) + } + + fn k_value(&self) -> usize { + LrukCache::k_value(self) + } +} + // Proper cleanup when cache is dropped - free all heap-allocated nodes impl Drop for LrukCache { fn drop(&mut self) { @@ -1160,7 +1162,7 @@ where /// /// ```ignore /// use cachekit::policy::lru_k::LrukCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache: LrukCache = LrukCache::new(100); /// cache.insert(1, "one"); diff --git a/src/policy/mfu.rs b/src/policy/mfu.rs index 2a5789f..58299e8 100644 --- a/src/policy/mfu.rs +++ b/src/policy/mfu.rs @@ -169,8 +169,7 @@ use crate::metrics::snapshot::MfuMetricsSnapshot; use crate::metrics::traits::{ 
CoreMetricsRecorder, MetricsSnapshotProvider, MfuMetricsReadRecorder, MfuMetricsRecorder, }; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, MutableCache}; +use crate::traits::{Cache, EvictingCache, FrequencyTracking, VictimInspectable}; use rustc_hash::FxHashMap; use std::cmp::Ordering; use std::collections::BinaryHeap; @@ -213,8 +212,9 @@ impl Ord for HeapEntry { /// MFU cache core that evicts the most frequently used entry. /// -/// Implements [`CoreCache`], [`ReadOnlyCache`], and [`MutableCache`] for -/// generic cache access. Uses a [`BinaryHeap`] with `HeapEntry` wrappers +/// Implements [`Cache`] for generic cache access, plus +/// [`EvictingCache`], [`VictimInspectable`], and [`FrequencyTracking`] +/// capability traits. Uses a [`BinaryHeap`] with `HeapEntry` wrappers /// for O(log n) eviction of the highest-frequency entry. pub struct MfuCore { map: FxHashMap, @@ -403,6 +403,11 @@ where self.map.contains_key(key) } + /// Peeks at a value without updating frequency. + pub fn peek(&self, key: &K) -> Option<&V> { + self.map.get(key) + } + /// Removes all entries from the cache. 
pub fn clear(&mut self) { #[cfg(feature = "metrics")] @@ -544,7 +549,7 @@ where } } -impl ReadOnlyCache for MfuCore +impl Cache for MfuCore where K: Clone + Eq + Hash, { @@ -559,31 +564,52 @@ where fn capacity(&self) -> usize { MfuCore::capacity(self) } -} -impl CoreCache for MfuCore -where - K: Clone + Eq + Hash, -{ - fn insert(&mut self, key: K, value: V) -> Option { - MfuCore::insert(self, key, value) + fn peek(&self, key: &K) -> Option<&V> { + MfuCore::peek(self, key) } fn get(&mut self, key: &K) -> Option<&V> { MfuCore::get(self, key) } + fn insert(&mut self, key: K, value: V) -> Option { + MfuCore::insert(self, key, value) + } + + fn remove(&mut self, key: &K) -> Option { + MfuCore::remove(self, key) + } + fn clear(&mut self) { MfuCore::clear(self) } } -impl MutableCache for MfuCore +impl EvictingCache for MfuCore where K: Clone + Eq + Hash, { - fn remove(&mut self, key: &K) -> Option { - MfuCore::remove(self, key) + fn evict_one(&mut self) -> Option<(K, V)> { + self.pop_mfu() + } +} + +impl VictimInspectable for MfuCore +where + K: Clone + Eq + Hash, +{ + fn peek_victim(&self) -> Option<(&K, &V)> { + self.peek_mfu() + } +} + +impl FrequencyTracking for MfuCore +where + K: Clone + Eq + Hash, +{ + fn frequency(&self, key: &K) -> Option { + MfuCore::frequency(self, key) } } @@ -1056,12 +1082,12 @@ mod tests { } #[test] - fn mutable_cache_trait() { + fn cache_trait_remove() { let mut cache = MfuCore::new(5); cache.insert(1, 100); cache.insert(2, 200); - assert_eq!(MutableCache::remove(&mut cache, &1), Some(100)); + assert_eq!(Cache::remove(&mut cache, &1), Some(100)); assert!(!cache.contains(&1)); } diff --git a/src/policy/mru.rs b/src/policy/mru.rs index 6e4e95d..480cbe3 100644 --- a/src/policy/mru.rs +++ b/src/policy/mru.rs @@ -154,8 +154,7 @@ use crate::metrics::metrics_impl::CoreOnlyMetrics; use crate::metrics::snapshot::CoreOnlyMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CoreMetricsRecorder, MetricsSnapshotProvider}; -use 
crate::prelude::ReadOnlyCache; -use crate::traits::CoreCache; +use crate::traits::{Cache, EvictingCache}; use rustc_hash::FxHashMap; use std::hash::Hash; use std::marker::PhantomData; @@ -600,6 +599,37 @@ where self.validate_invariants(); } + /// Peeks at a value without updating MRU order. + #[inline] + pub fn peek(&self, key: &K) -> Option<&V> { + self.map + .get(key) + .map(|&node_ptr| unsafe { &(*node_ptr.as_ptr()).value }) + } + + /// Removes a key from the cache, returning its value. + pub fn remove(&mut self, key: &K) -> Option { + let node_ptr = self.map.remove(key)?; + self.detach(node_ptr); + let node = unsafe { Box::from_raw(node_ptr.as_ptr()) }; + + #[cfg(debug_assertions)] + self.validate_invariants(); + + Some(node.value) + } + + /// Removes and returns the most recently used entry. + pub fn pop_mru(&mut self) -> Option<(K, V)> { + let head_node = self.pop_head()?; + self.map.remove(&head_node.key); + + #[cfg(debug_assertions)] + self.validate_invariants(); + + Some((head_node.key, head_node.value)) + } + /// Validates internal data structure invariants. /// /// This method checks that: @@ -656,50 +686,28 @@ impl std::fmt::Debug for MruCore { } } -impl ReadOnlyCache for MruCore +impl Cache for MruCore where K: Clone + Eq + Hash, { #[inline] fn contains(&self, key: &K) -> bool { - self.map.contains_key(key) + MruCore::contains(self, key) } #[inline] fn len(&self) -> usize { - self.map.len() + MruCore::len(self) } #[inline] fn capacity(&self) -> usize { - self.capacity + MruCore::capacity(self) } -} -/// Implementation of the [`CoreCache`] trait for MRU. -/// -/// Allows `MruCore` to be used through the unified cache interface. 
-/// -/// # Example -/// -/// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; -/// use cachekit::policy::mru::MruCore; -/// -/// let mut cache: MruCore<&str, i32> = MruCore::new(100); -/// -/// // Use via CoreCache trait -/// cache.insert("key", 42); -/// assert_eq!(cache.get(&"key"), Some(&42)); -/// assert!(cache.contains(&"key")); -/// ``` -impl CoreCache for MruCore -where - K: Clone + Eq + Hash, -{ #[inline] - fn insert(&mut self, key: K, value: V) -> Option { - MruCore::insert(self, key, value) + fn peek(&self, key: &K) -> Option<&V> { + MruCore::peek(self, key) } #[inline] @@ -707,11 +715,31 @@ where MruCore::get(self, key) } + #[inline] + fn insert(&mut self, key: K, value: V) -> Option { + MruCore::insert(self, key, value) + } + + #[inline] + fn remove(&mut self, key: &K) -> Option { + MruCore::remove(self, key) + } + fn clear(&mut self) { MruCore::clear(self); } } +impl EvictingCache for MruCore +where + K: Clone + Eq + Hash, +{ + #[inline] + fn evict_one(&mut self) -> Option<(K, V)> { + self.pop_mru() + } +} + impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); @@ -1239,10 +1267,10 @@ mod tests { fn trait_insert_returns_old_value() { let mut cache: MruCore<&str, i32> = MruCore::new(10); - let first = CoreCache::insert(&mut cache, "key", 1); + let first = Cache::insert(&mut cache, "key", 1); assert_eq!(first, None); - let second = CoreCache::insert(&mut cache, "key", 2); + let second = Cache::insert(&mut cache, "key", 2); assert_eq!( second, Some(1), diff --git a/src/policy/nru.rs b/src/policy/nru.rs index a5a350d..0095ad1 100644 --- a/src/policy/nru.rs +++ b/src/policy/nru.rs @@ -129,7 +129,7 @@ //! //! ``` //! use cachekit::policy::nru::NruCache; -//! use cachekit::traits::{CoreCache, ReadOnlyCache}; +//! use cachekit::traits::Cache; //! //! let mut cache = NruCache::new(100); //! @@ -161,8 +161,7 @@ //! //! 
- Wikipedia: Cache replacement policies -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, MutableCache}; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::fmt::{Debug, Formatter}; use std::hash::Hash; @@ -199,7 +198,7 @@ struct Entry { /// /// ``` /// use cachekit::policy::nru::NruCache; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// /// let mut cache = NruCache::new(100); /// @@ -247,7 +246,7 @@ where /// /// ``` /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let cache: NruCache = NruCache::new(100); /// assert_eq!(cache.capacity(), 100); @@ -270,7 +269,7 @@ where /// /// ``` /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; + /// use cachekit::traits::Cache; /// /// let mut cache = NruCache::<&str, i32>::new(10); /// assert!(cache.is_empty()); @@ -291,7 +290,7 @@ where /// /// ``` /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = NruCache::new(10); /// cache.insert("a", 1); @@ -316,7 +315,7 @@ where /// /// ``` /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = NruCache::new(10); /// cache.insert("a", 1); @@ -398,90 +397,50 @@ where } } -impl ReadOnlyCache for NruCache +impl Cache for NruCache where K: Clone + Eq + Hash, { - /// Returns `true` if the cache contains the key. - /// - /// Does not affect the reference bit. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = NruCache::new(10); - /// cache.insert("key", 1); - /// - /// assert!(cache.contains(&"key")); - /// assert!(!cache.contains(&"missing")); - /// ``` #[inline] fn contains(&self, key: &K) -> bool { self.map.contains_key(key) } - /// Returns the number of entries in the cache. - /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = NruCache::new(10); - /// assert_eq!(cache.len(), 0); - /// - /// cache.insert("a", 1); - /// assert_eq!(cache.len(), 1); - /// ``` #[inline] fn len(&self) -> usize { self.map.len() } - /// Returns the maximum capacity of the cache. - /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::ReadOnlyCache; - /// - /// let cache = NruCache::::new(50); - /// assert_eq!(cache.capacity(), 50); - /// ``` #[inline] fn capacity(&self) -> usize { self.capacity } -} -impl CoreCache for NruCache -where - K: Clone + Eq + Hash, -{ - /// Inserts a key-value pair into the cache. - /// - /// If the key exists, updates the value and sets the reference bit. - /// If at capacity, evicts using the NRU algorithm. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = NruCache::new(2); - /// cache.insert("a", 1); - /// cache.insert("b", 2); - /// - /// // Update existing - /// let old = cache.insert("a", 10); - /// assert_eq!(old, Some(1)); - /// ``` + #[inline] + fn peek(&self, key: &K) -> Option<&V> { + self.map.get(key).map(|e| &e.value) + } + + #[inline] + fn get(&mut self, key: &K) -> Option<&V> { + if let Some(entry) = self.map.get_mut(key) { + entry.referenced = true; + #[cfg(feature = "metrics")] + { + self.metrics.get_calls += 1; + self.metrics.get_hits += 1; + } + Some(&entry.value) + } else { + #[cfg(feature = "metrics")] + { + self.metrics.get_calls += 1; + self.metrics.get_misses += 1; + } + None + } + } + #[inline] fn insert(&mut self, key: K, value: V) -> Option { #[cfg(feature = "metrics")] @@ -534,96 +493,13 @@ where None } - /// Gets a reference to the value for a key. - /// - /// Sets the reference bit on access. - /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = NruCache::new(10); - /// cache.insert("key", 42); - /// - /// // Access sets reference bit - this entry gets protection - /// assert_eq!(cache.get(&"key"), Some(&42)); - /// ``` - #[inline] - fn get(&mut self, key: &K) -> Option<&V> { - if let Some(entry) = self.map.get_mut(key) { - entry.referenced = true; - #[cfg(feature = "metrics")] - { - self.metrics.get_calls += 1; - self.metrics.get_hits += 1; - } - Some(&entry.value) - } else { - #[cfg(feature = "metrics")] - { - self.metrics.get_calls += 1; - self.metrics.get_misses += 1; - } - None - } - } - - /// Clears all entries from the cache. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// - /// let mut cache = NruCache::new(10); - /// cache.insert("a", 1); - /// cache.insert("b", 2); - /// - /// cache.clear(); - /// assert!(cache.is_empty()); - /// ``` - fn clear(&mut self) { - self.map.clear(); - self.keys.clear(); - #[cfg(feature = "metrics")] - { - use crate::metrics::traits::CoreMetricsRecorder; - self.metrics.record_clear(); - } - } -} - -impl MutableCache for NruCache -where - K: Clone + Eq + Hash, -{ - /// Removes a key from the cache. - /// - /// # Example - /// - /// ``` - /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; - /// - /// let mut cache = NruCache::new(10); - /// cache.insert("key", 42); - /// - /// let removed = cache.remove(&"key"); - /// assert_eq!(removed, Some(42)); - /// assert!(!cache.contains(&"key")); - /// ``` #[inline] fn remove(&mut self, key: &K) -> Option { let entry = self.map.remove(key)?; let idx = entry.index; - // Swap-remove from keys vec self.keys.swap_remove(idx); - // Update index of swapped key if we didn't remove the last element if idx < self.keys.len() { let swapped_key = &self.keys[idx]; if let Some(swapped_entry) = self.map.get_mut(swapped_key) { @@ -633,6 +509,16 @@ where Some(entry.value) } + + fn clear(&mut self) { + self.map.clear(); + self.keys.clear(); + #[cfg(feature = "metrics")] + { + use crate::metrics::traits::CoreMetricsRecorder; + self.metrics.record_clear(); + } + } } #[cfg(feature = "metrics")] @@ -646,7 +532,7 @@ where /// /// ``` /// use cachekit::policy::nru::NruCache; - /// use cachekit::traits::CoreCache; + /// use cachekit::traits::Cache; /// /// let mut cache = NruCache::new(10); /// cache.insert("a", 1); @@ -853,7 +739,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::traits::{CoreCache, MutableCache}; + use crate::traits::Cache; #[allow(dead_code)] const _: () = { 
diff --git a/src/policy/random.rs b/src/policy/random.rs index 0879175..cc92cc6 100644 --- a/src/policy/random.rs +++ b/src/policy/random.rs @@ -160,8 +160,7 @@ use crate::metrics::metrics_impl::CoreOnlyMetrics; use crate::metrics::snapshot::CoreOnlyMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CoreMetricsRecorder, MetricsSnapshotProvider}; -use crate::prelude::ReadOnlyCache; -use crate::traits::{CoreCache, MutableCache}; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::hash::Hash; @@ -635,7 +634,7 @@ impl std::fmt::Debug for RandomCore { } } -impl ReadOnlyCache for RandomCore +impl Cache for RandomCore where K: Clone + Eq + Hash, { @@ -653,32 +652,10 @@ where fn capacity(&self) -> usize { self.capacity } -} -/// Implementation of the [`CoreCache`] trait for Random. -/// -/// Allows `RandomCore` to be used through the unified cache interface. -/// -/// # Example -/// -/// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; -/// use cachekit::policy::random::RandomCore; -/// -/// let mut cache: RandomCore<&str, i32> = RandomCore::new(100); -/// -/// // Use via CoreCache trait -/// cache.insert("key", 42); -/// assert_eq!(cache.get(&"key"), Some(&42)); -/// assert!(cache.contains(&"key")); -/// ``` -impl CoreCache for RandomCore -where - K: Clone + Eq + Hash, -{ #[inline] - fn insert(&mut self, key: K, value: V) -> Option { - RandomCore::insert(self, key, value) + fn peek(&self, key: &K) -> Option<&V> { + self.map.get(key).map(|(_, v)| v) } #[inline] @@ -686,6 +663,15 @@ where RandomCore::get(self, key) } + #[inline] + fn insert(&mut self, key: K, value: V) -> Option { + RandomCore::insert(self, key, value) + } + + fn remove(&mut self, key: &K) -> Option { + RandomCore::remove(self, key) + } + fn clear(&mut self) { RandomCore::clear(self); } @@ -723,30 +709,6 @@ where } } -impl MutableCache for RandomCore -where - K: Clone + Eq + Hash, -{ - /// Removes a specific key-value pair. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::policy::random::RandomCore; - /// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; - /// - /// let mut cache = RandomCore::new(10); - /// cache.insert("key", 42); - /// - /// assert_eq!(cache.remove(&"key"), Some(42)); - /// assert!(!cache.contains(&"key")); - /// ``` - #[inline] - fn remove(&mut self, key: &K) -> Option { - RandomCore::remove(self, key) - } -} - // --------------------------------------------------------------------------- // Clone // --------------------------------------------------------------------------- @@ -1359,11 +1321,11 @@ mod tests { #[test] fn trait_insert_matches_inherent() { - use crate::traits::CoreCache; + use crate::traits::Cache; let mut cache = RandomCore::new(100); - assert_eq!(CoreCache::insert(&mut cache, "a", 1), None); - assert_eq!(CoreCache::insert(&mut cache, "a", 2), Some(1)); + assert_eq!(Cache::insert(&mut cache, "a", 1), None); + assert_eq!(Cache::insert(&mut cache, "a", 2), Some(1)); } } @@ -1439,13 +1401,13 @@ mod tests { #[test] fn remove_via_mutable_cache_trait() { - use crate::traits::MutableCache; + use crate::traits::Cache; let mut cache = RandomCore::new(100); cache.insert("a", 1); cache.insert("b", 2); - assert_eq!(MutableCache::remove(&mut cache, &"a"), Some(1)); + assert_eq!(Cache::remove(&mut cache, &"a"), Some(1)); assert!(!cache.contains(&"a")); assert!(cache.contains(&"b")); } diff --git a/src/policy/s3_fifo.rs b/src/policy/s3_fifo.rs index db98742..316c4f6 100644 --- a/src/policy/s3_fifo.rs +++ b/src/policy/s3_fifo.rs @@ -48,7 +48,7 @@ //! //! ``` //! use cachekit::policy::s3_fifo::S3FifoCache; -//! use cachekit::traits::CoreCache; +//! use cachekit::traits::Cache; //! //! let mut cache: S3FifoCache = S3FifoCache::new(100); //! 
@@ -88,9 +88,7 @@ use crate::metrics::snapshot::S3FifoMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CoreMetricsRecorder, MetricsSnapshotProvider, S3FifoMetricsRecorder}; -use crate::traits::CoreCache; -use crate::traits::MutableCache; -use crate::traits::ReadOnlyCache; +use crate::traits::{Cache, EvictingCache}; /// Maximum frequency value (2 bits = 0-3). const MAX_FREQ: u8 = 3; @@ -363,7 +361,7 @@ impl Debug for IntoIter { /// /// ``` /// use cachekit::policy::s3_fifo::S3FifoCache; -/// use cachekit::traits::CoreCache; +/// use cachekit::traits::Cache; /// /// let mut cache: S3FifoCache = S3FifoCache::new(100); /// @@ -1215,7 +1213,7 @@ where // Cache trait implementations // --------------------------------------------------------------------------- -impl ReadOnlyCache for S3FifoCache +impl Cache for S3FifoCache where K: Clone + Eq + Hash, { @@ -1233,12 +1231,12 @@ where fn capacity(&self) -> usize { self.capacity } -} -impl CoreCache for S3FifoCache -where - K: Clone + Eq + Hash, -{ + #[inline] + fn peek(&self, key: &K) -> Option<&V> { + S3FifoCache::peek(self, key) + } + #[inline] fn insert(&mut self, key: K, value: V) -> Option { S3FifoCache::insert(self, key, value) @@ -1249,18 +1247,63 @@ where S3FifoCache::get(self, key) } + #[inline] + fn remove(&mut self, key: &K) -> Option { + S3FifoCache::remove(self, key) + } + fn clear(&mut self) { S3FifoCache::clear(self); } } -impl MutableCache for S3FifoCache +impl EvictingCache for S3FifoCache where K: Clone + Eq + Hash, { - #[inline] - fn remove(&mut self, key: &K) -> Option { - S3FifoCache::remove(self, key) + fn evict_one(&mut self) -> Option<(K, V)> { + self.evict_if_needed(); + // S3-FIFO eviction is integrated into insert; explicit eviction + // pops from Small (freq==0) then Main (freq==0). + let (id, freq) = if self.small_tail.is_some() { + self.pop_small_tail()? + } else { + self.pop_main_tail()? 
+ }; + + if freq > 0 { + // Re-insert promoted entry, then try again from the other queue + let queue = self.arena.get(id).expect("arena/map out of sync").queue; + match queue { + QueueKind::Small => { + *self.arena.get_mut(id).unwrap().freq.get_mut() = 0; + self.attach_main_head(id); + }, + QueueKind::Main => { + *self.arena.get_mut(id).unwrap().freq.get_mut() = freq - 1; + self.attach_main_head(id); + }, + } + // Retry - pop from the queue we didn't just try + let (id2, freq2) = if self.main_tail.is_some() { + self.pop_main_tail()? + } else { + self.pop_small_tail()? + }; + if freq2 > 0 { + // Re-insert again; give up on explicit eviction + *self.arena.get_mut(id2).unwrap().freq.get_mut() = freq2.saturating_sub(1); + self.attach_main_head(id2); + return None; + } + let node = self.arena.remove(id2).expect("arena/map out of sync"); + self.map.remove(&node.key); + Some((node.key, node.value)) + } else { + let node = self.arena.remove(id).expect("arena/map out of sync"); + self.map.remove(&node.key); + Some((node.key, node.value)) + } } } @@ -2181,20 +2224,13 @@ mod tests { mod trait_impls { use super::*; - use crate::traits::{CoreCache, MutableCache}; - - #[test] - fn implements_core_cache() { - fn assert_core_cache, K, V>(_: &T) {} - let cache: S3FifoCache<&str, i32> = S3FifoCache::new(10); - assert_core_cache(&cache); - } + use crate::traits::Cache; #[test] - fn implements_mutable_cache() { - fn assert_mutable_cache, K, V>(_: &T) {} + fn implements_cache() { + fn assert_cache, K, V>(_: &T) {} let cache: S3FifoCache<&str, i32> = S3FifoCache::new(10); - assert_mutable_cache(&cache); + assert_cache(&cache); } #[test] diff --git a/src/policy/slru.rs b/src/policy/slru.rs index 2adbd77..576e1ce 100644 --- a/src/policy/slru.rs +++ b/src/policy/slru.rs @@ -122,11 +122,8 @@ //! //! ## Removal Policy //! -//! `SlruCore` intentionally does not support arbitrary key removal -//! ([`MutableCache`](crate::traits::MutableCache)). Allowing `remove(&K)` would -//! 
break the probationary/protected segment accounting — the segment a removed -//! entry belonged to cannot be reliably adjusted without re-scanning the lists. -//! Entries leave the cache only via the SLRU eviction policy or [`clear`](SlruCore::clear). +//! `SlruCore` supports key removal via [`Cache::remove`]. +//! Removing an entry detaches it from its segment and adjusts the segment counters. //! //! ## Thread Safety //! @@ -151,8 +148,7 @@ use crate::metrics::metrics_impl::SlruMetrics; use crate::metrics::snapshot::SlruMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CoreMetricsRecorder, MetricsSnapshotProvider, SlruMetricsRecorder}; -use crate::prelude::ReadOnlyCache; -use crate::traits::CoreCache; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::hash::Hash; use std::iter::FusedIterator; @@ -190,9 +186,8 @@ struct Node { /// to protected. This provides scan resistance by keeping one-time accesses /// from polluting the main cache. /// -/// `SlruCore` does not support arbitrary key removal (no -/// [`MutableCache`](crate::traits::MutableCache)). Entries leave only via the -/// SLRU eviction policy or [`clear`](SlruCore::clear). +/// `SlruCore` supports key removal via [`Cache::remove`]. +/// Removing an entry detaches it from its segment and adjusts the segment counters. /// /// # Type Parameters /// @@ -700,6 +695,30 @@ where self.validate_invariants(); } + /// Removes a specific key-value pair, returning the value if it existed. + /// + /// Detaches the entry from its segment (probationary or protected) and + /// adjusts the segment counters. 
+ /// + /// # Example + /// + /// ``` + /// use cachekit::policy::slru::SlruCore; + /// + /// let mut cache = SlruCore::new(100, 0.25); + /// cache.insert("key", 42); + /// assert_eq!(cache.remove(&"key"), Some(42)); + /// assert!(!cache.contains(&"key")); + /// ``` + pub fn remove(&mut self, key: &K) -> Option { + let node_ptr = self.map.remove(key)?; + self.detach(node_ptr); + unsafe { + let node = Box::from_raw(node_ptr.as_ptr()); + Some(node.value) + } + } + /// Returns an iterator over shared references to cached key-value pairs. /// /// Visits probationary entries (MRU to LRU) then protected entries. @@ -958,50 +977,45 @@ where } } -impl ReadOnlyCache for SlruCore -where - K: Clone + Eq + Hash, -{ - #[inline] - fn contains(&self, key: &K) -> bool { - self.map.contains_key(key) - } - - #[inline] - fn len(&self) -> usize { - self.map.len() - } - - #[inline] - fn capacity(&self) -> usize { - self.capacity - } -} - -/// Implementation of the [`CoreCache`] trait for SLRU. +/// Implementation of the [`Cache`] trait for SLRU. /// /// Allows `SlruCore` to be used through the unified cache interface. 
/// /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// use cachekit::policy::slru::SlruCore; /// /// let mut cache: SlruCore<&str, i32> = SlruCore::new(100, 0.25); /// -/// // Use via CoreCache trait +/// // Use via Cache trait /// cache.insert("key", 42); /// assert_eq!(cache.get(&"key"), Some(&42)); /// assert!(cache.contains(&"key")); /// ``` -impl CoreCache for SlruCore +impl Cache for SlruCore where K: Clone + Eq + Hash, { #[inline] - fn insert(&mut self, key: K, value: V) -> Option { - SlruCore::insert(self, key, value) + fn contains(&self, key: &K) -> bool { + SlruCore::contains(self, key) + } + + #[inline] + fn len(&self) -> usize { + SlruCore::len(self) + } + + #[inline] + fn capacity(&self) -> usize { + SlruCore::capacity(self) + } + + #[inline] + fn peek(&self, key: &K) -> Option<&V> { + SlruCore::peek(self, key) } #[inline] @@ -1009,6 +1023,16 @@ where SlruCore::get(self, key) } + #[inline] + fn insert(&mut self, key: K, value: V) -> Option { + SlruCore::insert(self, key, value) + } + + #[inline] + fn remove(&mut self, key: &K) -> Option { + SlruCore::remove(self, key) + } + fn clear(&mut self) { SlruCore::clear(self); } @@ -2066,10 +2090,10 @@ mod tests { fn trait_insert_returns_old_value() { let mut cache: SlruCore<&str, i32> = SlruCore::new(10, 0.25); - let first = CoreCache::insert(&mut cache, "key", 1); + let first = Cache::insert(&mut cache, "key", 1); assert_eq!(first, None); - let second = CoreCache::insert(&mut cache, "key", 2); + let second = Cache::insert(&mut cache, "key", 2); assert_eq!( second, Some(1), diff --git a/src/policy/two_q.rs b/src/policy/two_q.rs index 41a9330..b853943 100644 --- a/src/policy/two_q.rs +++ b/src/policy/two_q.rs @@ -143,8 +143,7 @@ use crate::metrics::metrics_impl::TwoQMetrics; use crate::metrics::snapshot::TwoQMetricsSnapshot; #[cfg(feature = "metrics")] use crate::metrics::traits::{CoreMetricsRecorder, MetricsSnapshotProvider, 
TwoQMetricsRecorder}; -use crate::prelude::ReadOnlyCache; -use crate::traits::CoreCache; +use crate::traits::Cache; use rustc_hash::FxHashMap; use std::collections::VecDeque; use std::hash::Hash; @@ -772,6 +771,49 @@ where while self.pop_protected_tail().is_some() {} self.map.clear(); } + + /// Side-effect-free lookup by key. + /// + /// Does not promote entries between queues or update MRU position. + /// + /// # Example + /// + /// ``` + /// use cachekit::policy::two_q::TwoQCore; + /// + /// let mut cache = TwoQCore::new(100, 0.25); + /// cache.insert("key", 42); + /// assert_eq!(cache.peek(&"key"), Some(&42)); + /// assert_eq!(cache.peek(&"missing"), None); + /// ``` + #[inline] + pub fn peek(&self, key: &K) -> Option<&V> { + self.map.get(key).map(|&ptr| unsafe { &ptr.as_ref().value }) + } + + /// Removes a specific key-value pair, returning the value if it existed. + /// + /// Detaches the entry from its queue (probation or protected) and + /// adjusts the queue counters. + /// + /// # Example + /// + /// ``` + /// use cachekit::policy::two_q::TwoQCore; + /// + /// let mut cache = TwoQCore::new(100, 0.25); + /// cache.insert("key", 42); + /// assert_eq!(cache.remove(&"key"), Some(42)); + /// assert!(!cache.contains(&"key")); + /// ``` + pub fn remove(&mut self, key: &K) -> Option { + let node_ptr = self.map.remove(key)?; + self.detach(node_ptr); + unsafe { + let node = Box::from_raw(node_ptr.as_ptr()); + Some(node.value) + } + } } impl Drop for TwoQCore { @@ -796,50 +838,45 @@ where } } -impl ReadOnlyCache for TwoQCore -where - K: Clone + Eq + Hash, -{ - #[inline] - fn contains(&self, key: &K) -> bool { - self.map.contains_key(key) - } - - #[inline] - fn len(&self) -> usize { - self.map.len() - } - - #[inline] - fn capacity(&self) -> usize { - self.protected_cap - } -} - -/// Implementation of the [`CoreCache`] trait for 2Q. +/// Implementation of the [`Cache`] trait for 2Q. /// /// Allows `TwoQCore` to be used through the unified cache interface. 
/// /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// use cachekit::policy::two_q::TwoQCore; /// /// let mut cache: TwoQCore<&str, i32> = TwoQCore::new(100, 0.25); /// -/// // Use via CoreCache trait +/// // Use via Cache trait /// cache.insert("key", 42); /// assert_eq!(cache.get(&"key"), Some(&42)); /// assert!(cache.contains(&"key")); /// ``` -impl CoreCache for TwoQCore +impl Cache for TwoQCore where K: Clone + Eq + Hash, { #[inline] - fn insert(&mut self, key: K, value: V) -> Option { - TwoQCore::insert(self, key, value) + fn contains(&self, key: &K) -> bool { + TwoQCore::contains(self, key) + } + + #[inline] + fn len(&self) -> usize { + TwoQCore::len(self) + } + + #[inline] + fn capacity(&self) -> usize { + TwoQCore::capacity(self) + } + + #[inline] + fn peek(&self, key: &K) -> Option<&V> { + TwoQCore::peek(self, key) } #[inline] @@ -847,6 +884,16 @@ where TwoQCore::get(self, key) } + #[inline] + fn insert(&mut self, key: K, value: V) -> Option { + TwoQCore::insert(self, key, value) + } + + #[inline] + fn remove(&mut self, key: &K) -> Option { + TwoQCore::remove(self, key) + } + fn clear(&mut self) { TwoQCore::clear(self); } @@ -1734,10 +1781,10 @@ mod tests { fn trait_insert_returns_old_value() { let mut cache: TwoQCore<&str, i32> = TwoQCore::new(10, 0.25); - let first = CoreCache::insert(&mut cache, "key", 1); + let first = Cache::insert(&mut cache, "key", 1); assert_eq!(first, None, "First insert of new key should return None"); - let second = CoreCache::insert(&mut cache, "key", 2); + let second = Cache::insert(&mut cache, "key", 2); assert_eq!(second, Some(1), "Second insert should return old value"); } diff --git a/src/prelude.rs b/src/prelude.rs index 168535e..eb9b6da 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -6,16 +6,14 @@ //! use cachekit::prelude::*; //! ``` //! -//! This gives you the core traits ([`CoreCache`], [`ReadOnlyCache`], -//! 
[`MutableCache`]), the policy-specific traits, and the [`CacheBuilder`] -//! entry point. Internal data structures and concrete policy types are -//! available from their respective modules ([`ds`](crate::ds), -//! [`policy`](crate::policy)). +//! This gives you the main [`Cache`] trait, the optional capability traits, +//! and the [`CacheBuilder`] entry point. Concrete policy types are available +//! from their respective modules ([`policy`](crate::policy)). -pub use crate::builder::{Cache, CacheBuilder, CachePolicy}; +pub use crate::builder::{CacheBuilder, CachePolicy, DynCache}; pub use crate::traits::{ - ConcurrentCache, CoreCache, FifoCacheTrait, LfuCacheTrait, LruCacheTrait, LrukCacheTrait, - MutableCache, ReadOnlyCache, + Cache, ConcurrentCache, EvictingCache, FrequencyTracking, HistoryTracking, RecencyTracking, + VictimInspectable, }; #[cfg(feature = "metrics")] diff --git a/src/traits.rs b/src/traits.rs index 1cbd241..11297b1 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -1,226 +1,98 @@ //! # Cache Trait Hierarchy //! -//! This module defines the trait hierarchy for the cache subsystem, providing a unified -//! interface for different cache eviction policies (FIFO, LRU, LFU, LRU-K) while ensuring -//! type safety and policy-appropriate operation sets. +//! This module defines a pluggable-policy cache interface. Users code against the +//! main [`Cache`] trait to swap eviction policies with minimal code changes; +//! optional capability traits expose advanced behaviour for users who need it. //! //! ## Architecture //! //! ```text -//! ┌─────────────────────────────────────────┐ -//! │ ReadOnlyCache │ -//! │ │ -//! │ contains(&, &K) → bool │ -//! │ len(&) → usize │ -//! │ is_empty(&) → bool │ -//! │ capacity(&) → usize │ -//! └──────────────────┬──────────────────────┘ -//! │ -//! ┌────────────────────────────┴────────────────────────────┐ -//! │ │ -//! ▼ ▼ -//! ┌────────────────────────────┐ ┌─────────────────────────────┐ -//! 
│ CoreCache │ │ ReadOnly*Cache Traits │ -//! │ │ │ (FIFO, LRU, LFU, LRU-K) │ -//! │ insert(&mut, K, V) │ │ │ -//! │ get(&mut, &K) → &V │ │ peek_*, *_rank, frequency │ -//! │ clear(&mut) │ │ │ -//! └────────────┬───────────────┘ └─────────────────────────────┘ -//! │ -//! ├────────────────────────────┬────────────────────────────┐ -//! │ │ │ -//! ▼ ▼ ▼ -//! ┌────────────────────────────┐ ┌────────────────────────────┐ ┌──────────────────────┐ -//! │ FifoCacheTrait │ │ MutableCache │ │ Policy-specific │ -//! │ │ │ │ │ read-only traits │ -//! │ pop_oldest() → (K, V) │ │ remove(&K) → Option │ │ enable inspection │ -//! │ peek_oldest() → (&K, &V) │ │ remove_batch(&[K]) │ │ without side effects│ -//! │ age_rank(&K) → usize │ │ │ └──────────────────────┘ -//! │ │ └──────────────┬─────────────┘ -//! │ ⚠ No arbitrary removal! │ │ -//! └────────────────────────────┘ │ -//! ┌───────────────┴───────────────┬──────────────────────┐ -//! │ │ │ -//! ▼ ▼ ▼ -//! ┌────────────────────────────┐ ┌────────────────────────────┐ ┌────────────────────┐ -//! │ LruCacheTrait │ │ LfuCacheTrait │ │ LrukCacheTrait │ -//! │ │ │ │ │ │ -//! │ pop_lru() → (K, V) │ │ pop_lfu() → (K, V) │ │ pop_lru_k() │ -//! │ peek_lru() → (&K, &V) │ │ peek_lfu() → (&K, &V) │ │ peek_lru_k() │ -//! │ touch(&K) → bool │ │ frequency(&K) → u64 │ │ k_value() → usize │ -//! │ recency_rank(&K) → usize │ │ reset_frequency(&K) │ │ access_history │ -//! └────────────────────────────┘ └────────────────────────────┘ └────────────────────┘ -//! ``` -//! -//! ## Trait Design Philosophy -//! -//! ```text -//! ┌──────────────────────────────────────────────────────────────────────────┐ -//! │ TRAIT HIERARCHY DESIGN │ -//! │ │ -//! │ 0. ReadOnlyCache: Pure inspection operations (no side effects) │ -//! │ └── contains, len, capacity, is_empty │ -//! │ │ -//! │ 1. CoreCache: Universal operations ALL caches must support │ -//! │ └── insert, get, clear (extends ReadOnlyCache) │ -//! │ │ -//! │ 2. 
MutableCache: Adds arbitrary key-based removal │ -//! │ └── remove(&K) - NOT suitable for FIFO (breaks insertion order) │ -//! │ │ -//! │ 3. Policy-Specific Traits: Add policy-appropriate eviction │ -//! │ ├── FIFO: pop_oldest (no arbitrary removal!) │ -//! │ ├── LRU: pop_lru + touch (recency-based) │ -//! │ ├── LFU: pop_lfu + frequency (frequency-based) │ -//! │ └── LRU-K: pop_lru_k + k_distance (scan-resistant) │ -//! │ │ -//! │ Key Insights: │ -//! │ • ReadOnlyCache enables const-safe APIs and concurrent readers │ -//! │ • FIFO extends CoreCache directly (NOT MutableCache) │ -//! │ • Read-only traits allow policy inspection without eviction changes │ -//! └──────────────────────────────────────────────────────────────────────────┘ +//! ┌────────────────────────────────────────────────────────────────────┐ +//! │ Cache │ +//! │ Main trait — every policy implements this. │ +//! │ │ +//! │ contains, len, is_empty, capacity │ +//! │ peek (side-effect-free), get (policy-tracked) │ +//! │ insert, remove, clear │ +//! └────────────────────┬──────────────────────────────────────────────┘ +//! │ +//! ┌──────────────────┼──────────────────┬───────────────────────┐ +//! │ │ │ │ +//! ▼ ▼ ▼ ▼ +//! EvictingCache VictimInspectable RecencyTracking FrequencyTracking +//! evict_one() peek_victim() touch, rank frequency +//! │ +//! HistoryTracking +//! access_history, +//! k_distance, ... //! ``` //! //! ## Trait Summary //! -//! | Trait | Extends | Purpose | -//! |------------------------|-------------------|--------------------------------------| -//! | `ReadOnlyCache` | - | Read-only inspection operations | -//! | `CoreCache` | `ReadOnlyCache` | Universal cache operations | -//! | `MutableCache` | `CoreCache` | Adds arbitrary key removal | -//! | `FifoCacheTrait` | `CoreCache` | FIFO-specific (no remove!) | -//! | `LruCacheTrait` | `MutableCache` | LRU-specific with recency tracking | -//! | `LfuCacheTrait` | `MutableCache` | LFU-specific with frequency tracking | -//! 
| `LrukCacheTrait` | `MutableCache` | LRU-K with K-distance tracking | -//! | - | - | - | -//! | `ConcurrentCache` | `Send + Sync` | Safety marker for thread-safe caches | -//! | `CacheTierManager` | - | Multi-tier cache management | -//! | `CacheFactory` | - | Cache instance creation | -//! | `AsyncCacheFuture` | `Send + Sync` | Future async operation support | -//! -//! ## Why FIFO Doesn't Extend MutableCache -//! -//! ```text -//! FIFO Cache Semantics: -//! ═══════════════════════════════════════════════════════════════════════════ -//! -//! VecDeque: [A] ─ [B] ─ [C] ─ [D] -//! ↑ ↑ -//! oldest newest -//! -//! If we allowed remove(&B): -//! VecDeque: [A] ─ [C] ─ [D] ← Order still intact, but... -//! -//! Problem: Now VecDeque doesn't track true insertion order! -//! - Stale entries accumulate -//! - age_rank() becomes O(n) scanning for valid entries -//! - FIFO semantics become muddled -//! -//! Solution: FifoCacheTrait extends CoreCache directly, ensuring -//! only FIFO-appropriate operations are available. -//! -//! ═══════════════════════════════════════════════════════════════════════════ -//! ``` -//! -//! ## Policy Comparison -//! -//! | Policy | Eviction Basis | Supports Remove | Best For | -//! |--------|------------------------|-----------------|--------------------------| -//! | FIFO | Insertion order | ❌ No | Predictable eviction | -//! | LRU | Last access time | ✅ Yes | Temporal locality | -//! | LFU | Access frequency | ✅ Yes | Stable hot spots | -//! | LRU-K | K-th access time | ✅ Yes | Scan resistance | -//! -//! ## Utility Traits -//! -//! ```text -//! ┌─────────────────────────────────────────────────────────────────────────┐ -//! │ unsafe trait ConcurrentCache │ -//! │ │ -//! │ Safety marker: Send + Sync │ -//! │ Purpose: Guarantee thread-safe cache implementations │ -//! │ Usage: fn use_cache + ConcurrentCache>(c: &C) │ -//! └─────────────────────────────────────────────────────────────────────────┘ -//! ``` -//! -//! ## CacheConfig -//! -//! 
| Field | Type | Default | Description | -//! |------------------|---------|---------|------------------------------------| -//! | `capacity` | `usize` | 1000 | Maximum entries | -//! | `enable_stats` | `bool` | false | Enable hit/miss tracking | -//! | `prealloc_memory`| `bool` | true | Pre-allocate memory for capacity | -//! | `thread_safe` | `bool` | false | Use internal synchronization | +//! | Trait | Extends | Purpose | +//! |------------------------|-------------|-----------------------------------------------| +//! | [`Cache`] | — | Universal cache operations (pluggable) | +//! | [`EvictingCache`] | `Cache` | Explicit policy-driven eviction | +//! | [`VictimInspectable`] | `Cache` | Read-only next-victim peek | +//! | [`RecencyTracking`] | `Cache` | Touch and recency-rank inspection | +//! | [`FrequencyTracking`] | `Cache` | Access-frequency inspection | +//! | [`HistoryTracking`] | `Cache` | LRU-K style access-history inspection | +//! | [`ConcurrentCache`] | `Send+Sync` | Thread-safety marker | +//! | [`CacheFactory`] | — | Cache instance creation | +//! | [`AsyncCacheFuture`] | `Send+Sync` | Future async operation support | //! //! ## Example Usage //! //! ``` -//! use cachekit::traits::{ -//! ReadOnlyCache, CoreCache, MutableCache, -//! FifoCacheTrait, LruCacheTrait, LfuCacheTrait, -//! }; -//! -//! // Read-only inspection - no side effects, works with shared references -//! fn cache_stats>>(cache: &C) -> (usize, usize, f64) { -//! let len = cache.len(); -//! let cap = cache.capacity(); -//! let utilization = len as f64 / cap as f64; -//! (len, cap, utilization) -//! } +//! use cachekit::traits::Cache; +//! use cachekit::policy::lru_k::LrukCache; //! -//! // Function accepting any cache -//! fn warm_cache>>(cache: &mut C, data: &[(u64, Vec)]) { +//! // Generic function — works with any policy +//! fn warm_cache>(cache: &mut C, data: &[(u64, String)]) { //! for (key, value) in data { //! cache.insert(*key, value.clone()); //! } //! } //! -//! 
// Function requiring removal capability (LRU, LFU - NOT FIFO) -//! fn invalidate_keys>>(cache: &mut C, keys: &[u64]) { -//! for key in keys { -//! cache.remove(key); -//! } -//! } -//! -//! // FIFO-specific function -//! fn evict_oldest_batch>>( -//! cache: &mut C, -//! count: usize, -//! ) -> Vec<(u64, Vec)> { -//! cache.pop_oldest_batch(count) -//! } -//! -//! // LRU-specific function -//! fn touch_hot_keys>>(cache: &mut C, keys: &[u64]) { -//! for key in keys { -//! cache.touch(key); -//! } -//! } -//! -//! // LFU-specific function with frequency-based prioritization -//! fn boost_key_priority>>(cache: &mut C, key: &u64) { -//! cache.increment_frequency(key); -//! } +//! let mut cache = LrukCache::new(100); +//! warm_cache(&mut cache, &[(1, "one".to_string()), (2, "two".to_string())]); +//! assert_eq!(cache.len(), 2); +//! assert_eq!(cache.peek(&1), Some(&"one".to_string())); //! ``` //! //! ## Thread Safety //! //! - Individual cache implementations are **NOT thread-safe** by default -//! - Use `ConcurrentCache` marker trait to identify thread-safe implementations +//! - Use [`ConcurrentCache`] marker trait to identify thread-safe implementations //! - Wrap non-concurrent caches in `Arc>` for shared access //! - Some implementations (e.g., `ConcurrentLruCache`) provide built-in concurrency //! +//! ## CacheConfig +//! +//! | Field | Type | Default | Description | +//! |------------------|---------|---------|------------------------------------| +//! | `capacity` | `usize` | 1000 | Maximum entries | +//! | `enable_stats` | `bool` | false | Enable hit/miss tracking | +//! | `prealloc_memory`| `bool` | true | Pre-allocate memory for capacity | +//! | `thread_safe` | `bool` | false | Use internal synchronization | +//! //! ## Implementation Notes //! -//! - **Trait Bounds**: `ReadOnlyCache` and `CoreCache` have no bounds on K, V; implementations add as needed -//! 
- **Default Implementations**: `is_empty()`, `total_misses()`, `remove_batch()`, `pop_oldest_batch()` -//! - **Batch Operations**: Default implementations loop over single operations -//! - **Async Support**: `AsyncCacheFuture` prepared for Phase 2 async-trait integration +//! - **Object safety**: [`Cache`] is intentionally object-safe (`Box>`) +//! - **Default Implementations**: `is_empty()` +//! - **Batch Operations**: Stay as inherent methods for buffer-reuse ergonomics +//! - **Async Support**: [`AsyncCacheFuture`] prepared for Phase 2 async-trait integration + +// --------------------------------------------------------------------------- +// Layer 1 — Main trait +// --------------------------------------------------------------------------- -/// Read-only cache operations that don't modify cache state. +/// Universal cache operations that all policies implement. +/// +/// This is the primary user-facing trait. Code written against `Cache` can +/// swap eviction policies without changing call sites. /// -/// This trait defines inspection operations that are safe to call from shared references -/// and don't affect eviction order or modify the cache contents. These operations are -/// guaranteed not to trigger evictions or update access patterns. +/// The trait is intentionally **object-safe** to support `Box>`. 
/// /// # Type Parameters /// @@ -230,917 +102,206 @@ /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; +/// use cachekit::traits::Cache; /// use cachekit::policy::lru_k::LrukCache; /// -/// fn cache_stats>(cache: &C) -> (usize, usize, bool) { -/// (cache.len(), cache.capacity(), cache.contains(&42)) +/// fn use_any_cache>(cache: &mut C) { +/// cache.insert(1, "hello".to_string()); +/// assert_eq!(cache.peek(&1), Some(&"hello".to_string())); +/// assert_eq!(cache.get(&1), Some(&"hello".to_string())); +/// assert_eq!(cache.remove(&1), Some("hello".to_string())); /// } /// /// let mut cache = LrukCache::new(100); -/// cache.insert(42, "answer".to_string()); -/// -/// let (len, cap, has_answer) = cache_stats(&cache); -/// assert_eq!(len, 1); -/// assert_eq!(cap, 100); -/// assert!(has_answer); +/// use_any_cache(&mut cache); /// ``` -/// -/// # Design Rationale -/// -/// Separating read-only operations enables: -/// - **Const-safe APIs**: Functions can require immutable access -/// - **Concurrent Readers**: Read-only views don't need write locks -/// - **Clear Intent**: Callers signal they won't modify the cache -/// - **Policy Independence**: Works with any cache implementation -pub trait ReadOnlyCache { +pub trait Cache { /// Checks if a key exists without updating access state. - /// - /// This operation never affects eviction order or triggers any policy updates. - /// Safe to call from any context without side effects. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{ReadOnlyCache, CoreCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// cache.insert(1, "value"); - /// - /// assert!(cache.contains(&1)); - /// assert!(!cache.contains(&99)); - /// ``` fn contains(&self, key: &K) -> bool; - /// Returns the current number of entries in the cache. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{ReadOnlyCache, CoreCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// assert_eq!(cache.len(), 0); - /// - /// cache.insert(1, "one"); - /// cache.insert(2, "two"); - /// assert_eq!(cache.len(), 2); - /// ``` + /// Returns the current number of entries. fn len(&self) -> usize; /// Returns `true` if the cache contains no entries. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{ReadOnlyCache, CoreCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache: LrukCache = LrukCache::new(10); - /// assert!(cache.is_empty()); - /// - /// cache.insert(1, "value"); - /// assert!(!cache.is_empty()); - /// ``` fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the maximum capacity of the cache. + fn capacity(&self) -> usize; + + /// Side-effect-free lookup by key. /// - /// # Example - /// - /// ``` - /// use cachekit::traits::ReadOnlyCache; - /// use cachekit::policy::lru_k::LrukCache; + /// Does not update access patterns, eviction order, or any internal state. + fn peek(&self, key: &K) -> Option<&V>; + + /// Policy-tracked lookup by key. /// - /// let cache: LrukCache = LrukCache::new(100); - /// assert_eq!(cache.capacity(), 100); - /// ``` - fn capacity(&self) -> usize; -} + /// May update internal state (access time, frequency, reference bits) + /// depending on the eviction policy. Use [`peek`](Self::peek) if you + /// need a read without side effects. + fn get(&mut self, key: &K) -> Option<&V>; -/// Core cache operations that all caches support. -/// -/// This trait defines the fundamental operations that make sense for any cache type, -/// regardless of eviction policy. All policy-specific traits extend this. It includes -/// both read-only operations (inherited from [`ReadOnlyCache`]) and write operations -/// like insert and get. 
-/// -/// # Type Parameters -/// -/// - `K`: Key type (implementations typically require `Eq + Hash`) -/// - `V`: Value type -/// -/// # Example -/// -/// ``` -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; -/// use cachekit::policy::lru_k::LrukCache; -/// -/// fn warm_cache>(cache: &mut C, data: &[(u64, String)]) { -/// for (key, value) in data { -/// cache.insert(*key, value.clone()); -/// } -/// } -/// -/// let mut cache = LrukCache::new(100); -/// warm_cache(&mut cache, &[(1, "one".to_string()), (2, "two".to_string())]); -/// assert_eq!(cache.len(), 2); -/// ``` -pub trait CoreCache: ReadOnlyCache { /// Inserts a key-value pair, returning the previous value if it existed. /// /// If the cache is at capacity, an entry may be evicted according to the /// cache's eviction policy before the new entry is inserted. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// - /// // New key returns None - /// assert_eq!(cache.insert(1, "first"), None); - /// - /// // Existing key returns previous value - /// assert_eq!(cache.insert(1, "second"), Some("first")); - /// ``` fn insert(&mut self, key: K, value: V) -> Option; - /// Gets a reference to a value by key. - /// - /// May update internal state (access time, frequency) depending on the - /// eviction policy. Use [`contains`](ReadOnlyCache::contains) if you only need - /// to check existence without affecting eviction order. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// cache.insert(1, "value"); - /// - /// assert_eq!(cache.get(&1), Some(&"value")); - /// assert_eq!(cache.get(&99), None); - /// ``` - fn get(&mut self, key: &K) -> Option<&V>; + /// Removes a specific key-value pair, returning the value if it existed. 
+ fn remove(&mut self, key: &K) -> Option; /// Removes all entries from the cache. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, ReadOnlyCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// cache.insert(1, "one"); - /// cache.insert(2, "two"); - /// assert_eq!(cache.len(), 2); - /// - /// cache.clear(); - /// assert!(cache.is_empty()); - /// ``` fn clear(&mut self); } -/// Caches that support arbitrary key-based removal. -/// -/// This trait extends [`CoreCache`] with the ability to remove entries by key. -/// Appropriate for LRU, LFU, and general hash-map style caches where arbitrary -/// removal doesn't violate policy semantics. +// --------------------------------------------------------------------------- +// Layer 2 — Optional capability traits +// --------------------------------------------------------------------------- + +/// Explicitly evict one entry according to the policy. /// -/// **Note**: FIFO caches intentionally do NOT implement this trait because -/// arbitrary removal would violate FIFO semantics. Use [`FifoCacheTrait`] instead. +/// Not all policies expose this; some (ARC, CAR, NRU, Random, SLRU, 2Q) only +/// evict implicitly during [`Cache::insert`]. 
/// /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; -/// use cachekit::policy::lru_k::LrukCache; -/// -/// fn invalidate_keys<C: MutableCache<u64, String>>(cache: &mut C, keys: &[u64]) { -/// for key in keys { -/// cache.remove(key); -/// } -/// } +/// use cachekit::traits::{Cache, EvictingCache}; +/// use cachekit::policy::fifo::FifoCache; /// -/// let mut cache = LrukCache::new(100); -/// cache.insert(1, "one".to_string()); -/// cache.insert(2, "two".to_string()); -/// cache.insert(3, "three".to_string()); +/// let mut cache = FifoCache::new(10); +/// cache.insert(1, "first"); +/// cache.insert(2, "second"); /// -/// invalidate_keys(&mut cache, &[1, 3]); -/// assert!(!cache.contains(&1)); -/// assert!(cache.contains(&2)); -/// assert!(!cache.contains(&3)); +/// let evicted = cache.evict_one(); +/// assert_eq!(evicted, Some((1, "first"))); /// ``` -pub trait MutableCache<K, V>: CoreCache<K, V> { - /// Removes a specific key-value pair. - /// - /// Returns the removed value if the key existed, or `None` if it didn't. - /// - /// # Example +pub trait EvictingCache<K, V>: Cache<K, V> { + /// Removes and returns one entry selected by the eviction policy. /// - /// ``` - /// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// cache.insert(1, "value"); - /// - /// assert_eq!(cache.remove(&1), Some("value")); - /// assert_eq!(cache.remove(&1), None); // Already removed - /// ``` - fn remove(&mut self, key: &K) -> Option<V>; - - /// Removes multiple keys, appending results to the provided buffer. - /// - /// Results are appended in the same order as the input keys. Callers - /// can reuse the buffer across calls to avoid repeated allocation. - /// The default implementation loops over [`remove`](Self::remove).
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::new(10); - /// cache.insert(1, "one"); - /// cache.insert(2, "two"); - /// cache.insert(3, "three"); - /// - /// let mut results = Vec::new(); - /// cache.remove_batch_into(&[1, 99, 3], &mut results); - /// assert_eq!(results, vec![Some("one"), None, Some("three")]); - /// assert_eq!(cache.len(), 1); - /// ``` - fn remove_batch_into(&mut self, keys: &[K], out: &mut Vec>) { - out.reserve(keys.len()); - out.extend(keys.iter().map(|k| self.remove(k))); - } - - /// Removes multiple keys, returning results in a new `Vec`. - /// - /// Convenience wrapper around [`remove_batch_into`](Self::remove_batch_into). - /// Prefer `remove_batch_into` when reusing a buffer across calls. + /// Returns `None` if the cache is empty. #[must_use] - fn remove_batch(&mut self, keys: &[K]) -> Vec> { - let mut out = Vec::with_capacity(keys.len()); - self.remove_batch_into(keys, &mut out); - out - } + fn evict_one(&mut self) -> Option<(K, V)>; } -/// FIFO-specific operations that respect insertion order. -/// -/// This trait extends [`CoreCache`] with FIFO-appropriate operations. -/// Importantly, it does NOT extend [`MutableCache`] because arbitrary removal -/// would violate FIFO semantics (insertion order tracking). +/// Read-only peek at the next eviction candidate. /// -/// # Design Rationale -/// -/// FIFO caches evict in insertion order. If we allowed `remove(&key)`: -/// - The queue would have "holes" -/// - `age_rank()` would need expensive O(n) scanning -/// - True insertion order would be lost +/// Only implemented by policies where the victim is cheap and stable to +/// identify without mutating internal state. Policies that require sweeps +/// or adaptive decisions do not implement this. 
/// /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; +/// use cachekit::traits::{Cache, VictimInspectable}; /// use cachekit::policy::fifo::FifoCache; /// -/// let mut cache = FifoCache::new(3); +/// let mut cache = FifoCache::new(10); /// cache.insert(1, "first"); /// cache.insert(2, "second"); -/// cache.insert(3, "third"); -/// -/// // Peek without removing -/// assert_eq!(cache.peek_oldest(), Some((&1, &"first"))); /// -/// // Pop oldest entry -/// assert_eq!(cache.pop_oldest(), Some((1, "first"))); -/// assert_eq!(cache.len(), 2); -/// -/// // Age rank (0 = oldest) -/// assert_eq!(cache.age_rank(&2), Some(0)); // Now oldest -/// assert_eq!(cache.age_rank(&3), Some(1)); +/// assert_eq!(cache.peek_victim(), Some((&1, &"first"))); +/// assert_eq!(cache.len(), 2); // not removed /// ``` -pub trait FifoCacheTrait: CoreCache { - /// Removes and returns the oldest entry (first inserted). - /// - /// Returns `None` if the cache is empty. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; - /// use cachekit::policy::fifo::FifoCache; - /// - /// let mut cache = FifoCache::new(10); - /// cache.insert(1, "first"); - /// cache.insert(2, "second"); - /// - /// assert_eq!(cache.pop_oldest(), Some((1, "first"))); - /// assert_eq!(cache.pop_oldest(), Some((2, "second"))); - /// assert_eq!(cache.pop_oldest(), None); - /// ``` - #[must_use] - fn pop_oldest(&mut self) -> Option<(K, V)>; - - /// Peeks at the oldest entry without removing it. - /// - /// Returns `None` if the cache is empty. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; - /// use cachekit::policy::fifo::FifoCache; - /// - /// let mut cache = FifoCache::new(10); - /// cache.insert(1, "first"); - /// - /// // Peek doesn't remove - /// assert_eq!(cache.peek_oldest(), Some((&1, &"first"))); - /// assert_eq!(cache.peek_oldest(), Some((&1, &"first"))); - /// assert_eq!(cache.len(), 1); - /// ``` - fn peek_oldest(&self) -> Option<(&K, &V)>; - - /// Removes up to `count` oldest entries, appending them to the provided buffer. - /// - /// Entries are appended in FIFO order (oldest first). Callers can reuse - /// the buffer across calls to avoid repeated allocation. - /// The default implementation calls [`pop_oldest`](Self::pop_oldest) in a loop. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; - /// use cachekit::policy::fifo::FifoCache; - /// - /// let mut cache = FifoCache::new(10); - /// cache.insert(1, "a"); - /// cache.insert(2, "b"); - /// cache.insert(3, "c"); - /// - /// let mut batch = Vec::new(); - /// cache.pop_oldest_batch_into(2, &mut batch); - /// assert_eq!(batch, vec![(1, "a"), (2, "b")]); - /// assert_eq!(cache.len(), 1); - /// ``` - fn pop_oldest_batch_into(&mut self, count: usize, out: &mut Vec<(K, V)>) { - out.reserve(count); - for _ in 0..count { - match self.pop_oldest() { - Some(entry) => out.push(entry), - None => break, - } - } - } - - /// Removes up to `count` oldest entries, returning them in a new `Vec`. - /// - /// Convenience wrapper around [`pop_oldest_batch_into`](Self::pop_oldest_batch_into). - /// Prefer `pop_oldest_batch_into` when reusing a buffer across calls. - #[must_use] - fn pop_oldest_batch(&mut self, count: usize) -> Vec<(K, V)> { - let mut out = Vec::with_capacity(count.min(self.len())); - self.pop_oldest_batch_into(count, &mut out); - out - } - - /// Gets the age rank of a key (0 = oldest, higher = newer). 
+pub trait VictimInspectable<K, V>: Cache<K, V> { +    /// Peeks at the entry that would be evicted next. /// - /// Returns `None` if the key is not found. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, FifoCacheTrait}; - /// use cachekit::policy::fifo::FifoCache; - /// - /// let mut cache = FifoCache::new(10); - /// cache.insert(1, "first"); - /// cache.insert(2, "second"); - /// cache.insert(3, "third"); - /// - /// assert_eq!(cache.age_rank(&1), Some(0)); // Oldest - /// assert_eq!(cache.age_rank(&2), Some(1)); - /// assert_eq!(cache.age_rank(&3), Some(2)); // Newest - /// assert_eq!(cache.age_rank(&99), None); // Not found - /// ``` - fn age_rank(&self, key: &K) -> Option<usize>; + /// Does not remove the entry or modify any state. + fn peek_victim(&self) -> Option<(&K, &V)>; } -/// LRU-specific operations that respect access order. +/// Recency-based inspection and manipulation. /// -/// This trait extends [`MutableCache`] with LRU-specific eviction and access -/// tracking operations. Entries are ordered by recency—the least recently -/// accessed entry is evicted first. +/// For policies that order entries by access recency (LRU, LRU-K, FastLRU).
/// /// # Example /// /// ``` /// use std::sync::Arc; -/// use cachekit::traits::{CoreCache, MutableCache, LruCacheTrait}; +/// use cachekit::traits::{Cache, RecencyTracking}; /// use cachekit::policy::lru::LruCore; /// -/// let mut cache: LruCore = LruCore::new(3); +/// let mut cache: LruCore = LruCore::new(10); /// cache.insert(1, Arc::new("first")); /// cache.insert(2, Arc::new("second")); -/// cache.insert(3, Arc::new("third")); -/// -/// // Access key 1 to make it MRU -/// cache.get(&1); -/// -/// // Key 2 is now LRU -/// assert_eq!(cache.peek_lru().map(|(k, _)| *k), Some(2)); /// -/// // Touch without retrieving value -/// assert!(cache.touch(&2)); // Now key 3 is LRU -/// -/// // Pop LRU entry -/// let (key, _) = cache.pop_lru().unwrap(); -/// assert_eq!(key, 3); +/// assert!(cache.touch(&1)); +/// assert_eq!(cache.recency_rank(&1), Some(0)); // most recent /// ``` -pub trait LruCacheTrait<K, V>: MutableCache<K, V> { - /// Removes and returns the least recently used entry. - /// - /// Returns `None` if the cache is empty. - /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LruCacheTrait}; - /// use cachekit::policy::lru::LruCore; - /// - /// let mut cache: LruCore = LruCore::new(10); - /// cache.insert(1, Arc::new("first")); - /// cache.insert(2, Arc::new("second")); - /// - /// let (key, _) = cache.pop_lru().unwrap(); - /// assert_eq!(key, 1); // First inserted, not accessed since - /// ``` - #[must_use] - fn pop_lru(&mut self) -> Option<(K, V)>; - - /// Peeks at the LRU entry without removing it. - /// - /// Returns `None` if the cache is empty. Does not update access time. - /// - /// # Example +pub trait RecencyTracking<K, V>: Cache<K, V> { + /// Marks a key as recently used without retrieving its value.
/// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LruCacheTrait}; - /// use cachekit::policy::lru::LruCore; - /// - /// let mut cache: LruCore = LruCore::new(10); - /// cache.insert(1, Arc::new("first")); - /// cache.insert(2, Arc::new("second")); - /// - /// // Peek doesn't affect order - /// assert_eq!(cache.peek_lru().map(|(k, _)| *k), Some(1)); - /// assert_eq!(cache.peek_lru().map(|(k, _)| *k), Some(1)); - /// ``` - fn peek_lru(&self) -> Option<(&K, &V)>; - - /// Marks an entry as recently used without retrieving the value. - /// - /// Returns `true` if the key was found and touched, `false` otherwise. - /// This is useful for refreshing eviction order without fetching data. - /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LruCacheTrait}; - /// use cachekit::policy::lru::LruCore; - /// - /// let mut cache: LruCore = LruCore::new(10); - /// cache.insert(1, Arc::new("first")); - /// cache.insert(2, Arc::new("second")); - /// - /// // Key 1 is LRU - /// assert_eq!(cache.peek_lru().map(|(k, _)| *k), Some(1)); - /// - /// // Touch key 1 to make it MRU - /// assert!(cache.touch(&1)); - /// - /// // Now key 2 is LRU - /// assert_eq!(cache.peek_lru().map(|(k, _)| *k), Some(2)); - /// - /// // Touch non-existent key returns false - /// assert!(!cache.touch(&99)); - /// ``` + /// Returns `true` if the key was found and touched. fn touch(&mut self, key: &K) -> bool; - /// Gets the recency rank of a key (0 = most recent, higher = less recent). + /// Returns the recency rank (0 = most recent, higher = less recent). /// /// Returns `None` if the key is not found. 
- /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LruCacheTrait}; - /// use cachekit::policy::lru::LruCore; - /// - /// let mut cache: LruCore = LruCore::new(10); - /// cache.insert(1, Arc::new("first")); - /// cache.insert(2, Arc::new("second")); - /// cache.insert(3, Arc::new("third")); - /// - /// // Most recent insertion is rank 0 - /// assert_eq!(cache.recency_rank(&3), Some(0)); - /// assert_eq!(cache.recency_rank(&2), Some(1)); - /// assert_eq!(cache.recency_rank(&1), Some(2)); // Oldest - /// assert_eq!(cache.recency_rank(&99), None); - /// ``` fn recency_rank(&self, key: &K) -> Option<usize>; } -/// LFU-specific operations that respect frequency order. +/// Frequency-based inspection. /// -/// This trait extends [`MutableCache`] with LFU-specific eviction and frequency -/// tracking operations. Entries are ordered by access frequency—the least -/// frequently accessed entry is evicted first. +/// For policies that track access frequency (LFU, HeapLFU, MFU, LRU-K).
/// /// # Example /// /// ``` /// use std::sync::Arc; -/// use cachekit::traits::{CoreCache, MutableCache, LfuCacheTrait}; +/// use cachekit::traits::{Cache, FrequencyTracking}; /// use cachekit::policy::lfu::LfuCache; /// -/// let mut cache: LfuCache = LfuCache::new(3); -/// cache.insert(1, Arc::new("first")); -/// cache.insert(2, Arc::new("second")); -/// cache.insert(3, Arc::new("third")); -/// -/// // Access key 1 multiple times -/// cache.get(&1); -/// cache.get(&1); +/// let mut cache: LfuCache = LfuCache::new(10); +/// cache.insert(1, Arc::new("value")); /// cache.get(&1); -/// -/// // Key 1 now has frequency 4 (1 insert + 3 gets) -/// assert_eq!(cache.frequency(&1), Some(4)); -/// -/// // Key 2 has frequency 1 (just insert) -/// assert_eq!(cache.frequency(&2), Some(1)); -/// -/// // Pop LFU (key 2 or 3, both have freq=1) -/// let (key, _) = cache.pop_lfu().unwrap(); -/// assert!(key == 2 || key == 3); +/// assert_eq!(cache.frequency(&1), Some(2)); // 1 insert + 1 get /// ``` -pub trait LfuCacheTrait: MutableCache { - /// Removes and returns the least frequently used entry. - /// - /// If multiple entries have the same frequency, eviction order depends - /// on the implementation (typically FIFO among same-frequency entries). - /// Returns `None` if the cache is empty. - /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; - /// use cachekit::policy::lfu::LfuCache; - /// - /// let mut cache: LfuCache = LfuCache::new(10); - /// cache.insert(1, Arc::new("first")); - /// cache.insert(2, Arc::new("second")); - /// - /// // Access key 2 to increase its frequency - /// cache.get(&2); - /// - /// // Key 1 is LFU (freq=1 vs freq=2) - /// let (key, _) = cache.pop_lfu().unwrap(); - /// assert_eq!(key, 1); - /// ``` - #[must_use] - fn pop_lfu(&mut self) -> Option<(K, V)>; - - /// Peeks at the LFU entry without removing it. - /// - /// Returns `None` if the cache is empty. Does not increment frequency. 
- /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; - /// use cachekit::policy::lfu::LfuCache; - /// - /// let mut cache: LfuCache = LfuCache::new(10); - /// cache.insert(1, Arc::new("first")); - /// cache.insert(2, Arc::new("second")); - /// cache.get(&2); // freq=2 - /// - /// // Key 1 is LFU - /// assert_eq!(cache.peek_lfu().map(|(k, _)| *k), Some(1)); - /// ``` - fn peek_lfu(&self) -> Option<(&K, &V)>; - - /// Gets the access frequency for a key. +pub trait FrequencyTracking<K, V>: Cache<K, V> { + /// Returns the access frequency for a key. /// /// Returns `None` if the key is not found. - /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; - /// use cachekit::policy::lfu::LfuCache; - /// - /// let mut cache: LfuCache = LfuCache::new(10); - /// cache.insert(1, Arc::new("value")); - /// assert_eq!(cache.frequency(&1), Some(1)); - /// - /// cache.get(&1); - /// assert_eq!(cache.frequency(&1), Some(2)); - /// - /// assert_eq!(cache.frequency(&99), None); - /// ``` fn frequency(&self, key: &K) -> Option; - - /// Resets the frequency counter for a key to 1. - /// - /// Returns the old frequency if the key existed, `None` otherwise. - /// Useful for demoting hot entries after access pattern changes. - /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; - /// use cachekit::policy::lfu::LfuCache; - /// - /// let mut cache: LfuCache = LfuCache::new(10); - /// cache.insert(1, Arc::new("value")); - /// cache.get(&1); - /// cache.get(&1); - /// assert_eq!(cache.frequency(&1), Some(3)); - /// - /// // Reset to 1 - /// assert_eq!(cache.reset_frequency(&1), Some(3)); - /// assert_eq!(cache.frequency(&1), Some(1)); - /// ``` - fn reset_frequency(&mut self, key: &K) -> Option; - - /// Increments frequency without accessing the value.
- /// - /// Returns the new frequency if the key existed, `None` otherwise. - /// Useful for boosting priority without triggering value access. - /// - /// # Example - /// - /// ``` - /// use std::sync::Arc; - /// use cachekit::traits::{CoreCache, LfuCacheTrait}; - /// use cachekit::policy::lfu::LfuCache; - /// - /// let mut cache: LfuCache = LfuCache::new(10); - /// cache.insert(1, Arc::new("value")); - /// assert_eq!(cache.frequency(&1), Some(1)); - /// - /// // Boost without accessing - /// assert_eq!(cache.increment_frequency(&1), Some(2)); - /// assert_eq!(cache.increment_frequency(&1), Some(3)); - /// - /// assert_eq!(cache.increment_frequency(&99), None); - /// ``` - fn increment_frequency(&mut self, key: &K) -> Option; } -/// LRU-K specific operations that respect K-distance access patterns. -/// -/// This trait extends [`MutableCache`] with LRU-K-specific eviction and access -/// history tracking. Unlike standard LRU which considers only the last access, -/// LRU-K tracks the K-th most recent access time, providing scan resistance. +/// LRU-K style access-history inspection. /// -/// # Scan Resistance -/// -/// LRU-K protects the cache from pollution by one-time scans. An entry needs -/// K accesses before it can displace frequently-accessed entries. +/// Only implemented by `LrukCache`. 
/// /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, MutableCache, LrukCacheTrait}; +/// use cachekit::traits::{Cache, HistoryTracking}; /// use cachekit::policy::lru_k::LrukCache; /// -/// // Create LRU-2 cache (K=2) -/// let mut cache = LrukCache::with_k(100, 2); +/// let mut cache = LrukCache::with_k(10, 2); /// cache.insert(1, "value"); -/// -/// // After insert, access_count is 1 -/// assert_eq!(cache.access_count(&1), Some(1)); -/// -/// // No K-distance yet (need K=2 accesses) -/// assert_eq!(cache.k_distance(&1), None); -/// -/// // Second access establishes K-distance /// cache.get(&1); +/// /// assert_eq!(cache.access_count(&1), Some(2)); /// assert!(cache.k_distance(&1).is_some()); -/// -/// // Access history (most recent first) -/// let history = cache.access_history(&1).unwrap(); -/// assert_eq!(history.len(), 2); +/// assert_eq!(cache.k_value(), 2); /// ``` -pub trait LrukCacheTrait: MutableCache { - /// Removes and returns the entry with the oldest K-th access time. - /// - /// Entries with fewer than K accesses are evicted first (cold entries). - /// Returns `None` if the cache is empty. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 2); - /// cache.insert(1, "first"); - /// cache.insert(2, "second"); - /// - /// // Access key 2 twice (makes it "hot") - /// cache.get(&2); - /// - /// // Key 1 is evicted first (only 1 access, K=2 not reached) - /// let (key, _) = cache.pop_lru_k().unwrap(); - /// assert_eq!(key, 1); - /// ``` - #[must_use] - fn pop_lru_k(&mut self) -> Option<(K, V)>; - - /// Peeks at the LRU-K entry without removing it. - /// - /// Returns `None` if the cache is empty. Does not update access history. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 2); - /// cache.insert(1, "first"); - /// cache.insert(2, "second"); - /// cache.get(&2); // Second access for key 2 - /// - /// // Key 1 is LRU-K (cold, only 1 access) - /// assert_eq!(cache.peek_lru_k().map(|(k, _)| *k), Some(1)); - /// ``` - fn peek_lru_k(&self) -> Option<(&K, &V)>; - - /// Gets the K value used by this cache. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::LrukCacheTrait; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let cache: LrukCache = LrukCache::with_k(100, 3); - /// assert_eq!(cache.k_value(), 3); - /// - /// // Default K=2 - /// let default_cache: LrukCache = LrukCache::new(100); - /// assert_eq!(default_cache.k_value(), 2); - /// ``` - fn k_value(&self) -> usize; - - /// Gets the access history for a key (most recent first). - /// - /// Returns up to K timestamps. Returns `None` if key not found. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 3); - /// cache.insert(1, "value"); - /// cache.get(&1); - /// cache.get(&1); - /// - /// let history = cache.access_history(&1).unwrap(); - /// assert_eq!(history.len(), 3); // 1 insert + 2 gets, up to K=3 - /// // history[0] is most recent, history[2] is oldest - /// ``` - fn access_history(&self, key: &K) -> Option>; - - /// Gets the number of recorded accesses for a key. - /// - /// Returns `None` if the key is not found. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 5); - /// cache.insert(1, "value"); - /// assert_eq!(cache.access_count(&1), Some(1)); - /// - /// cache.get(&1); - /// cache.get(&1); - /// assert_eq!(cache.access_count(&1), Some(3)); - /// - /// // Capped at K - /// cache.get(&1); - /// cache.get(&1); - /// cache.get(&1); - /// assert_eq!(cache.access_count(&1), Some(5)); // Max K=5 - /// ``` +pub trait HistoryTracking<K, V>: Cache<K, V> { +    /// Returns the total access count for a key. fn access_count(&self, key: &K) -> Option<usize>; - /// Gets the K-th most recent access time for a key. - /// - /// Returns `None` if the key is not found or has fewer than K accesses. - /// This is the core metric for LRU-K eviction decisions. + /// Returns the K-distance for a key (time since the K-th most recent access). /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 2); - /// cache.insert(1, "value"); - /// - /// // Only 1 access, no K-distance yet - /// assert_eq!(cache.k_distance(&1), None); - /// - /// // Second access establishes K-distance - /// cache.get(&1); - /// assert!(cache.k_distance(&1).is_some()); - /// ``` + /// Returns `None` if the key has fewer than K accesses. fn k_distance(&self, key: &K) -> Option<u64>; - /// Records an access without retrieving the value. - /// - /// Returns `true` if the key was found and touched, `false` otherwise. - /// This updates the access history for the entry.
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 2); - /// cache.insert(1, "value"); - /// assert_eq!(cache.access_count(&1), Some(1)); - /// - /// // Touch to record access - /// assert!(cache.touch(&1)); - /// assert_eq!(cache.access_count(&1), Some(2)); - /// - /// // Touch non-existent key - /// assert!(!cache.touch(&99)); - /// ``` - fn touch(&mut self, key: &K) -> bool; + /// Returns the access history timestamps (most recent first), up to K entries. + fn access_history(&self, key: &K) -> Option<Vec<u64>>; - /// Gets the rank of a key based on K-distance. - /// - /// Lower rank (0) means oldest K-distance (first to be evicted). - /// Entries with fewer than K accesses are ranked by their earliest access time. - /// Returns `None` if key not found. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::{CoreCache, LrukCacheTrait}; - /// use cachekit::policy::lru_k::LrukCache; - /// - /// let mut cache = LrukCache::with_k(10, 2); - /// cache.insert(1, "first"); - /// cache.insert(2, "second"); - /// - /// // Both have 1 access (cold), ranked by insertion order - /// assert_eq!(cache.k_distance_rank(&1), Some(0)); // Oldest - /// assert_eq!(cache.k_distance_rank(&2), Some(1)); - /// ``` - fn k_distance_rank(&self, key: &K) -> Option<usize>; + /// Returns the K parameter used by this cache. + fn k_value(&self) -> usize; } +// --------------------------------------------------------------------------- +// Utility traits +// --------------------------------------------------------------------------- + /// Marker trait for caches that are safe to use concurrently. /// /// Implementors guarantee thread-safe operations.
This trait extends @@ -1155,169 +316,28 @@ pub trait LrukCacheTrait: MutableCache { /// # Example /// /// ``` -/// use cachekit::traits::{CoreCache, ConcurrentCache}; +/// use cachekit::traits::{Cache, ConcurrentCache}; /// -/// // Function requiring a thread-safe cache /// fn use_from_threads(cache: &C) /// where /// K: Send + Sync, /// V: Send + Sync, -/// C: CoreCache + ConcurrentCache, +/// C: Cache + ConcurrentCache, /// { /// // Safe to share between threads /// } /// ``` -/// -/// # Thread Safety -/// -/// Individual cache implementations are NOT thread-safe by default. -/// To use a non-concurrent cache from multiple threads, wrap it: -/// -/// ``` -/// use std::sync::{Arc, RwLock}; -/// use cachekit::traits::{CoreCache, ReadOnlyCache}; -/// use cachekit::policy::lru_k::LrukCache; -/// -/// let cache = Arc::new(RwLock::new(LrukCache::::new(100))); -/// -/// // Clone for use in another thread -/// let cache_clone = cache.clone(); -/// std::thread::spawn(move || { -/// let mut guard = cache_clone.write().unwrap(); -/// guard.insert(1, "value".to_string()); -/// }); -/// ``` pub unsafe trait ConcurrentCache: Send + Sync {} -/// High-level cache tier management. 
-/// -/// This trait defines a multi-tier cache architecture where entries can be -/// promoted or demoted between tiers based on access patterns: -/// -/// - **Hot tier**: Frequently accessed data (LRU-managed) -/// - **Warm tier**: Moderately accessed data (LFU-managed) -/// - **Cold tier**: Rarely accessed data (FIFO-managed) -/// -/// # Architecture -/// -/// ```text -/// ┌──────────────┐ promote() ┌──────────────┐ promote() ┌──────────────┐ -/// │ Cold Tier │ ───────────────►│ Warm Tier │───────────────► │ Hot Tier │ -/// │ (FIFO) │ │ (LFU) │ │ (LRU) │ -/// │ │◄─────────────── │ │◄─────────────── │ │ -/// └──────────────┘ demote() └──────────────┘ demote() └──────────────┘ -/// ``` -/// -/// # Associated Types -/// -/// - `HotCache`: LRU-based cache for frequently accessed data -/// - `WarmCache`: LFU-based cache for moderately accessed data -/// - `ColdCache`: FIFO-based cache for cold/new data -pub trait CacheTierManager { - /// LRU-based cache for hot (frequently accessed) data. - type HotCache: LruCacheTrait + ConcurrentCache; - - /// LFU-based cache for warm (moderately accessed) data. - type WarmCache: LfuCacheTrait + ConcurrentCache; - - /// FIFO-based cache for cold (rarely accessed) data. - type ColdCache: FifoCacheTrait + ConcurrentCache; - - /// Promotes an entry from a lower tier to a higher tier. - /// - /// Returns `true` if the promotion was successful, `false` if the key - /// wasn't found in the source tier. - fn promote(&mut self, key: &K, from_tier: CacheTier, to_tier: CacheTier) -> bool; - - /// Demotes an entry from a higher tier to a lower tier. - /// - /// Returns `true` if the demotion was successful, `false` if the key - /// wasn't found in the source tier. - fn demote(&mut self, key: &K, from_tier: CacheTier, to_tier: CacheTier) -> bool; - - /// Gets the tier where a key currently resides. - /// - /// Returns `None` if the key is not in any tier. - fn locate_key(&self, key: &K) -> Option; - - /// Forces eviction from a specific tier. 
- /// - /// Returns the evicted entry, or `None` if the tier is empty. - fn evict_from_tier(&mut self, tier: CacheTier) -> Option<(K, V)>; -} - -/// Cache tier enumeration for multi-tier cache architectures. -/// -/// Used with [`CacheTierManager`] to specify which tier to promote to, -/// demote from, or query. -/// -/// # Tier Characteristics -/// -/// | Tier | Access Pattern | Eviction Policy | Typical Use | -/// |------|----------------|-----------------|-------------| -/// | Hot | Frequent | LRU | Active working set | -/// | Warm | Moderate | LFU | Periodically accessed | -/// | Cold | Rare | FIFO | New or stale data | -/// -/// # Example -/// -/// ``` -/// use cachekit::traits::CacheTier; -/// -/// let tier = CacheTier::Hot; -/// assert_eq!(tier, CacheTier::Hot); -/// assert_eq!(tier.to_string(), "Hot"); -/// -/// // Tiers can be compared and hashed -/// use std::collections::HashSet; -/// let mut tiers = HashSet::new(); -/// tiers.insert(CacheTier::Hot); -/// tiers.insert(CacheTier::Warm); -/// tiers.insert(CacheTier::Cold); -/// assert_eq!(tiers.len(), 3); -/// ``` -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum CacheTier { - /// Hot tier: frequently accessed data (LRU-managed). - /// - /// Best for: active working set, recently accessed entries. - Hot, - - /// Warm tier: moderately accessed data (LFU-managed). - /// - /// Best for: periodically accessed entries, stable hot spots. - Warm, - - /// Cold tier: rarely accessed data (FIFO-managed). - /// - /// Best for: new entries, infrequently accessed data. - Cold, -} - -impl std::fmt::Display for CacheTier { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Hot => f.write_str("Hot"), - Self::Warm => f.write_str("Warm"), - Self::Cold => f.write_str("Cold"), - } - } -} - /// Factory trait for creating cache instances. 
/// /// Provides a standard interface for cache construction, allowing generic /// code to create cache instances without knowing the concrete type. /// -/// # Associated Types -/// -/// - `Cache`: The concrete cache type produced by this factory -/// /// # Example /// /// ```ignore -/// use cachekit::traits::{CoreCache, CacheFactory, CacheConfig}; +/// use cachekit::traits::{Cache, CacheFactory, CacheConfig}; /// /// struct LruFactory; /// @@ -1332,15 +352,10 @@ impl std::fmt::Display for CacheTier { /// LruCache::new(config.capacity) /// } /// } -/// -/// // Generic function using factory -/// fn build_cache>() -> F::Cache { -/// F::new(100) -/// } /// ``` pub trait CacheFactory { /// The concrete cache type produced by this factory. - type Cache: CoreCache; + type Cache: Cache; /// Creates a new cache instance with the specified capacity. fn new(capacity: usize) -> Self::Cache; @@ -1353,30 +368,18 @@ pub trait CacheFactory { /// /// Used with [`CacheFactory::with_config`] to customize cache behavior. 
/// -/// # Fields -/// -/// | Field | Type | Default | Description | -/// |-------|------|---------|-------------| -/// | `capacity` | `usize` | 1000 | Maximum number of entries | -/// | `enable_stats` | `bool` | false | Enable hit/miss tracking | -/// | `prealloc_memory` | `bool` | true | Pre-allocate memory for capacity | -/// | `thread_safe` | `bool` | false | Use internal synchronization | -/// /// # Example /// /// ``` /// use cachekit::traits::CacheConfig; /// -/// // Use defaults /// let config = CacheConfig::default(); /// assert_eq!(config.capacity, 1000); /// assert!(!config.enable_stats); /// -/// // Custom configuration via builder methods /// let config = CacheConfig::new(5000).with_stats(true); /// assert_eq!(config.capacity, 5000); /// assert!(config.enable_stats); -/// assert!(config.prealloc_memory); // from default /// ``` #[derive(Debug, Clone, PartialEq, Eq)] #[non_exhaustive] @@ -1385,36 +388,17 @@ pub struct CacheConfig { pub capacity: usize, /// Enable hit/miss statistics tracking. - /// - /// When enabled, the cache tracks hit rate, miss rate, and other metrics. - /// Has a small performance overhead. pub enable_stats: bool, /// Pre-allocate memory for the full capacity. - /// - /// When true, memory is allocated upfront to avoid reallocations. - /// When false, memory grows as needed (may cause latency spikes). pub prealloc_memory: bool, /// Use internal synchronization for thread safety. - /// - /// When true, the cache uses internal locks for thread-safe operations. - /// When false, external synchronization (e.g., `Arc>`) is required. pub thread_safe: bool, } impl CacheConfig { /// Creates a new configuration with the given capacity and default options. 
- /// - /// # Example - /// - /// ``` - /// use cachekit::traits::CacheConfig; - /// - /// let config = CacheConfig::new(500); - /// assert_eq!(config.capacity, 500); - /// assert!(!config.enable_stats); - /// ``` pub fn new(capacity: usize) -> Self { Self { capacity, @@ -1441,15 +425,6 @@ impl CacheConfig { } /// Validates the configuration, returning an error if any parameter is invalid. - /// - /// # Example - /// - /// ``` - /// use cachekit::traits::CacheConfig; - /// - /// assert!(CacheConfig::new(100).validate().is_ok()); - /// assert!(CacheConfig::new(0).validate().is_err()); - /// ``` pub fn validate(&self) -> Result<(), crate::error::ConfigError> { if self.capacity == 0 { return Err(crate::error::ConfigError::new( @@ -1471,21 +446,10 @@ impl Default for CacheConfig { } } -/// Extension trait for async cache operations. -/// -/// This trait is a placeholder for future async cache support. It will be -/// fully implemented in Phase 2 when the `async-trait` dependency is added. +/// Extension trait for async cache operations (Phase 2 placeholder). /// -/// Currently, all methods return `false` indicating async operations are -/// not supported. Implementations can override these to indicate support. -/// -/// # Future API (Phase 2) -/// -/// ```ignore -/// // Future async methods (not yet implemented) -/// async fn async_get(&self, key: &K) -> Option<&V>; -/// async fn async_insert(&mut self, key: K, value: V) -> Option; -/// ``` +/// Currently all methods return `false`. Implementations can override +/// to indicate support. /// /// # Example /// @@ -1504,31 +468,30 @@ impl Default for CacheConfig { /// ``` pub trait AsyncCacheFuture: Send + Sync { /// Returns whether this cache supports async get operations. - /// - /// Default returns `false`. Override to indicate async support. fn supports_async_get(&self) -> bool { false } /// Returns whether this cache supports async insert operations. - /// - /// Default returns `false`. 
Override to indicate async support. fn supports_async_insert(&self) -> bool { false } } +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + #[cfg(test)] mod tests { use super::*; - // Mock implementation for testing trait design - struct MockFifoCache { + struct MockCache { data: Vec<(i32, String)>, capacity: usize, } - impl ReadOnlyCache for MockFifoCache { + impl Cache for MockCache { fn contains(&self, key: &i32) -> bool { self.data.iter().any(|(k, _)| k == key) } @@ -1540,11 +503,16 @@ mod tests { fn capacity(&self) -> usize { self.capacity } - } - impl CoreCache for MockFifoCache { + fn peek(&self, key: &i32) -> Option<&String> { + self.data.iter().find(|(k, _)| k == key).map(|(_, v)| v) + } + + fn get(&mut self, key: &i32) -> Option<&String> { + self.data.iter().find(|(k, _)| k == key).map(|(_, v)| v) + } + fn insert(&mut self, key: i32, value: String) -> Option { - // Simple mock implementation if let Some((_, existing)) = self.data.iter_mut().find(|(k, _)| *k == key) { return Some(std::mem::replace(existing, value)); } @@ -1555,8 +523,12 @@ mod tests { None } - fn get(&mut self, key: &i32) -> Option<&String> { - self.data.iter().find(|(k, _)| k == key).map(|(_, v)| v) + fn remove(&mut self, key: &i32) -> Option { + if let Some(pos) = self.data.iter().position(|(k, _)| k == key) { + Some(self.data.remove(pos).1) + } else { + None + } } fn clear(&mut self) { @@ -1564,45 +536,69 @@ mod tests { } } - impl FifoCacheTrait for MockFifoCache { - fn pop_oldest(&mut self) -> Option<(i32, String)> { + impl EvictingCache for MockCache { + fn evict_one(&mut self) -> Option<(i32, String)> { if self.data.is_empty() { None } else { Some(self.data.remove(0)) } } + } - fn peek_oldest(&self) -> Option<(&i32, &String)> { + impl VictimInspectable for MockCache { + fn peek_victim(&self) -> Option<(&i32, &String)> { self.data.first().map(|(k, v)| (k, v)) } - - 
fn age_rank(&self, key: &i32) -> Option { - self.data.iter().position(|(k, _)| k == key) - } } #[test] - fn test_fifo_trait_design() { - let mut cache = MockFifoCache { + fn test_cache_trait() { + let mut cache = MockCache { data: Vec::new(), capacity: 2, }; - // Test CoreCache operations cache.insert(1, "first".to_string()); cache.insert(2, "second".to_string()); assert_eq!(cache.len(), 2); assert!(cache.contains(&1)); + assert_eq!(cache.peek(&1), Some(&"first".to_string())); + assert_eq!(cache.get(&1), Some(&"first".to_string())); + + assert_eq!(cache.remove(&1), Some("first".to_string())); + assert_eq!(cache.len(), 1); + assert!(!cache.contains(&1)); + } - // Test FIFO operations - assert_eq!(cache.peek_oldest(), Some((&1, &"first".to_string()))); - assert_eq!(cache.pop_oldest(), Some((1, "first".to_string()))); + #[test] + fn test_evicting_cache() { + let mut cache = MockCache { + data: Vec::new(), + capacity: 10, + }; + + cache.insert(1, "first".to_string()); + cache.insert(2, "second".to_string()); + + assert_eq!(cache.peek_victim(), Some((&1, &"first".to_string()))); + assert_eq!(cache.evict_one(), Some((1, "first".to_string()))); assert_eq!(cache.len(), 1); + } + + #[test] + fn test_insert_returns_previous_value() { + let mut cache = MockCache { + data: Vec::new(), + capacity: 2, + }; - // Test that FIFO cache doesn't have remove method - // This won't compile - which is exactly what we want! - // cache.remove(&2); // ❌ Compile error - good! 
+ assert_eq!(cache.insert(1, "first".to_string()), None); + assert_eq!( + cache.insert(1, "second".to_string()), + Some("first".to_string()) + ); + assert_eq!(cache.get(&1), Some(&"second".to_string())); } #[test] @@ -1615,21 +611,19 @@ mod tests { assert_eq!(config.capacity, 500); assert!(config.enable_stats); - assert!(config.prealloc_memory); // from default + assert!(config.prealloc_memory); } #[test] - fn test_core_cache_insert_returns_previous_value() { - let mut cache = MockFifoCache { + fn test_object_safety() { + let mut cache = MockCache { data: Vec::new(), - capacity: 2, + capacity: 10, }; + cache.insert(1, "hello".to_string()); - assert_eq!(cache.insert(1, "first".to_string()), None); - assert_eq!( - cache.insert(1, "second".to_string()), - Some("first".to_string()) - ); - assert_eq!(cache.get(&1), Some(&"second".to_string())); + let cache_ref: &dyn Cache = &cache; + assert_eq!(cache_ref.len(), 1); + assert_eq!(cache_ref.peek(&1), Some(&"hello".to_string())); } } diff --git a/tests/fifo_concurrency.rs b/tests/fifo_concurrency.rs index 7d8735e..197046b 100644 --- a/tests/fifo_concurrency.rs +++ b/tests/fifo_concurrency.rs @@ -8,7 +8,7 @@ use std::time::{Duration, Instant}; mod thread_safe_wrapper { use cachekit::policy::fifo::FifoCache; - use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; @@ -568,7 +568,7 @@ mod thread_safe_wrapper { // Stress Testing mod stress_testing { use cachekit::policy::fifo::FifoCache; - use cachekit::traits::{CoreCache, FifoCacheTrait, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; diff --git a/tests/lfu_concurrency.rs b/tests/lfu_concurrency.rs index 519bade..a23d77a 100644 --- a/tests/lfu_concurrency.rs +++ b/tests/lfu_concurrency.rs @@ -12,7 +12,7 @@ type ThreadSafeLfuCache = Arc>>; // Thread Safety Tests mod thread_safety { - use cachekit::traits::{CoreCache, LfuCacheTrait, MutableCache, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; @@ 
-719,7 +719,7 @@ mod stress_testing { use std::thread; use std::time::{Duration, Instant}; - use cachekit::traits::{CoreCache, LfuCacheTrait, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; diff --git a/tests/lru_integration_test.rs b/tests/lru_integration_test.rs index e972ad4..ca5f380 100644 --- a/tests/lru_integration_test.rs +++ b/tests/lru_integration_test.rs @@ -5,7 +5,7 @@ mod integration_tests { #[cfg(feature = "concurrency")] use cachekit::policy::lru::ConcurrentLruCache; use cachekit::policy::lru::LruCore; - use cachekit::traits::{CoreCache, LruCacheTrait, MutableCache}; + use cachekit::traits::Cache; use super::*; diff --git a/tests/lru_k_concurrency.rs b/tests/lru_k_concurrency.rs index 4c215fb..056e34a 100644 --- a/tests/lru_k_concurrency.rs +++ b/tests/lru_k_concurrency.rs @@ -10,7 +10,7 @@ use std::time::Duration; // Thread Safety Tests mod thread_safety { use cachekit::policy::lru_k::LrukCache; - use cachekit::traits::{CoreCache, LrukCacheTrait, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; @@ -239,7 +239,7 @@ mod thread_safety { // Stress Testing (kept lightweight to avoid long runtimes) mod stress_testing { use cachekit::policy::lru_k::LrukCache; - use cachekit::traits::{CoreCache, LrukCacheTrait, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; diff --git a/tests/nru_concurrency.rs b/tests/nru_concurrency.rs index a6c67b1..4b2f0d2 100644 --- a/tests/nru_concurrency.rs +++ b/tests/nru_concurrency.rs @@ -8,7 +8,7 @@ use std::time::{Duration, Instant}; mod thread_safe_wrapper { use cachekit::policy::nru::NruCache; - use cachekit::traits::{CoreCache, MutableCache, ReadOnlyCache}; + use cachekit::traits::Cache; use super::*; @@ -479,7 +479,7 @@ mod thread_safe_wrapper { mod performance { use super::*; use cachekit::policy::nru::NruCache; - use cachekit::traits::{CoreCache, ReadOnlyCache}; + use cachekit::traits::Cache; #[test] fn benchmark_throughput() { diff --git a/tests/performance_regression.rs 
b/tests/performance_regression.rs index bdcb4b6..15918ea 100644 --- a/tests/performance_regression.rs +++ b/tests/performance_regression.rs @@ -143,7 +143,7 @@ fn verify_get_complexity(mut create_cache: F, policy_name: &str) where K: std::hash::Hash + Eq + Clone + From, V: Clone + From, - C: cachekit::traits::CoreCache, + C: cachekit::traits::Cache, F: FnMut(usize) -> C, { let sizes = vec![1000, 2000, 4000, 8000]; @@ -217,7 +217,7 @@ fn verify_insert_complexity(mut create_cache: F, policy_name: &str) where K: std::hash::Hash + Eq + Clone + From, V: Clone + From, - C: cachekit::traits::CoreCache, + C: cachekit::traits::Cache, F: FnMut(usize) -> C, { let sizes = vec![1000, 2000, 4000, 8000]; @@ -272,7 +272,7 @@ fn verify_eviction_complexity(mut create_cache: F, policy_name: &str where K: std::hash::Hash + Eq + Clone + From, V: Clone + From, - C: cachekit::traits::CoreCache, + C: cachekit::traits::Cache, F: FnMut(usize) -> C, { let sizes = vec![1000, 2000, 4000, 8000]; @@ -336,7 +336,7 @@ where mod regression_guards { use super::*; use cachekit::policy::lru::LruCore; - use cachekit::traits::{CoreCache, ReadOnlyCache}; + use cachekit::traits::Cache; /// Ensure basic operations complete in reasonable time /// This catches catastrophic regressions (e.g., accidentally O(n) operations) diff --git a/tests/policy_invariants.rs b/tests/policy_invariants.rs index 30509d1..2b3611d 100644 --- a/tests/policy_invariants.rs +++ b/tests/policy_invariants.rs @@ -16,7 +16,7 @@ #[cfg(feature = "policy-clock")] mod clock_zero_capacity { use cachekit::policy::clock::ClockCache; - use cachekit::traits::ReadOnlyCache; + use cachekit::traits::Cache; #[test] fn capacity_zero_is_honored() { @@ -34,7 +34,7 @@ mod clock_zero_capacity { #[cfg(feature = "policy-clock-pro")] mod clock_pro_zero_capacity { use cachekit::policy::clock_pro::ClockProCache; - use cachekit::traits::ReadOnlyCache; + use cachekit::traits::Cache; #[test] fn capacity_zero_is_honored() { @@ -50,7 +50,7 @@ mod 
clock_pro_zero_capacity { #[test] fn capacity_zero_rejects_inserts() { - use cachekit::traits::CoreCache; + use cachekit::traits::Cache; let mut cache: ClockProCache<&str, i32> = ClockProCache::new(0); cache.insert("key", 42); @@ -66,7 +66,7 @@ mod clock_pro_zero_capacity { #[cfg(feature = "policy-nru")] mod nru_zero_capacity { use cachekit::policy::nru::NruCache; - use cachekit::traits::ReadOnlyCache; + use cachekit::traits::Cache; #[test] fn capacity_zero_is_honored() {