
Commit 78846fd

adds LRU cache with lazy eviction
The commit implements a variant of LRU cache with lazy eviction:

* Each entry maintains an associated ordinal value representing when the entry was last accessed.
* The cache is allowed to grow up to 2 times the specified capacity with no evictions, at which point the excess entries are evicted based on the LRU policy, resulting in _amortized_ `O(1)` performance.

In many use cases that can allow the cache to store 2 times the capacity and can tolerate the amortized nature of the performance, this results in better average performance, as shown by the added benchmarks.

Additionally, with the existing implementation, `.get` requires a mutable reference `&mut self`. In a multi-threaded setting, this requires an exclusive write-lock on the cache even on the read path, which can exacerbate lock contention. With lazy eviction, the ordinal values can be updated using atomic operations, allowing a shared lock for lookups.
1 parent cf063f6 commit 78846fd
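The excerpt below covers only the benchmark and Cargo.toml changes; the lazy cache implementation itself is in the other changed files. As a rough illustration of the scheme described in the message, a lazy-eviction cache could be sketched as follows. The `LazyLruCache` type, its fields, and the eviction cutoff are illustrative assumptions only and do not reproduce the actual `lru::lazy` code.

// Illustrative sketch only: `LazyLruCache`, its fields, and the eviction
// cutoff are assumptions, not the crate's actual `lru::lazy` implementation.
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::atomic::{AtomicU64, Ordering};

struct CacheEntry<V> {
    value: V,
    // Ordinal recording when the entry was last accessed; being atomic, it
    // can be bumped through a shared reference on the read path.
    ordinal: AtomicU64,
}

struct LazyLruCache<K, V> {
    map: HashMap<K, CacheEntry<V>>,
    capacity: usize,
    clock: AtomicU64,
}

impl<K: Eq + Hash, V> LazyLruCache<K, V> {
    fn new(capacity: usize) -> Self {
        Self { map: HashMap::new(), capacity, clock: AtomicU64::new(0) }
    }

    // Lookup needs only `&self`: the recency update is a pair of atomic
    // operations, not a re-link of a recency list under exclusive access.
    fn get(&self, key: &K) -> Option<&V> {
        let entry = self.map.get(key)?;
        let now = self.clock.fetch_add(1, Ordering::Relaxed);
        entry.ordinal.store(now, Ordering::Relaxed);
        Some(&entry.value)
    }

    fn put(&mut self, key: K, value: V) {
        let now = self.clock.fetch_add(1, Ordering::Relaxed);
        self.map.insert(key, CacheEntry { value, ordinal: AtomicU64::new(now) });
        // Lazy eviction: nothing happens until the map holds twice the
        // capacity; then the least recently used half is dropped in one pass,
        // so the O(n) cleanup is amortized O(1) per insertion.
        if self.map.len() >= 2 * self.capacity {
            let mut ordinals: Vec<u64> = self
                .map
                .values()
                .map(|e| e.ordinal.load(Ordering::Relaxed))
                .collect();
            // Cutoff = the smallest ordinal among the `capacity` most
            // recently used entries, found in linear time.
            let index = ordinals.len() - self.capacity;
            let cutoff = *ordinals.select_nth_unstable(index).1;
            self.map
                .retain(|_, e| e.ordinal.load(Ordering::Relaxed) >= cutoff);
        }
    }
}

In this sketch the eviction cost is paid at most once per `capacity` insertions, which is where the amortized `O(1)` bound comes from, and `get` never needs exclusive access.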

4 files changed (576 additions, 0 deletions)


Cargo.toml

Lines changed: 4 additions & 0 deletions
@@ -19,5 +19,9 @@ nightly = ["hashbrown", "hashbrown/nightly"]
 hashbrown = { version = "0.12", optional = true }
 
 [dev-dependencies]
+rand = "0.8.5"
 scoped_threadpool = "0.1.*"
 stats_alloc = "0.1.*"
+
+[[bench]]
+name = "benchmarks"

benches/benchmarks.rs

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+#![feature(test)]
+
+extern crate core;
+extern crate lru;
+extern crate rand;
+extern crate test;
+
+use core::num::NonZeroUsize;
+use rand::Rng;
+use test::Bencher;
+
+const REPS: usize = 1 << 10;
+const NUM_KEYS: usize = 1 << 20;
+const CAPACITY: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(1 << 17) };
+
+macro_rules! impl_put_bench {
+    ($name: ident, $cache: ty) => {
+        fn $name(bencher: &mut Bencher, capacity: NonZeroUsize, num_keys: usize, reps: usize) {
+            let mut rng = rand::thread_rng();
+            let mut cache = <$cache>::new(capacity);
+            for _ in 0..5 * capacity.get() {
+                let key = rng.gen_range(0..num_keys);
+                let _ = cache.put(key, ());
+            }
+            bencher.iter(|| {
+                for _ in 0..reps {
+                    let key = rng.gen_range(0..num_keys);
+                    let _ = cache.put(key, ());
+                }
+            });
+        }
+    };
+}
+
+macro_rules! impl_get_bench {
+    ($name: ident, $cache: ty) => {
+        fn $name(bencher: &mut Bencher, capacity: NonZeroUsize, num_keys: usize, reps: usize) {
+            let mut rng = rand::thread_rng();
+            let mut cache = <$cache>::new(capacity);
+            for _ in 0..5 * capacity.get() {
+                let key = rng.gen_range(0..num_keys);
+                let _ = cache.put(key, ());
+            }
+            bencher.iter(|| {
+                for _ in 0..reps {
+                    let key = rng.gen_range(0..num_keys);
+                    let _ = cache.get(&key);
+                }
+            });
+        }
+    };
+}
+
+impl_put_bench!(run_put_bench, lru::LruCache<usize, ()>);
+impl_put_bench!(run_put_bench_lazy, lru::lazy::LruCache<usize, ()>);
+
+impl_get_bench!(run_get_bench, lru::LruCache<usize, ()>);
+impl_get_bench!(run_get_bench_lazy, lru::lazy::LruCache<usize, ()>);
+
+#[bench]
+fn bench_put(bencher: &mut Bencher) {
+    run_put_bench(bencher, CAPACITY, NUM_KEYS, REPS);
+}
+
+#[bench]
+fn bench_put_lazy(bencher: &mut Bencher) {
+    run_put_bench_lazy(bencher, CAPACITY, NUM_KEYS, REPS);
+}
+
+#[bench]
+fn bench_get(bencher: &mut Bencher) {
+    run_get_bench(bencher, CAPACITY, NUM_KEYS, REPS);
+}
+
+#[bench]
+fn bench_get_lazy(bencher: &mut Bencher) {
+    run_get_bench_lazy(bencher, CAPACITY, NUM_KEYS, REPS);
+}
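The benchmarks use the unstable `test` crate (`#![feature(test)]`), so running them requires a nightly toolchain, e.g. `cargo +nightly bench`. They are also single-threaded, so the shared-lock benefit called out in the commit message is not exercised here. Assuming the lazy cache's `get` needs only `&self` (the stated motivation; the cache source is not part of this excerpt), a concurrent read path could look roughly like this hypothetical helper:

use std::sync::RwLock;

// Hypothetical sketch: assumes `lru::lazy::LruCache::get` takes `&self` and
// returns `Option<&V>`, as the commit message's shared-lock argument implies.
// The eager `lru::LruCache::get` takes `&mut self` and would need `.write()`.
fn lookup(cache: &RwLock<lru::lazy::LruCache<usize, String>>, key: usize) -> Option<String> {
    // A shared read guard suffices because the access ordinal is updated
    // with atomics instead of re-linking the recency list.
    cache.read().unwrap().get(&key).cloned()
}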
