feat: make page cache upper levels configurable
rphmeier authored and pepyakin committed Feb 12, 2025
1 parent d4236e7 commit 0bab5c0
Showing 3 changed files with 38 additions and 18 deletions.
nomt/src/lib.rs (2 changes: 1 addition & 1 deletion)
@@ -279,7 +279,7 @@ impl<T: HashAlgorithm> Nomt<T> {
 
         if o.prepopulate_page_cache {
             let io_handle = store.io_pool().make_handle();
-            merkle::prepopulate_cache(io_handle, &page_cache, &store, 3)?;
+            merkle::prepopulate_cache(io_handle, &page_cache, &store, o.page_cache_upper_levels)?;
         }
 
         Ok(Self {
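This call previously hard-coded 3 upper levels; it now threads through whatever the caller configured. A minimal usage sketch, assuming the crate's `Nomt::open` entry point, the `Options::new` constructor, and a `Blake3Hasher` hash algorithm (none of these appear in this diff, and other required options such as the database path are omitted):

use nomt::{Nomt, Options};

let mut opts = Options::new();
// Pin page-tree levels 1..=3 permanently (roughly 1GiB resident; see options.rs below).
opts.page_cache_upper_levels(3);
// Pay some startup I/O to warm the pinned levels for predictable worst-case reads.
opts.prepopulate_page_cache(true);
let nomt = Nomt::<Blake3Hasher>::open(opts)?;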
nomt/src/options.rs (18 changes: 17 additions & 1 deletion)
@@ -28,6 +28,7 @@ pub struct Options {
     /// Whether to prepopulate the upper layers of the page cache on startup.
     /// This incurs some I/O on startup but leads to predictable worst-case performance.
     pub(crate) prepopulate_page_cache: bool,
+    pub(crate) page_cache_upper_levels: usize,
 }
 
 impl Options {
@@ -52,6 +53,7 @@ impl Options {
             page_cache_size: 256,
             leaf_cache_size: 256,
             prepopulate_page_cache: false,
+            page_cache_upper_levels: 2,
         }
     }
 
@@ -139,6 +141,8 @@ impl Options {
 
     /// Sets the size of the page cache in MiB.
     ///
+    /// This does not count the memory used by the upper levels of the page
+    /// cache. See [`Self::page_cache_upper_levels`].
     /// Rounded down to the nearest byte multiple of 4096.
     ///
     /// Default: 256MiB.
@@ -155,13 +159,25 @@ impl Options {
         self.leaf_cache_size = leaf_cache_size;
     }
 
-    /// Sets whether to prepopulate the upper layers of the page cache on startup.
+    /// Sets whether to prepopulate the upper levels of the page cache on startup.
+    /// Has no effect if [`Options::page_cache_upper_levels`] is set to 0.
     ///
     /// This incurs some I/O on startup but leads to predictable worst-case performance.
     /// Default: false
     pub fn prepopulate_page_cache(&mut self, prepopulate: bool) {
         self.prepopulate_page_cache = prepopulate;
     }
+
+    /// Sets the number of upper levels of the page tree to keep permanently
+    /// cached.
+    ///
+    /// Each level adds 64x the RAM burden of the previous.
+    /// Level 1 uses ≈256KiB, level 2 ≈16MiB, level 3 ≈1GiB.
+    ///
+    /// Default: 2
+    pub fn page_cache_upper_levels(&mut self, upper_levels: usize) {
+        self.page_cache_upper_levels = upper_levels;
+    }
 }
 
 #[test]
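The 64x factor and the sizes quoted in the new doc comment follow from the page layout: pages are 4096 bytes, and each page has 64 child pages (it covers six binary-trie levels, 2^6 = 64), so level n of the page tree holds 64^n pages. A quick arithmetic check under those two assumptions:

/// Approximate resident size, in bytes, of page-tree level `n`
/// (level 0 is the single root page), assuming a 64-way page
/// fanout and 4096-byte pages.
fn level_bytes(n: u32) -> u64 {
    64u64.pow(n) * 4096
}

#[test]
fn upper_level_sizes_match_docs() {
    assert_eq!(level_bytes(1), 256 * 1024);         // 256KiB
    assert_eq!(level_bytes(2), 16 * 1024 * 1024);   // 16MiB
    assert_eq!(level_bytes(3), 1024 * 1024 * 1024); // 1GiB
}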
nomt/src/page_cache.rs (36 changes: 20 additions & 16 deletions)
@@ -21,9 +21,6 @@ use std::{collections::HashMap, fmt, num::NonZeroUsize, sync::Arc};
 // (2^(DEPTH + 1)) - 2
 pub const NODES_PER_PAGE: usize = (1 << DEPTH + 1) - 2;
 
-// The fixed number of levels we always preserve in-memory.
-const FIXED_LEVELS: usize = 3;
-
 fn read_node(data: &FatPage, index: usize) -> Node {
     assert!(index < NODES_PER_PAGE, "index out of bounds");
     let start = index * 32;
@@ -155,8 +152,8 @@ struct CacheShardLocked {
 }
 
 impl CacheShardLocked {
-    fn get(&mut self, page_id: &PageId) -> Option<&CacheEntry> {
-        if page_id.depth() <= FIXED_LEVELS {
+    fn get(&mut self, fixed_levels: usize, page_id: &PageId) -> Option<&CacheEntry> {
+        if page_id.depth() <= fixed_levels {
             self.fixed_level_cache.get(page_id)
         } else {
             self.cached.get(page_id)
@@ -165,26 +162,27 @@ impl CacheShardLocked {
 
     fn get_or_insert(
         &mut self,
+        fixed_levels: usize,
         page_id: PageId,
         entry: impl FnOnce() -> CacheEntry,
     ) -> &CacheEntry {
-        if page_id.depth() <= FIXED_LEVELS {
+        if page_id.depth() <= fixed_levels {
             &*self.fixed_level_cache.entry(page_id).or_insert_with(entry)
         } else {
             self.cached.get_or_insert(page_id, entry)
         }
     }
 
-    fn insert(&mut self, page_id: PageId, entry: CacheEntry) {
-        if page_id.depth() <= FIXED_LEVELS {
+    fn insert(&mut self, fixed_levels: usize, page_id: PageId, entry: CacheEntry) {
+        if page_id.depth() <= fixed_levels {
             self.fixed_level_cache.insert(page_id, entry);
         } else {
             self.cached.put(page_id, entry);
         }
     }
 
-    fn remove(&mut self, page_id: &PageId) {
-        if page_id.depth() <= FIXED_LEVELS {
+    fn remove(&mut self, fixed_levels: usize, page_id: &PageId) {
+        if page_id.depth() <= fixed_levels {
             self.fixed_level_cache.remove(page_id);
         } else {
             self.cached.pop(page_id);
@@ -203,6 +201,7 @@ struct Shared {
     shards: Vec<CacheShard>,
     root_page: RwLock<Option<CacheEntry>>,
     page_rw_pass_domain: RwPassDomain,
+    fixed_levels: usize,
     metrics: Metrics,
 }
 
@@ -302,6 +301,7 @@ impl PageCache {
                 root_page: RwLock::new(root_page_entry),
                 page_rw_pass_domain: domain,
                 metrics: metrics.into().unwrap_or(Metrics::new(false)),
+                fixed_levels: o.page_cache_upper_levels,
             }),
         }
     }
@@ -339,7 +339,7 @@ impl PageCache {
         };
 
         let mut shard = self.shard(shard_index).locked.lock();
-        match shard.get(&page_id) {
+        match shard.get(self.shared.fixed_levels, &page_id) {
             Some(cache_item) => Some((
                 Page {
                     inner: cache_item.page_data.clone(),
@@ -396,8 +396,9 @@ impl PageCache {
         };
 
         let mut shard = self.shard(shard_index).locked.lock();
-        let cache_entry =
-            shard.get_or_insert(page_id, || CacheEntry::init(page.inner, bucket_index));
+        let cache_entry = shard.get_or_insert(self.shared.fixed_levels, page_id, || {
+            CacheEntry::init(page.inner, bucket_index)
+        });
 
         Page {
             inner: cache_entry.page_data.clone(),
@@ -428,10 +429,13 @@ impl PageCache {
         let shard_index = self.shard_index_for(&page_id).unwrap();
 
         if let Some((page, bucket_index)) = maybe_page {
-            shard_guards[shard_index]
-                .insert(page_id.clone(), CacheEntry::init(page.inner, bucket_index));
+            shard_guards[shard_index].insert(
+                self.shared.fixed_levels,
+                page_id.clone(),
+                CacheEntry::init(page.inner, bucket_index),
+            );
         } else {
-            shard_guards[shard_index].remove(&page_id)
+            shard_guards[shard_index].remove(self.shared.fixed_levels, &page_id)
         }
     }
 }
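Setting aside the sharding and rw-pass machinery, the structure these changes make configurable is a two-tier cache: an unbounded map pinning every page at depth `fixed_levels` or above, and an LRU that deeper pages compete for. A stripped-down sketch of that dispatch, with illustrative types only (`lru::LruCache` stands in for the internal LRU; the real code keys entries by `PageId` and splits them across shards):

use std::collections::HashMap;
use std::hash::Hash;
use std::num::NonZeroUsize;

struct TwoTierCache<K: Hash + Eq, V> {
    fixed_levels: usize,      // from Options::page_cache_upper_levels
    pinned: HashMap<K, V>,    // depths 1..=fixed_levels: never evicted
    lru: lru::LruCache<K, V>, // deeper pages: least-recently-used eviction
}

impl<K: Hash + Eq, V> TwoTierCache<K, V> {
    fn new(fixed_levels: usize, lru_capacity: NonZeroUsize) -> Self {
        TwoTierCache {
            fixed_levels,
            pinned: HashMap::new(),
            lru: lru::LruCache::new(lru_capacity),
        }
    }

    fn insert(&mut self, depth: usize, key: K, value: V) {
        if depth <= self.fixed_levels {
            self.pinned.insert(key, value); // pinned tier only ever grows
        } else {
            self.lru.put(key, value); // may evict another deep page
        }
    }

    fn get(&mut self, depth: usize, key: &K) -> Option<&V> {
        if depth <= self.fixed_levels {
            self.pinned.get(key)
        } else {
            self.lru.get(key) // also bumps recency
        }
    }
}

Making `fixed_levels` a runtime field on `Shared` rather than the old `FIXED_LEVELS` constant is what turns the pinned tier's footprint into an operator choice instead of a compile-time decision.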