feat: redesign nonce cache #5602

Draft
wants to merge 3 commits into
base: develop
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion stacks-common/src/types/sqlite.rs
@@ -16,7 +16,7 @@

use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};

-use super::chainstate::VRFSeed;
+use super::chainstate::{StacksAddress, VRFSeed};
use crate::deps_common::bitcoin::util::hash::Sha256dHash;
use crate::types::chainstate::{
BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, TrieHash,
@@ -42,6 +42,13 @@ impl ToSql for Sha256dHash {
}
}

impl rusqlite::types::ToSql for StacksAddress {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput> {
let addr_str = self.to_string();
Ok(addr_str.into())
}
}

// Implement rusqlite traits for a bunch of structs that used to be defined
// in the chainstate code
impl_byte_array_rusqlite_only!(ConsensusHash);
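As a quick illustration of the impl above, a StacksAddress can now be bound directly as a rusqlite query parameter (a minimal sketch; the nonces table and the save_nonce helper are hypothetical, not part of this PR):

use rusqlite::{params, Connection};
use stacks_common::types::chainstate::StacksAddress;

// Hypothetical helper: persist a nonce keyed by the address's string form,
// relying on the new ToSql impl to serialize the StacksAddress.
fn save_nonce(conn: &Connection, addr: &StacksAddress, nonce: u64) -> rusqlite::Result<()> {
    conn.execute(
        "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)",
        params![addr, nonce],
    )?;
    Ok(())
}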
255 changes: 255 additions & 0 deletions stacks-common/src/util/lru_cache.rs
@@ -0,0 +1,255 @@
// Copyright (C) 2024 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use std::collections::HashMap;
Contributor:

A while ago we switched to using hashbrown in Clarity for performance reasons. It's a drop-in replacement with the same API. See #4389

Contributor Author:

Switched to hashbrown in e4f0b7b.
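For reference, the swap is a one-line change (a minimal sketch, assuming the hashbrown crate is already a workspace dependency, as it is for Clarity):

// before: use std::collections::HashMap;
use hashbrown::HashMap; // drop-in replacement with the same API and a faster default hasher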


/// Node in the doubly linked list
struct Node<K, V> {
key: K,
value: V,
dirty: bool,
next: usize,
prev: usize,
}

/// LRU cache for account nonces.
/// Implemented as a doubly linked list stored in a `Vec`, where the index
/// `capacity` is used as a sentinel meaning "none".
pub struct LruCache<K, V> {
capacity: usize,
/// Map from address to an offset in the linked list
cache: HashMap<K, usize>,
/// Doubly linked list of values in order of most recently used
order: Vec<Node<K, V>>,
/// Index of the head of the linked list -- the most recently used element
head: usize,
/// Index of the tail of the linked list -- the least recently used element
tail: usize,
}

impl<K: Eq + std::hash::Hash + Clone, V: Copy> LruCache<K, V> {
/// Create a new LRU cache with the given capacity
pub fn new(capacity: usize) -> Self {
LruCache {
capacity,
cache: HashMap::new(),
order: Vec::with_capacity(capacity),
head: capacity,
tail: capacity,
}
}

/// Get the value for the given key
pub fn get(&mut self, key: &K) -> Option<V> {
if let Some(node) = self.cache.get(key) {
// Move the node to the head of the LRU list
let node = *node;

if node != self.head {
let prev = self.order[node].prev;
let next = self.order[node].next;

if node == self.tail {
// If this is the tail, update the tail
self.tail = prev;
} else {
// Else, update the next node's prev pointer
self.order[next].prev = prev;
}

self.order[prev].next = next;
self.order[node].prev = self.capacity;
self.order[node].next = self.head;
self.order[self.head].prev = node;
self.head = node;
}

Some(self.order[node].value)
} else {
None
}
}

/// Insert a key-value pair into the cache, marking it as dirty.
/// Returns `Some((K, V))` if a dirty value was evicted.
pub fn insert(&mut self, key: K, value: V) -> Option<(K, V)> {
self.insert_with_dirty(key, value, true)
}

/// Insert a key-value pair into the cache, marking it as clean.
/// Returns `Some((K, V))` if a dirty value was evicted.
pub fn insert_clean(&mut self, key: K, value: V) -> Option<(K, V)> {
self.insert_with_dirty(key, value, false)
}

/// Insert a key-value pair into the cache with the given `dirty` flag.
/// Returns `Some((K, V))` if a dirty value was evicted.
pub fn insert_with_dirty(&mut self, key: K, value: V, dirty: bool) -> Option<(K, V)> {
let mut evicted = None;
if let Some(node) = self.cache.get(&key) {
// Update the value for the key
let node = *node;
self.order[node].value = value;
self.order[node].dirty = dirty;

// Just call get to handle updating the LRU list
self.get(&key);
} else {
let index = if self.cache.len() == self.capacity {
// Take the place of the least recently used element.
// First, remove it from the tail of the LRU list
let index = self.tail;
let prev = self.order[index].prev;
self.order[prev].next = self.capacity;
self.tail = prev;

// Remove it from the cache
self.cache.remove(&self.order[index].key);

// If it is dirty, save the key-value pair to return
if self.order[index].dirty {
evicted = Some((
std::mem::replace(&mut self.order[index].key, key.clone()),
self.order[index].value,
));
}

// Insert this new value into the cache
self.cache.insert(key, index);

// Update the node with the new key-value pair, inserting it at
// the head of the LRU list
self.order[index].value = value;
self.order[index].dirty = dirty;
self.order[index].next = self.head;
self.order[index].prev = self.capacity;

index
} else {
// Insert a new key-value pair
let node = Node {
key: key.clone(),
value,
dirty,
next: self.head,
prev: self.capacity,
};

let index = self.order.len();
self.order.push(node);
self.cache.insert(key, index);

index
};

// Put it at the head of the LRU list
if self.head != self.capacity {
self.order[self.head].prev = index;
} else {
self.tail = index;
}

self.head = index;
}
evicted
}

/// Call `f` on each dirty entry, from most to least recently used, marking
/// each as clean. Returns early with the first error from `f`, if any.
pub fn flush<E>(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> {
let mut index = self.head;
while index != self.capacity {
let next = self.order[index].next;
if self.order[index].dirty {
let value = self.order[index].value;
f(&self.order[index].key, value)?;
self.order[index].dirty = false;
}
index = next;
}
Ok(())
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_lru_cache() {
let mut cache = LruCache::new(2);

cache.insert(1, 1);
cache.insert(2, 2);
assert_eq!(cache.get(&1), Some(1));
cache.insert(3, 3);
assert_eq!(cache.get(&2), None);
cache.insert(4, 4);
assert_eq!(cache.get(&1), None);
assert_eq!(cache.get(&3), Some(3));
assert_eq!(cache.get(&4), Some(4));
}

#[test]
fn test_lru_cache_update() {
let mut cache = LruCache::new(2);

cache.insert(1, 1);
cache.insert(2, 2);
cache.insert(1, 10);
assert_eq!(cache.get(&1), Some(10));
cache.insert(3, 3);
assert_eq!(cache.get(&2), None);
cache.insert(2, 4);
assert_eq!(cache.get(&2), Some(4));
assert_eq!(cache.get(&3), Some(3));
}

#[test]
fn test_lru_cache_evicted() {
let mut cache = LruCache::new(2);

assert!(cache.insert(1, 1).is_none());
assert!(cache.insert(2, 2).is_none());
let evicted = cache.insert(3, 3).expect("expected an eviction");
assert_eq!(evicted, (1, 1));
}

#[test]
fn test_lru_cache_flush() {
let mut cache = LruCache::new(2);

cache.insert(1, 1);

let mut flushed = Vec::new();
cache
.flush(|k, v| {
flushed.push((*k, v));
Ok::<(), ()>(())
})
.unwrap();

assert_eq!(flushed, vec![(1, 1)]);

cache.insert(1, 3);
cache.insert(2, 2);

let mut flushed = Vec::new();
cache
.flush(|k, v| {
flushed.push((*k, v));
Ok::<(), ()>(())
})
.unwrap();

assert_eq!(flushed, vec![(2, 2), (1, 3)]);
}
}
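Taken together, here is a minimal usage sketch of the cache above acting as a nonce cache (editor's illustration: u64 keys stand in for addresses, and the flush callback just prints instead of writing to a database):

use stacks_common::util::lru_cache::LruCache;

fn nonce_cache_example() {
    // Hold at most 1024 (account, nonce) pairs.
    let mut cache: LruCache<u64, u64> = LruCache::new(1024);

    // Seed an entry from the database; clean entries are skipped on flush.
    cache.insert_clean(100, 5);

    // Record a locally observed nonce bump; this marks the entry dirty.
    cache.insert(100, 6);

    // Persist only the dirty entries (most recently used first), marking them clean.
    cache
        .flush(|account, nonce| {
            println!("would persist nonce {} for account {}", nonce, account);
            Ok::<(), ()>(())
        })
        .expect("printing cannot fail");
}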
1 change: 1 addition & 0 deletions stacks-common/src/util/mod.rs
@@ -21,6 +21,7 @@ pub mod macros;
pub mod chunked_encoding;
pub mod db;
pub mod hash;
pub mod lru_cache;
pub mod pair;
pub mod pipe;
pub mod retry;
@@ -4912,7 +4912,7 @@ fn mempool_walk_test_users_10_rounds_3_cache_size_2000_null_prob_100() {
fn paramaterized_mempool_walk_test(
num_users: usize,
num_rounds: usize,
-nonce_and_candidate_cache_size: u64,
+nonce_and_candidate_cache_size: usize,
consider_no_estimate_tx_prob: u8,
timeout_ms: u128,
) {
8 changes: 4 additions & 4 deletions stackslib/src/config/mod.rs
@@ -2109,8 +2109,8 @@ pub struct MinerConfig {
/// Wait for a downloader pass before mining.
/// This can only be disabled in testing; it can't be changed in the config file.
pub wait_for_block_download: bool,
-pub nonce_cache_size: u64,
-pub candidate_retry_cache_size: u64,
+pub nonce_cache_size: usize,
+pub candidate_retry_cache_size: usize,
pub unprocessed_block_deadline_secs: u64,
pub mining_key: Option<Secp256k1PrivateKey>,
/// Amount of time while mining in nakamoto to wait in between mining interim blocks
@@ -2563,8 +2563,8 @@ pub struct MinerConfigFile {
pub probability_pick_no_estimate_tx: Option<u8>,
pub block_reward_recipient: Option<String>,
pub segwit: Option<bool>,
-pub nonce_cache_size: Option<u64>,
-pub candidate_retry_cache_size: Option<u64>,
+pub nonce_cache_size: Option<usize>,
+pub candidate_retry_cache_size: Option<usize>,
pub unprocessed_block_deadline_secs: Option<u64>,
pub mining_key: Option<String>,
pub wait_on_interim_blocks_ms: Option<u64>,