Add config to keep some extra days of data
coderofstuff committed Nov 15, 2024
Commit ceb2607 (1 parent: 1d3b9a9)
Showing 3 changed files with 55 additions and 3 deletions.
consensus/core/src/config/mod.rs (4 additions, 0 deletions)
@@ -68,6 +68,9 @@ pub struct Config {

/// A scale factor to apply to memory allocation bounds
pub ram_scale: f64,

/// The number of extra days of data from pruning point to keep
pub keep_extra_days_data: u32,
}

impl Config {
@@ -95,6 +98,7 @@ impl Config {
initial_utxo_set: Default::default(),
disable_upnp: false,
ram_scale: 1.0,
keep_extra_days_data: 0,
}
}

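For orientation, here is a minimal sketch of the new setting in isolation: only the added field is modeled, together with the day-to-millisecond conversion the pruning processor applies to it further down. `ConfigSketch` and `extra_days_ms` are illustrative names; this is not the repository's actual `Config`, which carries many more fields.

```rust
/// Minimal stand-in for the real `Config`; only the new field is modeled.
struct ConfigSketch {
    /// The number of extra days of data from pruning point to keep (0 disables the feature)
    keep_extra_days_data: u32,
}

impl ConfigSketch {
    /// days * seconds/day * milliseconds/second, as computed in the pruning processor below
    fn extra_days_ms(&self) -> u64 {
        self.keep_extra_days_data as u64 * 86400 * 1000
    }
}

fn main() {
    let config = ConfigSketch { keep_extra_days_data: 2 };
    // Two extra days translate to a 172_800_000 ms retention window.
    assert_eq!(config.extra_days_ms(), 172_800_000);
}
```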
consensus/src/pipeline/pruning_processor/processor.rs (40 additions, 3 deletions)
@@ -25,7 +25,7 @@ use crate::{
use crossbeam_channel::Receiver as CrossbeamReceiver;
use itertools::Itertools;
use kaspa_consensus_core::{
- blockhash::ORIGIN,
+ blockhash::{BlockHashExtensions, ORIGIN},
blockstatus::BlockStatus::StatusHeaderOnly,
config::Config,
muhash::MuHashExtensions,
@@ -378,13 +378,16 @@ impl PruningProcessor {
drop(tips_write);
}

// Adjust the pruning point back if needed
let adjusted_root = self.adjust_for_extra_days(new_pruning_point);

// Now we traverse the anti-future of the new pruning point starting from origin and going up.
// The most efficient way to traverse the entire DAG from the bottom-up is via the reachability tree
let mut queue = VecDeque::<Hash>::from_iter(reachability_read.get_children(ORIGIN).unwrap().iter().copied());
let (mut counter, mut traversed) = (0, 0);
info!("Header and Block pruning: starting traversal from: {} (genesis: {})", queue.iter().reusable_format(", "), genesis);
while let Some(current) = queue.pop_front() {
- if reachability_read.is_dag_ancestor_of_result(new_pruning_point, current).unwrap() {
+ if reachability_read.is_dag_ancestor_of_result(adjusted_root, current).unwrap() {
continue;
}
traversed += 1;
@@ -517,12 +520,46 @@ impl PruningProcessor {
// Set the history root to the new pruning point only after we successfully pruned its past
let mut pruning_point_write = self.pruning_point_store.write();
let mut batch = WriteBatch::default();
- pruning_point_write.set_history_root(&mut batch, new_pruning_point).unwrap();
+ pruning_point_write.set_history_root(&mut batch, adjusted_root).unwrap();
self.db.write(batch).unwrap();
drop(pruning_point_write);
}
}

/// Adjusts the passed hash backwards through the selected parent chain until enough
/// history is retained to cover the configured number of extra days of data
fn adjust_for_extra_days(&self, reference_hash: Hash) -> Hash {
// Short circuit if not keeping extra days to avoid doing store lookups
if self.config.keep_extra_days_data == 0 {
return reference_hash;
}

let pp_reference_timestamp = self.headers_store.get_compact_header_data(reference_hash).unwrap().timestamp;
// days * seconds/day * milliseconds/second
let extra_days_ms = self.config.keep_extra_days_data as u64 * 86400 * 1000;

let mut adjusted_hash = reference_hash;

while pp_reference_timestamp.saturating_sub(self.headers_store.get_compact_header_data(adjusted_hash).unwrap().timestamp)
< extra_days_ms
{
let selected_parent = if let Ok(selected_parent) = self.ghostdag_store.get_selected_parent(adjusted_hash) {
selected_parent
} else {
break;
};

if selected_parent.is_origin() || !self.headers_store.has(selected_parent).unwrap() {
// Can't go further back
break;
}

adjusted_hash = selected_parent;
}

adjusted_hash
}

fn past_pruning_points(&self) -> BlockHashSet {
(0..self.pruning_point_store.read().get().unwrap().index)
.map(|index| self.past_pruning_points_store.get(index).unwrap())
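To make the new backward walk concrete, here is a self-contained sketch of the same threshold logic run against in-memory stand-ins for the headers store and the ghostdag selected-parent relation. The types, the u64 "hashes", and the toy chain are invented for illustration and are not the repository's store APIs.

```rust
use std::collections::HashMap;

/// Compact header data for the sketch: just a timestamp in milliseconds.
#[derive(Clone, Copy)]
struct HeaderSketch {
    timestamp: u64,
}

/// In-memory stand-ins for the headers store and the selected-parent relation,
/// keyed by a simple u64 "hash".
struct StoresSketch {
    headers: HashMap<u64, HeaderSketch>,
    selected_parent: HashMap<u64, u64>,
}

impl StoresSketch {
    /// Mirrors the shape of `adjust_for_extra_days`: walk back through selected
    /// parents until the timestamp gap from the reference covers `extra_days`.
    fn adjust_for_extra_days(&self, reference_hash: u64, extra_days: u32) -> u64 {
        if extra_days == 0 {
            return reference_hash;
        }
        let reference_ts = self.headers[&reference_hash].timestamp;
        let extra_days_ms = extra_days as u64 * 86400 * 1000;
        let mut adjusted = reference_hash;
        while reference_ts.saturating_sub(self.headers[&adjusted].timestamp) < extra_days_ms {
            match self.selected_parent.get(&adjusted) {
                // Stop when the parent is unknown (origin or missing header data).
                Some(parent) if self.headers.contains_key(parent) => adjusted = *parent,
                _ => break,
            }
        }
        adjusted
    }
}

fn main() {
    // A toy chain 3 -> 2 -> 1 -> 0 with one block every 12 hours.
    let half_day_ms = 43_200_000u64;
    let headers = (0..4u64).map(|h| (h, HeaderSketch { timestamp: h * half_day_ms })).collect();
    let selected_parent = [(3u64, 2u64), (2, 1), (1, 0)].into_iter().collect();
    let stores = StoresSketch { headers, selected_parent };

    // Keeping one extra day from block 3 walks back two steps, to block 1.
    assert_eq!(stores.adjust_for_extra_days(3, 1), 1);
    // With the feature disabled the reference hash is returned untouched.
    assert_eq!(stores.adjust_for_extra_days(3, 0), 3);
}
```

As in the real code, the walk stops either once the timestamp gap reaches the configured window or when no usable selected parent remains, which is what the `is_origin()` / `has()` guard above handles.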
kaspad/src/args.rs (11 additions, 0 deletions)
@@ -90,6 +90,7 @@ pub struct Args {
#[serde(rename = "nogrpc")]
pub disable_grpc: bool,
pub ram_scale: f64,
pub keep_extra_days_data: u32,
}

impl Default for Args {
@@ -140,6 +141,7 @@
disable_dns_seeding: false,
disable_grpc: false,
ram_scale: 1.0,
keep_extra_days_data: 0,
}
}
}
@@ -159,6 +161,7 @@ impl Args {
config.p2p_listen_address = self.listen.unwrap_or(ContextualNetAddress::unspecified());
config.externalip = self.externalip.map(|v| v.normalize(config.default_p2p_port()));
config.ram_scale = self.ram_scale;
config.keep_extra_days_data = self.keep_extra_days_data;

#[cfg(feature = "devnet-prealloc")]
if let Some(num_prealloc_utxos) = self.num_prealloc_utxos {
@@ -369,6 +372,13 @@ Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0
.help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. Nodes with
a large RAM (~64GB) can set this value to ~3.0-4.0 and gain superior performance especially for syncing peers faster"),
)
.arg(
Arg::new("keep-extra-days-data")
.long("keep-extra-days-data")
.require_equals(true)
.value_parser(clap::value_parser!(u32))
.help("Keep an extra N days of data before the pruning point after each pruning period")
)
;

#[cfg(feature = "devnet-prealloc")]
@@ -448,6 +458,7 @@ impl Args {
disable_dns_seeding: arg_match_unwrap_or::<bool>(&m, "nodnsseed", defaults.disable_dns_seeding),
disable_grpc: arg_match_unwrap_or::<bool>(&m, "nogrpc", defaults.disable_grpc),
ram_scale: arg_match_unwrap_or::<f64>(&m, "ram-scale", defaults.ram_scale),
keep_extra_days_data: arg_match_unwrap_or::<u32>(&m, "keep-extra-days-data", defaults.keep_extra_days_data),

#[cfg(feature = "devnet-prealloc")]
num_prealloc_utxos: m.get_one::<u64>("num-prealloc-utxos").cloned(),
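As a usage sketch, the snippet below wires an equivalent flag definition into a minimal clap 4 command and parses it, mirroring how `args.rs` feeds `keep_extra_days_data` into `Config`. The binary name, the standalone `main`, and the fallback to 0 at the end are illustrative assumptions rather than the repository's actual CLI wiring.

```rust
use clap::{Arg, Command};

fn main() {
    // A pared-down command with only the new flag; the real kaspad CLI defines many more arguments.
    let cmd = Command::new("kaspad-sketch").arg(
        Arg::new("keep-extra-days-data")
            .long("keep-extra-days-data")
            .require_equals(true)
            .value_parser(clap::value_parser!(u32))
            .help("Keep an extra N days of data before the pruning point after each pruning period"),
    );

    // require_equals(true) means the value must be attached with '=', e.g. --keep-extra-days-data=7
    let matches = cmd.get_matches_from(["kaspad-sketch", "--keep-extra-days-data=7"]);
    let days = matches.get_one::<u32>("keep-extra-days-data").copied().unwrap_or(0);
    assert_eq!(days, 7);
    println!("keep_extra_days_data = {days}");
}
```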
