From fb306ea9a5cc591bcded9753970b2a2400537e47 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 1 Dec 2024 07:34:59 -0500 Subject: [PATCH 01/15] chore: fix #5502 by reducing the PoX sync watchdog --- testnet/stacks-node/src/run_loop/neon.rs | 48 +- testnet/stacks-node/src/syncctl.rs | 552 ++---------------- .../src/tests/nakamoto_integrations.rs | 2 +- 3 files changed, 48 insertions(+), 554 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..475265869a 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1156,19 +1156,8 @@ impl RunLoop { let mut sortition_db_height = rc_aligned_height; let mut burnchain_height = sortition_db_height; - let mut num_sortitions_in_last_cycle = 1; // prepare to fetch the first reward cycle! - let mut target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(burnchain_height) - .expect("BUG: block height is not in a reward cycle") - + 1, - ), - burnchain.get_headers_height() - 1, - ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1196,17 +1185,13 @@ impl RunLoop { let remote_chain_height = burnchain.get_headers_height() - 1; - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // wait until it's okay to process the next reward cycle's sortitions - let ibd = match self.get_pox_watchdog().pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions. + let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, remote_chain_height, - num_sortitions_in_last_cycle, ) { - Ok(ibd) => ibd, + Ok(x) => x, Err(e) => { debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; @@ -1220,9 +1205,6 @@ impl RunLoop { 0.0 }; - // will recalculate this in the following loop - num_sortitions_in_last_cycle = 0; - // Download each burnchain block and process their sortitions. This, in turn, will // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and // process them. 
This loop runs for one reward cycle, so that the next pass of the @@ -1270,8 +1252,6 @@ impl RunLoop { "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); - let mut sort_count = 0; - debug!("Runloop: block mining until we process all sortitions"); signal_mining_blocked(globals.get_miner_status()); @@ -1289,9 +1269,6 @@ impl RunLoop { "Failed to find block in fork processed by burnchain indexer", ) }; - if block.sortition { - sort_count += 1; - } let sortition_id = &block.sortition_id; @@ -1338,9 +1315,8 @@ impl RunLoop { debug!("Runloop: enable miner after processing sortitions"); signal_mining_ready(globals.get_miner_status()); - num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})" ); sortition_db_height = next_sortition_height; @@ -1359,22 +1335,6 @@ impl RunLoop { } } - // advance one reward cycle at a time. - // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. - // Otherwise, this is burnchain_tip + reward_cycle_len - let next_target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: burnchain height before system start") - + 1, - ), - remote_chain_height, - ); - - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); - target_burnchain_block_height = next_target_burnchain_block_height; - if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 395d829c8f..488234d21d 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -1,20 +1,28 @@ -use std::collections::VecDeque; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use stacks::burnchains::{Burnchain, Error as burnchain_error}; -use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::burnchains::BurnchainTip; use crate::Config; -// amount of time to wait for an inv or download sync to complete. 
-// These _really should_ complete before the PoX sync watchdog permits processing the next reward -// cycle, so this number is intentionally high (like, there's something really wrong with your -// network if your node is actualy waiting a day in-between reward cycles). -const SYNC_WAIT_SECS: u64 = 24 * 3600; - #[derive(Clone)] pub struct PoxSyncWatchdogComms { /// how many passes in the p2p state machine have taken place since startup? @@ -56,22 +64,6 @@ impl PoxSyncWatchdogComms { self.last_ibd.load(Ordering::SeqCst) } - /// Wait for at least one inv-sync state-machine passes - pub fn wait_for_inv_sync_pass(&self, timeout: u64) -> Result { - let current = self.get_inv_sync_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_inv_sync_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one inv-sync pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { let deadline = secs + get_epoch_time_secs(); while get_epoch_time_secs() < deadline { @@ -83,21 +75,6 @@ impl PoxSyncWatchdogComms { Ok(()) } - pub fn wait_for_download_pass(&self, timeout: u64) -> Result { - let current = self.get_download_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_download_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one download pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - pub fn should_keep_running(&self) -> bool { self.should_keep_running.load(Ordering::SeqCst) } @@ -124,82 +101,25 @@ impl PoxSyncWatchdogComms { /// unless it's reasonably sure that it has processed all Stacks blocks for this reward cycle. /// This struct monitors the Stacks chainstate to make this determination. pub struct PoxSyncWatchdog { - /// number of attachable but unprocessed staging blocks over time - new_attachable_blocks: VecDeque, - /// number of newly-processed staging blocks over time - new_processed_blocks: VecDeque, - /// last time we asked for attachable blocks - last_attachable_query: u64, - /// last time we asked for processed blocks - last_processed_query: u64, - /// number of samples to take - max_samples: u64, - /// maximum number of blocks to count per query (affects performance!) - max_staging: u64, - /// when did we first start watching? - watch_start_ts: u64, - /// when did we first see a flatline in block-processing rate? - last_block_processed_ts: u64, - /// estimated time for a block to get downloaded. Used to infer how long to wait for the first - /// blocks to show up when waiting for this reward cycle. - estimated_block_download_time: f64, - /// estimated time for a block to get processed -- from when it shows up as attachable to when - /// it shows up as processed. Used to infer how long to wait for the last block to get - /// processed before unblocking burnchain sync for the next reward cycle. 
- estimated_block_process_time: f64, - /// time between burnchain syncs in stead state + /// time between burnchain syncs in steady state steady_state_burnchain_sync_interval: u64, - /// when to re-sync under steady state - steady_state_resync_ts: u64, - /// chainstate handle - chainstate: StacksChainState, /// handle to relayer thread that informs the watchdog when the P2P state-machine does stuff relayer_comms: PoxSyncWatchdogComms, /// should this sync watchdog always download? used in integration tests. unconditionally_download: bool, } -const PER_SAMPLE_WAIT_MS: u64 = 1000; - impl PoxSyncWatchdog { pub fn new( config: &Config, watchdog_comms: PoxSyncWatchdogComms, ) -> Result { - let mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let chainstate_path = config.get_chainstate_path_str(); let burnchain_poll_time = config.burnchain.poll_time_secs; - let download_timeout = config.connection_options.timeout; - let max_samples = config.node.pox_sync_sample_secs; let unconditionally_download = config.node.pox_sync_sample_secs == 0; - let marf_opts = config.node.get_marf_opts(); - - let (chainstate, _) = - match StacksChainState::open(mainnet, chain_id, &chainstate_path, Some(marf_opts)) { - Ok(cs) => cs, - Err(e) => { - return Err(format!( - "Failed to open chainstate at '{chainstate_path}': {e:?}" - )); - } - }; Ok(PoxSyncWatchdog { unconditionally_download, - new_attachable_blocks: VecDeque::new(), - new_processed_blocks: VecDeque::new(), - last_attachable_query: 0, - last_processed_query: 0, - max_samples, - max_staging: 10, - watch_start_ts: 0, - last_block_processed_ts: 0, - estimated_block_download_time: download_timeout as f64, - estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, - steady_state_resync_ts: 0, - chainstate, relayer_comms: watchdog_comms, }) } @@ -208,39 +128,9 @@ impl PoxSyncWatchdog { self.relayer_comms.clone() } - /// How many recently-added Stacks blocks are in an attachable state, up to $max_staging? - fn count_attachable_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_attachable_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_attachable_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_attachable_query = get_epoch_time_secs(); - Ok(cnt) - } - - /// How many recently-processed Stacks blocks are there, up to $max_staging? - /// ($max_staging is necessary to limit the runtime of this method, since the underlying SQL - /// uses COUNT(*), which in Sqlite is a _O(n)_ operation for _n_ rows) - fn count_processed_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_processed_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_processed_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_processed_query = get_epoch_time_secs(); - Ok(cnt) - } - /// Are we in the initial burnchain block download? i.e. is the burn tip snapshot far enough away /// from the burnchain height that we should be eagerly downloading snapshots? 
- pub fn infer_initial_burnchain_block_download( + fn infer_initial_burnchain_block_download( burnchain: &Burnchain, last_processed_height: u64, burnchain_height: u64, @@ -261,182 +151,23 @@ impl PoxSyncWatchdog { ibd } - /// Calculate the first derivative of a list of points - fn derivative(sample_list: &VecDeque) -> Vec { - let mut deltas = vec![]; - let mut prev = 0; - for (i, sample) in sample_list.iter().enumerate() { - if i == 0 { - prev = *sample; - continue; - } - let delta = *sample - prev; - prev = *sample; - deltas.push(delta); - } - deltas - } - - /// Is a derivative approximately flat, with a maximum absolute deviation from 0? - /// Return whether or not the sample is mostly flat, and how many points were over the given - /// error bar in either direction. - fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { - let mut total_deviates = 0; - let mut ret = true; - for d in deriv.iter() { - if d.abs() > error { - total_deviates += 1; - ret = false; - } - } - (ret, total_deviates) - } - - /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &[i64]) -> f64 { - // take average with low and high pass - let mut min = i64::MAX; - let mut max = i64::MIN; - for s in samples.iter() { - if *s < 0 { - // nonsensical result (e.g. due to clock drift?) - continue; - } - if *s < min { - min = *s; - } - if *s > max { - max = *s; - } - } - - let mut count = 0; - let mut sum = 0; - for s in samples.iter() { - if *s < 0 { - // nonsensical result - continue; - } - if *s == min { - continue; - } - if *s == max { - continue; - } - count += 1; - sum += *s; - } - - if count == 0 { - // no viable samples - 1.0 - } else { - (sum as f64) / (count as f64) - } - } - - /// estimate how long a block remains in an unprocessed state - fn estimate_block_process_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_wait_times = - StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) - .expect("BUG: failed to query chainstate block-processing times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) - } - - /// estimate how long a block takes to download - fn estimate_block_download_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_download_times = StacksChainState::measure_block_download_time( - chainstate.db(), - start_height, - end_height, - ) - .expect("BUG: failed to query chainstate block-download times"); - - 
PoxSyncWatchdog::hilo_filter_avg(&block_download_times) - } - - /// Reset internal state. Performed when it's okay to begin syncing the burnchain. - /// Updates estimate for block-processing time and block-downloading time. - fn reset(&mut self, burnchain: &Burnchain, tip_height: u64) { - // find the average (with low/high pass filter) time a block spends in the DB without being - // processed, during this reward cycle - self.estimated_block_process_time = - PoxSyncWatchdog::estimate_block_process_time(&self.chainstate, burnchain, tip_height); - - // find the average (with low/high pass filter) time a block spends downloading - self.estimated_block_download_time = - PoxSyncWatchdog::estimate_block_download_time(&self.chainstate, burnchain, tip_height); - - debug!( - "Estimated block download time: {}s. Estimated block processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time - ); - - self.new_attachable_blocks.clear(); - self.new_processed_blocks.clear(); - self.last_block_processed_ts = 0; - self.watch_start_ts = 0; - self.steady_state_resync_ts = 0; - } - - /// Wait until all of the Stacks blocks for the given reward cycle are seemingly downloaded and - /// processed. Do so by watching the _rate_ at which attachable Stacks blocks arrive and get - /// processed. - /// Returns whether or not we're still in the initial block download -- i.e. true if we're - /// still downloading burnchain blocks, or we haven't reached steady-state block-processing. + /// Wait until the next PoX anchor block arrives. + /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and + /// testnet. + /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, burnchain: &Burnchain, burnchain_tip: &BurnchainTip, // this is the highest burnchain snapshot we've sync'ed to burnchain_height: u64, // this is the absolute burnchain block height - num_sortitions_in_last_cycle: u64, - ) -> Result { - if self.watch_start_ts == 0 { - self.watch_start_ts = get_epoch_time_secs(); - } - if self.steady_state_resync_ts == 0 { - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - } + ) -> Result<(bool, u64), burnchain_error> { + let burnchain_rc = burnchain + .block_height_to_reward_cycle(burnchain_height) + .expect("FATAL: burnchain height is before system start"); + + let sortition_rc = burnchain + .block_height_to_reward_cycle(burnchain_tip.block_snapshot.block_height) + .expect("FATAL: sortition height is before system start"); let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( burnchain, @@ -444,220 +175,23 @@ impl PoxSyncWatchdog { burnchain_height, ); - // unconditionally download the first reward cycle - if burnchain_tip.block_snapshot.block_height - < burnchain.first_block_height + (burnchain.pox_constants.reward_cycle_length as u64) - { - debug!("PoX watchdog in first reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); + let max_sync_height = if sortition_rc < burnchain_rc { + burnchain + .reward_cycle_to_block_height(sortition_rc + 1) + .min(burnchain_height) + } else { + burnchain_tip + .block_snapshot + .block_height + .max(burnchain_height) + }; + self.relayer_comms.set_ibd(ibbd); + if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - - return Ok(ibbd); - } - - if self.unconditionally_download { - debug!("PoX watchdog set to unconditionally download 
(ibd={ibbd})"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - - let mut waited = false; - if ibbd { - // we are far behind the burnchain tip (i.e. not in the last reward cycle), - // so make sure the downloader knows about blocks it doesn't have yet so we can go and - // fetch its blocks before proceeding. - if num_sortitions_in_last_cycle > 0 { - debug!("PoX watchdog: Wait for at least one inventory state-machine pass..."); - self.relayer_comms.wait_for_inv_sync_pass(SYNC_WAIT_SECS)?; - waited = true; - } else { - debug!("PoX watchdog: In initial block download, and no sortitions to consider in this reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - } else { - debug!("PoX watchdog: not in initial burn block download, so not waiting for an inventory state-machine pass"); } - if burnchain_tip.block_snapshot.block_height - + (burnchain.pox_constants.reward_cycle_length as u64) - >= burnchain_height - { - // unconditionally download if we're within the last reward cycle (after the poll timeout) - if !waited { - debug!( - "PoX watchdog in last reward cycle -- sync after {} seconds", - self.steady_state_burnchain_sync_interval - ); - self.relayer_comms.set_ibd(ibbd); - - self.relayer_comms - .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - } else { - debug!("PoX watchdog in last reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - } - return Ok(ibbd); - } - - // have we reached steady-state behavior? i.e. have we stopped processing both burnchain - // and Stacks blocks? - let mut steady_state = false; - debug!("PoX watchdog: Wait until chainstate reaches steady-state block-processing..."); - - let ibbd = loop { - if !self.relayer_comms.should_keep_running() { - break false; - } - let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( - burnchain, - burnchain_tip.block_snapshot.block_height, - burnchain_height, - ); - - let expected_first_block_deadline = - self.watch_start_ts + (self.estimated_block_download_time as u64); - let expected_last_block_deadline = self.last_block_processed_ts - + (self.estimated_block_download_time as u64) - + (self.estimated_block_process_time as u64); - - match ( - self.count_attachable_stacks_blocks(), - self.count_processed_stacks_blocks(), - ) { - (Ok(num_available), Ok(num_processed)) => { - self.new_attachable_blocks.push_back(num_available as i64); - self.new_processed_blocks.push_back(num_processed as i64); - - if (self.new_attachable_blocks.len() as u64) > self.max_samples { - self.new_attachable_blocks.pop_front(); - } - if (self.new_processed_blocks.len() as u64) > self.max_samples { - self.new_processed_blocks.pop_front(); - } - - if (self.new_attachable_blocks.len() as u64) < self.max_samples - || (self.new_processed_blocks.len() as u64) < self.max_samples - { - // still getting initial samples - if self.new_processed_blocks.len() % 10 == 0 { - debug!( - "PoX watchdog: Still warming up: {} out of {} samples...", - &self.new_attachable_blocks.len(), - &self.max_samples - ); - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && get_epoch_time_secs() < expected_first_block_deadline - { - // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 
0 - && (self.new_attachable_blocks.len() as u64) < self.max_samples - && self.watch_start_ts - + self.max_samples - + self.steady_state_burnchain_sync_interval - * (burnchain.stable_confirmations as u64) - < get_epoch_time_secs() - { - debug!( - "PoX watchdog: could not calculate {} samples in {} seconds. Assuming suspend/resume, or assuming load is too high.", - self.max_samples, - self.max_samples + self.steady_state_burnchain_sync_interval * (burnchain.stable_confirmations as u64) - ); - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - - self.watch_start_ts = get_epoch_time_secs(); - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - continue; - } - - // take first derivative of samples -- see if the download and processing rate has gone to 0 - let attachable_delta = PoxSyncWatchdog::derivative(&self.new_attachable_blocks); - let processed_delta = PoxSyncWatchdog::derivative(&self.new_processed_blocks); - - let (flat_attachable, attachable_deviants) = - PoxSyncWatchdog::is_mostly_flat(&attachable_delta, 0); - let (flat_processed, processed_deviants) = - PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - - debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time); - - if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { - // we're flat-lining -- this may be the end of this cycle - self.last_block_processed_ts = get_epoch_time_secs(); - } - - if self.last_block_processed_ts > 0 - && get_epoch_time_secs() < expected_last_block_deadline - { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, self.estimated_block_process_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if ibbd { - // doing initial burnchain block download right now. 
- // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor - // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); - - if !flat_attachable || !flat_processed { - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - } else { - let now = get_epoch_time_secs(); - if now < self.steady_state_resync_ts { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } else { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state, but ready burnchain synchronization as of {}", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - } - } - } - (err_attach, err_processed) => { - // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); - panic!(); - } - }; - - if ibbd || !steady_state { - debug!("PoX watchdog: Wait for at least one downloader state-machine pass before resetting..."); - self.relayer_comms.wait_for_download_pass(SYNC_WAIT_SECS)?; - } else { - debug!("PoX watchdog: in steady-state, so not waiting for download pass"); - } - - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - break ibbd; - }; - - let ret = ibbd || !steady_state; - self.relayer_comms.set_ibd(ret); - Ok(ret) + Ok((ibbd, max_sync_height)) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ef6199d331..a479e39a36 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3396,7 +3396,7 @@ fn vote_for_aggregate_key_burn_op() { /// This test boots a follower node using the block downloader #[test] #[ignore] -fn follower_bootup() { +fn follower_bootup_simple() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From 4f6b1906bebe65fd32e279ea7d603c6d66d478a0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 1 Dec 2024 21:53:09 -0500 Subject: [PATCH 02/15] chore: force require anchor blocks --- stackslib/src/chainstate/coordinator/mod.rs | 20 +++++++++++++++++++- testnet/stacks-node/src/config.rs | 7 +++++++ testnet/stacks-node/src/run_loop/nakamoto.rs | 1 + testnet/stacks-node/src/run_loop/neon.rs | 1 + 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 5b7c7e89b6..3c78e02cef 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -198,6 +198,9 @@ pub trait BlockEventDispatcher { } pub struct ChainsCoordinatorConfig { + /// true: assume all anchor blocks are present, and block chain sync until they arrive + /// false: process sortitions in reward cycles without anchor blocks + pub assume_present_anchor_blocks: bool, /// true: use affirmation maps before 2.1 /// false: only use affirmation maps in 2.1 or later pub always_use_affirmation_maps: bool, @@ -209,8 +212,9 @@ pub struct ChainsCoordinatorConfig { impl ChainsCoordinatorConfig { pub fn new() -> ChainsCoordinatorConfig { 
ChainsCoordinatorConfig { - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, } } } @@ -2336,6 +2340,20 @@ impl< panic!("BUG: no epoch defined at height {}", header.block_height) }); + if self.config.assume_present_anchor_blocks { + // anchor blocks are always assumed to be present in the chain history, + // so report its absence if we don't have it. + if let PoxAnchorBlockStatus::SelectedAndUnknown(missing_anchor_block, _) = + &rc_info.anchor_status + { + info!( + "Currently missing PoX anchor block {}, which is assumed to be present", + &missing_anchor_block + ); + return Ok(Some(missing_anchor_block.clone())); + } + } + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 || self.config.always_use_affirmation_maps { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 785ce057e5..2831b9c756 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1638,6 +1638,7 @@ pub struct NodeConfig { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: bool, pub require_affirmed_anchor_blocks: bool, + pub assume_present_anchor_blocks: bool, /// Fault injection for failing to push blocks pub fault_injection_block_push_fail_probability: Option, // fault injection for hiding blocks. @@ -1921,6 +1922,7 @@ impl Default for NodeConfig { use_test_genesis_chainstate: None, always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, chain_liveness_poll_time_secs: 300, @@ -2393,6 +2395,7 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + pub assume_present_anchor_blocks: Option, /// At most, how often should the chain-liveness thread /// wake up the chains-coordinator. Defaults to 300s (5 min). pub chain_liveness_poll_time_secs: Option, @@ -2474,6 +2477,10 @@ impl NodeConfigFile { // miners should always try to mine, even if they don't have the anchored // blocks in the canonical affirmation map. Followers, however, can stall. require_affirmed_anchor_blocks: self.require_affirmed_anchor_blocks.unwrap_or(!miner), + // as of epoch 3.0, all prepare phases have anchor blocks. + // at the start of epoch 3.0, the chain stalls without anchor blocks. + // only set this to false if you're doing some very extreme testing. + assume_present_anchor_blocks: true, // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. 
fault_injection_hide_blocks: false, diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 16f5a12b2d..335fb325d8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -319,6 +319,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 475265869a..94db4ef162 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -625,6 +625,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node From 18552645a550fcbe54b4d793ee3fd94a188a37eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 10:26:29 -0800 Subject: [PATCH 03/15] Add SignerEvent::NewNakamotoBlock and do not update a block to GloballyAccepted until node processes the new block successfully Signed-off-by: Jacinta Ferrant --- libsigner/src/events.rs | 76 +++++++++++++++++++++++++--------- stacks-signer/src/signerdb.rs | 12 ++---- stacks-signer/src/v0/signer.rs | 28 +++++++++++++ 3 files changed, 88 insertions(+), 28 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 1de0e34f09..26125a84d0 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,6 +114,13 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, + /// A new processed Nakamoto block was received from the node with the given block hash + NewNakamotoBlock { + /// The block header hash for the newly processed stacks block + block_hash: Sha512Trunc256Sum, + /// The block height for the newly processed stacks block + block_height: u64, + }, } /// Trait to implement a stop-signaler for the event receiver thread. @@ -311,16 +318,15 @@ impl EventReceiver for SignerEventReceiver { } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); return Err(EventError::Terminated); + } else if request.url() == "/new_block" { + process_new_block(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. 
-            if &url != "/new_block" {
-                debug!(
-                    "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this",
-                    event_receiver.local_addr,
-                    url
-                );
-            }
+            debug!(
+                "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this",
+                event_receiver.local_addr,
+                url
+            );
             ack_dispatcher(request);
             Err(EventError::UnrecognizedEvent(url))
         }
     }
@@ -475,9 +481,7 @@ fn process_proposal_response(
     if let Err(e) = request.as_reader().read_to_string(&mut body) {
         error!("Failed to read body: {:?}", &e);
-        if let Err(e) = request.respond(HttpResponse::empty(200u16)) {
-            error!("Failed to respond to request: {:?}", &e);
-        }
+        ack_dispatcher(request);
         return Err(EventError::MalformedRequest(format!(
             "Failed to read body: {:?}",
             &e
         )));
     }
@@ -487,10 +491,7 @@ fn process_proposal_response(
     let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes())
         .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?;
-    if let Err(e) = request.respond(HttpResponse::empty(200u16)) {
-        error!("Failed to respond to request: {:?}", &e);
-    }
-
+    ack_dispatcher(request);
     Ok(SignerEvent::BlockValidationResponse(event))
 }
@@ -503,9 +504,7 @@ fn process_new_burn_block_event(
     if let Err(e) = request.as_reader().read_to_string(&mut body) {
         error!("Failed to read body: {:?}", &e);
-        if let Err(e) = request.respond(HttpResponse::empty(200u16)) {
-            error!("Failed to respond to request: {:?}", &e);
-        }
+        ack_dispatcher(request);
         return Err(EventError::MalformedRequest(format!(
             "Failed to read body: {:?}",
             &e
         )));
     }
@@ -534,9 +533,46 @@ fn process_new_burn_block_event(
         received_time: SystemTime::now(),
         burn_header_hash,
     };
-    if let Err(e) = request.respond(HttpResponse::empty(200u16)) {
-        error!("Failed to respond to request: {:?}", &e);
+    ack_dispatcher(request);
+    Ok(event)
+}
+
+/// Process a new Stacks block event from the node
+fn process_new_block(
+    mut request: HttpRequest,
+) -> Result, EventError> {
+    debug!("Got new_block event");
+    let mut body = String::new();
+    if let Err(e) = request.as_reader().read_to_string(&mut body) {
+        error!("Failed to read body: {:?}", &e);
+
+        ack_dispatcher(request);
+        return Err(EventError::MalformedRequest(format!(
+            "Failed to read body: {:?}",
+            &e
+        )));
+    }
+    #[derive(Debug, Deserialize)]
+    struct TempBlockEvent {
+        block_hash: String,
+        block_height: u64,
+    }
+
+    let temp: TempBlockEvent = serde_json::from_slice(body.as_bytes())
+        .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?;
+    let block_hash: Sha512Trunc256Sum = temp
+        .block_hash
+        .get(2..)
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + let event = SignerEvent::NewNakamotoBlock { + block_hash, + block_height: temp.block_height, + }; + ack_dispatcher(request); Ok(event) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9fcaa1fa1b..2360e56a03 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -811,13 +811,8 @@ impl SignerDb { block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; - let args = params![ - u64_to_sql(ts)?, - BlockState::GloballyAccepted.to_string(), - u64_to_sql(reward_cycle)?, - block_sighash - ]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; + let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -872,6 +867,7 @@ where } #[cfg(test)] +/// Create a test signer db pub fn test_signer_db(db_path: &str) -> SignerDb { use std::fs; @@ -1220,7 +1216,7 @@ mod tests { .expect("Unable to get block from db") .expect("Unable to get block from db") .state, - BlockState::GloballyAccepted + BlockState::Unprocessed ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b537cfae8a..825e3ead08 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -126,6 +126,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::NewNakamotoBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -246,6 +247,33 @@ impl SignerTrait for Signer { }); *sortition_state = None; } + SignerEvent::NewNakamotoBlock { + block_hash, + block_height, + } => { + debug!( + "{self}: Received a new block event."; + "block_hash" => %block_hash, + "block_height" => block_height + ); + if let Ok(Some(mut block_info)) = self + .signer_db + .block_lookup(self.reward_cycle, block_hash) + .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) + { + if block_info.state == BlockState::GloballyAccepted { + // We have already globally accepted this block. Do nothing. 
+ return; + } + if let Err(e) = block_info.mark_globally_accepted() { + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + return; + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state to globally accepted: {e:?}"); + } + } + } } } From 57848b3c6ac316fd3e6babfee2fbb0ef048ac8ff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 14:17:45 -0800 Subject: [PATCH 04/15] Fix block state transitions and update some comments Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 34 ++++------- stacks-signer/src/v0/signer.rs | 66 ++++++++++++++------- testnet/stacks-node/src/event_dispatcher.rs | 21 +++++++ 3 files changed, 78 insertions(+), 43 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 2360e56a03..df1be98d6a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -230,17 +230,9 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyAccepted - ) - } - BlockState::LocallyRejected => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyRejected - ) + BlockState::LocallyAccepted | BlockState::LocallyRejected => { + !matches!(prev_state, BlockState::GloballyRejected) + && !matches!(prev_state, BlockState::GloballyAccepted) } BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), @@ -1245,7 +1237,14 @@ mod tests { assert_eq!(block.state, BlockState::LocallyAccepted); assert!(!block.check_state(BlockState::Unprocessed)); assert!(block.check_state(BlockState::LocallyAccepted)); - assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); assert!(block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); @@ -1257,15 +1256,8 @@ mod tests { assert!(block.check_state(BlockState::GloballyAccepted)); assert!(!block.check_state(BlockState::GloballyRejected)); - // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted - block.state = BlockState::LocallyRejected; - assert!(!block.check_state(BlockState::Unprocessed)); - assert!(!block.check_state(BlockState::LocallyAccepted)); - assert!(block.check_state(BlockState::LocallyRejected)); - assert!(block.check_state(BlockState::GloballyAccepted)); - assert!(block.check_state(BlockState::GloballyRejected)); - - block.move_to(BlockState::GloballyRejected).unwrap(); + // Must manually override as will not be able to move from GloballyAccepted to GloballyRejected + block.state = BlockState::GloballyRejected; assert!(!block.check_state(BlockState::Unprocessed)); assert!(!block.check_state(BlockState::LocallyAccepted)); assert!(!block.check_state(BlockState::LocallyRejected)); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 825e3ead08..33778105a3 100644 --- a/stacks-signer/src/v0/signer.rs +++ 
b/stacks-signer/src/v0/signer.rs @@ -64,6 +64,11 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::syn /// Skip broadcasting the block to the network pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Skip any block responses from other signers +pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = + std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -476,10 +481,7 @@ impl Signer { self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { - // We know proposal is invalid. Send rejection message, do not do further validation - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; + // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb @@ -535,6 +537,10 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { + #[cfg(any(test, feature = "testing"))] + if self.test_ignore_block_responses(block_response) { + return; + } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); @@ -870,7 +876,7 @@ impl Signer { // Not enough rejection signatures to make a decision return; } - debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}"); if let Err(e) = block_info.mark_globally_rejected() { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } @@ -999,7 +1005,7 @@ impl Signer { return; }; // move block to LOCALLY accepted state. - // We only mark this GLOBALLY accepted if we manage to broadcast it... + // It is only considered globally accepted IFF we receive a new block event confirming it OR see the chain tip of the node advance to it. if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold warn!("{self}: Failed to mark block as locally accepted: {e:?}"); @@ -1012,22 +1018,8 @@ impl Signer { panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] - { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. 
- warn!("Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } + self.test_pause_block_broadcast(&block_info); + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); if self .submitted_block_proposal @@ -1137,6 +1129,36 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { + if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { + warn!( + "{self}: Ignoring block response due to testing directive"; + "block_response" => %block_response + ); + return true; + } + false + } + + #[cfg(any(test, feature = "testing"))] + fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 8144cd8ec5..6dc2842b8c 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -70,6 +70,10 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(test)] +pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = + std::sync::Mutex::new(None); + #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. 
If `None`, then @@ -1299,6 +1303,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1695,6 +1704,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; From bf70c9f7e9d0b025f823df20a52bb88aa74428db Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 16:04:42 -0800 Subject: [PATCH 05/15] Remove unused change Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 33778105a3..f53daf7745 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -64,11 +64,6 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::syn /// Skip broadcasting the block to the network pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); -#[cfg(any(test, feature = "testing"))] -/// Skip any block responses from other signers -pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = - std::sync::Mutex::new(None); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -1129,18 +1124,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { - if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { - warn!( - "{self}: Ignoring block response due to testing directive"; - "block_response" => %block_response - ); - return true; - } - false - } - #[cfg(any(test, feature = "testing"))] fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { From d4860f8e3dc232ddd433e5148b3403270903f27d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 16:37:51 -0800 Subject: [PATCH 06/15] Rename NewNakamotoBlock to NewBlock Signed-off-by: Jacinta Ferrant --- libsigner/src/events.rs | 6 +++--- stacks-signer/src/signerdb.rs | 8 ++++---- stacks-signer/src/v0/signer.rs | 9 +++------ testnet/stacks-node/src/event_dispatcher.rs | 21 --------------------- 4 files changed, 10 insertions(+), 34 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 26125a84d0..90ef022636 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,8 +114,8 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, - /// A new processed Nakamoto block was received from the node with the given block hash - NewNakamotoBlock { + /// A new processed Stacks block was received from the node with the given block hash + NewBlock { /// The block header hash for the newly processed stacks block block_hash: Sha512Trunc256Sum, /// The block height for the newly processed stacks block @@ -568,7 +568,7 @@ fn process_new_block( Sha512Trunc256Sum::from_hex(hex) .map_err(|e| 
EventError::Deserialize(format!("Invalid hex string: {e}"))) })?; - let event = SignerEvent::NewNakamotoBlock { + let event = SignerEvent::NewBlock { block_hash, block_height: temp.block_height, }; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 4e24c20029..732a3c3451 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -230,10 +230,10 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted | BlockState::LocallyRejected => { - !matches!(prev_state, BlockState::GloballyRejected) - && !matches!(prev_state, BlockState::GloballyAccepted) - } + BlockState::LocallyAccepted | BlockState::LocallyRejected => !matches!( + prev_state, + BlockState::GloballyRejected | BlockState::GloballyAccepted + ), BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index f53daf7745..b53738223e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -126,7 +126,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) - | Some(SignerEvent::NewNakamotoBlock { .. }) + | Some(SignerEvent::NewBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -247,7 +247,7 @@ impl SignerTrait for Signer { }); *sortition_state = None; } - SignerEvent::NewNakamotoBlock { + SignerEvent::NewBlock { block_hash, block_height, } => { @@ -400,6 +400,7 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); + #[allow(unused_mut)] let mut block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it @@ -532,10 +533,6 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { - #[cfg(any(test, feature = "testing"))] - if self.test_ignore_block_responses(block_response) { - return; - } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 6dc2842b8c..8144cd8ec5 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -70,10 +70,6 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; -#[cfg(test)] -pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = - std::sync::Mutex::new(None); - #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. 
If `None`, then @@ -1303,11 +1299,6 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); - #[cfg(any(test, feature = "testing"))] - if test_skip_block_announcement(&block) { - return; - } - for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1704,18 +1695,6 @@ impl EventDispatcher { } } -#[cfg(any(test, feature = "testing"))] -fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { - if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { - warn!( - "Skipping new block announcement due to testing directive"; - "block_hash" => %block.block_hash - ); - return true; - } - false -} - #[cfg(test)] mod test { use std::net::TcpListener; From b473984ceab7fb93e2d011dbcb70792de286aa38 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 17:28:24 -0800 Subject: [PATCH 07/15] Add global_acceptance_depends_on_block_announcement Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/v0/signer.rs | 21 ++ testnet/stacks-node/src/event_dispatcher.rs | 21 ++ testnet/stacks-node/src/tests/signer/v0.rs | 262 +++++++++++++++++++- 4 files changed, 299 insertions(+), 6 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 04e74f94e8..060e109a17 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -127,6 +127,7 @@ jobs: - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout - tests::signer::v0::tenure_extend_after_bad_commit + - tests::signer::v0::global_acceptance_depends_on_block_announcement - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b53738223e..37a31b841d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -64,6 +64,11 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::syn /// Skip broadcasting the block to the network pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Skip any block responses from other signers +pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = + std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -533,6 +538,10 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { + #[cfg(any(test, feature = "testing"))] + if self.test_ignore_block_responses(block_response) { + return; + } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); @@ -1121,6 +1130,18 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { + if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { + warn!( + "{self}: Ignoring block response due to testing directive"; + "block_response" => %block_response + ); + return true; + } + false + } + #[cfg(any(test, feature = "testing"))] fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { diff --git a/testnet/stacks-node/src/event_dispatcher.rs 
b/testnet/stacks-node/src/event_dispatcher.rs index 8144cd8ec5..86ad9cae74 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -70,6 +70,10 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(any(test, feature = "testing"))] +pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = + std::sync::Mutex::new(None); + #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. If `None`, then @@ -1299,6 +1303,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1695,6 +1704,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2486043ccc..396d73cbd2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -56,8 +56,8 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::v0::signer::{ - TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, - TEST_SKIP_BLOCK_BROADCAST, + TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_IGNORE_BLOCK_RESPONSES, TEST_PAUSE_BLOCK_BROADCAST, + TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -65,7 +65,7 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; @@ -375,7 +375,7 @@ impl SignerTest { } } - /// Propose an invalid block to the signers + /// Propose a block to the signers fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = @@ -385,6 +385,7 @@ impl SignerTest { .btc_regtest_controller .get_headers_height(); let reward_cycle = 
self.get_current_reward_cycle(); + let signer_signature_hash = block.header.signer_signature_hash(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ -401,7 +402,7 @@ impl SignerTest { let mut version = 0; let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; let start = Instant::now(); - debug!("Proposing invalid block to signers"); + debug!("Proposing block to signers: {signer_signature_hash}"); while !accepted { let mut chunk = StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); @@ -8557,3 +8558,252 @@ fn tenure_extend_after_2_bad_commits() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that signers that reject a block locally, but that was accepted globally will accept +/// only accept a block built upon it when they receive the new block event confirming their prior +/// rejected block. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). <30% of signers are configured to auto reject +/// any block proposals, announcement of new blocks are skipped, and signatures ignored by signers. +/// The subsequent block N+1 is proposed, triggering one of the <30% signers submit the block to the node +/// for validation. The node will fail due to a bad block header hash mismatch (passes height checks) +/// +/// Test Assertion: +/// - All signers accepted block N. +/// - Less than 30% of the signers rejected block N+1. +/// - The 30% of signers that rejected block N+1, will submit the block for validation +/// as it passes preliminary checks (even though its a sister block, it is a sister block to a locally rejected block) +fn global_acceptance_depends_on_block_announcement() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 4; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + test_observer::clear(); + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + 
.expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is accepted by the node, but not announced. + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + let non_rejecting_signers = all_signers[num_signers * 3 / 10..].to_vec(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap().replace(true); + TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+1"); + + let mut proposed_block = None; + let start_time = Instant::now(); + while proposed_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + proposed_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + + signer_test + .wait_for_block_acceptance( + short_timeout, + &proposed_block.header.signer_signature_hash(), + &non_rejecting_signers, + ) + .expect("Timed out waiting for block acceptance of N+1 by non rejecting signers"); + + signer_test + .wait_for_block_rejections(short_timeout, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1' from rejecting signers"); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + test_observer::clear(); + + let mut sister_block = proposed_block; + + let transfer_tx_bytes = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt * 2, + ); + let 
tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx_bytes[..]).unwrap(); + let txs = vec![tx]; + let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + sister_block.txs = txs; + sister_block.header.tx_merkle_root = tx_merkle_root; + sister_block + .header + .sign_miner(&signer_test.running_nodes.conf.miner.mining_key.unwrap()) + .unwrap(); + signer_test.propose_block(sister_block.clone(), Duration::from_secs(30)); + + wait_for(30, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections: HashSet<_> = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + // Proves that one of the rejecting signers actually submitted the block for validation as it passed its preliminary checks about chain length + assert_eq!( + rejection.reason_code, + RejectCode::ValidationFailed(ValidateRejectCode::BadBlockHash) + ); + Some(rejected_pubkey) + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == all_signers.len()) + }) + .expect("Timed out waiting for block rejections for N+1'"); + // Assert the block was NOT mined and the tip has not changed. + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_after, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + ); +} From b8d41bea97b0af79afbc9cdb7f487cef3cd96285 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 6 Dec 2024 11:14:52 -0500 Subject: [PATCH 08/15] Fix test to confirm reorg accross boundaries is possible if block not announced by the node Signed-off-by: Jacinta Ferrant --- stacks-common/src/util/mod.rs | 24 ++ stacks-signer/src/v0/signer.rs | 72 ++--- testnet/stacks-node/src/event_dispatcher.rs | 12 +- .../src/nakamoto_node/sign_coordinator.rs | 19 +- testnet/stacks-node/src/run_loop/neon.rs | 28 +- testnet/stacks-node/src/tests/signer/mod.rs | 5 +- testnet/stacks-node/src/tests/signer/v0.rs | 259 ++++++++---------- 7 files changed, 193 insertions(+), 226 deletions(-) diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index a9dfc47806..416a365a2f 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -35,6 +35,30 @@ use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; +#[cfg(any(test, feature = "testing"))] +#[derive(Clone)] +pub struct TestFlag(pub std::sync::Arc>>); + +#[cfg(any(test, feature = "testing"))] +impl Default for TestFlag { + fn default() -> Self { + Self(std::sync::Arc::new(std::sync::Mutex::new(None))) + } +} + +#[cfg(any(test, feature = "testing"))] +impl TestFlag { + /// Set the test flag to the given value + pub fn set(&self, value: T) { + *self.0.lock().unwrap() = Some(value); + } + + /// Get the test flag value. Defaults otherwise. 
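    // Editor's note: illustrative sketch only, not part of this patch. Per the later
    // documentation in this series, `TestFlag<T>` requires `T: Default + Clone`, so
    // `get()` returns a clone of the stored value, or `T::default()` when nothing has
    // been set yet. A hypothetical usage:
    //
    //     let flag: TestFlag<Vec<String>> = TestFlag::default();
    //     assert!(flag.get().is_empty());          // unset -> Vec::default()
    //     flag.set(vec!["0x00".to_string()]);
    //     assert_eq!(flag.get().len(), 1);         // returns the value set above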
+ pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} + pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); let since_the_epoch = start diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 37a31b841d..55a8e42044 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -26,6 +26,8 @@ use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, SignerMessage, @@ -35,6 +37,8 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; +#[cfg(any(test, feature = "testing"))] +use stacks_common::util::TestFlag; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -45,29 +49,28 @@ use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; #[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); +lazy_static! { + /// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list + pub static ref TEST_REJECT_ALL_BLOCK_PROPOSAL: TestFlag> = TestFlag::default(); +} #[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +lazy_static! { + /// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list + pub static ref TEST_IGNORE_ALL_BLOCK_PROPOSALS: TestFlag> = TestFlag::default(); +} #[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +lazy_static! { + /// Pause the block broadcast + pub static ref TEST_PAUSE_BLOCK_BROADCAST: TestFlag = TestFlag::default(); +} #[cfg(any(test, feature = "testing"))] -/// Skip any block responses from other signers -pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = - std::sync::Mutex::new(None); +lazy_static! 
{ + /// Skip broadcasting the block to the network + pub static ref TEST_SKIP_BLOCK_BROADCAST: TestFlag = TestFlag::default(); +} /// The stacks signer registered for the reward cycle #[derive(Debug)] @@ -174,9 +177,8 @@ impl SignerTrait for Signer { match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - if let Some(public_keys) = - &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); if public_keys.contains( &stacks_common::types::chainstate::StacksPublicKey::from_private( &self.private_key, @@ -405,8 +407,10 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); - #[allow(unused_mut)] + #[cfg(any(test, feature = "testing"))] let mut block_info = BlockInfo::from(block_proposal.clone()); + #[cfg(not(any(test, feature = "testing")))] + let block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -538,10 +542,6 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { - #[cfg(any(test, feature = "testing"))] - if self.test_ignore_block_responses(block_response) { - return; - } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); @@ -1071,7 +1071,7 @@ impl Signer { #[cfg(any(test, feature = "testing"))] fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + if TEST_SKIP_BLOCK_BROADCAST.get() { let block_hash = block.header.signer_signature_hash(); warn!( "{self}: Skipping block broadcast due to testing directive"; @@ -1099,9 +1099,7 @@ impl Signer { block_info: &mut BlockInfo, block_response: Option, ) -> Option { - let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { - return block_response; - }; + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); if public_keys.contains( &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), ) { @@ -1126,31 +1124,19 @@ impl Signer { self.mainnet, )) } else { - None - } - } - - #[cfg(any(test, feature = "testing"))] - fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { - if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { - warn!( - "{self}: Ignoring block response due to testing directive"; - "block_response" => %block_response - ); - return true; + block_response } - false } #[cfg(any(test, feature = "testing"))] fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { // Do an extra check just so we don't log EVERY time. 
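            // Editor's note (descriptive only, not part of this patch): this branch logs a
            // single warning and then sleeps in 10 ms steps for as long as the flag stays
            // set; a test releases the pause by calling TEST_PAUSE_BLOCK_BROADCAST.set(false).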
warn!("{self}: Block broadcast is stalled due to testing directive."; "block_id" => %block_info.block.block_id(), "height" => block_info.block.header.chain_length, ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + while TEST_PAUSE_BLOCK_BROADCAST.get() { std::thread::sleep(std::time::Duration::from_millis(10)); } info!("{self}: Block validation is no longer stalled due to testing directive."; diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 86ad9cae74..b1e26e7770 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,8 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; @@ -59,6 +61,8 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +#[cfg(any(test, feature = "testing"))] +use stacks::util::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -71,8 +75,10 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; #[cfg(any(test, feature = "testing"))] -pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = - std::sync::Mutex::new(None); +lazy_static! { + /// Do not announce a signed/mined block to the network when set to true. + pub static ref TEST_SKIP_BLOCK_ANNOUNCEMENT: TestFlag = TestFlag::default(); +} #[derive(Debug, Clone)] struct EventObserver { @@ -1706,7 +1712,7 @@ impl EventDispatcher { #[cfg(any(test, feature = "testing"))] fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { - if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { + if TEST_SKIP_BLOCK_ANNOUNCEMENT.get() { warn!( "Skipping new block announcement due to testing directive"; "block_hash" => %block.block_hash diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 2b1efcbfc5..7f3ad9aecf 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -20,6 +20,8 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use hashbrown::{HashMap, HashSet}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use libsigner::v0::messages::{ BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, }; @@ -37,6 +39,8 @@ use stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::MessageSignature; +#[cfg(any(test, feature = "testing"))] +use stacks::util::TestFlag; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -47,10 +51,12 @@ use crate::event_dispatcher::StackerDBChannel; use crate::neon::Counters; use crate::Config; -/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
-/// Used to test that the signers will broadcast a block if it gets enough signatures -#[cfg(test)] -pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +lazy_static! { + /// Fault injection flag to prevent the miner from seeing enough signer signatures. + /// Used to test that the signers will broadcast a block if it gets enough signatures + pub static ref TEST_IGNORE_SIGNERS: TestFlag = TestFlag::default(); +} /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? @@ -256,10 +262,7 @@ impl SignCoordinator { /// Do we ignore signer signatures? #[cfg(test)] fn fault_injection_ignore_signatures() -> bool { - if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { - return true; - } - false + TEST_IGNORE_SIGNERS.get() } #[cfg(not(test))] diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..53df2edcd3 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,8 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +#[cfg(test)] +use stacks::util::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -82,30 +84,6 @@ impl std::ops::Deref for RunLoopCounter { } } -#[cfg(test)] -#[derive(Clone)] -pub struct TestFlag(pub Arc>>); - -#[cfg(test)] -impl Default for TestFlag { - fn default() -> Self { - Self(Arc::new(std::sync::Mutex::new(None))) - } -} - -#[cfg(test)] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: bool) { - *self.0.lock().unwrap() = Some(value); - } - - /// Get the test flag value. Defaults to false if the flag is not set. 
- pub fn get(&self) -> bool { - self.0.lock().unwrap().unwrap_or(false) - } -} - #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -123,7 +101,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] - pub naka_skip_commit_op: TestFlag, + pub naka_skip_commit_op: TestFlag, } impl Counters { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 946a566c13..74a07cb235 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -49,6 +49,7 @@ use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; +use stacks::util::TestFlag; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; @@ -60,7 +61,7 @@ use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, TestFlag}; +use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -88,7 +89,7 @@ pub struct RunningNodes { pub nakamoto_blocks_mined: Arc, pub nakamoto_blocks_rejected: Arc, pub nakamoto_blocks_signer_pushed: Arc, - pub nakamoto_test_skip_commit_op: TestFlag, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 07a3d6b02a..e5a4e88123 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -56,8 +56,8 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::v0::signer::{ - TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_IGNORE_BLOCK_RESPONSES, TEST_PAUSE_BLOCK_BROADCAST, - TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, + TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, + TEST_SKIP_BLOCK_BROADCAST, }; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -943,7 +943,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); }, |config| { 
config.miner.tenure_cost_limit_per_block_percentage = None; @@ -2387,10 +2387,7 @@ fn retry_on_rejection() { .map(StacksPublicKey::from_private) .take(num_signers) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); let proposals_before = signer_test .running_nodes @@ -2437,10 +2434,7 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(vec![]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); loop { let blocks_mined = signer_test .running_nodes @@ -2500,7 +2494,7 @@ fn signers_broadcast_signed_blocks() { }) .expect("Timed out waiting for first nakamoto block to be mined"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined @@ -2785,7 +2779,7 @@ fn empty_sortition_before_approval() { let stacks_height_before = info.stacks_tip_height; info!("Forcing miner to ignore signatures for next block"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Pausing block commits to trigger an empty sortition."); signer_test @@ -2838,7 +2832,7 @@ fn empty_sortition_before_approval() { .replace(false); info!("Stop ignoring signers and wait for the tip to advance"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); wait_for(60, || { let info = get_chain_info(&signer_test.running_nodes.conf); @@ -4726,10 +4720,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .cloned() .take(num_signers / 2 + num_signers % 2) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // Make a new stacks transaction to create a different block signature, but make sure to propose it // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted @@ -4762,10 +4753,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); let transfer_tx = make_stacks_transfer( &sender_sk, @@ -4921,10 +4909,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .cloned() .take(num_signers * 3 / 10) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -4989,10 +4974,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer( @@ -5148,10 +5130,7 @@ fn 
reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5229,10 +5208,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new()); wait_for(short_timeout, || { let info_after = signer_test .stacks_client @@ -5375,10 +5351,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5595,9 +5568,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // broadcasted to the miner so it can end its tenure before block confirmation obtained // Clear the stackerdb chunks info!("Forcing miner to ignore block responses for block N+1"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_PAUSE_BLOCK_BROADCAST.set(true); test_observer::clear(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -5722,9 +5695,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Timed out waiting for block proposal of N+1' block proposal"); info!("Allowing miner to accept block responses again. 
"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + TEST_PAUSE_BLOCK_BROADCAST.set(false); // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); @@ -6069,10 +6042,7 @@ fn continue_after_fast_block_no_sortition() { // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(ignoring_signers.clone()); info!("------------------------- Submit Miner 2 Block Commit -------------------------"); let rejections_before = signer_test @@ -6186,10 +6156,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); info!("------------------------- Wait for Miner B's Block N -------------------------"); // wait for the new block to be processed @@ -6894,10 +6861,7 @@ fn block_commit_delay() { .iter() .map(StacksPublicKey::from_private) .collect::>(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(all_signers); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; @@ -6932,10 +6896,7 @@ fn block_commit_delay() { .load(Ordering::SeqCst); info!("------------------------- Resume Signing -------------------------"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // Wait for a block to be mined wait_for(60, || { @@ -8564,25 +8525,27 @@ fn tenure_extend_after_2_bad_commits() { #[test] #[ignore] -/// Test that signers that reject a block locally, but that was accepted globally will accept -/// only accept a block built upon it when they receive the new block event confirming their prior -/// rejected block. +/// Test that signers do not mark a block as globally accepted if it was not announced by the node. +/// This will simulate this case via testing flags, and ensure that a block can be reorged across tenure +/// boundaries now (as it is only marked locally accepted and no longer gets marked globally accepted +/// by simply seeing the threshold number of signatures). /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. /// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. /// /// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). <30% of signers are configured to auto reject -/// any block proposals, announcement of new blocks are skipped, and signatures ignored by signers. -/// The subsequent block N+1 is proposed, triggering one of the <30% signers submit the block to the node -/// for validation. The node will fail due to a bad block header hash mismatch (passes height checks) +/// 1. The node mines 1 stacks block N (all signers sign it). +/// 2. 
<30% of signers are configured to auto reject any block proposals, broadcast of new blocks are skipped, and miners are configured to ignore signers responses. +/// 3. The node mines 1 stacks block N+1 (all signers sign it, but one which rejects it) but eventually all mark the block as locally accepted. +/// 4. A new tenure starts and the miner attempts to mine a new sister block N+1' (as it does not see the threshold number of signatures or any block push from signers). +/// 5. The signers accept this sister block as a valid reorg and the node advances to block N+1'. /// /// Test Assertion: /// - All signers accepted block N. /// - Less than 30% of the signers rejected block N+1. -/// - The 30% of signers that rejected block N+1, will submit the block for validation -/// as it passes preliminary checks (even though its a sister block, it is a sister block to a locally rejected block) +/// - All signers accept block N+1' as a valid reorg. +/// - The node advances to block N+1' fn global_acceptance_depends_on_block_announcement() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -8602,9 +8565,18 @@ fn global_acceptance_depends_on_block_announcement() { let nmb_txs = 4; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, ); let all_signers: Vec<_> = signer_test @@ -8674,14 +8646,10 @@ fn global_acceptance_depends_on_block_announcement() { .cloned() .take(num_signers * 3 / 10) .collect(); - let non_rejecting_signers = all_signers[num_signers * 3 / 10..].to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); - TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap().replace(true); - TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap().replace(true); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(true); + TEST_IGNORE_SIGNERS.set(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -8725,88 +8693,89 @@ fn global_acceptance_depends_on_block_announcement() { } let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + // Even though one of the signers rejected the block, it will eventually accept the block as it sees the 70% threshold of signatures signer_test .wait_for_block_acceptance( short_timeout, &proposed_block.header.signer_signature_hash(), - &non_rejecting_signers, + &all_signers, ) - .expect("Timed out waiting for block acceptance of N+1 by non rejecting signers"); - - signer_test - .wait_for_block_rejections(short_timeout, &rejecting_signers) - .expect("Timed out waiting for block rejection of N+1' from rejecting signers"); + .expect("Timed out waiting for block acceptance of N+1 by all signers"); info!( "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" ); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); - test_observer::clear(); - - let mut sister_block = proposed_block; - - let transfer_tx_bytes = make_stacks_transfer( - &sender_sk, 
- sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt * 2, - ); - let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx_bytes[..]).unwrap(); - let txs = vec![tx]; - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); - - let merkle_tree = MerkleTree::::new(&txid_vecs); - let tx_merkle_root = merkle_tree.root(); - sister_block.txs = txs; - sister_block.header.tx_merkle_root = tx_merkle_root; - sister_block - .header - .sign_miner(&signer_test.running_nodes.conf.miner.mining_key.unwrap()) - .unwrap(); - signer_test.propose_block(sister_block.clone(), Duration::from_secs(30)); - wait_for(30, || { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections: HashSet<_> = stackerdb_events + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_IGNORE_SIGNERS.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + test_observer::clear(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }, + ) + .unwrap(); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut sister_block = None; + let start_time = Instant::now(); + while sister_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + sister_block = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { + .find_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - // Proves that one of the rejecting signers actually submitted the block for validation as it passed its preliminary checks about chain length - assert_eq!( - rejection.reason_code, - RejectCode::ValidationFailed(ValidateRejectCode::BadBlockHash) - ); - Some(rejected_pubkey) + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_after.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } } _ => None, } - }) - .collect::>(); - Ok(block_rejections.len() == all_signers.len()) - }) - .expect("Timed out waiting for block rejections for N+1'"); - // Assert the block was NOT mined and the tip has not changed. - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); + }); + } + let sister_block = sister_block.expect("Failed to find proposed sister block within 30s"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &sister_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1' by all signers"); + + // Assert the block was mined and the tip has changed. 
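    // Editor's note (descriptive only, not part of this patch): the assertions below
    // check that the node's new tip is the sister block N+1' (matching height, block
    // hash, and consensus hash), that N+1' has the same chain length as the originally
    // proposed N+1, and that the two blocks differ, i.e. the reorg across the tenure
    // boundary actually happened.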
assert_eq!( - info_after, - signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") + info_after.stacks_tip_height, + sister_block.header.chain_length + ); + assert_eq!(info_after.stacks_tip, sister_block.header.block_hash()); + assert_eq!( + info_after.stacks_tip_consensus_hash, + sister_block.header.consensus_hash + ); + assert_eq!( + sister_block.header.chain_length, + proposed_block.header.chain_length ); + assert_ne!(sister_block, proposed_block); } From 6649c8c7f99e23ff991902bb19f31736765d1efd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 7 Dec 2024 17:37:13 -0500 Subject: [PATCH 09/15] Increase the block proposal timeout in block_commit_delay test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e5a4e88123..a168dce7df 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6815,13 +6815,12 @@ fn block_commit_delay() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], |config| { // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.block_proposal_timeout = Duration::from_secs(600); }, |config| { // Set the block commit delay to 10 minutes to ensure no block commit is sent From 045fcce3aeb09561cdc111a181fc477829e40ee3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Dec 2024 13:32:08 -0500 Subject: [PATCH 10/15] chore: use test-specific chains coordinator settings to allow tests to handle (now legacy) cases where the PoX anchor block does not arrive in order (or at all) --- stackslib/src/chainstate/coordinator/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 3c78e02cef..c78c68a4a3 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -217,6 +217,14 @@ impl ChainsCoordinatorConfig { assume_present_anchor_blocks: true, } } + + pub fn test_new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + require_affirmed_anchor_blocks: false, + assume_present_anchor_blocks: false, + } + } } pub struct ChainsCoordinator< @@ -704,7 +712,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader notifier: (), atlas_config, atlas_db: Some(atlas_db), - config: ChainsCoordinatorConfig::new(), + config: ChainsCoordinatorConfig::test_new(), burnchain_indexer, refresh_stacker_db: Arc::new(AtomicBool::new(false)), in_nakamoto_epoch: false, From f96a33f95294ea96cd4969c19e80c934f6628004 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 12 Dec 2024 10:37:03 -0500 Subject: [PATCH 11/15] Implement process_event function for SignerEvent Signed-off-by: Jacinta Ferrant --- libsigner/src/events.rs | 192 ++++++++++++++-------------------------- 1 file changed, 68 insertions(+), 124 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 90ef022636..52a77e2bb8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -305,21 +305,18 @@ impl 
EventReceiver for SignerEventReceiver { &request.method(), ))); } + debug!("Processing {} event", request.url()); if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_event::(request) } else if request.url() == "/proposal_response" { - process_proposal_response(request) + process_event::(request) } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) + process_event::(request) } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); - return Err(EventError::Terminated); + Err(EventError::Terminated) } else if request.url() == "/new_block" { - process_new_block(request) + process_event::(request) } else { let url = request.url().to_string(); debug!( @@ -391,12 +388,13 @@ fn ack_dispatcher(request: HttpRequest) { // TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] -/// Process a stackerdb event from the node -fn process_stackerdb_event( - local_addr: Option, - mut request: HttpRequest, -) -> Result, EventError> { +fn process_event(mut request: HttpRequest) -> Result, EventError> +where + T: SignerEventTrait, + E: serde::de::DeserializeOwned + TryInto, Error = EventError>, +{ let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); ack_dispatcher(request); @@ -405,27 +403,12 @@ fn process_stackerdb_event( &e ))); } - - debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); - let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) + // Regardless of whether we successfully deserialize, we should ack the dispatcher so they don't keep resending it + ack_dispatcher(request); + let json_event: E = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e); - } - Ok(x) => x, - }; - - ack_dispatcher(request); + let signer_event: SignerEvent = json_event.try_into()?; Ok(signer_event) } @@ -472,108 +455,69 @@ impl TryFrom for SignerEvent { } } -/// Process a proposal response from the node -fn process_proposal_response( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got proposal_response event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - ack_dispatcher(request); - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); + fn try_from(block_validate_response: BlockValidateResponse) -> Result { + Ok(SignerEvent::BlockValidationResponse( + block_validate_response, + )) } +} - let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - - ack_dispatcher(request); - Ok(SignerEvent::BlockValidationResponse(event)) +#[derive(Debug, Deserialize)] +struct BurnBlockEvent { + burn_block_hash: String, + burn_block_height: 
u64, + reward_recipients: Vec, + reward_slot_holders: Vec, + burn_amount: u64, } -/// Process a new burn block event from the node -fn process_new_burn_block_event( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - ack_dispatcher(request); - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, + fn try_from(burn_block_event: BurnBlockEvent) -> Result { + let burn_header_hash = burn_block_event + .burn_block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + + Ok(SignerEvent::NewBurnBlock { + burn_height: burn_block_event.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }) } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let burn_header_hash = temp - .burn_block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBurnBlock { - burn_height: temp.burn_block_height, - received_time: SystemTime::now(), - burn_header_hash, - }; - ack_dispatcher(request); - Ok(event) } -/// Process a new burn block event from the node -fn process_new_block( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got new_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +#[derive(Debug, Deserialize)] +struct BlockEvent { + block_hash: String, + block_height: u64, +} - ack_dispatcher(request); - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBlockEvent { - block_hash: String, - block_height: u64, - } +impl TryFrom for SignerEvent { + type Error = EventError; - let temp: TempBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let block_hash: Sha512Trunc256Sum = temp - .block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - Sha512Trunc256Sum::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBlock { - block_hash, - block_height: temp.block_height, - }; - ack_dispatcher(request); - Ok(event) + fn try_from(block_event: BlockEvent) -> Result { + let block_hash: Sha512Trunc256Sum = block_event + .block_hash + .get(2..) 
+            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
+            .and_then(|hex| {
+                Sha512Trunc256Sum::from_hex(hex)
+                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
+            })?;
+        Ok(SignerEvent::NewBlock {
+            block_hash,
+            block_height: block_event.block_height,
+        })
+    }
 }
 
 pub fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> {

From ef7cb903e825f327cd521dcdac64fff5e43e1f0f Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 12 Dec 2024 11:17:02 -0500
Subject: [PATCH 12/15] CRC: move TestFlag related functions to separate test modules

Signed-off-by: Jacinta Ferrant
---
 stacks-common/src/util/mod.rs               |  23 +--
 stacks-common/src/util/tests.rs             |  99 ++++++++++++
 stacks-signer/src/v0/mod.rs                 |   4 +
 stacks-signer/src/v0/signer.rs              | 127 +---------------
 stacks-signer/src/v0/tests.rs               | 141 ++++++++++++++++++
 testnet/stacks-node/src/event_dispatcher.rs |   2 +-
 .../src/nakamoto_node/stackerdb_listener.rs |   2 +-
 testnet/stacks-node/src/run_loop/neon.rs    |   2 +-
 testnet/stacks-node/src/tests/signer/mod.rs |   2 +-
 testnet/stacks-node/src/tests/signer/v0.rs  |   2 +-
 10 files changed, 257 insertions(+), 147 deletions(-)
 create mode 100644 stacks-common/src/util/tests.rs
 create mode 100644 stacks-signer/src/v0/tests.rs

diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs
index 416a365a2f..5f733eddad 100644
--- a/stacks-common/src/util/mod.rs
+++ b/stacks-common/src/util/mod.rs
@@ -36,28 +36,7 @@ use std::time::{SystemTime, UNIX_EPOCH};
 use std::{error, fmt, thread, time};
 
 #[cfg(any(test, feature = "testing"))]
-#[derive(Clone)]
-pub struct TestFlag<T>(pub std::sync::Arc<std::sync::Mutex<Option<T>>>);
-
-#[cfg(any(test, feature = "testing"))]
-impl<T: Default + Clone> Default for TestFlag<T> {
-    fn default() -> Self {
-        Self(std::sync::Arc::new(std::sync::Mutex::new(None)))
-    }
-}
-
-#[cfg(any(test, feature = "testing"))]
-impl<T: Default + Clone> TestFlag<T> {
-    /// Set the test flag to the given value
-    pub fn set(&self, value: T) {
-        *self.0.lock().unwrap() = Some(value);
-    }
-
-    /// Get the test flag value. Defaults otherwise.
-    pub fn get(&self) -> T {
-        self.0.lock().unwrap().clone().unwrap_or_default().clone()
-    }
-}
+pub mod tests;
 
 pub fn get_epoch_time_secs() -> u64 {
     let start = SystemTime::now();
diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs
new file mode 100644
index 0000000000..b87e913718
--- /dev/null
+++ b/stacks-common/src/util/tests.rs
@@ -0,0 +1,99 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::{Arc, Mutex};
+/// `TestFlag` is a thread-safe utility designed for managing shared state in testing scenarios. It wraps
+/// a value of type `T` inside an `Arc<Mutex<Option<T>>>`, allowing you to set and retrieve a value
+/// across different parts of your codebase while ensuring thread safety.
+///
+/// This structure is particularly useful when:
+/// - You need a global or static variable in tests.
+/// - You want to control the execution of custom test code paths by setting and checking a shared value.
+///
+/// # Type Parameter
+/// - `T`: The type of the value managed by the `TestFlag`. It must implement the `Default` and `Clone` traits.
+///
+/// # Examples
+///
+/// ```rust
+/// use stacks_common::util::tests::TestFlag;
+/// use std::sync::{Arc, Mutex};
+///
+/// // Create a TestFlag instance
+/// let test_flag = TestFlag::default();
+///
+/// // Set a value in the test flag
+/// test_flag.set("test_value".to_string());
+///
+/// // Retrieve the value
+/// assert_eq!(test_flag.get(), "test_value".to_string());
+///
+/// // Reset the value to default
+/// test_flag.set("".to_string());
+/// assert_eq!(test_flag.get(), "".to_string());
+/// ```
+#[derive(Clone)]
+pub struct TestFlag<T>(pub Arc<Mutex<Option<T>>>);
+
+impl<T: Default + Clone> Default for TestFlag<T> {
+    fn default() -> Self {
+        Self(Arc::new(Mutex::new(None)))
+    }
+}
+
+impl<T: Default + Clone> TestFlag<T> {
+    /// Sets the value of the test flag.
+    ///
+    /// This method updates the value stored inside the `TestFlag`, replacing any existing value.
+    ///
+    /// # Arguments
+    /// - `value`: The new value to set for the `TestFlag`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// let test_flag = TestFlag::default();
+    /// test_flag.set(42);
+    /// assert_eq!(test_flag.get(), 42);
+    /// ```
+    pub fn set(&self, value: T) {
+        *self.0.lock().unwrap() = Some(value);
+    }
+
+    /// Retrieves the current value of the test flag.
+    ///
+    /// If no value has been set, this method returns the default value for the type `T`.
+    ///
+    /// # Returns
+    /// - The current value of the test flag, or the default value of `T` if none has been set.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// let test_flag = TestFlag::default();
+    ///
+    /// // Get the default value
+    /// assert_eq!(test_flag.get(), 0); // For T = i32, default is 0
+    ///
+    /// // Set a value
+    /// test_flag.set(123);
+    ///
+    /// // Get the updated value
+    /// assert_eq!(test_flag.get(), 123);
+    /// ```
+    pub fn get(&self) -> T {
+        self.0.lock().unwrap().clone().unwrap_or_default().clone()
+    }
+}
diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs
index 520fb36ca1..34b363311e 100644
--- a/stacks-signer/src/v0/mod.rs
+++ b/stacks-signer/src/v0/mod.rs
@@ -17,6 +17,10 @@
 /// The signer module for processing events
 pub mod signer;
 
+#[cfg(any(test, feature = "testing"))]
+/// Test specific functions for the signer module
+pub mod tests;
+
 use libsigner::v0::messages::SignerMessage;
 
 use crate::v0::signer::Signer;
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 2fe948d0f9..5a5128cce4 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -15,8 +15,6 @@
 use std::collections::HashMap;
 use std::fmt::Debug;
 use std::sync::mpsc::Sender;
-#[cfg(any(test, feature = "testing"))]
-use std::sync::LazyLock;
 use std::time::{Duration, Instant};
 
 use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader};
@@ -35,12 +33,8 @@ use libsigner::v0::messages::{
 use libsigner::{BlockProposal, SignerEvent};
 use slog::{slog_debug, slog_error, slog_info, slog_warn};
 use stacks_common::types::chainstate::StacksAddress;
-#[cfg(any(test, feature = "testing"))]
-use stacks_common::types::chainstate::StacksPublicKey;
 use stacks_common::util::get_epoch_time_secs;
 use stacks_common::util::secp256k1::MessageSignature;
-#[cfg(any(test, feature = "testing"))]
-use stacks_common::util::TestFlag;
 use stacks_common::{debug, error, info, warn};
 use
crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -50,27 +44,13 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = - LazyLock::new(TestFlag::default); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock>> = - LazyLock::new(TestFlag::default); - -#[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); - -#[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { + /// The private key of the signer + #[cfg(any(test, feature = "testing"))] + pub private_key: StacksPrivateKey, + #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client @@ -175,20 +155,8 @@ impl SignerTrait for Signer { match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - { - let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Ignoring block proposal due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - continue; - } + if self.test_ignore_all_block_proposals(block_proposal) { + continue; } self.handle_block_proposal( stacks_client, @@ -1121,87 +1089,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if TEST_SKIP_BLOCK_BROADCAST.get() { - let block_hash = block.header.signer_signature_hash(); - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self - .signer_db - .set_block_broadcasted(&block_hash, get_epoch_time_secs()) - { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return true; - } - false - } - - #[cfg(any(test, feature = "testing"))] - fn test_reject_block_proposal( - &mut self, - block_proposal: &BlockProposal, - block_info: &mut BlockInfo, - block_response: Option, - ) -> Option { - let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; - // We must 
insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject - // as invalid since we rejected in a prior round if this crops up again) - // in case this is the first time we saw this block. Safe to do since this is testing case only. - self.signer_db - .insert_block(block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } else { - block_response - } - } - - #[cfg(any(test, feature = "testing"))] - fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { - if TEST_PAUSE_BLOCK_BROADCAST.get() { - // Do an extra check just so we don't log EVERY time. - warn!("{self}: Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while TEST_PAUSE_BLOCK_BROADCAST.get() { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("{self}: Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } - /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -1216,7 +1103,7 @@ impl Signer { } /// Helper for logging insert_block error - fn handle_insert_block_error(&self, e: DBError) { + pub fn handle_insert_block_error(&self, e: DBError) { error!("{self}: Failed to insert block into signer-db: {e:?}"); panic!("{self} Failed to write block to signerdb: {e}"); } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs new file mode 100644 index 0000000000..0b9cdcc569 --- /dev/null +++ b/stacks-signer/src/v0/tests.rs @@ -0,0 +1,141 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+
+use std::sync::LazyLock;
+
+use blockstack_lib::chainstate::nakamoto::NakamotoBlock;
+use libsigner::v0::messages::{BlockResponse, RejectCode};
+use libsigner::BlockProposal;
+use slog::{slog_info, slog_warn};
+use stacks_common::types::chainstate::StacksPublicKey;
+use stacks_common::util::get_epoch_time_secs;
+use stacks_common::util::tests::TestFlag;
+use stacks_common::{info, warn};
+
+use super::signer::Signer;
+use crate::signerdb::BlockInfo;
+
+/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list
+pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock<TestFlag<Vec<StacksPublicKey>>> =
+    LazyLock::new(TestFlag::default);
+
+/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list
+pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock<TestFlag<Vec<StacksPublicKey>>> =
+    LazyLock::new(TestFlag::default);
+
+/// A global variable that can be used to pause broadcasting the block to the network
+pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default);
+
+/// A global variable that can be used to skip broadcasting the block to the network
+pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default);
+
+impl Signer {
+    /// Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set
+    pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool {
+        if TEST_SKIP_BLOCK_BROADCAST.get() {
+            let block_hash = block.header.signer_signature_hash();
+            warn!(
+                "{self}: Skipping block broadcast due to testing directive";
+                "block_id" => %block.block_id(),
+                "height" => block.header.chain_length,
+                "consensus_hash" => %block.header.consensus_hash
+            );
+
+            if let Err(e) = self
+                .signer_db
+                .set_block_broadcasted(&block_hash, get_epoch_time_secs())
+            {
+                warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}");
+            }
+            return true;
+        }
+        false
+    }
+
+    /// Reject block proposals if the TEST_REJECT_ALL_BLOCK_PROPOSAL flag is set for the signer's public key
+    pub fn test_reject_block_proposal(
+        &mut self,
+        block_proposal: &BlockProposal,
+        block_info: &mut BlockInfo,
+        block_response: Option<BlockResponse>,
+    ) -> Option<BlockResponse> {
+        let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get();
+        if public_keys.contains(
+            &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key),
+        ) {
+            warn!("{self}: Rejecting block proposal automatically due to testing directive";
+                "block_id" => %block_proposal.block.block_id(),
+                "height" => block_proposal.block.header.chain_length,
+                "consensus_hash" => %block_proposal.block.header.consensus_hash
+            );
+            if let Err(e) = block_info.mark_locally_rejected() {
+                warn!("{self}: Failed to mark block as locally rejected: {e:?}",);
+            };
+            // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject
+            // as invalid since we rejected in a prior round if this crops up again)
+            // in case this is the first time we saw this block. Safe to do since this is testing case only.
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), + )) + } else { + block_response + } + } + + /// Pause the block broadcast if the TEST_PAUSE_BLOCK_BROADCAST flag is set + pub fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while TEST_PAUSE_BLOCK_BROADCAST.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + + /// Ignore block proposals if the TEST_IGNORE_ALL_BLOCK_PROPOSALS flag is set for the signer's public key + pub fn test_ignore_all_block_proposals(&self, block_proposal: &BlockProposal) -> bool { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + return true; + } + false + } +} diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 65b46011e7..11f52e883e 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -62,7 +62,7 @@ use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; #[cfg(any(test, feature = "testing"))] -use stacks::util::TestFlag; +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 5fa6e1efd8..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -36,7 +36,7 @@ use stacks::util::get_epoch_time_secs; use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::MessageSignature; #[cfg(test)] -use stacks::util::TestFlag; +use stacks_common::util::tests::TestFlag; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c3e2339bfc..9bbad4f20c 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -22,7 +22,7 @@ use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_rea use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; #[cfg(test)] -use stacks::util::TestFlag; 
+use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index c669a7febd..e55fa54378 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -51,11 +51,11 @@ use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; -use stacks::util::TestFlag; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::tests::TestFlag; use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 247585984e..cbad4f931c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -59,7 +59,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{ +use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; From 150bfce99eccb8a86cb2c867edf0893bd914f147 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Dec 2024 15:34:12 -0500 Subject: [PATCH 13/15] Do not wait for an exact number of acceptance and rejections Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index cbad4f931c..64860abce9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6102,7 +6102,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(signatures.len() == num_signers) + Ok(signatures.len() >= num_signers * 7 / 10) }) .expect("Test timed out while waiting for signers signatures for first block proposal"); let block = block.unwrap(); @@ -6192,7 +6192,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(block_rejections.len() == num_signers) + Ok(block_rejections.len() >= num_signers * 7 / 10) }) .expect("FAIL: Timed out waiting for block proposal rejections"); From e486b9e19e9bc2c73835d3f7cb6c9c1a3f5e6153 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Dec 2024 14:44:31 -0500 Subject: [PATCH 14/15] chore: add changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f85ed6526b..1bf801607d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Nodes will assume that all PoX anchor blocks exist by default, and stall 
initial block download indefinitely to await their arrival (#5502) + ## [3.1.0.0.1] ### Added From a50825c581acf221ffbd69a569084e95dd373c96 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Dec 2024 16:01:09 -0500 Subject: [PATCH 15/15] Missing changes from failed merge Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/event_dispatcher.rs | 2 -- testnet/stacks-node/src/tests/signer/mod.rs | 1 - testnet/stacks-node/src/tests/signer/v0.rs | 1 - 3 files changed, 4 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ebabba89a2..2f71838adb 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -73,8 +73,6 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; -use super::config::{EventKeyType, EventObserverConfig}; - #[cfg(any(test, feature = "testing"))] lazy_static! { /// Do not announce a signed/mined block to the network when set to true. diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 61bbed9097..432b990667 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -63,7 +63,6 @@ use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon::{Counters, RunLoopCounter}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 242ab3b446..5641776b5b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -69,7 +69,6 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL,