From 9e53963989347427b87504e13d324e95f3d75e51 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 14 Nov 2024 19:36:23 -0800 Subject: [PATCH 01/57] feat: include lockup events in nakamoto blocks without coinbase --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 77 +++++++- stackslib/src/cli.rs | 3 +- stx-genesis/chainstate-test.txt | 1 + stx-genesis/chainstate-test.txt.sha256 | 2 +- .../src/tests/nakamoto_integrations.rs | 167 ++++++++++++++++++ 6 files changed, 241 insertions(+), 10 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 52f46fbc49..956fa3382b 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -136,6 +136,7 @@ jobs: - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles + - tests::nakamoto_integrations::nakamoto_lockup_events - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d67de8e987..1d39248656 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -73,7 +73,8 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, TransactionPayload, + TenureChangeError, TenureChangePayload, TokenTransferMemo, TransactionPayload, + TransactionVersion, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -108,8 +109,7 @@ use crate::core::{ }; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; -use crate::util_lib::boot; -use crate::util_lib::boot::boot_code_id; +use crate::util_lib::boot::{self, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, @@ -2048,7 +2048,8 @@ impl NakamotoChainState { return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (receipt, clarity_commit, reward_set_data, phantom_unlock_events) = + ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), @@ -2102,6 +2103,19 @@ impl NakamotoChainState { &receipt.header.anchored_header.block_hash() ); + let mut tx_receipts = receipt.tx_receipts.clone(); + if let Some(unlock_receipt) = + // For the event dispatcher, attach any STXMintEvents that + // could not be included in the block (e.g. because the + // block didn't have a Coinbase transaction). 
+ Self::generate_phantom_unlock_tx( + phantom_unlock_events, + &stacks_chain_state.config(), + ) + { + tx_receipts.push(unlock_receipt); + } + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2112,7 +2126,7 @@ impl NakamotoChainState { dispatcher.announce_block( &block_event, &receipt.header.clone(), - &receipt.tx_receipts, + &tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, &receipt.matured_rewards, @@ -3915,6 +3929,7 @@ impl NakamotoChainState { StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4215,6 +4230,8 @@ impl NakamotoChainState { Ok(lockup_events) => lockup_events, }; + // Track events that we couldn't attach to a coinbase receipt + let mut phantom_lockup_events = lockup_events.clone(); // if any, append lockups events to the coinbase receipt if lockup_events.len() > 0 { // Receipts are appended in order, so the first receipt should be @@ -4222,11 +4239,14 @@ impl NakamotoChainState { if let Some(receipt) = tx_receipts.get_mut(0) { if receipt.is_coinbase_tx() { receipt.events.append(&mut lockup_events); + phantom_lockup_events.clear(); } - } else { - warn!("Unable to attach lockups events, block's first transaction is not a coinbase transaction") } } + if phantom_lockup_events.len() > 0 { + info!("Unable to attach lockup events, block's first transaction is not a coinbase transaction. Will attach as a phantom tx."); + } + // if any, append auto unlock events to the coinbase receipt if auto_unlock_events.len() > 0 { // Receipts are appended in order, so the first receipt should be @@ -4394,7 +4414,12 @@ impl NakamotoChainState { coinbase_height, }; - Ok((epoch_receipt, clarity_commit, reward_set_data)) + Ok(( + epoch_receipt, + clarity_commit, + reward_set_data, + phantom_lockup_events, + )) } /// Create a StackerDB config for the .miners contract. @@ -4555,6 +4580,42 @@ impl NakamotoChainState { clarity.save_analysis(&contract_id, &analysis).unwrap(); }) } + + /// Generate a "phantom" transaction to include STXMintEvents for + /// lockups that could not be attached to a Coinbase transaction + /// (because the block doesn't have a Coinbase transaction). 
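As a rough illustration of the flow described above, with simplified stand-in types (`MintEvent`, `Receipt`, and `attach_lockup_events` are illustrative names, not the stackslib types), attaching lockup events to either the coinbase receipt or a phantom receipt amounts to:

#[derive(Debug)]
struct MintEvent {
    recipient: String,
    amount: u128,
}

#[derive(Debug)]
struct Receipt {
    is_coinbase: bool,
    events: Vec<MintEvent>,
}

fn attach_lockup_events(receipts: &mut Vec<Receipt>, mut lockup_events: Vec<MintEvent>) {
    if lockup_events.is_empty() {
        return;
    }
    // Receipts are appended in order, so a coinbase receipt is always first.
    if let Some(first) = receipts.first_mut() {
        if first.is_coinbase {
            first.events.append(&mut lockup_events);
            return;
        }
    }
    // No coinbase (possible in Nakamoto blocks): carry the events on a phantom
    // receipt so the event dispatcher still announces them with the block.
    receipts.push(Receipt {
        is_coinbase: false,
        events: lockup_events,
    });
}

The actual helper, `generate_phantom_unlock_tx` below, builds that phantom receipt from a zero-value boot-address token transfer.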
+ fn generate_phantom_unlock_tx( + events: Vec, + config: &ChainstateConfig, + ) -> Option { + if events.is_empty() { + return None; + } + info!("Generating phantom unlock tx"); + let version = if config.mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + let boot_code_address = boot_code_addr(config.mainnet); + let boot_code_auth = boot_code_tx_auth(boot_code_address.clone()); + let unlock_tx = StacksTransaction::new( + version, + boot_code_auth, + TransactionPayload::TokenTransfer( + PrincipalData::Standard(boot_code_address.into()), + 0, + TokenTransferMemo([0u8; 34]), + ), + ); + let unlock_receipt = StacksTransactionReceipt::from_stx_transfer( + unlock_tx, + events, + Value::okay_true(), + ExecutionCost::zero(), + ); + Some(unlock_receipt) + } } impl StacksMessageCodec for NakamotoBlock { diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 587daee787..b463641a3d 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -783,7 +783,8 @@ fn replay_block_nakamoto( return Err(e); }; - let (receipt, _clarity_commit, _reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (receipt, _clarity_commit, _reward_set_data, _phantom_events) = + ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), diff --git a/stx-genesis/chainstate-test.txt b/stx-genesis/chainstate-test.txt index 614cf3d9f4..6eedf241d1 100644 --- a/stx-genesis/chainstate-test.txt +++ b/stx-genesis/chainstate-test.txt @@ -69,4 +69,5 @@ SM1ZH700J7CEDSEHM5AJ4C4MKKWNESTS35DD3SZM5,13888889,2267 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,45467 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,6587 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,2267 +SP2CTPPV8BHBVSQR727A3MK00ZD85RNY903KAG9F3,12345678,35 -----END STX VESTING----- \ No newline at end of file diff --git a/stx-genesis/chainstate-test.txt.sha256 b/stx-genesis/chainstate-test.txt.sha256 index 56782ae494..69ac95c254 100644 --- a/stx-genesis/chainstate-test.txt.sha256 +++ b/stx-genesis/chainstate-test.txt.sha256 @@ -1 +1 @@ -014402b47d53b0716402c172fa746adf308b03a826ebea91944a5eb6a304a823 \ No newline at end of file +088c3caea982a8f6f74dda48ec5f06f51f7605def9760a971b1acd763ee6b7cf \ No newline at end of file diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6ae34fce42..8af87a56c9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -9301,6 +9301,173 @@ fn v3_signer_api_endpoint() { run_loop_thread.join().unwrap(); } +/// Verify that lockup events are attached to a phantom tx receipt +/// if the block does not have a coinbase tx +#[test] +#[ignore] +fn nakamoto_lockup_events() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::new(); + let signer_addr = tests::to_addr(&signer_sk); + let _signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 
100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * 100, + ); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // only subscribe to the block proposal events + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + blind_signer(&conf, &signers, proposals_submitted); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + info!("------------------------- Setup finished, run test -------------------------"); + + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let get_stacks_height = || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + tip.stacks_block_height + }; + let initial_block_height = get_stacks_height(); + + // This matches the data in `stx-genesis/chainstate-test.txt` + // Recipient: ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D + let unlock_recipient = "ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D"; + let unlock_height = 35_u64; + let interims_to_mine = unlock_height - initial_block_height; + + info!( + "----- Mining to unlock height -----"; + "unlock_height" => unlock_height, + "initial_height" => initial_block_height, + "interims_to_mine" => interims_to_mine, + ); + + // submit a tx so that the miner will mine an extra stacks block + let mut sender_nonce = 0; + + for _ in 0..interims_to_mine { + let height_before = get_stacks_height(); + info!("----- Mining interim block -----"; + "height" => %height_before, + "nonce" => %sender_nonce, + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + + wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap(); + } + + let blocks = test_observer::get_blocks(); + let block = blocks.last().unwrap(); + assert_eq!( + block.get("block_height").unwrap().as_u64().unwrap(), + 
unlock_height + ); + + let events = block.get("events").unwrap().as_array().unwrap(); + let mut found_event = false; + for event in events { + let mint_event = event.get("stx_mint_event"); + if mint_event.is_some() { + found_event = true; + let mint_event = mint_event.unwrap(); + let recipient = mint_event.get("recipient").unwrap().as_str().unwrap(); + assert_eq!(recipient, unlock_recipient); + let amount = mint_event.get("amount").unwrap().as_str().unwrap(); + assert_eq!(amount, "12345678"); + } + } + assert!(found_event); + + info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. From 1f4f6dff87c1bfe9dcd986c1cc44630c1a4b4684 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 15 Nov 2024 05:10:44 -0800 Subject: [PATCH 02/57] feat: assert that txid is deterministic --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8af87a56c9..4078fabc93 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -9453,6 +9453,11 @@ fn nakamoto_lockup_events() { assert_eq!(recipient, unlock_recipient); let amount = mint_event.get("amount").unwrap().as_str().unwrap(); assert_eq!(amount, "12345678"); + let txid = event.get("txid").unwrap().as_str().unwrap(); + assert_eq!( + txid, + "0xcba511741b230bd85cb5b3b10d26e0b92695d4a83f95c260cad82a40cd764235" + ); } } assert!(found_event); From fb306ea9a5cc591bcded9753970b2a2400537e47 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 1 Dec 2024 07:34:59 -0500 Subject: [PATCH 03/57] chore: fix #5502 by reducing the PoX sync watchdog --- testnet/stacks-node/src/run_loop/neon.rs | 48 +- testnet/stacks-node/src/syncctl.rs | 552 ++---------------- .../src/tests/nakamoto_integrations.rs | 2 +- 3 files changed, 48 insertions(+), 554 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..475265869a 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1156,19 +1156,8 @@ impl RunLoop { let mut sortition_db_height = rc_aligned_height; let mut burnchain_height = sortition_db_height; - let mut num_sortitions_in_last_cycle = 1; // prepare to fetch the first reward cycle! 
- let mut target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(burnchain_height) - .expect("BUG: block height is not in a reward cycle") - + 1, - ), - burnchain.get_headers_height() - 1, - ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1196,17 +1185,13 @@ impl RunLoop { let remote_chain_height = burnchain.get_headers_height() - 1; - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // wait until it's okay to process the next reward cycle's sortitions - let ibd = match self.get_pox_watchdog().pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions. + let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, remote_chain_height, - num_sortitions_in_last_cycle, ) { - Ok(ibd) => ibd, + Ok(x) => x, Err(e) => { debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; @@ -1220,9 +1205,6 @@ impl RunLoop { 0.0 }; - // will recalculate this in the following loop - num_sortitions_in_last_cycle = 0; - // Download each burnchain block and process their sortitions. This, in turn, will // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and // process them. This loop runs for one reward cycle, so that the next pass of the @@ -1270,8 +1252,6 @@ impl RunLoop { "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); - let mut sort_count = 0; - debug!("Runloop: block mining until we process all sortitions"); signal_mining_blocked(globals.get_miner_status()); @@ -1289,9 +1269,6 @@ impl RunLoop { "Failed to find block in fork processed by burnchain indexer", ) }; - if block.sortition { - sort_count += 1; - } let sortition_id = &block.sortition_id; @@ -1338,9 +1315,8 @@ impl RunLoop { debug!("Runloop: enable miner after processing sortitions"); signal_mining_ready(globals.get_miner_status()); - num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})" ); sortition_db_height = next_sortition_height; @@ -1359,22 +1335,6 @@ impl RunLoop { } } - // advance one reward cycle at a time. - // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
- // Otherwise, this is burnchain_tip + reward_cycle_len - let next_target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: burnchain height before system start") - + 1, - ), - remote_chain_height, - ); - - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); - target_burnchain_block_height = next_target_burnchain_block_height; - if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 395d829c8f..488234d21d 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -1,20 +1,28 @@ -use std::collections::VecDeque; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use stacks::burnchains::{Burnchain, Error as burnchain_error}; -use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::burnchains::BurnchainTip; use crate::Config; -// amount of time to wait for an inv or download sync to complete. -// These _really should_ complete before the PoX sync watchdog permits processing the next reward -// cycle, so this number is intentionally high (like, there's something really wrong with your -// network if your node is actualy waiting a day in-between reward cycles). -const SYNC_WAIT_SECS: u64 = 24 * 3600; - #[derive(Clone)] pub struct PoxSyncWatchdogComms { /// how many passes in the p2p state machine have taken place since startup? 
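The net effect of this patch is that `pox_sync_wait` (rewritten further down) no longer samples chainstate block download/processing rates; it only reports IBD status plus a burnchain sync target derived from reward-cycle arithmetic. A minimal sketch of that target rule, assuming fixed-length reward cycles starting at height 0 (the real code uses the `Burnchain` reward-cycle helpers and the configured first block height):

const REWARD_CYCLE_LEN: u64 = 2100;

fn reward_cycle(height: u64) -> u64 {
    height / REWARD_CYCLE_LEN
}

fn reward_cycle_start(cycle: u64) -> u64 {
    cycle * REWARD_CYCLE_LEN
}

/// Highest burnchain height the run loop should sync to in this pass.
fn max_sync_height(sortition_tip: u64, burnchain_tip: u64) -> u64 {
    if reward_cycle(sortition_tip) < reward_cycle(burnchain_tip) {
        // Sortitions lag the burnchain by a full reward cycle or more:
        // advance one reward cycle at a time.
        reward_cycle_start(reward_cycle(sortition_tip) + 1).min(burnchain_tip)
    } else {
        // Caught up: sync to the chain tip.
        sortition_tip.max(burnchain_tip)
    }
}

The neon run loop then uses the returned height as `target_burnchain_block_height` instead of computing it itself, which is why that computation is removed above.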
@@ -56,22 +64,6 @@ impl PoxSyncWatchdogComms { self.last_ibd.load(Ordering::SeqCst) } - /// Wait for at least one inv-sync state-machine passes - pub fn wait_for_inv_sync_pass(&self, timeout: u64) -> Result { - let current = self.get_inv_sync_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_inv_sync_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one inv-sync pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { let deadline = secs + get_epoch_time_secs(); while get_epoch_time_secs() < deadline { @@ -83,21 +75,6 @@ impl PoxSyncWatchdogComms { Ok(()) } - pub fn wait_for_download_pass(&self, timeout: u64) -> Result { - let current = self.get_download_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_download_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one download pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - pub fn should_keep_running(&self) -> bool { self.should_keep_running.load(Ordering::SeqCst) } @@ -124,82 +101,25 @@ impl PoxSyncWatchdogComms { /// unless it's reasonably sure that it has processed all Stacks blocks for this reward cycle. /// This struct monitors the Stacks chainstate to make this determination. pub struct PoxSyncWatchdog { - /// number of attachable but unprocessed staging blocks over time - new_attachable_blocks: VecDeque, - /// number of newly-processed staging blocks over time - new_processed_blocks: VecDeque, - /// last time we asked for attachable blocks - last_attachable_query: u64, - /// last time we asked for processed blocks - last_processed_query: u64, - /// number of samples to take - max_samples: u64, - /// maximum number of blocks to count per query (affects performance!) - max_staging: u64, - /// when did we first start watching? - watch_start_ts: u64, - /// when did we first see a flatline in block-processing rate? - last_block_processed_ts: u64, - /// estimated time for a block to get downloaded. Used to infer how long to wait for the first - /// blocks to show up when waiting for this reward cycle. - estimated_block_download_time: f64, - /// estimated time for a block to get processed -- from when it shows up as attachable to when - /// it shows up as processed. Used to infer how long to wait for the last block to get - /// processed before unblocking burnchain sync for the next reward cycle. - estimated_block_process_time: f64, - /// time between burnchain syncs in stead state + /// time between burnchain syncs in steady state steady_state_burnchain_sync_interval: u64, - /// when to re-sync under steady state - steady_state_resync_ts: u64, - /// chainstate handle - chainstate: StacksChainState, /// handle to relayer thread that informs the watchdog when the P2P state-machine does stuff relayer_comms: PoxSyncWatchdogComms, /// should this sync watchdog always download? used in integration tests. 
unconditionally_download: bool, } -const PER_SAMPLE_WAIT_MS: u64 = 1000; - impl PoxSyncWatchdog { pub fn new( config: &Config, watchdog_comms: PoxSyncWatchdogComms, ) -> Result { - let mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let chainstate_path = config.get_chainstate_path_str(); let burnchain_poll_time = config.burnchain.poll_time_secs; - let download_timeout = config.connection_options.timeout; - let max_samples = config.node.pox_sync_sample_secs; let unconditionally_download = config.node.pox_sync_sample_secs == 0; - let marf_opts = config.node.get_marf_opts(); - - let (chainstate, _) = - match StacksChainState::open(mainnet, chain_id, &chainstate_path, Some(marf_opts)) { - Ok(cs) => cs, - Err(e) => { - return Err(format!( - "Failed to open chainstate at '{chainstate_path}': {e:?}" - )); - } - }; Ok(PoxSyncWatchdog { unconditionally_download, - new_attachable_blocks: VecDeque::new(), - new_processed_blocks: VecDeque::new(), - last_attachable_query: 0, - last_processed_query: 0, - max_samples, - max_staging: 10, - watch_start_ts: 0, - last_block_processed_ts: 0, - estimated_block_download_time: download_timeout as f64, - estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, - steady_state_resync_ts: 0, - chainstate, relayer_comms: watchdog_comms, }) } @@ -208,39 +128,9 @@ impl PoxSyncWatchdog { self.relayer_comms.clone() } - /// How many recently-added Stacks blocks are in an attachable state, up to $max_staging? - fn count_attachable_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_attachable_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_attachable_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_attachable_query = get_epoch_time_secs(); - Ok(cnt) - } - - /// How many recently-processed Stacks blocks are there, up to $max_staging? - /// ($max_staging is necessary to limit the runtime of this method, since the underlying SQL - /// uses COUNT(*), which in Sqlite is a _O(n)_ operation for _n_ rows) - fn count_processed_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_processed_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_processed_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_processed_query = get_epoch_time_secs(); - Ok(cnt) - } - /// Are we in the initial burnchain block download? i.e. is the burn tip snapshot far enough away /// from the burnchain height that we should be eagerly downloading snapshots? - pub fn infer_initial_burnchain_block_download( + fn infer_initial_burnchain_block_download( burnchain: &Burnchain, last_processed_height: u64, burnchain_height: u64, @@ -261,182 +151,23 @@ impl PoxSyncWatchdog { ibd } - /// Calculate the first derivative of a list of points - fn derivative(sample_list: &VecDeque) -> Vec { - let mut deltas = vec![]; - let mut prev = 0; - for (i, sample) in sample_list.iter().enumerate() { - if i == 0 { - prev = *sample; - continue; - } - let delta = *sample - prev; - prev = *sample; - deltas.push(delta); - } - deltas - } - - /// Is a derivative approximately flat, with a maximum absolute deviation from 0? 
- /// Return whether or not the sample is mostly flat, and how many points were over the given - /// error bar in either direction. - fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { - let mut total_deviates = 0; - let mut ret = true; - for d in deriv.iter() { - if d.abs() > error { - total_deviates += 1; - ret = false; - } - } - (ret, total_deviates) - } - - /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &[i64]) -> f64 { - // take average with low and high pass - let mut min = i64::MAX; - let mut max = i64::MIN; - for s in samples.iter() { - if *s < 0 { - // nonsensical result (e.g. due to clock drift?) - continue; - } - if *s < min { - min = *s; - } - if *s > max { - max = *s; - } - } - - let mut count = 0; - let mut sum = 0; - for s in samples.iter() { - if *s < 0 { - // nonsensical result - continue; - } - if *s == min { - continue; - } - if *s == max { - continue; - } - count += 1; - sum += *s; - } - - if count == 0 { - // no viable samples - 1.0 - } else { - (sum as f64) / (count as f64) - } - } - - /// estimate how long a block remains in an unprocessed state - fn estimate_block_process_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_wait_times = - StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) - .expect("BUG: failed to query chainstate block-processing times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) - } - - /// estimate how long a block takes to download - fn estimate_block_download_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_download_times = StacksChainState::measure_block_download_time( - chainstate.db(), - start_height, - end_height, - ) - .expect("BUG: failed to query chainstate block-download times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_download_times) - } - - /// Reset internal state. Performed when it's okay to begin syncing the burnchain. - /// Updates estimate for block-processing time and block-downloading time. 
- fn reset(&mut self, burnchain: &Burnchain, tip_height: u64) { - // find the average (with low/high pass filter) time a block spends in the DB without being - // processed, during this reward cycle - self.estimated_block_process_time = - PoxSyncWatchdog::estimate_block_process_time(&self.chainstate, burnchain, tip_height); - - // find the average (with low/high pass filter) time a block spends downloading - self.estimated_block_download_time = - PoxSyncWatchdog::estimate_block_download_time(&self.chainstate, burnchain, tip_height); - - debug!( - "Estimated block download time: {}s. Estimated block processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time - ); - - self.new_attachable_blocks.clear(); - self.new_processed_blocks.clear(); - self.last_block_processed_ts = 0; - self.watch_start_ts = 0; - self.steady_state_resync_ts = 0; - } - - /// Wait until all of the Stacks blocks for the given reward cycle are seemingly downloaded and - /// processed. Do so by watching the _rate_ at which attachable Stacks blocks arrive and get - /// processed. - /// Returns whether or not we're still in the initial block download -- i.e. true if we're - /// still downloading burnchain blocks, or we haven't reached steady-state block-processing. + /// Wait until the next PoX anchor block arrives. + /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and + /// testnet. + /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, burnchain: &Burnchain, burnchain_tip: &BurnchainTip, // this is the highest burnchain snapshot we've sync'ed to burnchain_height: u64, // this is the absolute burnchain block height - num_sortitions_in_last_cycle: u64, - ) -> Result { - if self.watch_start_ts == 0 { - self.watch_start_ts = get_epoch_time_secs(); - } - if self.steady_state_resync_ts == 0 { - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - } + ) -> Result<(bool, u64), burnchain_error> { + let burnchain_rc = burnchain + .block_height_to_reward_cycle(burnchain_height) + .expect("FATAL: burnchain height is before system start"); + + let sortition_rc = burnchain + .block_height_to_reward_cycle(burnchain_tip.block_snapshot.block_height) + .expect("FATAL: sortition height is before system start"); let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( burnchain, @@ -444,220 +175,23 @@ impl PoxSyncWatchdog { burnchain_height, ); - // unconditionally download the first reward cycle - if burnchain_tip.block_snapshot.block_height - < burnchain.first_block_height + (burnchain.pox_constants.reward_cycle_length as u64) - { - debug!("PoX watchdog in first reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); + let max_sync_height = if sortition_rc < burnchain_rc { + burnchain + .reward_cycle_to_block_height(sortition_rc + 1) + .min(burnchain_height) + } else { + burnchain_tip + .block_snapshot + .block_height + .max(burnchain_height) + }; + self.relayer_comms.set_ibd(ibbd); + if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - - return Ok(ibbd); - } - - if self.unconditionally_download { - debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - - let mut waited = false; - if ibbd { - // we are far behind the burnchain tip (i.e. 
not in the last reward cycle), - // so make sure the downloader knows about blocks it doesn't have yet so we can go and - // fetch its blocks before proceeding. - if num_sortitions_in_last_cycle > 0 { - debug!("PoX watchdog: Wait for at least one inventory state-machine pass..."); - self.relayer_comms.wait_for_inv_sync_pass(SYNC_WAIT_SECS)?; - waited = true; - } else { - debug!("PoX watchdog: In initial block download, and no sortitions to consider in this reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - } else { - debug!("PoX watchdog: not in initial burn block download, so not waiting for an inventory state-machine pass"); } - if burnchain_tip.block_snapshot.block_height - + (burnchain.pox_constants.reward_cycle_length as u64) - >= burnchain_height - { - // unconditionally download if we're within the last reward cycle (after the poll timeout) - if !waited { - debug!( - "PoX watchdog in last reward cycle -- sync after {} seconds", - self.steady_state_burnchain_sync_interval - ); - self.relayer_comms.set_ibd(ibbd); - - self.relayer_comms - .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - } else { - debug!("PoX watchdog in last reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - } - return Ok(ibbd); - } - - // have we reached steady-state behavior? i.e. have we stopped processing both burnchain - // and Stacks blocks? - let mut steady_state = false; - debug!("PoX watchdog: Wait until chainstate reaches steady-state block-processing..."); - - let ibbd = loop { - if !self.relayer_comms.should_keep_running() { - break false; - } - let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( - burnchain, - burnchain_tip.block_snapshot.block_height, - burnchain_height, - ); - - let expected_first_block_deadline = - self.watch_start_ts + (self.estimated_block_download_time as u64); - let expected_last_block_deadline = self.last_block_processed_ts - + (self.estimated_block_download_time as u64) - + (self.estimated_block_process_time as u64); - - match ( - self.count_attachable_stacks_blocks(), - self.count_processed_stacks_blocks(), - ) { - (Ok(num_available), Ok(num_processed)) => { - self.new_attachable_blocks.push_back(num_available as i64); - self.new_processed_blocks.push_back(num_processed as i64); - - if (self.new_attachable_blocks.len() as u64) > self.max_samples { - self.new_attachable_blocks.pop_front(); - } - if (self.new_processed_blocks.len() as u64) > self.max_samples { - self.new_processed_blocks.pop_front(); - } - - if (self.new_attachable_blocks.len() as u64) < self.max_samples - || (self.new_processed_blocks.len() as u64) < self.max_samples - { - // still getting initial samples - if self.new_processed_blocks.len() % 10 == 0 { - debug!( - "PoX watchdog: Still warming up: {} out of {} samples...", - &self.new_attachable_blocks.len(), - &self.max_samples - ); - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && get_epoch_time_secs() < expected_first_block_deadline - { - // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && (self.new_attachable_blocks.len() as u64) < self.max_samples - && self.watch_start_ts - + self.max_samples - + self.steady_state_burnchain_sync_interval - 
* (burnchain.stable_confirmations as u64) - < get_epoch_time_secs() - { - debug!( - "PoX watchdog: could not calculate {} samples in {} seconds. Assuming suspend/resume, or assuming load is too high.", - self.max_samples, - self.max_samples + self.steady_state_burnchain_sync_interval * (burnchain.stable_confirmations as u64) - ); - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - - self.watch_start_ts = get_epoch_time_secs(); - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - continue; - } - - // take first derivative of samples -- see if the download and processing rate has gone to 0 - let attachable_delta = PoxSyncWatchdog::derivative(&self.new_attachable_blocks); - let processed_delta = PoxSyncWatchdog::derivative(&self.new_processed_blocks); - - let (flat_attachable, attachable_deviants) = - PoxSyncWatchdog::is_mostly_flat(&attachable_delta, 0); - let (flat_processed, processed_deviants) = - PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - - debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time); - - if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { - // we're flat-lining -- this may be the end of this cycle - self.last_block_processed_ts = get_epoch_time_secs(); - } - - if self.last_block_processed_ts > 0 - && get_epoch_time_secs() < expected_last_block_deadline - { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, self.estimated_block_process_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if ibbd { - // doing initial burnchain block download right now. 
- // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor - // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); - - if !flat_attachable || !flat_processed { - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - } else { - let now = get_epoch_time_secs(); - if now < self.steady_state_resync_ts { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } else { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state, but ready burnchain synchronization as of {}", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - } - } - } - (err_attach, err_processed) => { - // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); - panic!(); - } - }; - - if ibbd || !steady_state { - debug!("PoX watchdog: Wait for at least one downloader state-machine pass before resetting..."); - self.relayer_comms.wait_for_download_pass(SYNC_WAIT_SECS)?; - } else { - debug!("PoX watchdog: in steady-state, so not waiting for download pass"); - } - - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - break ibbd; - }; - - let ret = ibbd || !steady_state; - self.relayer_comms.set_ibd(ret); - Ok(ret) + Ok((ibbd, max_sync_height)) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ef6199d331..a479e39a36 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3396,7 +3396,7 @@ fn vote_for_aggregate_key_burn_op() { /// This test boots a follower node using the block downloader #[test] #[ignore] -fn follower_bootup() { +fn follower_bootup_simple() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From 4f6b1906bebe65fd32e279ea7d603c6d66d478a0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 1 Dec 2024 21:53:09 -0500 Subject: [PATCH 04/57] chore: force require anchor blocks --- stackslib/src/chainstate/coordinator/mod.rs | 20 +++++++++++++++++++- testnet/stacks-node/src/config.rs | 7 +++++++ testnet/stacks-node/src/run_loop/nakamoto.rs | 1 + testnet/stacks-node/src/run_loop/neon.rs | 1 + 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 5b7c7e89b6..3c78e02cef 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -198,6 +198,9 @@ pub trait BlockEventDispatcher { } pub struct ChainsCoordinatorConfig { + /// true: assume all anchor blocks are present, and block chain sync until they arrive + /// false: process sortitions in reward cycles without anchor blocks + pub assume_present_anchor_blocks: bool, /// true: use affirmation maps before 2.1 /// false: only use affirmation maps in 2.1 or later pub always_use_affirmation_maps: bool, @@ -209,8 +212,9 @@ pub struct ChainsCoordinatorConfig { impl ChainsCoordinatorConfig { pub fn new() -> ChainsCoordinatorConfig { 
ChainsCoordinatorConfig { - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, } } } @@ -2336,6 +2340,20 @@ impl< panic!("BUG: no epoch defined at height {}", header.block_height) }); + if self.config.assume_present_anchor_blocks { + // anchor blocks are always assumed to be present in the chain history, + // so report its absence if we don't have it. + if let PoxAnchorBlockStatus::SelectedAndUnknown(missing_anchor_block, _) = + &rc_info.anchor_status + { + info!( + "Currently missing PoX anchor block {}, which is assumed to be present", + &missing_anchor_block + ); + return Ok(Some(missing_anchor_block.clone())); + } + } + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 || self.config.always_use_affirmation_maps { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 785ce057e5..2831b9c756 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1638,6 +1638,7 @@ pub struct NodeConfig { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: bool, pub require_affirmed_anchor_blocks: bool, + pub assume_present_anchor_blocks: bool, /// Fault injection for failing to push blocks pub fault_injection_block_push_fail_probability: Option, // fault injection for hiding blocks. @@ -1921,6 +1922,7 @@ impl Default for NodeConfig { use_test_genesis_chainstate: None, always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, chain_liveness_poll_time_secs: 300, @@ -2393,6 +2395,7 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + pub assume_present_anchor_blocks: Option, /// At most, how often should the chain-liveness thread /// wake up the chains-coordinator. Defaults to 300s (5 min). pub chain_liveness_poll_time_secs: Option, @@ -2474,6 +2477,10 @@ impl NodeConfigFile { // miners should always try to mine, even if they don't have the anchored // blocks in the canonical affirmation map. Followers, however, can stall. require_affirmed_anchor_blocks: self.require_affirmed_anchor_blocks.unwrap_or(!miner), + // as of epoch 3.0, all prepare phases have anchor blocks. + // at the start of epoch 3.0, the chain stalls without anchor blocks. + // only set this to false if you're doing some very extreme testing. + assume_present_anchor_blocks: true, // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. 
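The coordinator-side effect of this flag: when a reward cycle has selected a PoX anchor block that the node does not yet have, sortition processing halts and the missing block is reported, rather than processing the cycle without it. A stand-in sketch of that check (simplified types and names, not the chainstate coordinator's):

enum AnchorBlockStatus {
    SelectedAndKnown([u8; 32]),
    SelectedAndUnknown([u8; 32]),
    NotSelected,
}

/// With `assume_present_anchor_blocks` set, a selected-but-missing anchor
/// block is surfaced to the caller, which stalls until the block arrives.
fn missing_anchor_block(
    assume_present_anchor_blocks: bool,
    status: &AnchorBlockStatus,
) -> Option<[u8; 32]> {
    match status {
        AnchorBlockStatus::SelectedAndUnknown(block_hash) if assume_present_anchor_blocks => {
            Some(*block_hash)
        }
        _ => None,
    }
}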
fault_injection_hide_blocks: false, diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 16f5a12b2d..335fb325d8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -319,6 +319,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 475265869a..94db4ef162 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -625,6 +625,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node From 18552645a550fcbe54b4d793ee3fd94a188a37eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 10:26:29 -0800 Subject: [PATCH 05/57] Add SignerEvent::NewNakamotoBlock and do not update a block to GloballyAccepted until node processes the new block successfully Signed-off-by: Jacinta Ferrant --- libsigner/src/events.rs | 76 +++++++++++++++++++++++++--------- stacks-signer/src/signerdb.rs | 12 ++---- stacks-signer/src/v0/signer.rs | 28 +++++++++++++ 3 files changed, 88 insertions(+), 28 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 1de0e34f09..26125a84d0 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,6 +114,13 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, + /// A new processed Nakamoto block was received from the node with the given block hash + NewNakamotoBlock { + /// The block header hash for the newly processed stacks block + block_hash: Sha512Trunc256Sum, + /// The block height for the newly processed stacks block + block_height: u64, + }, } /// Trait to implement a stop-signaler for the event receiver thread. @@ -311,16 +318,15 @@ impl EventReceiver for SignerEventReceiver { } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); return Err(EventError::Terminated); + } else if request.url() == "/new_block" { + process_new_block(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. 
- if &url != "/new_block" { - debug!( - "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", - event_receiver.local_addr, - url - ); - } + debug!( + "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", + event_receiver.local_addr, + url + ); ack_dispatcher(request); Err(EventError::UnrecognizedEvent(url)) } @@ -475,9 +481,7 @@ fn process_proposal_response( if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } + ack_dispatcher(request); return Err(EventError::MalformedRequest(format!( "Failed to read body: {:?}", &e @@ -487,10 +491,7 @@ fn process_proposal_response( let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - + ack_dispatcher(request); Ok(SignerEvent::BlockValidationResponse(event)) } @@ -503,9 +504,7 @@ fn process_new_burn_block_event( if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } + ack_dispatcher(request); return Err(EventError::MalformedRequest(format!( "Failed to read body: {:?}", &e @@ -534,9 +533,46 @@ fn process_new_burn_block_event( received_time: SystemTime::now(), burn_header_hash, }; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); + ack_dispatcher(request); + Ok(event) +} + +/// Process a new burn block event from the node +fn process_new_block( + mut request: HttpRequest, +) -> Result, EventError> { + debug!("Got new_block event"); + let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) { + error!("Failed to read body: {:?}", &e); + + ack_dispatcher(request); + return Err(EventError::MalformedRequest(format!( + "Failed to read body: {:?}", + &e + ))); + } + #[derive(Debug, Deserialize)] + struct TempBlockEvent { + block_hash: String, + block_height: u64, } + + let temp: TempBlockEvent = serde_json::from_slice(body.as_bytes()) + .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; + let block_hash: Sha512Trunc256Sum = temp + .block_hash + .get(2..) 
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + let event = SignerEvent::NewNakamotoBlock { + block_hash, + block_height: temp.block_height, + }; + ack_dispatcher(request); Ok(event) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9fcaa1fa1b..2360e56a03 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -811,13 +811,8 @@ impl SignerDb { block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; - let args = params![ - u64_to_sql(ts)?, - BlockState::GloballyAccepted.to_string(), - u64_to_sql(reward_cycle)?, - block_sighash - ]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; + let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -872,6 +867,7 @@ where } #[cfg(test)] +/// Create a test signer db pub fn test_signer_db(db_path: &str) -> SignerDb { use std::fs; @@ -1220,7 +1216,7 @@ mod tests { .expect("Unable to get block from db") .expect("Unable to get block from db") .state, - BlockState::GloballyAccepted + BlockState::Unprocessed ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b537cfae8a..825e3ead08 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -126,6 +126,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::NewNakamotoBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -246,6 +247,33 @@ impl SignerTrait for Signer { }); *sortition_state = None; } + SignerEvent::NewNakamotoBlock { + block_hash, + block_height, + } => { + debug!( + "{self}: Received a new block event."; + "block_hash" => %block_hash, + "block_height" => block_height + ); + if let Ok(Some(mut block_info)) = self + .signer_db + .block_lookup(self.reward_cycle, block_hash) + .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) + { + if block_info.state == BlockState::GloballyAccepted { + // We have already globally accepted this block. Do nothing. 
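For reference, the only `/new_block` payload fields the signer relies on are `block_hash` (a "0x"-prefixed hex string) and `block_height`, per `TempBlockEvent` above. A self-contained sketch of the hash handling using only std (the real code deserializes with serde and builds a `Sha512Trunc256Sum`; the helper name here is illustrative):

fn parse_block_hash(hex_0x: &str) -> Result<[u8; 32], String> {
    // The event payload carries the block hash as a "0x"-prefixed hex string.
    let hex = hex_0x
        .strip_prefix("0x")
        .ok_or_else(|| "hash should be 0x-prefixed".to_string())?;
    if hex.len() != 64 || !hex.bytes().all(|b| b.is_ascii_hexdigit()) {
        return Err(format!("expected 64 hex digits, got {hex:?}"));
    }
    let mut out = [0u8; 32];
    for (i, byte) in out.iter_mut().enumerate() {
        *byte = u8::from_str_radix(&hex[2 * i..2 * i + 2], 16).map_err(|e| e.to_string())?;
    }
    Ok(out)
}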
+ return; + } + if let Err(e) = block_info.mark_globally_accepted() { + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + return; + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state to globally accepted: {e:?}"); + } + } + } } } From 57848b3c6ac316fd3e6babfee2fbb0ef048ac8ff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 14:17:45 -0800 Subject: [PATCH 06/57] Fix block state transitions and update some comments Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 34 ++++------- stacks-signer/src/v0/signer.rs | 66 ++++++++++++++------- testnet/stacks-node/src/event_dispatcher.rs | 21 +++++++ 3 files changed, 78 insertions(+), 43 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 2360e56a03..df1be98d6a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -230,17 +230,9 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyAccepted - ) - } - BlockState::LocallyRejected => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyRejected - ) + BlockState::LocallyAccepted | BlockState::LocallyRejected => { + !matches!(prev_state, BlockState::GloballyRejected) + && !matches!(prev_state, BlockState::GloballyAccepted) } BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), @@ -1245,7 +1237,14 @@ mod tests { assert_eq!(block.state, BlockState::LocallyAccepted); assert!(!block.check_state(BlockState::Unprocessed)); assert!(block.check_state(BlockState::LocallyAccepted)); - assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); assert!(block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); @@ -1257,15 +1256,8 @@ mod tests { assert!(block.check_state(BlockState::GloballyAccepted)); assert!(!block.check_state(BlockState::GloballyRejected)); - // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted - block.state = BlockState::LocallyRejected; - assert!(!block.check_state(BlockState::Unprocessed)); - assert!(!block.check_state(BlockState::LocallyAccepted)); - assert!(block.check_state(BlockState::LocallyRejected)); - assert!(block.check_state(BlockState::GloballyAccepted)); - assert!(block.check_state(BlockState::GloballyRejected)); - - block.move_to(BlockState::GloballyRejected).unwrap(); + // Must manually override as will not be able to move from GloballyAccepted to GloballyRejected + block.state = BlockState::GloballyRejected; assert!(!block.check_state(BlockState::Unprocessed)); assert!(!block.check_state(BlockState::LocallyAccepted)); assert!(!block.check_state(BlockState::LocallyRejected)); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 825e3ead08..33778105a3 100644 --- a/stacks-signer/src/v0/signer.rs +++ 
b/stacks-signer/src/v0/signer.rs @@ -64,6 +64,11 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::syn /// Skip broadcasting the block to the network pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Skip any block responses from other signers +pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = + std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -476,10 +481,7 @@ impl Signer { self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { - // We know proposal is invalid. Send rejection message, do not do further validation - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; + // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb @@ -535,6 +537,10 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { + #[cfg(any(test, feature = "testing"))] + if self.test_ignore_block_responses(block_response) { + return; + } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); @@ -870,7 +876,7 @@ impl Signer { // Not enough rejection signatures to make a decision return; } - debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}"); if let Err(e) = block_info.mark_globally_rejected() { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } @@ -999,7 +1005,7 @@ impl Signer { return; }; // move block to LOCALLY accepted state. - // We only mark this GLOBALLY accepted if we manage to broadcast it... + // It is only considered globally accepted IFF we receive a new block event confirming it OR see the chain tip of the node advance to it. if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold warn!("{self}: Failed to mark block as locally accepted: {e:?}"); @@ -1012,22 +1018,8 @@ impl Signer { panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] - { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. 
- warn!("Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } + self.test_pause_block_broadcast(&block_info); + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); if self .submitted_block_proposal @@ -1137,6 +1129,36 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { + if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { + warn!( + "{self}: Ignoring block response due to testing directive"; + "block_response" => %block_response + ); + return true; + } + false + } + + #[cfg(any(test, feature = "testing"))] + fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 8144cd8ec5..6dc2842b8c 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -70,6 +70,10 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(test)] +pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = + std::sync::Mutex::new(None); + #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. 
If `None`, then @@ -1299,6 +1303,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1695,6 +1704,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; From bf70c9f7e9d0b025f823df20a52bb88aa74428db Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 16:04:42 -0800 Subject: [PATCH 07/57] Remove unused change Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 33778105a3..f53daf7745 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -64,11 +64,6 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::syn /// Skip broadcasting the block to the network pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); -#[cfg(any(test, feature = "testing"))] -/// Skip any block responses from other signers -pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = - std::sync::Mutex::new(None); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -1129,18 +1124,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { - if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { - warn!( - "{self}: Ignoring block response due to testing directive"; - "block_response" => %block_response - ); - return true; - } - false - } - #[cfg(any(test, feature = "testing"))] fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { From d4860f8e3dc232ddd433e5148b3403270903f27d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 16:37:51 -0800 Subject: [PATCH 08/57] Rename NewNakamotoBlock to NewBlock Signed-off-by: Jacinta Ferrant --- libsigner/src/events.rs | 6 +++--- stacks-signer/src/signerdb.rs | 8 ++++---- stacks-signer/src/v0/signer.rs | 9 +++------ testnet/stacks-node/src/event_dispatcher.rs | 21 --------------------- 4 files changed, 10 insertions(+), 34 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 26125a84d0..90ef022636 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,8 +114,8 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, - /// A new processed Nakamoto block was received from the node with the given block hash - NewNakamotoBlock { + /// A new processed Stacks block was received from the node with the given block hash + NewBlock { /// The block header hash for the newly processed stacks block block_hash: Sha512Trunc256Sum, /// The block height for the newly processed stacks block @@ -568,7 +568,7 @@ fn process_new_block( Sha512Trunc256Sum::from_hex(hex) .map_err(|e| 
EventError::Deserialize(format!("Invalid hex string: {e}"))) })?; - let event = SignerEvent::NewNakamotoBlock { + let event = SignerEvent::NewBlock { block_hash, block_height: temp.block_height, }; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 4e24c20029..732a3c3451 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -230,10 +230,10 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted | BlockState::LocallyRejected => { - !matches!(prev_state, BlockState::GloballyRejected) - && !matches!(prev_state, BlockState::GloballyAccepted) - } + BlockState::LocallyAccepted | BlockState::LocallyRejected => !matches!( + prev_state, + BlockState::GloballyRejected | BlockState::GloballyAccepted + ), BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index f53daf7745..b53738223e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -126,7 +126,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) - | Some(SignerEvent::NewNakamotoBlock { .. }) + | Some(SignerEvent::NewBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -247,7 +247,7 @@ impl SignerTrait for Signer { }); *sortition_state = None; } - SignerEvent::NewNakamotoBlock { + SignerEvent::NewBlock { block_hash, block_height, } => { @@ -400,6 +400,7 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); + #[allow(unused_mut)] let mut block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it @@ -532,10 +533,6 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { - #[cfg(any(test, feature = "testing"))] - if self.test_ignore_block_responses(block_response) { - return; - } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 6dc2842b8c..8144cd8ec5 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -70,10 +70,6 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; -#[cfg(test)] -pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = - std::sync::Mutex::new(None); - #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. 
If `None`, then @@ -1303,11 +1299,6 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); - #[cfg(any(test, feature = "testing"))] - if test_skip_block_announcement(&block) { - return; - } - for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1704,18 +1695,6 @@ impl EventDispatcher { } } -#[cfg(any(test, feature = "testing"))] -fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { - if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { - warn!( - "Skipping new block announcement due to testing directive"; - "block_hash" => %block.block_hash - ); - return true; - } - false -} - #[cfg(test)] mod test { use std::net::TcpListener; From b473984ceab7fb93e2d011dbcb70792de286aa38 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 2 Dec 2024 17:28:24 -0800 Subject: [PATCH 09/57] Add global_acceptance_depends_on_block_announcement Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/v0/signer.rs | 21 ++ testnet/stacks-node/src/event_dispatcher.rs | 21 ++ testnet/stacks-node/src/tests/signer/v0.rs | 262 +++++++++++++++++++- 4 files changed, 299 insertions(+), 6 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 04e74f94e8..060e109a17 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -127,6 +127,7 @@ jobs: - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout - tests::signer::v0::tenure_extend_after_bad_commit + - tests::signer::v0::global_acceptance_depends_on_block_announcement - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b53738223e..37a31b841d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -64,6 +64,11 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::syn /// Skip broadcasting the block to the network pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Skip any block responses from other signers +pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = + std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -533,6 +538,10 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { + #[cfg(any(test, feature = "testing"))] + if self.test_ignore_block_responses(block_response) { + return; + } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); @@ -1121,6 +1130,18 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { + if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { + warn!( + "{self}: Ignoring block response due to testing directive"; + "block_response" => %block_response + ); + return true; + } + false + } + #[cfg(any(test, feature = "testing"))] fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { diff --git a/testnet/stacks-node/src/event_dispatcher.rs 
b/testnet/stacks-node/src/event_dispatcher.rs index 8144cd8ec5..86ad9cae74 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -70,6 +70,10 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(any(test, feature = "testing"))] +pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = + std::sync::Mutex::new(None); + #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. If `None`, then @@ -1299,6 +1303,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1695,6 +1704,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2486043ccc..396d73cbd2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -56,8 +56,8 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::v0::signer::{ - TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, - TEST_SKIP_BLOCK_BROADCAST, + TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_IGNORE_BLOCK_RESPONSES, TEST_PAUSE_BLOCK_BROADCAST, + TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -65,7 +65,7 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; @@ -375,7 +375,7 @@ impl SignerTest { } } - /// Propose an invalid block to the signers + /// Propose a block to the signers fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = @@ -385,6 +385,7 @@ impl SignerTest { .btc_regtest_controller .get_headers_height(); let reward_cycle = 
self.get_current_reward_cycle(); + let signer_signature_hash = block.header.signer_signature_hash(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ -401,7 +402,7 @@ impl SignerTest { let mut version = 0; let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; let start = Instant::now(); - debug!("Proposing invalid block to signers"); + debug!("Proposing block to signers: {signer_signature_hash}"); while !accepted { let mut chunk = StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); @@ -8557,3 +8558,252 @@ fn tenure_extend_after_2_bad_commits() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that signers that reject a block locally, but that was accepted globally will accept +/// only accept a block built upon it when they receive the new block event confirming their prior +/// rejected block. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). <30% of signers are configured to auto reject +/// any block proposals, announcement of new blocks are skipped, and signatures ignored by signers. +/// The subsequent block N+1 is proposed, triggering one of the <30% signers submit the block to the node +/// for validation. The node will fail due to a bad block header hash mismatch (passes height checks) +/// +/// Test Assertion: +/// - All signers accepted block N. +/// - Less than 30% of the signers rejected block N+1. +/// - The 30% of signers that rejected block N+1, will submit the block for validation +/// as it passes preliminary checks (even though its a sister block, it is a sister block to a locally rejected block) +fn global_acceptance_depends_on_block_announcement() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 4; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + test_observer::clear(); + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + 
.expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is accepted by the node, but not announced. + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + let non_rejecting_signers = all_signers[num_signers * 3 / 10..].to_vec(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap().replace(true); + TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+1"); + + let mut proposed_block = None; + let start_time = Instant::now(); + while proposed_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + proposed_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + + signer_test + .wait_for_block_acceptance( + short_timeout, + &proposed_block.header.signer_signature_hash(), + &non_rejecting_signers, + ) + .expect("Timed out waiting for block acceptance of N+1 by non rejecting signers"); + + signer_test + .wait_for_block_rejections(short_timeout, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1' from rejecting signers"); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + test_observer::clear(); + + let mut sister_block = proposed_block; + + let transfer_tx_bytes = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt * 2, + ); + let 
tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx_bytes[..]).unwrap(); + let txs = vec![tx]; + let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + sister_block.txs = txs; + sister_block.header.tx_merkle_root = tx_merkle_root; + sister_block + .header + .sign_miner(&signer_test.running_nodes.conf.miner.mining_key.unwrap()) + .unwrap(); + signer_test.propose_block(sister_block.clone(), Duration::from_secs(30)); + + wait_for(30, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections: HashSet<_> = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + // Proves that one of the rejecting signers actually submitted the block for validation as it passed its preliminary checks about chain length + assert_eq!( + rejection.reason_code, + RejectCode::ValidationFailed(ValidateRejectCode::BadBlockHash) + ); + Some(rejected_pubkey) + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == all_signers.len()) + }) + .expect("Timed out waiting for block rejections for N+1'"); + // Assert the block was NOT mined and the tip has not changed. + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_after, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + ); +} From b8d41bea97b0af79afbc9cdb7f487cef3cd96285 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 6 Dec 2024 11:14:52 -0500 Subject: [PATCH 10/57] Fix test to confirm reorg accross boundaries is possible if block not announced by the node Signed-off-by: Jacinta Ferrant --- stacks-common/src/util/mod.rs | 24 ++ stacks-signer/src/v0/signer.rs | 72 ++--- testnet/stacks-node/src/event_dispatcher.rs | 12 +- .../src/nakamoto_node/sign_coordinator.rs | 19 +- testnet/stacks-node/src/run_loop/neon.rs | 28 +- testnet/stacks-node/src/tests/signer/mod.rs | 5 +- testnet/stacks-node/src/tests/signer/v0.rs | 259 ++++++++---------- 7 files changed, 193 insertions(+), 226 deletions(-) diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index a9dfc47806..416a365a2f 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -35,6 +35,30 @@ use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; +#[cfg(any(test, feature = "testing"))] +#[derive(Clone)] +pub struct TestFlag(pub std::sync::Arc>>); + +#[cfg(any(test, feature = "testing"))] +impl Default for TestFlag { + fn default() -> Self { + Self(std::sync::Arc::new(std::sync::Mutex::new(None))) + } +} + +#[cfg(any(test, feature = "testing"))] +impl TestFlag { + /// Set the test flag to the given value + pub fn set(&self, value: T) { + *self.0.lock().unwrap() = Some(value); + } + + /// Get the test flag value. Defaults otherwise. 
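+    /// Returns `T::default()` if the flag has never been set.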
+ pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} + pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); let since_the_epoch = start diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 37a31b841d..55a8e42044 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -26,6 +26,8 @@ use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, SignerMessage, @@ -35,6 +37,8 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; +#[cfg(any(test, feature = "testing"))] +use stacks_common::util::TestFlag; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -45,29 +49,28 @@ use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; #[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); +lazy_static! { + /// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list + pub static ref TEST_REJECT_ALL_BLOCK_PROPOSAL: TestFlag> = TestFlag::default(); +} #[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +lazy_static! { + /// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list + pub static ref TEST_IGNORE_ALL_BLOCK_PROPOSALS: TestFlag> = TestFlag::default(); +} #[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +lazy_static! { + /// Pause the block broadcast + pub static ref TEST_PAUSE_BLOCK_BROADCAST: TestFlag = TestFlag::default(); +} #[cfg(any(test, feature = "testing"))] -/// Skip any block responses from other signers -pub static TEST_IGNORE_BLOCK_RESPONSES: std::sync::Mutex> = - std::sync::Mutex::new(None); +lazy_static! 
{ + /// Skip broadcasting the block to the network + pub static ref TEST_SKIP_BLOCK_BROADCAST: TestFlag = TestFlag::default(); +} /// The stacks signer registered for the reward cycle #[derive(Debug)] @@ -174,9 +177,8 @@ impl SignerTrait for Signer { match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - if let Some(public_keys) = - &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); if public_keys.contains( &stacks_common::types::chainstate::StacksPublicKey::from_private( &self.private_key, @@ -405,8 +407,10 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); - #[allow(unused_mut)] + #[cfg(any(test, feature = "testing"))] let mut block_info = BlockInfo::from(block_proposal.clone()); + #[cfg(not(any(test, feature = "testing")))] + let block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -538,10 +542,6 @@ impl Signer { stacks_client: &StacksClient, block_response: &BlockResponse, ) { - #[cfg(any(test, feature = "testing"))] - if self.test_ignore_block_responses(block_response) { - return; - } match block_response { BlockResponse::Accepted(accepted) => { self.handle_block_signature(stacks_client, accepted); @@ -1071,7 +1071,7 @@ impl Signer { #[cfg(any(test, feature = "testing"))] fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + if TEST_SKIP_BLOCK_BROADCAST.get() { let block_hash = block.header.signer_signature_hash(); warn!( "{self}: Skipping block broadcast due to testing directive"; @@ -1099,9 +1099,7 @@ impl Signer { block_info: &mut BlockInfo, block_response: Option, ) -> Option { - let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { - return block_response; - }; + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); if public_keys.contains( &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), ) { @@ -1126,31 +1124,19 @@ impl Signer { self.mainnet, )) } else { - None - } - } - - #[cfg(any(test, feature = "testing"))] - fn test_ignore_block_responses(&self, block_response: &BlockResponse) -> bool { - if *TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap() == Some(true) { - warn!( - "{self}: Ignoring block response due to testing directive"; - "block_response" => %block_response - ); - return true; + block_response } - false } #[cfg(any(test, feature = "testing"))] fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { // Do an extra check just so we don't log EVERY time. 
warn!("{self}: Block broadcast is stalled due to testing directive."; "block_id" => %block_info.block.block_id(), "height" => block_info.block.header.chain_length, ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + while TEST_PAUSE_BLOCK_BROADCAST.get() { std::thread::sleep(std::time::Duration::from_millis(10)); } info!("{self}: Block validation is no longer stalled due to testing directive."; diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 86ad9cae74..b1e26e7770 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,8 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; @@ -59,6 +61,8 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +#[cfg(any(test, feature = "testing"))] +use stacks::util::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -71,8 +75,10 @@ use url::Url; use super::config::{EventKeyType, EventObserverConfig}; #[cfg(any(test, feature = "testing"))] -pub static TEST_SKIP_BLOCK_ANNOUNCEMENT: std::sync::Mutex> = - std::sync::Mutex::new(None); +lazy_static! { + /// Do not announce a signed/mined block to the network when set to true. + pub static ref TEST_SKIP_BLOCK_ANNOUNCEMENT: TestFlag = TestFlag::default(); +} #[derive(Debug, Clone)] struct EventObserver { @@ -1706,7 +1712,7 @@ impl EventDispatcher { #[cfg(any(test, feature = "testing"))] fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { - if *TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap() == Some(true) { + if TEST_SKIP_BLOCK_ANNOUNCEMENT.get() { warn!( "Skipping new block announcement due to testing directive"; "block_hash" => %block.block_hash diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 2b1efcbfc5..7f3ad9aecf 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -20,6 +20,8 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use hashbrown::{HashMap, HashSet}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use libsigner::v0::messages::{ BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, }; @@ -37,6 +39,8 @@ use stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::MessageSignature; +#[cfg(any(test, feature = "testing"))] +use stacks::util::TestFlag; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -47,10 +51,12 @@ use crate::event_dispatcher::StackerDBChannel; use crate::neon::Counters; use crate::Config; -/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
-/// Used to test that the signers will broadcast a block if it gets enough signatures -#[cfg(test)] -pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +lazy_static! { + /// Fault injection flag to prevent the miner from seeing enough signer signatures. + /// Used to test that the signers will broadcast a block if it gets enough signatures + pub static ref TEST_IGNORE_SIGNERS: TestFlag = TestFlag::default(); +} /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? @@ -256,10 +262,7 @@ impl SignCoordinator { /// Do we ignore signer signatures? #[cfg(test)] fn fault_injection_ignore_signatures() -> bool { - if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { - return true; - } - false + TEST_IGNORE_SIGNERS.get() } #[cfg(not(test))] diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..53df2edcd3 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,8 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +#[cfg(test)] +use stacks::util::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -82,30 +84,6 @@ impl std::ops::Deref for RunLoopCounter { } } -#[cfg(test)] -#[derive(Clone)] -pub struct TestFlag(pub Arc>>); - -#[cfg(test)] -impl Default for TestFlag { - fn default() -> Self { - Self(Arc::new(std::sync::Mutex::new(None))) - } -} - -#[cfg(test)] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: bool) { - *self.0.lock().unwrap() = Some(value); - } - - /// Get the test flag value. Defaults to false if the flag is not set. 
- pub fn get(&self) -> bool { - self.0.lock().unwrap().unwrap_or(false) - } -} - #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -123,7 +101,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] - pub naka_skip_commit_op: TestFlag, + pub naka_skip_commit_op: TestFlag, } impl Counters { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 946a566c13..74a07cb235 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -49,6 +49,7 @@ use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; +use stacks::util::TestFlag; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; @@ -60,7 +61,7 @@ use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, TestFlag}; +use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -88,7 +89,7 @@ pub struct RunningNodes { pub nakamoto_blocks_mined: Arc, pub nakamoto_blocks_rejected: Arc, pub nakamoto_blocks_signer_pushed: Arc, - pub nakamoto_test_skip_commit_op: TestFlag, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 07a3d6b02a..e5a4e88123 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -56,8 +56,8 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::v0::signer::{ - TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_IGNORE_BLOCK_RESPONSES, TEST_PAUSE_BLOCK_BROADCAST, - TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, + TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, + TEST_SKIP_BLOCK_BROADCAST, }; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -943,7 +943,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); }, |config| { 
config.miner.tenure_cost_limit_per_block_percentage = None; @@ -2387,10 +2387,7 @@ fn retry_on_rejection() { .map(StacksPublicKey::from_private) .take(num_signers) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); let proposals_before = signer_test .running_nodes @@ -2437,10 +2434,7 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(vec![]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); loop { let blocks_mined = signer_test .running_nodes @@ -2500,7 +2494,7 @@ fn signers_broadcast_signed_blocks() { }) .expect("Timed out waiting for first nakamoto block to be mined"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined @@ -2785,7 +2779,7 @@ fn empty_sortition_before_approval() { let stacks_height_before = info.stacks_tip_height; info!("Forcing miner to ignore signatures for next block"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Pausing block commits to trigger an empty sortition."); signer_test @@ -2838,7 +2832,7 @@ fn empty_sortition_before_approval() { .replace(false); info!("Stop ignoring signers and wait for the tip to advance"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); wait_for(60, || { let info = get_chain_info(&signer_test.running_nodes.conf); @@ -4726,10 +4720,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .cloned() .take(num_signers / 2 + num_signers % 2) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // Make a new stacks transaction to create a different block signature, but make sure to propose it // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted @@ -4762,10 +4753,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); let transfer_tx = make_stacks_transfer( &sender_sk, @@ -4921,10 +4909,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .cloned() .take(num_signers * 3 / 10) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -4989,10 +4974,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer( @@ -5148,10 +5130,7 @@ fn 
reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5229,10 +5208,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new()); wait_for(short_timeout, || { let info_after = signer_test .stacks_client @@ -5375,10 +5351,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5595,9 +5568,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // broadcasted to the miner so it can end its tenure before block confirmation obtained // Clear the stackerdb chunks info!("Forcing miner to ignore block responses for block N+1"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_PAUSE_BLOCK_BROADCAST.set(true); test_observer::clear(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -5722,9 +5695,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Timed out waiting for block proposal of N+1' block proposal"); info!("Allowing miner to accept block responses again. 
"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + TEST_PAUSE_BLOCK_BROADCAST.set(false); // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); @@ -6069,10 +6042,7 @@ fn continue_after_fast_block_no_sortition() { // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(ignoring_signers.clone()); info!("------------------------- Submit Miner 2 Block Commit -------------------------"); let rejections_before = signer_test @@ -6186,10 +6156,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); info!("------------------------- Wait for Miner B's Block N -------------------------"); // wait for the new block to be processed @@ -6894,10 +6861,7 @@ fn block_commit_delay() { .iter() .map(StacksPublicKey::from_private) .collect::>(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(all_signers); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; @@ -6932,10 +6896,7 @@ fn block_commit_delay() { .load(Ordering::SeqCst); info!("------------------------- Resume Signing -------------------------"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // Wait for a block to be mined wait_for(60, || { @@ -8564,25 +8525,27 @@ fn tenure_extend_after_2_bad_commits() { #[test] #[ignore] -/// Test that signers that reject a block locally, but that was accepted globally will accept -/// only accept a block built upon it when they receive the new block event confirming their prior -/// rejected block. +/// Test that signers do not mark a block as globally accepted if it was not announced by the node. +/// This will simulate this case via testing flags, and ensure that a block can be reorged across tenure +/// boundaries now (as it is only marked locally accepted and no longer gets marked globally accepted +/// by simply seeing the threshold number of signatures). /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. /// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. /// /// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). <30% of signers are configured to auto reject -/// any block proposals, announcement of new blocks are skipped, and signatures ignored by signers. -/// The subsequent block N+1 is proposed, triggering one of the <30% signers submit the block to the node -/// for validation. The node will fail due to a bad block header hash mismatch (passes height checks) +/// 1. The node mines 1 stacks block N (all signers sign it). +/// 2. 
<30% of signers are configured to auto reject any block proposals, broadcast of new blocks are skipped, and miners are configured to ignore signers responses. +/// 3. The node mines 1 stacks block N+1 (all signers sign it, but one which rejects it) but eventually all mark the block as locally accepted. +/// 4. A new tenure starts and the miner attempts to mine a new sister block N+1' (as it does not see the threshold number of signatures or any block push from signers). +/// 5. The signers accept this sister block as a valid reorg and the node advances to block N+1'. /// /// Test Assertion: /// - All signers accepted block N. /// - Less than 30% of the signers rejected block N+1. -/// - The 30% of signers that rejected block N+1, will submit the block for validation -/// as it passes preliminary checks (even though its a sister block, it is a sister block to a locally rejected block) +/// - All signers accept block N+1' as a valid reorg. +/// - The node advances to block N+1' fn global_acceptance_depends_on_block_announcement() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -8602,9 +8565,18 @@ fn global_acceptance_depends_on_block_announcement() { let nmb_txs = 4; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, ); let all_signers: Vec<_> = signer_test @@ -8674,14 +8646,10 @@ fn global_acceptance_depends_on_block_announcement() { .cloned() .take(num_signers * 3 / 10) .collect(); - let non_rejecting_signers = all_signers[num_signers * 3 / 10..].to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); - TEST_SKIP_BLOCK_ANNOUNCEMENT.lock().unwrap().replace(true); - TEST_IGNORE_BLOCK_RESPONSES.lock().unwrap().replace(true); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(true); + TEST_IGNORE_SIGNERS.set(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -8725,88 +8693,89 @@ fn global_acceptance_depends_on_block_announcement() { } let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + // Even though one of the signers rejected the block, it will eventually accept the block as it sees the 70% threshold of signatures signer_test .wait_for_block_acceptance( short_timeout, &proposed_block.header.signer_signature_hash(), - &non_rejecting_signers, + &all_signers, ) - .expect("Timed out waiting for block acceptance of N+1 by non rejecting signers"); - - signer_test - .wait_for_block_rejections(short_timeout, &rejecting_signers) - .expect("Timed out waiting for block rejection of N+1' from rejecting signers"); + .expect("Timed out waiting for block acceptance of N+1 by all signers"); info!( "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" ); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); - test_observer::clear(); - - let mut sister_block = proposed_block; - - let transfer_tx_bytes = make_stacks_transfer( - &sender_sk, 
- sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt * 2, - ); - let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx_bytes[..]).unwrap(); - let txs = vec![tx]; - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); - - let merkle_tree = MerkleTree::::new(&txid_vecs); - let tx_merkle_root = merkle_tree.root(); - sister_block.txs = txs; - sister_block.header.tx_merkle_root = tx_merkle_root; - sister_block - .header - .sign_miner(&signer_test.running_nodes.conf.miner.mining_key.unwrap()) - .unwrap(); - signer_test.propose_block(sister_block.clone(), Duration::from_secs(30)); - wait_for(30, || { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections: HashSet<_> = stackerdb_events + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_IGNORE_SIGNERS.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + test_observer::clear(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }, + ) + .unwrap(); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut sister_block = None; + let start_time = Instant::now(); + while sister_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + sister_block = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { + .find_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - // Proves that one of the rejecting signers actually submitted the block for validation as it passed its preliminary checks about chain length - assert_eq!( - rejection.reason_code, - RejectCode::ValidationFailed(ValidateRejectCode::BadBlockHash) - ); - Some(rejected_pubkey) + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_after.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } } _ => None, } - }) - .collect::>(); - Ok(block_rejections.len() == all_signers.len()) - }) - .expect("Timed out waiting for block rejections for N+1'"); - // Assert the block was NOT mined and the tip has not changed. - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); + }); + } + let sister_block = sister_block.expect("Failed to find proposed sister block within 30s"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &sister_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1' by all signers"); + + // Assert the block was mined and the tip has changed. 
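+    // That is, the tip has advanced to the sister block N+1', confirming the reorg across the tenure boundary.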
assert_eq!( - info_after, - signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") + info_after.stacks_tip_height, + sister_block.header.chain_length + ); + assert_eq!(info_after.stacks_tip, sister_block.header.block_hash()); + assert_eq!( + info_after.stacks_tip_consensus_hash, + sister_block.header.consensus_hash + ); + assert_eq!( + sister_block.header.chain_length, + proposed_block.header.chain_length ); + assert_ne!(sister_block, proposed_block); } From 6649c8c7f99e23ff991902bb19f31736765d1efd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 7 Dec 2024 17:37:13 -0500 Subject: [PATCH 11/57] Increase the block proposal timeout in block_commit_delay test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e5a4e88123..a168dce7df 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6815,13 +6815,12 @@ fn block_commit_delay() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], |config| { // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.block_proposal_timeout = Duration::from_secs(600); }, |config| { // Set the block commit delay to 10 minutes to ensure no block commit is sent From 045fcce3aeb09561cdc111a181fc477829e40ee3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Dec 2024 13:32:08 -0500 Subject: [PATCH 12/57] chore: use test-specific chains coordinator settings to allow tests to handle (now legacy) cases where the PoX anchor block does not arrive in order (or at all) --- stackslib/src/chainstate/coordinator/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 3c78e02cef..c78c68a4a3 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -217,6 +217,14 @@ impl ChainsCoordinatorConfig { assume_present_anchor_blocks: true, } } + + pub fn test_new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + require_affirmed_anchor_blocks: false, + assume_present_anchor_blocks: false, + } + } } pub struct ChainsCoordinator< @@ -704,7 +712,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader notifier: (), atlas_config, atlas_db: Some(atlas_db), - config: ChainsCoordinatorConfig::new(), + config: ChainsCoordinatorConfig::test_new(), burnchain_indexer, refresh_stacker_db: Arc::new(AtomicBool::new(false)), in_nakamoto_epoch: false, From 83a032fa65ec7c4776f7869fd574716e53352a96 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 9 Dec 2024 13:53:10 -0500 Subject: [PATCH 13/57] Added block_proposal_max_age_secs signer configuration to drop old proposals without processing Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/CHANGELOG.md | 2 + stacks-signer/src/client/mod.rs | 1 + stacks-signer/src/config.rs | 12 ++ stacks-signer/src/runloop.rs | 1 + stacks-signer/src/v0/signer.rs | 20 ++++ testnet/stacks-node/src/tests/signer/v0.rs | 
123 +++++++++++++++++++-- 7 files changed, 152 insertions(+), 8 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 41e14b4458..2786fc20b1 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -127,6 +127,7 @@ jobs: - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout - tests::signer::v0::tenure_extend_after_bad_commit + - tests::signer::v0::block_proposal_max_age_rejections - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index a332b344ce..0b0937cde8 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +- Introduced the `block_proposal_max_age_secs` configuration option for signers, enabling them to automatically ignore block proposals that exceed the specified age in seconds. + ### Added ### Changed diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 37706368dc..ffb03af027 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -413,6 +413,7 @@ pub(crate) mod tests { block_proposal_timeout: config.block_proposal_timeout, tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: config.block_proposal_validation_timeout, + block_proposal_max_age_secs: config.block_proposal_max_age_secs, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 57c90ab0eb..3b1fd7ec3e 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -38,6 +38,7 @@ const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; +const DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS: u64 = 600; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -135,6 +136,8 @@ pub struct SignerConfig { pub tenure_last_block_proposal_timeout: Duration, /// How much time to wait for a block proposal validation response before marking the block invalid pub block_proposal_validation_timeout: Duration, + /// The maximum age of a block proposal in seconds that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// The parsed configuration for the signer @@ -171,6 +174,8 @@ pub struct GlobalConfig { /// How long to wait for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout: Duration, + /// The maximum age of a block proposal that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// Internal struct for loading up the config file @@ -206,6 +211,8 @@ struct RawConfigFile { /// How long to wait (in millisecs) for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout_ms: Option, + /// The maximum age of a block proposal (in secs) that will be processed by the signer. 
+ pub block_proposal_max_age_secs: Option, } impl RawConfigFile { @@ -297,6 +304,10 @@ impl TryFrom for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), ); + let block_proposal_max_age_secs = raw_data + .block_proposal_max_age_secs + .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -312,6 +323,7 @@ impl TryFrom for GlobalConfig { chain_id: raw_data.chain_id, tenure_last_block_proposal_timeout, block_proposal_validation_timeout, + block_proposal_max_age_secs, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index c8f6041478..2a15e75449 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -285,6 +285,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo block_proposal_timeout: self.config.block_proposal_timeout, tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, + block_proposal_max_age_secs: self.config.block_proposal_max_age_secs, })) } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b537cfae8a..4c2a4b3732 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -92,6 +92,8 @@ pub struct Signer { pub block_proposal_validation_timeout: Duration, /// The current submitted block proposal and its submission time pub submitted_block_proposal: Option<(BlockProposal, Instant)>, + /// Maximum age of a block proposal in seconds before it is dropped without processing + pub block_proposal_max_age_secs: u64, } impl std::fmt::Display for Signer { @@ -284,6 +286,7 @@ impl From for Signer { proposal_config, submitted_block_proposal: None, block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, + block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, } } } @@ -331,6 +334,23 @@ impl Signer { return; } + if block_proposal + .block + .header + .timestamp + .saturating_add(self.block_proposal_max_age_secs) + < get_epoch_time_secs() + { + // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. + warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + "timestamp" => block_proposal.block.header.timestamp, + ); + return; + } + // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. 
// the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9c9fee200f..b8cc15fce8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,6 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; +use stacks::util::get_epoch_time_secs; use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; @@ -811,14 +812,8 @@ fn reloads_signer_set_in() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr, send_amt + send_fee)], - |_config| {}, - |_| {}, - None, - None, - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); setup_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -8574,3 +8569,115 @@ fn tenure_extend_after_2_bad_commits() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test the block_proposal_max_age_secs signer configuration option. It should reject blocks that are +/// invalid but within the max age window, otherwise it should simply drop the block without further processing. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// +/// Test Execution: +/// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. +/// An invalid block proposal with a recent timestamp is forcibly written to the miner's slot to simulate the miner proposing a block. +/// The signers process the invalid block and broadcast a block response rejection to the respective .signers-XXX-YYY contract. +/// A second block proposal with an outdated timestamp is then submitted to the miner's slot to simulate the miner proposing a very old block. +/// The test confirms no further block rejection response is submitted to the .signers-XXX-YYY contract. +/// +/// Test Assertion: +/// - Each signer successfully rejects the recent invalid block proposal. +/// - No signer submits a block proposal response for the outdated block proposal. +/// - The stacks tip does not advance +fn block_proposal_max_age_rejections() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + config.block_proposal_max_age_secs = 30; + }, + |_| {}, + None, + None, + ); + signer_test.boot_to_epoch_3(); + let short_timeout = Duration::from_secs(30); + + // Make sure no other block approvals are in the system. 
+ test_observer::clear(); + info!("------------------------- Send Block Proposal To Signers -------------------------"); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + // First propose a stale block that is older than the block_proposal_max_age_secs + block.header.timestamp = get_epoch_time_secs().saturating_sub( + signer_test.signer_configs[0] + .block_proposal_max_age_secs + .saturating_add(1), + ); + let _block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(block.clone(), short_timeout); + + // Next propose a recent invalid block + block.header.timestamp = get_epoch_time_secs(); + let block_signer_signature_hash_2 = block.header.signer_signature_hash(); + signer_test.propose_block(block, short_timeout); + + info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify the signers rejected only the SECOND block proposal. The first was not even processed. + wait_for(30, || { + let rejections: Vec<_> = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .map(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return None; + }; + assert!(matches!( + message, + SignerMessage::BlockResponse(BlockResponse::Rejected(_)) + )); + let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code, + signer_signature_hash, + signature, + .. + })) = message + else { + panic!("Received an unexpected block approval from the signer"); + }; + assert_eq!( + signer_signature_hash, block_signer_signature_hash_2, + "Received a rejection for an unexpected block: {signer_signature_hash}" + ); + assert!( + matches!(reason_code, RejectCode::SortitionViewMismatch), + "Received a rejection for an unexpected reason: {reason_code}" + ); + Some(signature) + }) + .collect(); + Ok(rejections.len() == num_signers) + }) + .expect("Timed out waiting for block rejections"); + + info!("------------------------- Test Peer Info-------------------------"); + assert_eq!(info_before, get_chain_info(&signer_test.running_nodes.conf)); + + info!("------------------------- Test Shutdown-------------------------"); + signer_test.shutdown(); +} From 738845e89e3caed663c9ee9ccc359effe4e5fcd7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 9 Dec 2024 15:04:39 -0500 Subject: [PATCH 14/57] Fix test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 48 ++++++++++------------ 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b8cc15fce8..0d718954ca 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, + BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage }; use libsigner::{BlockProposal, SignerSession, StackerDBSession, VERSION_STRING}; use stacks::address::AddressHashMode; @@ -8614,8 +8614,6 @@ fn block_proposal_max_age_rejections() { signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); - // Make sure no other block approvals are in the system. 
- test_observer::clear(); info!("------------------------- Send Block Proposal To Signers -------------------------"); let info_before = get_chain_info(&signer_test.running_nodes.conf); let mut block = NakamotoBlock { @@ -8628,7 +8626,7 @@ fn block_proposal_max_age_rejections() { .block_proposal_max_age_secs .saturating_add(1), ); - let _block_signer_signature_hash_1 = block.header.signer_signature_hash(); + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); signer_test.propose_block(block.clone(), short_timeout); // Next propose a recent invalid block @@ -8647,31 +8645,27 @@ fn block_proposal_max_age_rejections() { else { return None; }; - assert!(matches!( - message, - SignerMessage::BlockResponse(BlockResponse::Rejected(_)) - )); - let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - reason_code, - signer_signature_hash, - signature, - .. - })) = message - else { - panic!("Received an unexpected block approval from the signer"); - }; - assert_eq!( - signer_signature_hash, block_signer_signature_hash_2, - "Received a rejection for an unexpected block: {signer_signature_hash}" - ); - assert!( - matches!(reason_code, RejectCode::SortitionViewMismatch), - "Received a rejection for an unexpected reason: {reason_code}" - ); - Some(signature) + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) => { + assert_eq!(signer_signature_hash, block_signer_signature_hash_2, "We should only reject the second block"); + Some(signature) + } + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) => { + assert_ne!(signer_signature_hash, block_signer_signature_hash_1, "We should never have accepted block"); + None + } + _ => None, + } }) .collect(); - Ok(rejections.len() == num_signers) + Ok(rejections.len() > num_signers * 7/10) }) .expect("Timed out waiting for block rejections"); From 66bc4ab1d3b31c7ae3f53151e014eba90d63d10a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 10 Dec 2024 09:39:53 -0500 Subject: [PATCH 15/57] Make sure NakamotoBlock has valid header timestamp in signer v0 tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d3d31ddb6d..d4c8c778c5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -489,6 +489,7 @@ fn block_proposal_rejection() { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE // the block is submitted to the node for validation. 
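The timestamp added in the hunk above matters because of the `block_proposal_max_age_secs` check introduced earlier in this series: a proposal whose header timestamp plus the configured max age is older than the node's current time is dropped before any validation. A minimal sketch of that comparison follows (illustrative only; `is_stale` and its parameter names are hypothetical, with `now_secs` standing in for `get_epoch_time_secs()`):

/// Sketch of the staleness test applied to incoming block proposals.
/// Uses saturating_add so a very large configured age cannot overflow.
fn is_stale(proposal_timestamp_secs: u64, max_age_secs: u64, now_secs: u64) -> bool {
    proposal_timestamp_secs.saturating_add(max_age_secs) < now_secs
}

With the default max age of 600 seconds, a test block built from an empty header (whose default timestamp falls outside that window) would be silently ignored by the signers, which is why these tests now stamp the header with the current epoch time before proposing the block.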
@@ -7509,6 +7510,7 @@ fn block_validation_response_timeout() { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); let info_before = get_chain_info(&signer_test.running_nodes.conf); // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall From cc2af4e491130409563196f92ceac8d702619dcd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 10 Dec 2024 14:13:57 -0500 Subject: [PATCH 16/57] Should not use the nakamoto blocks mined test observer heuristic in multi miner tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 5 ++- testnet/stacks-node/src/tests/signer/v0.rs | 45 ++++++++++++--------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index ff128d0a03..3c68b27783 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -310,7 +310,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height - && blocks_mined > mined_before) + && (!use_nakamoto_blocks_mined || blocks_mined > mined_before)) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d4c8c778c5..00276b09ee 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -280,7 +280,7 @@ impl SignerTest { // could be other miners mining blocks. let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - self.mine_nakamoto_block(Duration::from_secs(30)); + self.mine_nakamoto_block(Duration::from_secs(30), false); wait_for(30, || { Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) @@ -289,12 +289,17 @@ impl SignerTest { } // Only call after already past the epoch 3.0 boundary - fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { + fn mine_and_verify_confirmed_naka_block( + &mut self, + timeout: Duration, + num_signers: usize, + use_nakamoto_blocks_mined: bool, + ) { info!("------------------------- Try mining one block -------------------------"); let reward_cycle = self.get_current_reward_cycle(); - self.mine_nakamoto_block(timeout); + self.mine_nakamoto_block(timeout, use_nakamoto_blocks_mined); // Verify that the signers accepted the proposed block, sending back a validate ok response let proposed_signer_signature_hash = self @@ -377,7 +382,7 @@ impl SignerTest { let total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); for _ in 0..total_nmb_blocks_to_mine { - self.mine_and_verify_confirmed_naka_block(timeout, num_signers); + self.mine_and_verify_confirmed_naka_block(timeout, num_signers, false); } } @@ -590,7 +595,7 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); // Test prometheus 
metrics response #[cfg(feature = "monitoring_prom")] @@ -1327,7 +1332,7 @@ fn bitcoind_forking_test() { for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1399,7 +1404,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1475,7 +1480,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let test_end_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2508,7 +2513,7 @@ fn signers_broadcast_signed_blocks() { .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); wait_for(30, || { let blocks_mined = signer_test @@ -2609,7 +2614,7 @@ fn tenure_extend_after_idle() { signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Waiting for a tenure extend ----"); @@ -2671,7 +2676,7 @@ fn stx_transfers_dont_effect_idle_timeout() { "info_height" => info_before.stacks_tip_height, "blocks_before" => blocks_before, ); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Getting current idle timeout ----"); @@ -2809,7 +2814,7 @@ fn idle_tenure_extend_active_mining() { // Add a delay to the block validation process TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(3); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Getting current idle timeout ----"); @@ -2877,7 +2882,7 @@ fn idle_tenure_extend_active_mining() { info!("----- Submitted deploy txs, mining BTC block -----"); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); let mut last_response = signer_test.get_latest_block_response(slot_id); // Make multiple tenures that get extended through idle timeouts @@ -3990,7 +3995,7 @@ fn signer_set_rollover() { send_amt, ); submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); + signer_test.mine_nakamoto_block(short_timeout, true); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let block_sighash = mined_block.signer_signature_hash; let signer_signatures = mined_block.signer_signature; @@ -4064,7 +4069,7 @@ fn signer_set_rollover() { }) .expect("Timed out waiting for stacking txs to be mined"); - signer_test.mine_nakamoto_block(short_timeout); + signer_test.mine_nakamoto_block(short_timeout, true); let next_reward_cycle = reward_cycle.saturating_add(1); @@ -4117,7 +4122,7 @@ fn signer_set_rollover() { send_amt, ); submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); + 
signer_test.mine_nakamoto_block(short_timeout, true); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); info!("---- Verifying that the new signers signed the block -----"); @@ -4304,7 +4309,7 @@ fn duplicate_signers() { info!("------------------------- Try mining one block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Read all `BlockResponse::Accepted` messages -------------------------"); @@ -6816,7 +6821,7 @@ fn continue_after_tenure_extend() { signer_test.boot_to_epoch_3(); info!("------------------------- Mine Normal Tenure -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Extend Tenure -------------------------"); signer_test @@ -7462,7 +7467,7 @@ fn block_validation_response_timeout() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Test Block Validation Stalled -------------------------"); TEST_VALIDATE_STALL.lock().unwrap().replace(true); let validation_stall_start = Instant::now(); @@ -7580,7 +7585,7 @@ fn block_validation_response_timeout() { ); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); let info_before = info_after; - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); wait_for(30, || { let info = get_chain_info(&signer_test.running_nodes.conf); From a78f81811e8f789df1651b783f4b321c5c250252 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 10 Dec 2024 17:05:45 -0500 Subject: [PATCH 17/57] CRC: add signer_signature_hash to warn message for dropping old block proposals Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a883acf255..e2a007313d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -356,6 +356,7 @@ impl Signer { { // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. warn!("{self}: Received a block proposal that is more than {} secs old. 
Ignoring...", self.block_proposal_max_age_secs; + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, From 016a63f4ae3bbbdcdeecb66924b034cb86aa78b6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 10 Dec 2024 17:27:22 -0500 Subject: [PATCH 18/57] CRC: replace signer_signature_hash with signer_sighash in warn log Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e2a007313d..df5a1208c3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -356,7 +356,7 @@ impl Signer { { // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; - "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, From daf21f6b9ffbbe952ce082cfd2df68870e065736 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 11 Dec 2024 10:43:18 -0500 Subject: [PATCH 19/57] Remove signers older than the current reward cycle if they have no more blocks to process Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 11faadf871..e6dc4be85e 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -423,23 +423,15 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { let reward_cycle = signer.reward_cycle(); - let next_reward_cycle = reward_cycle.wrapping_add(1); - let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { - std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale - std::cmp::Ordering::Equal => { - // We are the next reward cycle, so check if we were registered and have any pending blocks to process - match signer { - ConfiguredSigner::RegisteredSigner(signer) => { - !signer.has_unprocessed_blocks() - } - _ => true, - } + if current_reward_cycle >= reward_cycle { + // We are either the current or a future reward cycle, so we are not stale. 
+ continue; + } + if let ConfiguredSigner::RegisteredSigner(signer) = signer { + if !signer.has_unprocessed_blocks() { + debug!("{signer}: Signer's tenure has completed."); + to_delete.push(*idx); } - std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale - }; - if stale { - debug!("{signer}: Signer's tenure has completed."); - to_delete.push(*idx); } } for idx in to_delete { From d3ff2c1941fbedc67662477ac54d30318f307186 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Dec 2024 16:13:52 -0500 Subject: [PATCH 20/57] fix: New warnings in Rust 1.83 --- clarity/src/vm/contexts.rs | 12 ++++++------ clarity/src/vm/database/key_value_wrapper.rs | 4 ++-- stackslib/src/net/tests/mod.rs | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index a559ad59fd..eedc2857fa 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -499,7 +499,7 @@ impl EventBatch { impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { #[cfg(any(test, feature = "testing"))] - pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, '_> { + pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new( false, @@ -513,7 +513,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { } #[cfg(any(test, feature = "testing"))] - pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, '_> { + pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, 'a> { database.begin(); let epoch = database.get_clarity_epoch_version().unwrap(); let version = ClarityVersion::default_for_epoch(epoch); @@ -540,7 +540,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { mut database: ClarityDatabase<'a>, epoch: StacksEpochId, use_mainnet: bool, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { use crate::vm::tests::test_only_mainnet_to_chain_id; let cost_track = LimitedCostTracker::new_max_limit(&mut database, epoch, use_mainnet) .expect("FAIL: problem instantiating cost tracking"); @@ -557,7 +557,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { chain_id: u32, database: ClarityDatabase<'a>, epoch_id: StacksEpochId, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new( mainnet, @@ -576,7 +576,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { database: ClarityDatabase<'a>, cost_tracker: LimitedCostTracker, epoch_id: StacksEpochId, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new(mainnet, chain_id, database, cost_tracker, epoch_id), call_stack: CallStack::new(), @@ -1546,7 +1546,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { database: ClarityDatabase<'a>, cost_track: LimitedCostTracker, epoch_id: StacksEpochId, - ) -> GlobalContext { + ) -> GlobalContext<'a, 'hooks> { GlobalContext { database, cost_track, diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index c444aa553e..c0b75be83f 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -205,7 +205,7 @@ where } impl<'a> RollbackWrapper<'a> { - pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper { + pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper<'a> { RollbackWrapper { store, lookup_map: HashMap::new(), @@ -218,7 +218,7 
@@ impl<'a> RollbackWrapper<'a> { pub fn from_persisted_log( store: &'a mut dyn ClarityBackingStore, log: RollbackWrapperPersistedLog, - ) -> RollbackWrapper { + ) -> RollbackWrapper<'a> { RollbackWrapper { store, lookup_map: log.lookup_map, diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 6729dbc4a8..53d6ec9fa1 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -355,7 +355,7 @@ impl NakamotoBootPlan { fn boot_nakamoto_peers<'a>( mut self, observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + ) -> (TestPeer<'a>, Vec>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); @@ -666,7 +666,7 @@ impl NakamotoBootPlan { self, boot_plan: Vec, observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + ) -> (TestPeer<'a>, Vec>) { let test_signers = self.test_signers.clone(); let pox_constants = self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); From 236716aa5ce16e0bd3e3659b28d4938cd99340ae Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 11 Dec 2024 17:00:47 -0500 Subject: [PATCH 21/57] Fix equality check for current_reward_cycle in cleanup_stale_signers Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index e6dc4be85e..6d226a2592 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -423,7 +423,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { let reward_cycle = signer.reward_cycle(); - if current_reward_cycle >= reward_cycle { + if reward_cycle >= current_reward_cycle { // We are either the current or a future reward cycle, so we are not stale. 
continue; } From ce528cd755b17a38ba2cda125299832714002eed Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Dec 2024 17:30:57 -0500 Subject: [PATCH 22/57] chore: fix typo in log --- stackslib/src/burnchains/bitcoin/indexer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 83c8903d35..3361301675 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -924,7 +924,7 @@ impl BitcoinIndexer { return Ok(()); } warn!( - "Header at height {} is not wihtin 2 hours of now (is at {})", + "Header at height {} is not within 2 hours of now (is at {})", highest_header_height, highest_header.block_header.header.time ); self.drop_headers(highest_header_height.saturating_sub(1))?; From a81f7199bb7db3f2d14c50a7018f4db4dee4d2ea Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 12 Dec 2024 09:26:08 -0500 Subject: [PATCH 23/57] Update changelog Signed-off-by: Jacinta Ferrant --- stacks-signer/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 2f1187de51..73d374f1be 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## Added ## Changed +- Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database ## [3.1.0.0.1.0] From f96a33f95294ea96cd4969c19e80c934f6628004 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 12 Dec 2024 10:37:03 -0500 Subject: [PATCH 24/57] Implement process_event function for SignerEvent Signed-off-by: Jacinta Ferrant --- libsigner/src/events.rs | 192 ++++++++++++++-------------------------- 1 file changed, 68 insertions(+), 124 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 90ef022636..52a77e2bb8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -305,21 +305,18 @@ impl EventReceiver for SignerEventReceiver { &request.method(), ))); } + debug!("Processing {} event", request.url()); if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_event::(request) } else if request.url() == "/proposal_response" { - process_proposal_response(request) + process_event::(request) } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) + process_event::(request) } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); - return Err(EventError::Terminated); + Err(EventError::Terminated) } else if request.url() == "/new_block" { - process_new_block(request) + process_event::(request) } else { let url = request.url().to_string(); debug!( @@ -391,12 +388,13 @@ fn ack_dispatcher(request: HttpRequest) { // TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] -/// Process a stackerdb event from the node -fn process_stackerdb_event( - local_addr: Option, - mut request: HttpRequest, -) -> Result, EventError> { +fn process_event(mut request: HttpRequest) -> Result, EventError> +where + T: SignerEventTrait, + E: serde::de::DeserializeOwned + TryInto, Error = EventError>, +{ let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) 
{ error!("Failed to read body: {:?}", &e); ack_dispatcher(request); @@ -405,27 +403,12 @@ fn process_stackerdb_event( &e ))); } - - debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); - let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) + // Regardless of whether we successfully deserialize, we should ack the dispatcher so they don't keep resending it + ack_dispatcher(request); + let json_event: E = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e); - } - Ok(x) => x, - }; - - ack_dispatcher(request); + let signer_event: SignerEvent = json_event.try_into()?; Ok(signer_event) } @@ -472,108 +455,69 @@ impl TryFrom for SignerEvent { } } -/// Process a proposal response from the node -fn process_proposal_response( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got proposal_response event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - ack_dispatcher(request); - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); + fn try_from(block_validate_response: BlockValidateResponse) -> Result { + Ok(SignerEvent::BlockValidationResponse( + block_validate_response, + )) } +} - let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - - ack_dispatcher(request); - Ok(SignerEvent::BlockValidationResponse(event)) +#[derive(Debug, Deserialize)] +struct BurnBlockEvent { + burn_block_hash: String, + burn_block_height: u64, + reward_recipients: Vec, + reward_slot_holders: Vec, + burn_amount: u64, } -/// Process a new burn block event from the node -fn process_new_burn_block_event( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - ack_dispatcher(request); - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, + fn try_from(burn_block_event: BurnBlockEvent) -> Result { + let burn_header_hash = burn_block_event + .burn_block_hash + .get(2..) 
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + + Ok(SignerEvent::NewBurnBlock { + burn_height: burn_block_event.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }) } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let burn_header_hash = temp - .burn_block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBurnBlock { - burn_height: temp.burn_block_height, - received_time: SystemTime::now(), - burn_header_hash, - }; - ack_dispatcher(request); - Ok(event) } -/// Process a new burn block event from the node -fn process_new_block( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got new_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +#[derive(Debug, Deserialize)] +struct BlockEvent { + block_hash: String, + block_height: u64, +} - ack_dispatcher(request); - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBlockEvent { - block_hash: String, - block_height: u64, - } +impl TryFrom for SignerEvent { + type Error = EventError; - let temp: TempBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let block_hash: Sha512Trunc256Sum = temp - .block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - Sha512Trunc256Sum::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBlock { - block_hash, - block_height: temp.block_height, - }; - ack_dispatcher(request); - Ok(event) + fn try_from(block_event: BlockEvent) -> Result { + let block_hash: Sha512Trunc256Sum = block_event + .block_hash + .get(2..) 
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + Ok(SignerEvent::NewBlock { + block_hash, + block_height: block_event.block_height, + }) + } } pub fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { From c5be7557afe88ba860b1826258f1f56a3ae41d44 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 9 Dec 2024 17:18:21 -0500 Subject: [PATCH 25/57] feat: Allow passing `--config ` to `stacks-inspect try_mine` (and other commands) --- Cargo.lock | 29 +++-- Cargo.toml | 3 +- stacks-signer/Cargo.toml | 2 +- stackslib/Cargo.toml | 1 + stackslib/src/cli.rs | 163 ++++++++++++++++++++++++- stackslib/src/main.rs | 209 ++++++++++----------------------- testnet/stacks-node/Cargo.toml | 2 +- 7 files changed, 244 insertions(+), 165 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a3769b6a8..87ceaf2808 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -637,7 +637,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff 1.1.0", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "mutants", @@ -1411,13 +1411,19 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + [[package]] name = "hashlink" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -1683,12 +1689,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.2", ] [[package]] @@ -1841,7 +1847,7 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "libstackerdb", @@ -2621,7 +2627,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -3270,7 +3276,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "nix", @@ -3305,7 +3311,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown", + "hashbrown 0.14.3", "http-types", "lazy_static", "libc", @@ -3346,7 +3352,7 @@ dependencies = [ "backoff", "clap 4.5.0", "clarity", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libsigner", "libstackerdb", @@ -3385,7 +3391,7 @@ dependencies = [ "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "libc", @@ -3421,6 +3427,7 @@ dependencies = [ "stx-genesis", "tikv-jemallocator", "time 0.2.27", + "toml", "url", "winapi 0.3.9", ] diff --git a/Cargo.toml b/Cargo.toml index c00c223c47..194e946ef4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,8 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", 
"bundled", "trace"] } -thiserror = { version = "1.0.65" } +thiserror = "1.0.65" +toml = "0.5.6" # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 3beba641f2..8c3d8a5a35 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -40,7 +40,7 @@ stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } -toml = "0.5.6" +toml = { workspace = true } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } rand = { workspace = true } diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index edd58c6161..cbee2bfc98 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -58,6 +58,7 @@ libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" hashbrown = { workspace = true } rusqlite = { workspace = true } +toml = { workspace = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index f703f8a367..16699ff81a 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -16,8 +16,9 @@ //! Subcommands used by `stacks-inspect` binary +use std::any::type_name; use std::cell::LazyCell; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -28,9 +29,12 @@ use regex::Regex; use rusqlite::{Connection, OpenFlags}; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; use crate::burnchains::db::BurnchainDB; -use crate::burnchains::PoxConstants; +use crate::burnchains::{Burnchain, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleContext, }; @@ -43,6 +47,8 @@ use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; use crate::core::*; +use crate::cost_estimates::metrics::UnitMetric; +use crate::cost_estimates::UnitEstimator; use crate::util_lib::db::IndexDBTx; /// Can be used with CLI commands to support non-mainnet chainstate @@ -58,6 +64,10 @@ pub struct StacksChainConfig { } impl StacksChainConfig { + pub fn type_name() -> &'static str { + type_name::() + } + pub fn default_mainnet() -> Self { Self { chain_id: CHAIN_ID_MAINNET, @@ -107,6 +117,18 @@ impl StacksChainConfig { epochs, } } + + pub fn from_file(path: &str) -> Self { + let text = fs::read_to_string(&path) + .unwrap_or_else(|e| panic!("Failed to read file '{path}': {e}")); + let config: Self = toml::from_str(&text).unwrap_or_else(|e| { + panic!( + "Failed to parse file '{path}' as `{t}`: {e}", + t = Self::type_name() + ) + }); + config + } } const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell = @@ -369,6 +391,143 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConf } } +/// Replay mock mined blocks from JSON files +/// Terminates on error using `process::exit()` +/// +/// Arguments: +/// - `argv`: Args in CLI format: ` [args...]` +/// - `conf`: Optional config for running on non-mainnet chainstate +pub fn command_try_mine(argv: &[String], conf: 
Option<&StacksChainConfig>) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage: {n} [min-fee [max-time]]"); + eprintln!(""); + eprintln!("Given a , try to ''mine'' an anchored block. This invokes the miner block"); + eprintln!("assembly, but does not attempt to broadcast a block commit. This is useful for determining"); + eprintln!( + "what transactions a given chain state would include in an anchor block, or otherwise" + ); + eprintln!("simulating a miner."); + process::exit(1); + }; + + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; + let conf = conf.unwrap_or(&default_conf); + + let start = get_epoch_time_ms(); + let db_path = &argv[2]; + let burnchain_path = format!("{db_path}/burnchain"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let chain_state_path = format!("{db_path}/chainstate/"); + + let mut min_fee = u64::MAX; + let mut max_time = u64::MAX; + + if argv.len() < 2 { + print_help_and_exit(); + } + if argv.len() >= 3 { + min_fee = argv[3].parse().expect("Could not parse min_fee"); + } + if argv.len() >= 4 { + max_time = argv[4].parse().expect("Could not parse max_time"); + } + + let sort_db = SortitionDB::open(&sort_db_path, false, conf.pox_constants.clone()) + .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); + let (chain_state, _) = StacksChainState::open(true, conf.chain_id, &chain_state_path, None) + .expect("Failed to open stacks chain state"); + let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + .expect("Failed to get sortition chain tip"); + + let estimator = Box::new(UnitEstimator); + let metric = Box::new(UnitMetric); + + let mut mempool_db = MemPoolDB::open(true, conf.chain_id, &chain_state_path, estimator, metric) + .expect("Failed to open mempool db"); + + let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); + let parent_header = StacksChainState::get_anchored_block_header_info( + chain_state.db(), + &header_tip.consensus_hash, + &header_tip.anchored_header.block_hash(), + ) + .expect("Failed to load chain tip header info") + .expect("Failed to load chain tip header info"); + + let sk = StacksPrivateKey::new(); + let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); + tx_auth.set_origin_nonce(0); + + let mut coinbase_tx = StacksTransaction::new( + TransactionVersion::Mainnet, + tx_auth, + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), + ); + + coinbase_tx.chain_id = conf.chain_id; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); + tx_signer.sign_origin(&sk).unwrap(); + let coinbase_tx = tx_signer.get_tx().unwrap(); + + let mut settings = BlockBuilderSettings::limited(); + settings.max_miner_time_ms = max_time; + + let result = StacksBlockBuilder::build_anchored_block( + &chain_state, + &sort_db.index_handle(&chain_tip.sortition_id), + &mut mempool_db, + &parent_header, + chain_tip.total_burn, + VRFProof::empty(), + Hash160([0; 20]), + &coinbase_tx, + settings, + None, + &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), + ); + + let stop = get_epoch_time_ms(); + + println!( + "{} mined block @ height = {} off of {} ({}/{}) in {}ms. 
Min-fee: {}, Max-time: {}", + if result.is_ok() { + "Successfully" + } else { + "Failed to" + }, + parent_header.stacks_block_height + 1, + StacksBlockHeader::make_index_block_hash( + &parent_header.consensus_hash, + &parent_header.anchored_header.block_hash() + ), + &parent_header.consensus_hash, + &parent_header.anchored_header.block_hash(), + stop.saturating_sub(start), + min_fee, + max_time + ); + + if let Ok((block, execution_cost, size)) = result { + let mut total_fees = 0; + for tx in block.txs.iter() { + total_fees += tx.get_tx_fee(); + } + println!( + "Block {}: {} uSTX, {} bytes, cost {:?}", + block.block_hash(), + total_fees, + size, + &execution_cost + ); + } + + process::exit(0); +} + /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate fn replay_staging_block( db_path: &str, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 7f324c52c8..4cefbf383e 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -25,6 +25,7 @@ extern crate stacks_common; #[macro_use(slog_debug, slog_info, slog_warn)] extern crate slog; +use blockstack_lib::cli::StacksChainConfig; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -301,6 +302,59 @@ fn check_shadow_network(network: &str) { } } +/// Options common to many `stacks-inspect` subcommands +/// Returned by `process_common_opts()` +#[derive(Default)] +struct CommonOpts { + config: Option, +} + +/// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` +/// +/// Args: +/// - `argv`: Full CLI args `Vec` +/// - `start_at`: Position in args vec where to look for common options. +/// For example, if `start_at` is `1`, then look for these options **before** the subcommand: +/// ```console +/// stacks-inspect --config testnet.toml replay-block path/to/chainstate +/// ``` +fn drain_common_opts<'a>(argv: &mut Vec, start_at: usize) -> CommonOpts { + let mut i = start_at; + let mut opts = CommonOpts::default(); + while let Some(arg) = argv.get(i) { + let (prefix, opt) = arg.split_at(2); + if prefix != "--" { + break; + } + i += 1; + match opt { + "config" => { + let path = &argv[i]; + i += 1; + let config = StacksChainConfig::from_file(&path); + opts.config.replace(config); + } + "network" => { + let network = &argv[i]; + i += 1; + let config = match network.to_lowercase().as_str() { + "testnet" => cli::StacksChainConfig::default_testnet(), + "mainnet" => cli::StacksChainConfig::default_mainnet(), + other => { + eprintln!("Unknown network choice `{other}`"); + process::exit(1); + } + }; + opts.config.replace(config); + } + _ => panic!("Unrecognized option: {opt}"), + } + } + // Remove options processed + argv.drain(start_at..i); + opts +} + #[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec = env::args().collect(); @@ -309,6 +363,8 @@ fn main() { process::exit(1); } + let common_opts = drain_common_opts(&mut argv, 1); + if argv[1] == "--version" { println!( "{}", @@ -796,128 +852,7 @@ check if the associated microblocks can be downloaded } if argv[1] == "try-mine" { - if argv.len() < 3 { - eprintln!( - "Usage: {} try-mine [min-fee [max-time]] - -Given a , try to ''mine'' an anchored block. This invokes the miner block -assembly, but does not attempt to broadcast a block commit. This is useful for determining -what transactions a given chain state would include in an anchor block, or otherwise -simulating a miner. 
-", - argv[0] - ); - process::exit(1); - } - - let start = get_epoch_time_ms(); - let burnchain_path = format!("{}/mainnet/burnchain", &argv[2]); - let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); - let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); - - let mut min_fee = u64::MAX; - let mut max_time = u64::MAX; - - if argv.len() >= 4 { - min_fee = argv[3].parse().expect("Could not parse min_fee"); - } - if argv.len() >= 5 { - max_time = argv[4].parse().expect("Could not parse max_time"); - } - - let sort_db = SortitionDB::open(&sort_db_path, false, PoxConstants::mainnet_default()) - .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); - let chain_id = CHAIN_ID_MAINNET; - let (chain_state, _) = StacksChainState::open(true, chain_id, &chain_state_path, None) - .expect("Failed to open stacks chain state"); - let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) - .expect("Failed to get sortition chain tip"); - - let estimator = Box::new(UnitEstimator); - let metric = Box::new(UnitMetric); - - let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); - - let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) - .unwrap() - .unwrap(); - let parent_header = StacksChainState::get_anchored_block_header_info( - chain_state.db(), - &header_tip.consensus_hash, - &header_tip.anchored_header.block_hash(), - ) - .expect("Failed to load chain tip header info") - .expect("Failed to load chain tip header info"); - - let sk = StacksPrivateKey::new(); - let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); - tx_auth.set_origin_nonce(0); - - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Mainnet, - tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), - ); - - coinbase_tx.chain_id = chain_id; - coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); - tx_signer.sign_origin(&sk).unwrap(); - let coinbase_tx = tx_signer.get_tx().unwrap(); - - let mut settings = BlockBuilderSettings::limited(); - settings.max_miner_time_ms = max_time; - - let result = StacksBlockBuilder::build_anchored_block( - &chain_state, - &sort_db.index_handle(&chain_tip.sortition_id), - &mut mempool_db, - &parent_header, - chain_tip.total_burn, - VRFProof::empty(), - Hash160([0; 20]), - &coinbase_tx, - settings, - None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), - ); - - let stop = get_epoch_time_ms(); - - println!( - "{} mined block @ height = {} off of {} ({}/{}) in {}ms. Min-fee: {}, Max-time: {}", - if result.is_ok() { - "Successfully" - } else { - "Failed to" - }, - parent_header.stacks_block_height + 1, - StacksBlockHeader::make_index_block_hash( - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash() - ), - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash(), - stop.saturating_sub(start), - min_fee, - max_time - ); - - if let Ok((block, execution_cost, size)) = result { - let mut total_fees = 0; - for tx in block.txs.iter() { - total_fees += tx.get_tx_fee(); - } - println!( - "Block {}: {} uSTX, {} bytes, cost {:?}", - block.block_hash(), - total_fees, - size, - &execution_cost - ); - } - + cli::command_try_mine(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -1719,41 +1654,17 @@ simulating a miner. 
} if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..], None); + cli::command_replay_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-naka-block" { - let chain_config = - if let Some(network_flag_ix) = argv.iter().position(|arg| arg == "--network") { - let Some(network_choice) = argv.get(network_flag_ix + 1) else { - eprintln!("Must supply network choice after `--network` option"); - process::exit(1); - }; - - let network_config = match network_choice.to_lowercase().as_str() { - "testnet" => cli::StacksChainConfig::default_testnet(), - "mainnet" => cli::StacksChainConfig::default_mainnet(), - other => { - eprintln!("Unknown network choice `{other}`"); - process::exit(1); - } - }; - - argv.remove(network_flag_ix + 1); - argv.remove(network_flag_ix); - - Some(network_config) - } else { - None - }; - - cli::command_replay_block_nakamoto(&argv[1..], chain_config.as_ref()); + cli::command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..], None); + cli::command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); process::exit(0); } diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 0c68d22ee7..33c5f09306 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -14,7 +14,7 @@ serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} -toml = "0.5.6" +toml = { workspace = true } base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" From f7b12957cc9c22ee5bba1df0ef3df620b86c97b9 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 10 Dec 2024 11:32:10 -0500 Subject: [PATCH 26/57] fix: Arg indexing in `command_try_mine()` --- stackslib/src/cli.rs | 119 ++++++++++++++++++++++++++++++++++++------ stackslib/src/main.rs | 56 +------------------- 2 files changed, 103 insertions(+), 72 deletions(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 16699ff81a..576c3b90c4 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -53,7 +53,7 @@ use crate::util_lib::db::IndexDBTx; /// Can be used with CLI commands to support non-mainnet chainstate /// Allows integration testing of these functions -#[derive(Deserialize)] +#[derive(Debug, Deserialize, PartialEq)] pub struct StacksChainConfig { pub chain_id: u32, pub first_block_height: u64, @@ -131,9 +131,63 @@ impl StacksChainConfig { } } +// Can't be initialized as `const`, so this is the next best option const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell = LazyCell::new(StacksChainConfig::default_mainnet); +/// Options common to many `stacks-inspect` subcommands +/// Returned by `process_common_opts()` +#[derive(Debug, Default, PartialEq)] +pub struct CommonOpts { + pub config: Option, +} + +/// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` +/// +/// Args: +/// - `argv`: Full CLI args `Vec` +/// - `start_at`: Position in args vec where to look for common options. 
+/// For example, if `start_at` is `1`, then look for these options **before** the subcommand: +/// ```console +/// stacks-inspect --config testnet.toml replay-block path/to/chainstate +/// ``` +pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts { + let mut i = start_at; + let mut opts = CommonOpts::default(); + while let Some(arg) = argv.get(i) { + let (prefix, opt) = arg.split_at(2); + if prefix != "--" { + break; + } + i += 1; + match opt { + "config" => { + let path = &argv[i]; + i += 1; + let config = StacksChainConfig::from_file(&path); + opts.config.replace(config); + } + "network" => { + let network = &argv[i]; + i += 1; + let config = match network.to_lowercase().as_str() { + "testnet" => StacksChainConfig::default_testnet(), + "mainnet" => StacksChainConfig::default_mainnet(), + other => { + eprintln!("Unknown network choice `{other}`"); + process::exit(1); + } + }; + opts.config.replace(config); + } + _ => panic!("Unrecognized option: {opt}"), + } + } + // Remove options processed + argv.drain(start_at..i); + opts +} + /// Replay blocks from chainstate database /// Terminates on error using `process::exit()` /// @@ -398,7 +452,7 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConf /// - `argv`: Args in CLI format: ` [args...]` /// - `conf`: Optional config for running on non-mainnet chainstate pub fn command_try_mine(argv: &[String], conf: Option<&StacksChainConfig>) { - let print_help_and_exit = || -> ! { + let print_help_and_exit = || { let n = &argv[0]; eprintln!("Usage: {n} [min-fee [max-time]]"); eprintln!(""); @@ -411,28 +465,26 @@ pub fn command_try_mine(argv: &[String], conf: Option<&StacksChainConfig>) { process::exit(1); }; + // Parse subcommand-specific args + let db_path = argv.get(1).unwrap_or_else(print_help_and_exit); + let min_fee = argv + .get(2) + .map(|arg| arg.parse().expect("Could not parse min_fee")) + .unwrap_or(u64::MAX); + let max_time = argv + .get(3) + .map(|arg| arg.parse().expect("Could not parse max_time")) + .unwrap_or(u64::MAX); + + let start = get_epoch_time_ms(); + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; let conf = conf.unwrap_or(&default_conf); - let start = get_epoch_time_ms(); - let db_path = &argv[2]; let burnchain_path = format!("{db_path}/burnchain"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let chain_state_path = format!("{db_path}/chainstate/"); - let mut min_fee = u64::MAX; - let mut max_time = u64::MAX; - - if argv.len() < 2 { - print_help_and_exit(); - } - if argv.len() >= 3 { - min_fee = argv[3].parse().expect("Could not parse min_fee"); - } - if argv.len() >= 4 { - max_time = argv[4].parse().expect("Could not parse max_time"); - } - let sort_db = SortitionDB::open(&sort_db_path, false, conf.pox_constants.clone()) .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); let (chain_state, _) = StacksChainState::open(true, conf.chain_id, &chain_state_path, None) @@ -1099,3 +1151,36 @@ fn replay_block_nakamoto( Ok(()) } + +#[cfg(test)] +pub mod test { + use super::{drain_common_opts, CommonOpts}; + + fn parse_cli_command(s: &str) -> Vec { + s.split(' ').map(String::from).collect() + } + + #[test] + pub fn test_drain_common_opts() { + // Should find/remove no options + let mut argv = parse_cli_command( + "stacks-inspect try-mine --config my_config.toml /tmp/chainstate/mainnet", + ); + let argv_init = argv.clone(); + let opts = drain_common_opts(&mut argv, 0); + let opts = drain_common_opts(&mut argv, 1); + + assert_eq!(argv, argv_init); + 
assert_eq!(opts, CommonOpts::default()); + + // Should find config opts and remove from vec + let mut argv = parse_cli_command( + "stacks-inspect --network testnet --network mainnet try-mine /tmp/chainstate/mainnet", + ); + let opts = drain_common_opts(&mut argv, 1); + let argv_expected = parse_cli_command("stacks-inspect try-mine /tmp/chainstate/mainnet"); + + assert_eq!(argv, argv_expected); + assert!(opts.config.is_some()); + } +} diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 4cefbf383e..76a9d63c21 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -25,7 +25,6 @@ extern crate stacks_common; #[macro_use(slog_debug, slog_info, slog_warn)] extern crate slog; -use blockstack_lib::cli::StacksChainConfig; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -302,59 +301,6 @@ fn check_shadow_network(network: &str) { } } -/// Options common to many `stacks-inspect` subcommands -/// Returned by `process_common_opts()` -#[derive(Default)] -struct CommonOpts { - config: Option, -} - -/// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` -/// -/// Args: -/// - `argv`: Full CLI args `Vec` -/// - `start_at`: Position in args vec where to look for common options. -/// For example, if `start_at` is `1`, then look for these options **before** the subcommand: -/// ```console -/// stacks-inspect --config testnet.toml replay-block path/to/chainstate -/// ``` -fn drain_common_opts<'a>(argv: &mut Vec, start_at: usize) -> CommonOpts { - let mut i = start_at; - let mut opts = CommonOpts::default(); - while let Some(arg) = argv.get(i) { - let (prefix, opt) = arg.split_at(2); - if prefix != "--" { - break; - } - i += 1; - match opt { - "config" => { - let path = &argv[i]; - i += 1; - let config = StacksChainConfig::from_file(&path); - opts.config.replace(config); - } - "network" => { - let network = &argv[i]; - i += 1; - let config = match network.to_lowercase().as_str() { - "testnet" => cli::StacksChainConfig::default_testnet(), - "mainnet" => cli::StacksChainConfig::default_mainnet(), - other => { - eprintln!("Unknown network choice `{other}`"); - process::exit(1); - } - }; - opts.config.replace(config); - } - _ => panic!("Unrecognized option: {opt}"), - } - } - // Remove options processed - argv.drain(start_at..i); - opts -} - #[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec = env::args().collect(); @@ -363,7 +309,7 @@ fn main() { process::exit(1); } - let common_opts = drain_common_opts(&mut argv, 1); + let common_opts = cli::drain_common_opts(&mut argv, 1); if argv[1] == "--version" { println!( From 5fdfd5d3a6f657437c82d86d1754529551458574 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 10 Dec 2024 15:29:06 -0500 Subject: [PATCH 27/57] refactor: Move `stacks-node` config file parsing into `stackslib` --- stackslib/src/cli.rs | 2 +- .../src/config}/chain_data.rs | 35 +++++----- .../config.rs => stackslib/src/config/mod.rs | 66 ++++++++++--------- stackslib/src/lib.rs | 1 + .../burnchains/bitcoin_regtest_controller.rs | 14 ++-- testnet/stacks-node/src/event_dispatcher.rs | 3 +- testnet/stacks-node/src/globals.rs | 2 +- testnet/stacks-node/src/main.rs | 6 +- testnet/stacks-node/src/neon_node.rs | 4 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 2 +- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 2 +- testnet/stacks-node/src/tests/epoch_22.rs | 2 +- testnet/stacks-node/src/tests/epoch_23.rs | 2 +- 
testnet/stacks-node/src/tests/epoch_24.rs | 2 +- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- testnet/stacks-node/src/tests/integrations.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 2 +- .../src/tests/neon_integrations.rs | 4 +- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- testnet/stacks-node/src/tests/stackerdb.rs | 2 +- 22 files changed, 81 insertions(+), 80 deletions(-) rename {testnet/stacks-node/src => stackslib/src/config}/chain_data.rs (97%) rename testnet/stacks-node/src/config.rs => stackslib/src/config/mod.rs (98%) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 576c3b90c4..99a6de43c5 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -1154,7 +1154,7 @@ fn replay_block_nakamoto( #[cfg(test)] pub mod test { - use super::{drain_common_opts, CommonOpts}; + use super::*; fn parse_cli_command(s: &str) -> Vec { s.split(' ').map(String::from).collect() diff --git a/testnet/stacks-node/src/chain_data.rs b/stackslib/src/config/chain_data.rs similarity index 97% rename from testnet/stacks-node/src/chain_data.rs rename to stackslib/src/config/chain_data.rs index cc60f964a3..e4c3899511 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/stackslib/src/config/chain_data.rs @@ -17,21 +17,22 @@ use std::collections::HashMap; use std::process::{Command, Stdio}; -use stacks::burnchains::bitcoin::address::BitcoinAddress; -use stacks::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; -use stacks::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; -use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use stacks::chainstate::burn::distribution::BurnSamplePoint; -use stacks::chainstate::burn::operations::leader_block_commit::{ - MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, -}; -use stacks::chainstate::burn::operations::LeaderBlockCommitOp; -use stacks::chainstate::stacks::address::PoxAddress; -use stacks::core::MINING_COMMITMENT_WINDOW; -use stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; +use crate::burnchains::bitcoin::address::BitcoinAddress; +use crate::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; +use crate::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; +use crate::chainstate::burn::distribution::BurnSamplePoint; +use crate::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, +}; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::stacks::address::PoxAddress; +use crate::core::MINING_COMMITMENT_WINDOW; +use crate::util_lib::db::Error as DBError; + pub struct MinerStats { pub unconfirmed_commits_helper: String, } @@ -526,11 +527,6 @@ pub mod tests { use std::fs; use std::io::Write; - use stacks::burnchains::{BurnchainSigner, Txid}; - use stacks::chainstate::burn::distribution::BurnSamplePoint; - use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; - use stacks::chainstate::burn::operations::LeaderBlockCommitOp; - use stacks::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPublicKey, VRFSeed, }; @@ -538,6 +534,11 @@ pub mod tests { use stacks_common::util::uint::{BitArray, 
Uint256}; use super::MinerStats; + use crate::burnchains::{BurnchainSigner, Txid}; + use crate::chainstate::burn::distribution::BurnSamplePoint; + use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; + use crate::chainstate::burn::operations::LeaderBlockCommitOp; + use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; #[test] fn test_burn_dist_to_prob_dist() { diff --git a/testnet/stacks-node/src/config.rs b/stackslib/src/config/mod.rs similarity index 98% rename from testnet/stacks-node/src/config.rs rename to stackslib/src/config/mod.rs index 4ad793a4c3..f4934ecdaf 100644 --- a/testnet/stacks-node/src/config.rs +++ b/stackslib/src/config/mod.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +pub mod chain_data; + use std::collections::{HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; @@ -27,35 +29,6 @@ use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdenti use lazy_static::lazy_static; use rand::RngCore; use serde::Deserialize; -use stacks::burnchains::affirmation::AffirmationMap; -use stacks::burnchains::bitcoin::BitcoinNetworkType; -use stacks::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::MINERS_NAME; -use stacks::chainstate::stacks::index::marf::MARFOpenOpts; -use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; -use stacks::chainstate::stacks::MAX_BLOCK_LEN; -use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; -use stacks::core::{ - MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, - BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, - BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, -}; -use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; -use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; -use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; -use stacks::net::atlas::AtlasConfig; -use stacks::net::connection::ConnectionOptions; -use stacks::net::{Neighbor, NeighborAddress, NeighborKey}; -use stacks::types::chainstate::BurnchainHeaderHash; -use stacks::types::EpochList; -use stacks::util::hash::to_hex; -use stacks::util_lib::boot::boot_code_id; -use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -64,7 +37,36 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::chain_data::MinerStats; +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::bitcoin::BitcoinNetworkType; +use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; +use crate::chainstate::nakamoto::signer_set::NakamotoSigners; +use crate::chainstate::stacks::boot::MINERS_NAME; +use 
crate::chainstate::stacks::index::marf::MARFOpenOpts; +use crate::chainstate::stacks::index::storage::TrieHashCalculationMode; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; +use crate::chainstate::stacks::MAX_BLOCK_LEN; +use crate::config::chain_data::MinerStats; +use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; +use crate::core::{ + MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, + BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, + BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, + PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, +}; +use crate::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; +use crate::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; +use crate::cost_estimates::fee_scalar::ScalarFeeRateEstimator; +use crate::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; +use crate::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; +use crate::net::atlas::AtlasConfig; +use crate::net::connection::ConnectionOptions; +use crate::net::{Neighbor, NeighborAddress, NeighborKey}; +use crate::types::chainstate::BurnchainHeaderHash; +use crate::types::EpochList; +use crate::util::hash::to_hex; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; pub const DEFAULT_SATS_PER_VB: u64 = 50; pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380; @@ -636,8 +638,8 @@ impl Config { BitcoinNetworkType::Mainnet => { Err("Cannot configure epochs in mainnet mode".to_string()) } - BitcoinNetworkType::Testnet => Ok(stacks::core::STACKS_EPOCHS_TESTNET.to_vec()), - BitcoinNetworkType::Regtest => Ok(stacks::core::STACKS_EPOCHS_REGTEST.to_vec()), + BitcoinNetworkType::Testnet => Ok(STACKS_EPOCHS_TESTNET.to_vec()), + BitcoinNetworkType::Regtest => Ok(STACKS_EPOCHS_REGTEST.to_vec()), }?; let mut matched_epochs = vec![]; for configured_epoch in conf_epochs.iter() { diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 31f97628a6..190ef8a1f0 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -63,6 +63,7 @@ pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; pub mod cli; +pub mod config; pub mod core; pub mod cost_estimates; pub mod deps; diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 727483886e..f3aaa95ab5 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -48,6 +48,12 @@ use stacks::chainstate::burn::Opcodes; use stacks::chainstate::coordinator::comm::CoordinatorChannels; #[cfg(test)] use stacks::chainstate::stacks::address::PoxAddress; +use stacks::config::BurnchainConfig; +#[cfg(test)] +use stacks::config::{ + OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, +}; use stacks::core::{EpochList, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; @@ -74,12 +80,6 @@ use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use 
crate::config::BurnchainConfig; -#[cfg(test)] -use crate::config::{ - OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, - OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, -}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2806,13 +2806,13 @@ mod tests { use std::io::Write; use stacks::burnchains::BurnchainSigner; + use stacks::config::DEFAULT_SATS_PER_VB; use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::*; - use crate::config::DEFAULT_SATS_PER_VB; #[test] fn test_get_satoshis_per_byte() { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 4d6eec8922..14382a059a 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -49,6 +49,7 @@ use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::mempool::{MemPoolDropReason, MemPoolEventDispatcher, ProposalCallbackReceiver}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ @@ -68,8 +69,6 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; -use super::config::{EventKeyType, EventObserverConfig}; - #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. 
If `None`, then diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index c285c6a168..2a9a601723 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -10,10 +10,10 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::config::MinerConfig; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; -use crate::config::MinerConfig; use crate::neon::Counters; use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 4fa1c5e5a7..7916de9d00 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -16,8 +16,6 @@ use stacks_common::util::hash::hex_bytes; pub mod monitoring; pub mod burnchains; -pub mod chain_data; -pub mod config; pub mod event_dispatcher; pub mod genesis_data; pub mod globals; @@ -41,19 +39,19 @@ use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvi use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::DummyEventDispatcher; use stacks::chainstate::stacks::db::StacksChainState; +use stacks::config::chain_data::MinerStats; +pub use stacks::config::{Config, ConfigFile}; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; pub use self::burnchains::{ BitcoinRegtestController, BurnchainController, BurnchainTip, MocknetController, }; -pub use self::config::{Config, ConfigFile}; pub use self::event_dispatcher::EventDispatcher; pub use self::keychain::Keychain; pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; -use crate::chain_data::MinerStats; use crate::neon_node::{BlockMinerThread, TipCandidate}; use crate::run_loop::boot_nakamoto; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c74ce3d878..2d4dc7fadd 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -187,6 +187,8 @@ use stacks::chainstate::stacks::{ StacksMicroblock, StacksPublicKey, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; +use stacks::config::chain_data::MinerStats; +use stacks::config::NodeConfig; use stacks::core::mempool::MemPoolDB; use stacks::core::{EpochList, FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; @@ -220,8 +222,6 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError}; -use crate::chain_data::MinerStats; -use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::run_loop::neon::RunLoop; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 3e69ac18cc..ef193f56f7 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -7,12 
+7,12 @@ use stacks::chainstate::burn::operations::BlockstackOperationType::{ LeaderBlockCommit, LeaderKeyRegister, }; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::InitialBalance; use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 1964612bd4..e555b6a8aa 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -13,6 +13,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -22,7 +23,6 @@ use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRF use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 55d3ee0b7b..d50cac0117 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::miner::{ }; use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{Config, InitialBalance}; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ @@ -35,7 +36,6 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; use crate::burnchains::bitcoin_regtest_controller::UTXO; -use crate::config::{Config, InitialBalance}; use crate::neon::RunLoopCounter; use crate::operations::BurnchainOpSigner; use crate::stacks_common::address::AddressHashMode; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3bf521d7cb..493fb36fcd 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -8,6 +8,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -17,7 +18,6 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; use super::neon_integrations::get_account; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon_node::StacksNode; use crate::stacks_common::types::Address; use crate::stacks_common::util::hash::bytes_to_hex; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs 
b/testnet/stacks-node/src/tests/epoch_23.rs index 92b6a97b8f..085e5a49cb 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -18,10 +18,10 @@ use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::*; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 5e4ff9852a..8780d08012 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -26,6 +26,7 @@ use stacks::chainstate::stacks::boot::RawRewardSetEntry; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; @@ -35,7 +36,6 @@ use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index bedf8721cb..e840b0fcd3 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -17,11 +17,11 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 79c3394352..7f893835d1 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::{ }; use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; +use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, @@ -40,7 +41,6 @@ use super::{ make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, SK_3, }; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 281feae99a..59a95576bc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ 
b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -57,6 +57,7 @@ use stacks::chainstate::stacks::{ TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, MAX_BLOCK_LEN, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, @@ -95,7 +96,6 @@ use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index e3d592d23c..4680f58302 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -41,6 +41,7 @@ use stacks::chainstate::stacks::{ use stacks::clarity_cli::vm_execute as execute; use stacks::cli::{self, StacksChainConfig}; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -83,7 +84,6 @@ use super::{ SK_2, SK_3, }; use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; -use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; @@ -199,13 +199,13 @@ pub mod test_observer { use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::StacksTransaction; use stacks::codec::StacksMessageCodec; + use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util::hash::hex_bytes; use stacks_common::types::chainstate::StacksBlockId; use warp::Filter; use {tokio, warp}; - use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent}; use crate::Config; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 3c68b27783..07b69e14f9 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -44,6 +44,7 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -61,7 +62,6 @@ use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon::{Counters, 
RunLoopCounter, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 00276b09ee..7dc3ccd633 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -37,6 +37,7 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; @@ -68,7 +69,6 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index c68b477b47..6212dd6fcc 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -18,6 +18,7 @@ use std::{env, thread}; use clarity::vm::types::QualifiedContractIdentifier; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -25,7 +26,6 @@ use {reqwest, serde_json}; use super::bitcoin_regtest::BitcoinCoreController; use crate::burnchains::BurnchainController; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::neon_integrations::{ neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; From 4a0dd95b5d1a7008264a55563d7784d5bd9c2b82 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 10 Dec 2024 17:05:02 -0500 Subject: [PATCH 28/57] feat: `stacks-inspect` commands work with unmodified `stacks-node` config file --- stackslib/src/cli.rs | 245 ++++++++++++++---------------------- stackslib/src/config/mod.rs | 73 +++++------ 2 files changed, 135 insertions(+), 183 deletions(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 99a6de43c5..fa99a9739a 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -46,100 +46,17 @@ use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, St use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; +use crate::config::{Config, ConfigFile, DEFAULT_MAINNET_CONFIG}; use crate::core::*; use crate::cost_estimates::metrics::UnitMetric; use crate::cost_estimates::UnitEstimator; use crate::util_lib::db::IndexDBTx; -/// Can be used with CLI commands to support non-mainnet chainstate -/// Allows integration testing of these functions -#[derive(Debug, Deserialize, PartialEq)] -pub struct StacksChainConfig { - pub chain_id: u32, - pub first_block_height: u64, - pub first_burn_header_hash: BurnchainHeaderHash, - pub first_burn_header_timestamp: u64, - pub pox_constants: PoxConstants, - pub epochs: EpochList, -} - -impl 
StacksChainConfig { - pub fn type_name() -> &'static str { - type_name::() - } - - pub fn default_mainnet() -> Self { - Self { - chain_id: CHAIN_ID_MAINNET, - first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: PoxConstants::mainnet_default(), - epochs: (*STACKS_EPOCHS_MAINNET).clone(), - } - } - - pub fn default_testnet() -> Self { - let mut pox_constants = PoxConstants::regtest_default(); - pox_constants.prepare_length = 100; - pox_constants.reward_cycle_length = 900; - pox_constants.v1_unlock_height = 3; - pox_constants.v2_unlock_height = 5; - pox_constants.pox_3_activation_height = 5; - pox_constants.pox_4_activation_height = 6; - pox_constants.v3_unlock_height = 7; - let mut epochs = EpochList::new(&*STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch10].start_height = 0; - epochs[StacksEpochId::Epoch10].end_height = 0; - epochs[StacksEpochId::Epoch20].start_height = 0; - epochs[StacksEpochId::Epoch20].end_height = 1; - epochs[StacksEpochId::Epoch2_05].start_height = 1; - epochs[StacksEpochId::Epoch2_05].end_height = 2; - epochs[StacksEpochId::Epoch21].start_height = 2; - epochs[StacksEpochId::Epoch21].end_height = 3; - epochs[StacksEpochId::Epoch22].start_height = 3; - epochs[StacksEpochId::Epoch22].end_height = 4; - epochs[StacksEpochId::Epoch23].start_height = 4; - epochs[StacksEpochId::Epoch23].end_height = 5; - epochs[StacksEpochId::Epoch24].start_height = 5; - epochs[StacksEpochId::Epoch24].end_height = 6; - epochs[StacksEpochId::Epoch25].start_height = 6; - epochs[StacksEpochId::Epoch25].end_height = 56_457; - epochs[StacksEpochId::Epoch30].start_height = 56_457; - Self { - chain_id: CHAIN_ID_TESTNET, - first_block_height: 0, - first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants, - epochs, - } - } - - pub fn from_file(path: &str) -> Self { - let text = fs::read_to_string(&path) - .unwrap_or_else(|e| panic!("Failed to read file '{path}': {e}")); - let config: Self = toml::from_str(&text).unwrap_or_else(|e| { - panic!( - "Failed to parse file '{path}' as `{t}`: {e}", - t = Self::type_name() - ) - }); - config - } -} - -// Can't be initialized as `const`, so this is the next best option -const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell = - LazyCell::new(StacksChainConfig::default_mainnet); - /// Options common to many `stacks-inspect` subcommands /// Returned by `process_common_opts()` -#[derive(Debug, Default, PartialEq)] +#[derive(Debug, Default)] pub struct CommonOpts { - pub config: Option, + pub config: Option, } /// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` @@ -164,20 +81,30 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts "config" => { let path = &argv[i]; i += 1; - let config = StacksChainConfig::from_file(&path); + let config_file = ConfigFile::from_path(&path).unwrap_or_else(|e| { + panic!("Failed to read '{path}' as stacks-node config: {e}") + }); + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); opts.config.replace(config); } "network" => { let network = &argv[i]; i += 1; - let config = match network.to_lowercase().as_str() { - "testnet" => 
StacksChainConfig::default_testnet(), - "mainnet" => StacksChainConfig::default_mainnet(), + let config_file = match network.to_lowercase().as_str() { + "helium" => ConfigFile::helium(), + "mainnet" => ConfigFile::mainnet(), + "mocknet" => ConfigFile::mocknet(), + "xenon" => ConfigFile::xenon(), other => { eprintln!("Unknown network choice `{other}`"); process::exit(1); } }; + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); opts.config.replace(config); } _ => panic!("Unrecognized option: {opt}"), @@ -193,7 +120,7 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -271,7 +198,7 @@ pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -288,12 +215,15 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC let chain_state_path = format!("{db_path}/chainstate/"); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); let conn = chainstate.nakamoto_blocks_db(); @@ -357,7 +287,7 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` /// - `conf`: Optional config for running on non-mainnet chainstate -pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_mock_mining(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! 
{ let n = &argv[0]; eprintln!("Usage:"); @@ -451,7 +381,7 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConf /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` /// - `conf`: Optional config for running on non-mainnet chainstate -pub fn command_try_mine(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || { let n = &argv[0]; eprintln!("Usage: {n} [min-fee [max-time]]"); @@ -478,25 +408,36 @@ pub fn command_try_mine(argv: &[String], conf: Option<&StacksChainConfig>) { let start = get_epoch_time_ms(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); let burnchain_path = format!("{db_path}/burnchain"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let chain_state_path = format!("{db_path}/chainstate/"); - let sort_db = SortitionDB::open(&sort_db_path, false, conf.pox_constants.clone()) + let burnchain = conf.get_burnchain(); + let sort_db = SortitionDB::open(&sort_db_path, false, burnchain.pox_constants.clone()) .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); - let (chain_state, _) = StacksChainState::open(true, conf.chain_id, &chain_state_path, None) - .expect("Failed to open stacks chain state"); + let (chain_state, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .expect("Failed to open stacks chain state"); let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) .expect("Failed to get sortition chain tip"); let estimator = Box::new(UnitEstimator); let metric = Box::new(UnitMetric); - let mut mempool_db = MemPoolDB::open(true, conf.chain_id, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); + let mut mempool_db = MemPoolDB::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + estimator, + metric, + ) + .expect("Failed to open mempool db"); let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) .unwrap() @@ -519,7 +460,7 @@ pub fn command_try_mine(argv: &[String], conf: Option<&StacksChainConfig>) { TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); - coinbase_tx.chain_id = conf.chain_id; + coinbase_tx.chain_id = conf.burnchain.chain_id; coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); tx_signer.sign_origin(&sk).unwrap(); @@ -581,31 +522,32 @@ pub fn command_try_mine(argv: &[String], conf: Option<&StacksChainConfig>) { } /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block( - db_path: &str, - index_block_hash_hex: &str, - conf: Option<&StacksChainConfig>, -) { +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<&Config>) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut 
chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.epochs.as_ref().expect("No Epochs found"); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -659,30 +601,31 @@ fn replay_staging_block( } /// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block( - db_path: &str, - block: AssembledAnchorBlock, - conf: Option<&StacksChainConfig>, -) { +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock, conf: Option<&Config>) { let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.epochs.as_ref().expect("No Epochs found"); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -861,22 +804,28 @@ fn replay_block( } /// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate -fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &StacksChainConfig) { +fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &Config) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.epochs.as_ref().expect("No Epochs found"); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + epochs, + burnchain.pox_constants.clone(), 
None, true, ) @@ -1171,11 +1120,11 @@ pub mod test { let opts = drain_common_opts(&mut argv, 1); assert_eq!(argv, argv_init); - assert_eq!(opts, CommonOpts::default()); + assert!(opts.config.is_none()); // Should find config opts and remove from vec let mut argv = parse_cli_command( - "stacks-inspect --network testnet --network mainnet try-mine /tmp/chainstate/mainnet", + "stacks-inspect --network mocknet --network mainnet try-mine /tmp/chainstate/mainnet", ); let opts = drain_common_opts(&mut argv, 1); let argv_expected = parse_cli_command("stacks-inspect try-mine /tmp/chainstate/mainnet"); diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index f4934ecdaf..7193aac9bd 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -20,13 +20,12 @@ use std::collections::{HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use std::time::Duration; use std::{cmp, fs, thread}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; -use lazy_static::lazy_static; use rand::RngCore; use serde::Deserialize; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -96,6 +95,43 @@ const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = + LazyLock::new(|| ConnectionOptions { + inbox_maxlen: 100, + outbox_maxlen: 100, + timeout: 15, + idle_timeout: 15, // how long a HTTP connection can be idle before it's closed + heartbeat: 3600, + // can't use u64::max, because sqlite stores as i64. 
+ private_key_lifetime: 9223372036854775807, + num_neighbors: 32, // number of neighbors whose inventories we track + num_clients: 750, // number of inbound p2p connections + soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track + soft_num_clients: 750, // soft limit on the number of inbound p2p connections + max_neighbors_per_host: 1, // maximum number of neighbors per host we permit + max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit + soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit + soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) + soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit + max_http_clients: 1000, // maximum number of HTTP connections + max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) + walk_interval: 60, // how often, in seconds, we do a neighbor walk + walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node + log_neighbors_freq: 60_000, // every minute, log all peer connections + inv_sync_interval: 45, // how often, in seconds, we refresh block inventories + inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet + download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) + dns_timeout: 15_000, + max_inflight_blocks: 6, + max_inflight_attachments: 6, + ..std::default::Default::default() + }); + +pub static DEFAULT_MAINNET_CONFIG: LazyLock = LazyLock::new(|| { + Config::from_config_file(ConfigFile::mainnet(), false) + .expect("Failed to create default mainnet config") +}); + #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] pub struct ConfigFile { @@ -313,39 +349,6 @@ pub struct Config { pub atlas: AtlasConfig, } -lazy_static! { - static ref HELIUM_DEFAULT_CONNECTION_OPTIONS: ConnectionOptions = ConnectionOptions { - inbox_maxlen: 100, - outbox_maxlen: 100, - timeout: 15, - idle_timeout: 15, // how long a HTTP connection can be idle before it's closed - heartbeat: 3600, - // can't use u64::max, because sqlite stores as i64. 
- private_key_lifetime: 9223372036854775807, - num_neighbors: 32, // number of neighbors whose inventories we track - num_clients: 750, // number of inbound p2p connections - soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track - soft_num_clients: 750, // soft limit on the number of inbound p2p connections - max_neighbors_per_host: 1, // maximum number of neighbors per host we permit - max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit - soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit - soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) - soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit - max_http_clients: 1000, // maximum number of HTTP connections - max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) - walk_interval: 60, // how often, in seconds, we do a neighbor walk - walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node - log_neighbors_freq: 60_000, // every minute, log all peer connections - inv_sync_interval: 45, // how often, in seconds, we refresh block inventories - inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet - download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) - dns_timeout: 15_000, - max_inflight_blocks: 6, - max_inflight_attachments: 6, - .. std::default::Default::default() - }; -} - impl Config { /// get the up-to-date burnchain options from the config. 
/// If the config file can't be loaded, then return the existing config From 66a100f71cd6f69df2809076720860133b5d1ba9 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 10 Dec 2024 17:16:35 -0500 Subject: [PATCH 29/57] fix: `mock_miner_replay()` integration test works with new config method --- stackslib/src/cli.rs | 2 ++ .../stacks-node/src/tests/neon_integrations.rs | 18 ++---------------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index fa99a9739a..0ba1ac131f 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -74,8 +74,10 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts while let Some(arg) = argv.get(i) { let (prefix, opt) = arg.split_at(2); if prefix != "--" { + // No args left to take break; } + // "Take" arg i += 1; match opt { "config" => { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4680f58302..fc363d3db8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5,7 +5,6 @@ use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; -use clarity::consts::BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -39,7 +38,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli::{self, StacksChainConfig}; +use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; @@ -12691,22 +12690,9 @@ fn mock_miner_replay() { let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); let db_path = format!("{}/neon", conf.node.working_dir); let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; - let SortitionDB { - first_block_height, - first_burn_header_hash, - ..
- } = *btc_regtest_controller.sortdb_mut(); - let replay_config = StacksChainConfig { - chain_id: conf.burnchain.chain_id, - first_block_height, - first_burn_header_hash, - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: burnchain_config.pox_constants, - epochs: conf.burnchain.epochs.expect("Missing `epochs` in config"), - }; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args, Some(&replay_config)); + cli::command_replay_mock_mining(&args, Some(&conf)); // ---------- Test finished, clean up ---------- From 58f0ab867e65176d82acff695aef52268f303229 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Dec 2024 14:05:20 -0500 Subject: [PATCH 30/57] fix: `stacks-inspect` commands work with default mainnet config again --- stackslib/src/chainstate/nakamoto/mod.rs | 3 ++ stackslib/src/cli.rs | 35 ++++++++++--------- stackslib/src/config/mod.rs | 9 ++--- .../stacks-node/src/run_loop/boot_nakamoto.rs | 8 ++--- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 35f6e5d1e1..3ea1281abe 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2701,10 +2701,12 @@ impl NakamotoChainState { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1"; + println!("get_block_header_epoch2(): Looking for block {index_block_hash}"); let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { "FATAL: multiple rows for the same block hash".to_string() })?; + println!("get_block_header_epoch2(): Found {result:?}"); Ok(result) } @@ -2713,6 +2715,7 @@ impl NakamotoChainState { chainstate_conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { + println!("get_block_header(): Looking for block {index_block_hash}"); if let Some(header) = Self::get_block_header_nakamoto(chainstate_conn, index_block_hash)? { return Ok(Some(header)); } diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 0ba1ac131f..3200f4255c 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -390,10 +390,8 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { eprintln!(""); eprintln!("Given a , try to ''mine'' an anchored block. This invokes the miner block"); eprintln!("assembly, but does not attempt to broadcast a block commit. 
This is useful for determining"); - eprintln!( - "what transactions a given chain state would include in an anchor block, or otherwise" - ); - eprintln!("simulating a miner."); + eprintln!("what transactions a given chain state would include in an anchor block,"); + eprintln!("or otherwise simulating a miner."); process::exit(1); }; @@ -418,16 +416,16 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { let burnchain = conf.get_burnchain(); let sort_db = SortitionDB::open(&sort_db_path, false, burnchain.pox_constants.clone()) - .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); + .unwrap_or_else(|e| panic!("Failed to open {sort_db_path}: {e}")); let (chain_state, _) = StacksChainState::open( conf.is_mainnet(), conf.burnchain.chain_id, &chain_state_path, None, ) - .expect("Failed to open stacks chain state"); + .unwrap_or_else(|e| panic!("Failed to open stacks chain state: {e}")); let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) - .expect("Failed to get sortition chain tip"); + .unwrap_or_else(|e| panic!("Failed to get sortition chain tip: {e}")); let estimator = Box::new(UnitEstimator); let metric = Box::new(UnitMetric); @@ -439,7 +437,7 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { estimator, metric, ) - .expect("Failed to open mempool db"); + .unwrap_or_else(|e| panic!("Failed to open mempool db: {e}")); let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) .unwrap() @@ -449,7 +447,7 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { &header_tip.consensus_hash, &header_tip.anchored_header.block_hash(), ) - .expect("Failed to load chain tip header info") + .unwrap_or_else(|e| panic!("Failed to load chain tip header info: {e}")) .expect("Failed to load chain tip header info"); let sk = StacksPrivateKey::new(); @@ -482,7 +480,12 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { &coinbase_tx, settings, None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), + &Burnchain::new( + &burnchain_path, + &burnchain.chain_name, + &burnchain.network_name, + ) + .unwrap(), ); let stop = get_epoch_time_ms(); @@ -542,13 +545,13 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option< .unwrap(); let burnchain = conf.get_burnchain(); - let epochs = conf.burnchain.epochs.as_ref().expect("No Epochs found"); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, burnchain.first_block_height, &burnchain.first_block_hash, u64::from(burnchain.first_block_timestamp), - epochs, + &epochs, burnchain.pox_constants.clone(), None, true, @@ -620,13 +623,13 @@ fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock, conf: Opt .unwrap(); let burnchain = conf.get_burnchain(); - let epochs = conf.burnchain.epochs.as_ref().expect("No Epochs found"); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, burnchain.first_block_height, &burnchain.first_block_hash, u64::from(burnchain.first_block_timestamp), - epochs, + &epochs, burnchain.pox_constants.clone(), None, true, @@ -820,13 +823,13 @@ fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &C .unwrap(); let burnchain = conf.get_burnchain(); - let epochs = conf.burnchain.epochs.as_ref().expect("No Epochs found"); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, burnchain.first_block_height, 
&burnchain.first_block_hash, u64::from(burnchain.first_block_timestamp), - epochs, + &epochs, burnchain.pox_constants.clone(), None, true, diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 7193aac9bd..e71e7f16f4 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -521,10 +521,7 @@ impl Config { } fn check_nakamoto_config(&self, burnchain: &Burnchain) { - let epochs = StacksEpoch::get_epochs( - self.burnchain.get_bitcoin_network().1, - self.burnchain.epochs.as_ref(), - ); + let epochs = self.burnchain.get_epoch_list(); let Some(epoch_30) = epochs.get(StacksEpochId::Epoch30) else { // no Epoch 3.0, so just return return; @@ -1288,6 +1285,10 @@ impl BurnchainConfig { other => panic!("Invalid stacks-node mode: {other}"), } } + + pub fn get_epoch_list(&self) -> EpochList { + StacksEpoch::get_epochs(self.get_bitcoin_network().1, self.epochs.as_ref()) + } } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 648c6d7470..171ebcb2cb 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -22,9 +22,8 @@ use std::{fs, thread}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::core::StacksEpochExtension; use stacks::net::p2p::PeerNetwork; -use stacks_common::types::{StacksEpoch, StacksEpochId}; +use stacks_common::types::StacksEpochId; use crate::event_dispatcher::EventDispatcher; use crate::globals::NeonGlobals; @@ -233,10 +232,7 @@ impl BootRunLoop { fn reached_epoch_30_transition(config: &Config) -> Result { let burn_height = Self::get_burn_height(config)?; - let epochs = StacksEpoch::get_epochs( - config.burnchain.get_bitcoin_network().1, - config.burnchain.epochs.as_ref(), - ); + let epochs = config.burnchain.get_epoch_list(); let epoch_3 = epochs .get(StacksEpochId::Epoch30) .ok_or("No Epoch-3.0 defined")?; From 6e1e41014cf27b14ef0ec9137b36139bb3615717 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Dec 2024 14:30:07 -0500 Subject: [PATCH 31/57] chore: Remove `toml` dependency from `stacks-node` --- Cargo.lock | 1 - testnet/stacks-node/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87ceaf2808..47621472bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3338,7 +3338,6 @@ dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml", "tracing", "tracing-subscriber", "url", diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 33c5f09306..e902140428 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -14,7 +14,6 @@ serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} -toml = { workspace = true } base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" From 5fe4948ba3b7c4fa958313a4198d7c16f197ae99 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Dec 2024 15:41:27 -0500 Subject: [PATCH 32/57] fix: Move config files to fix `test_example_confs()` --- .../stacks-node => stackslib}/conf/mainnet-follower-conf.toml | 0 {testnet/stacks-node => stackslib}/conf/mainnet-miner-conf.toml | 0 .../stacks-node => stackslib}/conf/mainnet-mockminer-conf.toml | 0 {testnet/stacks-node => 
stackslib}/conf/mainnet-signer.toml | 0 .../stacks-node => stackslib}/conf/testnet-follower-conf.toml | 0 {testnet/stacks-node => stackslib}/conf/testnet-miner-conf.toml | 0 {testnet/stacks-node => stackslib}/conf/testnet-signer.toml | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename {testnet/stacks-node => stackslib}/conf/mainnet-follower-conf.toml (100%) rename {testnet/stacks-node => stackslib}/conf/mainnet-miner-conf.toml (100%) rename {testnet/stacks-node => stackslib}/conf/mainnet-mockminer-conf.toml (100%) rename {testnet/stacks-node => stackslib}/conf/mainnet-signer.toml (100%) rename {testnet/stacks-node => stackslib}/conf/testnet-follower-conf.toml (100%) rename {testnet/stacks-node => stackslib}/conf/testnet-miner-conf.toml (100%) rename {testnet/stacks-node => stackslib}/conf/testnet-signer.toml (100%) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/stackslib/conf/mainnet-follower-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-follower-conf.toml rename to stackslib/conf/mainnet-follower-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/stackslib/conf/mainnet-miner-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-miner-conf.toml rename to stackslib/conf/mainnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/stackslib/conf/mainnet-mockminer-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-mockminer-conf.toml rename to stackslib/conf/mainnet-mockminer-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/stackslib/conf/mainnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-signer.toml rename to stackslib/conf/mainnet-signer.toml diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/stackslib/conf/testnet-follower-conf.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-follower-conf.toml rename to stackslib/conf/testnet-follower-conf.toml diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/stackslib/conf/testnet-miner-conf.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-miner-conf.toml rename to stackslib/conf/testnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/stackslib/conf/testnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-signer.toml rename to stackslib/conf/testnet-signer.toml From ef7cb903e825f327cd521dcdac64fff5e43e1f0f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 12 Dec 2024 11:17:02 -0500 Subject: [PATCH 33/57] CRC: move TestFlag related functions to separate test modules Signed-off-by: Jacinta Ferrant --- stacks-common/src/util/mod.rs | 23 +-- stacks-common/src/util/tests.rs | 99 ++++++++++++ stacks-signer/src/v0/mod.rs | 4 + stacks-signer/src/v0/signer.rs | 127 +--------------- stacks-signer/src/v0/tests.rs | 141 ++++++++++++++++++ testnet/stacks-node/src/event_dispatcher.rs | 2 +- .../src/nakamoto_node/stackerdb_listener.rs | 2 +- testnet/stacks-node/src/run_loop/neon.rs | 2 +- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 10 files changed, 257 insertions(+), 147 deletions(-) create mode 100644 stacks-common/src/util/tests.rs create mode 100644 stacks-signer/src/v0/tests.rs diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 416a365a2f..5f733eddad 100644 --- a/stacks-common/src/util/mod.rs +++
b/stacks-common/src/util/mod.rs @@ -36,28 +36,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; #[cfg(any(test, feature = "testing"))] -#[derive(Clone)] -pub struct TestFlag(pub std::sync::Arc>>); - -#[cfg(any(test, feature = "testing"))] -impl Default for TestFlag { - fn default() -> Self { - Self(std::sync::Arc::new(std::sync::Mutex::new(None))) - } -} - -#[cfg(any(test, feature = "testing"))] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: T) { - *self.0.lock().unwrap() = Some(value); - } - - /// Get the test flag value. Defaults otherwise. - pub fn get(&self) -> T { - self.0.lock().unwrap().clone().unwrap_or_default().clone() - } -} +pub mod tests; pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs new file mode 100644 index 0000000000..b87e913718 --- /dev/null +++ b/stacks-common/src/util/tests.rs @@ -0,0 +1,99 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::{Arc, Mutex}; +/// `TestFlag` is a thread-safe utility designed for managing shared state in testing scenarios. It wraps +/// a value of type `T` inside an `Arc>>`, allowing you to set and retrieve a value +/// across different parts of your codebase while ensuring thread safety. +/// +/// This structure is particularly useful when: +/// - You need a global or static variable in tests. +/// - You want to control the execution of custom test code paths by setting and checking a shared value. +/// +/// # Type Parameter +/// - `T`: The type of the value managed by the `TestFlag`. It must implement the `Default` and `Clone` traits. +/// +/// # Examples +/// +/// ```rust +/// use stacks_common::util::tests::TestFlag; +/// use std::sync::{Arc, Mutex}; +/// +/// // Create a TestFlag instance +/// let test_flag = TestFlag::default(); +/// +/// // Set a value in the test flag +/// test_flag.set("test_value".to_string()); +/// +/// // Retrieve the value +/// assert_eq!(test_flag.get(), "test_value".to_string()); +/// +/// // Reset the value to default +/// test_flag.set("".to_string()); +/// assert_eq!(test_flag.get(), "".to_string()); +/// ``` +#[derive(Clone)] +pub struct TestFlag(pub Arc>>); + +impl Default for TestFlag { + fn default() -> Self { + Self(Arc::new(Mutex::new(None))) + } +} + +impl TestFlag { + /// Sets the value of the test flag. + /// + /// This method updates the value stored inside the `TestFlag`, replacing any existing value. + /// + /// # Arguments + /// - `value`: The new value to set for the `TestFlag`. + /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// test_flag.set(42); + /// assert_eq!(test_flag.get(), 42); + /// ``` + pub fn set(&self, value: T) { + *self.0.lock().unwrap() = Some(value); + } + + /// Retrieves the current value of the test flag. 
+ /// + /// If no value has been set, this method returns the default value for the type `T`. + /// + /// # Returns + /// - The current value of the test flag, or the default value of `T` if none has been set. + /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// + /// // Get the default value + /// assert_eq!(test_flag.get(), 0); // For T = i32, default is 0 + /// + /// // Set a value + /// test_flag.set(123); + /// + /// // Get the updated value + /// assert_eq!(test_flag.get(), 123); + /// ``` + pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 520fb36ca1..34b363311e 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -17,6 +17,10 @@ /// The signer module for processing events pub mod signer; +#[cfg(any(test, feature = "testing"))] +/// Test specific functions for the signer module +pub mod tests; + use libsigner::v0::messages::SignerMessage; use crate::v0::signer::Signer; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2fe948d0f9..5a5128cce4 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,8 +15,6 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; -#[cfg(any(test, feature = "testing"))] -use std::sync::LazyLock; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; @@ -35,12 +33,8 @@ use libsigner::v0::messages::{ use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; -#[cfg(any(test, feature = "testing"))] -use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; -#[cfg(any(test, feature = "testing"))] -use stacks_common::util::TestFlag; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -50,27 +44,13 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = - LazyLock::new(TestFlag::default); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock>> = - LazyLock::new(TestFlag::default); - -#[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); - -#[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { + /// The private key of the signer + #[cfg(any(test, feature = "testing"))] + pub private_key: StacksPrivateKey, + #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client @@ -175,20 +155,8 @@ impl SignerTrait for Signer { match message { 
SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - { - let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Ignoring block proposal due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - continue; - } + if self.test_ignore_all_block_proposals(block_proposal) { + continue; } self.handle_block_proposal( stacks_client, @@ -1121,87 +1089,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if TEST_SKIP_BLOCK_BROADCAST.get() { - let block_hash = block.header.signer_signature_hash(); - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self - .signer_db - .set_block_broadcasted(&block_hash, get_epoch_time_secs()) - { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return true; - } - false - } - - #[cfg(any(test, feature = "testing"))] - fn test_reject_block_proposal( - &mut self, - block_proposal: &BlockProposal, - block_info: &mut BlockInfo, - block_response: Option, - ) -> Option { - let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; - // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject - // as invalid since we rejected in a prior round if this crops up again) - // in case this is the first time we saw this block. Safe to do since this is testing case only. - self.signer_db - .insert_block(block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } else { - block_response - } - } - - #[cfg(any(test, feature = "testing"))] - fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { - if TEST_PAUSE_BLOCK_BROADCAST.get() { - // Do an extra check just so we don't log EVERY time. 
- warn!("{self}: Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while TEST_PAUSE_BLOCK_BROADCAST.get() { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("{self}: Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } - /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -1216,7 +1103,7 @@ impl Signer { } /// Helper for logging insert_block error - fn handle_insert_block_error(&self, e: DBError) { + pub fn handle_insert_block_error(&self, e: DBError) { error!("{self}: Failed to insert block into signer-db: {e:?}"); panic!("{self} Failed to write block to signerdb: {e}"); } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs new file mode 100644 index 0000000000..0b9cdcc569 --- /dev/null +++ b/stacks-signer/src/v0/tests.rs @@ -0,0 +1,141 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::sync::LazyLock; + +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use libsigner::v0::messages::{BlockResponse, RejectCode}; +use libsigner::BlockProposal; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::tests::TestFlag; +use stacks_common::{info, warn}; + +use super::signer::Signer; +use crate::signerdb::BlockInfo; + +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list +pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to pause broadcasting the block to the network +pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +/// A global variable that can be used to skip broadcasting the block to the network +pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +impl Signer { + /// Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set + pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if TEST_SKIP_BLOCK_BROADCAST.get() { + let block_hash = block.header.signer_signature_hash(); + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + + /// Reject block proposals if the TEST_REJECT_ALL_BLOCK_PROPOSAL flag is set for the signer's public key + pub fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. 
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), + )) + } else { + block_response + } + } + + /// Pause the block broadcast if the TEST_PAUSE_BLOCK_BROADCAST flag is set + pub fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while TEST_PAUSE_BLOCK_BROADCAST.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + + /// Ignore block proposals if the TEST_IGNORE_ALL_BLOCK_PROPOSALS flag is set for the signer's public key + pub fn test_ignore_all_block_proposals(&self, block_proposal: &BlockProposal) -> bool { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + return true; + } + false + } +} diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 65b46011e7..11f52e883e 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -62,7 +62,7 @@ use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; #[cfg(any(test, feature = "testing"))] -use stacks::util::TestFlag; +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 5fa6e1efd8..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -36,7 +36,7 @@ use stacks::util::get_epoch_time_secs; use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::MessageSignature; #[cfg(test)] -use stacks::util::TestFlag; +use stacks_common::util::tests::TestFlag; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c3e2339bfc..9bbad4f20c 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -22,7 +22,7 @@ use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_rea use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; #[cfg(test)] -use stacks::util::TestFlag; 
+use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index c669a7febd..e55fa54378 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -51,11 +51,11 @@ use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; -use stacks::util::TestFlag; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::tests::TestFlag; use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 247585984e..cbad4f931c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -59,7 +59,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{ +use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; From 4ab762f0384e377a9b12e22f3173ef626d2a56ce Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 12 Dec 2024 13:46:16 -0500 Subject: [PATCH 34/57] chore: Remove debugging print statements --- stackslib/src/chainstate/nakamoto/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 3ea1281abe..35f6e5d1e1 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2701,12 +2701,10 @@ impl NakamotoChainState { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1"; - println!("get_block_header_epoch2(): Looking for block {index_block_hash}"); let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { "FATAL: multiple rows for the same block hash".to_string() })?; - println!("get_block_header_epoch2(): Found {result:?}"); Ok(result) } @@ -2715,7 +2713,6 @@ impl NakamotoChainState { chainstate_conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { - println!("get_block_header(): Looking for block {index_block_hash}"); if let Some(header) = Self::get_block_header_nakamoto(chainstate_conn, index_block_hash)? 
{ return Ok(Some(header)); } From 55961e3b42f639400880b0d717a708f777f6332d Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 13 Dec 2024 13:43:43 -0500 Subject: [PATCH 35/57] chore: Upgrade `debug` print to `info` --- stackslib/src/cli.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 3200f4255c..e5a8cf4a47 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -861,7 +861,7 @@ fn replay_block_nakamoto( ) }); - debug!("Process staging Nakamoto block"; + info!("Process staging Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id(), From 150bfce99eccb8a86cb2c867edf0893bd914f147 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Dec 2024 15:34:12 -0500 Subject: [PATCH 36/57] Do not wait for an exact number of acceptance and rejections Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index cbad4f931c..64860abce9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6102,7 +6102,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(signatures.len() == num_signers) + Ok(signatures.len() >= num_signers * 7 / 10) }) .expect("Test timed out while waiting for signers signatures for first block proposal"); let block = block.unwrap(); @@ -6192,7 +6192,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(block_rejections.len() == num_signers) + Ok(block_rejections.len() >= num_signers * 7 / 10) }) .expect("FAIL: Timed out waiting for block proposal rejections"); From 1767ee011c38c422c3e81b36ff8f881800876538 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 13 Dec 2024 15:31:16 -0500 Subject: [PATCH 37/57] chore: Remove double lookup of chain tip in `try-mine` --- stackslib/src/cli.rs | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index e5a8cf4a47..fc58f4acf4 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -439,16 +439,9 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { ) .unwrap_or_else(|e| panic!("Failed to open mempool db: {e}")); - let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) - .unwrap() - .unwrap(); - let parent_header = StacksChainState::get_anchored_block_header_info( - chain_state.db(), - &header_tip.consensus_hash, - &header_tip.anchored_header.block_hash(), - ) - .unwrap_or_else(|e| panic!("Failed to load chain tip header info: {e}")) - .expect("Failed to load chain tip header info"); + let tip_header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap_or_else(|e| panic!("Error looking up chain tip: {e}")) + .expect("No chain tip found"); let sk = StacksPrivateKey::new(); let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); @@ -473,7 +466,7 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { &chain_state, &sort_db.index_handle(&chain_tip.sortition_id), &mut mempool_db, - &parent_header, + &tip_header, chain_tip.total_burn, VRFProof::empty(), Hash160([0; 20]), @@ -497,13 +490,13 @@ pub fn command_try_mine(argv: &[String], conf: 
Option<&Config>) { } else { "Failed to" }, - parent_header.stacks_block_height + 1, + tip_header.stacks_block_height + 1, StacksBlockHeader::make_index_block_hash( - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash() + &tip_header.consensus_hash, + &tip_header.anchored_header.block_hash() ), - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash(), + &tip_header.consensus_hash, + &tip_header.anchored_header.block_hash(), stop.saturating_sub(start), min_fee, max_time From 02f3d01d5ca15ca9a24143412a23858813bacc89 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Dec 2024 16:04:25 -0500 Subject: [PATCH 38/57] Add tenure_timeout_secs to miner config to self-issue tenure extends Signed-off-by: Jacinta Ferrant --- CHANGELOG.md | 1 + testnet/stacks-node/src/config.rs | 7 +++++++ testnet/stacks-node/src/nakamoto_node/miner.rs | 10 +++++++++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f85ed6526b..387cc1d9eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] ### Added +- Add `tenure_timeout_secs` to the miner for determining when a time-based tenure extend should be attempted. ### Changed diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4ad793a4c3..77d94e5afe 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -93,6 +93,8 @@ const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -2145,6 +2147,8 @@ pub struct MinerConfig { pub block_commit_delay: Duration, /// The percentage of the remaining tenure cost limit to consume each block. 
pub tenure_cost_limit_per_block_percentage: Option, + /// Duration to wait before attempting to issue a tenure extend + pub tenure_timeout: Duration, } impl Default for MinerConfig { @@ -2181,6 +2185,7 @@ impl Default for MinerConfig { tenure_cost_limit_per_block_percentage: Some( DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE, ), + tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), } } } @@ -2566,6 +2571,7 @@ pub struct MinerConfigFile { pub subsequent_rejection_pause_ms: Option, pub block_commit_delay_ms: Option, pub tenure_cost_limit_per_block_percentage: Option, + pub tenure_timeout_secs: Option, } impl MinerConfigFile { @@ -2706,6 +2712,7 @@ impl MinerConfigFile { subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), tenure_cost_limit_per_block_percentage, + tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6a4ea39b60..6acfb7c33d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -160,6 +160,8 @@ pub struct BlockMinerThread { /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, signer_set_cache: Option, + /// The time at which tenure change was issued + tenure_change_time: Instant, } impl BlockMinerThread { @@ -187,6 +189,7 @@ impl BlockMinerThread { reason, p2p_handle: rt.get_p2p_handle(), signer_set_cache: None, + tenure_change_time: Instant::now(), } } @@ -1186,7 +1189,9 @@ impl BlockMinerThread { if self.last_block_mined.is_some() { // Check if we can extend the current tenure let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); - if get_epoch_time_secs() <= tenure_extend_timestamp { + if get_epoch_time_secs() <= tenure_extend_timestamp + && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout + { return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, @@ -1195,6 +1200,8 @@ impl BlockMinerThread { info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_change_time_elapsed" => self.tenure_change_time.elapsed().as_secs(), + "tenure_timeout_secs" => self.config.miner.tenure_timeout.as_secs(), ); self.tenure_extend_reset(); } @@ -1265,6 +1272,7 @@ impl BlockMinerThread { } fn tenure_extend_reset(&mut self) { + self.tenure_change_time = Instant::now(); self.reason = MinerReason::Extended { burn_view_consensus_hash: self.burn_block.consensus_hash, }; From 1eef7e86d9d30d34721a80d2a476fdce97c73b29 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 13 Dec 2024 16:20:24 -0500 Subject: [PATCH 39/57] chore: Add explicit panic if `stacks-inspect try-mine` tries to mine on Nakamoto chainstate --- stackslib/src/cli.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index fc58f4acf4..1f43a34d40 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -443,6 +443,15 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { .unwrap_or_else(|e| panic!("Error looking up chain tip: {e}")) .expect("No chain tip found"); + // Fail if Nakamoto chainstate detected. 
`try-mine` cannot mine Nakamoto blocks yet + // TODO: Add Nakamoto block support + if matches!( + &tip_header.anchored_header, + StacksBlockHeaderTypes::Nakamoto(..) + ) { + panic!("Attempting to mine Nakamoto block. Nakamoto blocks not supported yet!"); + }; + let sk = StacksPrivateKey::new(); let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); tx_auth.set_origin_nonce(0); From 51605beb1a8177fb0872df34e8ea619228c30d20 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 13 Dec 2024 16:22:06 -0500 Subject: [PATCH 40/57] chore: Add symlink `testnet/stacks-node/conf` -> `stackslib/conf` so as not to break existing links --- testnet/stacks-node/conf | 1 + 1 file changed, 1 insertion(+) create mode 120000 testnet/stacks-node/conf diff --git a/testnet/stacks-node/conf b/testnet/stacks-node/conf new file mode 120000 index 0000000000..94edd3b5d4 --- /dev/null +++ b/testnet/stacks-node/conf @@ -0,0 +1 @@ +../../stackslib/conf/ \ No newline at end of file From 1c01db1353df5615e1bafbc29df6ac32cd629e66 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Dec 2024 16:35:51 -0500 Subject: [PATCH 41/57] Add tests for miner's tenure timeout caused tenure extends Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 4 +- testnet/stacks-node/src/tests/signer/v0.rs | 171 ++++++++++++++++++++- 2 files changed, 172 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index de4a8cfdb3..adab04a104 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -122,7 +122,9 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - - tests::signer::v0::tenure_extend_after_idle + - tests::signer::v0::tenure_extend_after_idle_signers + - tests::signer::v0::tenure_extend_after_idle_miner + - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt - tests::signer::v0::stx_transfers_dont_effect_idle_timeout - tests::signer::v0::idle_tenure_extend_active_mining - tests::signer::v0::multiple_miners_with_custom_chain_id diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 00276b09ee..6fd0fd54af 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2580,8 +2580,8 @@ fn signers_broadcast_signed_blocks() { #[test] #[ignore] -/// This test verifies that a miner will produce a TenureExtend transaction after the idle timeout is reached. -fn tenure_extend_after_idle() { +/// This test verifies that a miner will produce a TenureExtend transaction after the signers' idle timeout is reached. +fn tenure_extend_after_idle_signers() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2629,6 +2629,173 @@ fn tenure_extend_after_idle() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction after the miner's idle timeout +/// even if they do not see the signers' tenure extend timestamp responses. 
+fn tenure_extend_after_idle_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = idle_timeout + Duration::from_secs(10); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Start a new tenure but ignore block signatures so no timestamps are recorded ----"); + let tip_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + TEST_IGNORE_SIGNERS.set(true); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + let tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + Ok(tip_height > tip_height_before) + }, + ) + .expect("Failed to mine the tenure change block"); + + // Now, wait for a block with a tenure change due to the new block + wait_for(30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::BlockFound, + )) + }) + .expect("Timed out waiting for a block with a tenure change"); + + info!("---- Waiting for a tenure extend ----"); + + TEST_IGNORE_SIGNERS.set(false); + // Now, wait for a block with a tenure extend + wait_for(miner_idle_timeout.as_secs() + 20, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test verifies that a miner that attempts to produce a tenure extend too early will be rejected by the signers, +/// but will eventually succeed after the signers' idle timeout has passed. 
+fn tenure_extend_succeeds_after_rejected_attempt() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a rejected tenure extend ----"); + // Now, wait for a block with a tenure extend proposal from the miner, but ensure it is rejected. + wait_for(30, || { + let block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.get_tenure_tx_payload().unwrap().cause + == TenureChangeCause::Extended + { + return Some(proposal.block); + } + } + None + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejected)) = message { + if block.header.signer_signature_hash() == rejected.signer_signature_hash { + return Some(rejected.signature); + } + } + None + }) + .collect::>(); + Ok(signatures.len() >= num_signers * 7 / 10) + }) + .expect("Test timed out while waiting for a rejected tenure extend"); + + info!("---- Waiting for an accepted tenure extend ----"); + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Test timed out while waiting for an accepted tenure extend"); + signer_test.shutdown(); +} + #[test] #[ignore] /// Verify that Nakamoto blocks that don't modify the tenure's execution cost From a107fa4a11cac280440efdf998d235449ca07349 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Dec 2024 16:39:03 -0500 Subject: [PATCH 42/57] update comment on mined_blocks and tenure_change_time to be clear the tenure change doesn't have to be successful Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6acfb7c33d..d9edf97e90 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ 
b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -139,7 +139,7 @@ pub struct BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, - /// Number of blocks mined since a tenure change/extend + /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: RegisteredKey, @@ -160,7 +160,7 @@ pub struct BlockMinerThread { /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, signer_set_cache: Option, - /// The time at which tenure change was issued + /// The time at which tenure change/extend was attempted tenure_change_time: Instant, } From 2107ab3f72b4721619fc900b2ff68889f15018e9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Dec 2024 13:00:55 -0500 Subject: [PATCH 43/57] Fix p2p ci no tests errors Signed-off-by: Jacinta Ferrant --- .github/workflows/p2p-tests.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/p2p-tests.yml b/.github/workflows/p2p-tests.yml index 1c33eca0fb..81790bdc12 100644 --- a/.github/workflows/p2p-tests.yml +++ b/.github/workflows/p2p-tests.yml @@ -43,10 +43,10 @@ jobs: - net::tests::convergence::test_walk_star_15_org_biased - net::tests::convergence::test_walk_inbound_line_15 - net::api::tests::postblock_proposal::test_try_make_response - - net::server::tests::test_http_10_threads_getinfo - - net::server::tests::test_http_10_threads_getblock - - net::server::tests::test_http_too_many_clients - - net::server::tests::test_http_slow_client + - net::server::test::test_http_10_threads_getinfo + - net::server::test::test_http_10_threads_getblock + - net::server::test::test_http_too_many_clients + - net::server::test::test_http_slow_client steps: ## Setup test environment - name: Setup Test Environment From 76014e513d6f2ecb7a466be1e2e782c237fbe727 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Dec 2024 13:50:18 -0500 Subject: [PATCH 44/57] Fix full genesis test Signed-off-by: Jacinta Ferrant --- .github/workflows/stacks-core-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 98eb5cf92c..e7afd86c6c 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -29,7 +29,7 @@ jobs: max-parallel: 2 matrix: test-name: - - neon_integrations::bitcoind_integration_test + - tests::neon_integrations::bitcoind_integration_test steps: ## Setup test environment - name: Setup Test Environment From e486b9e19e9bc2c73835d3f7cb6c9c1a3f5e6153 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Dec 2024 14:44:31 -0500 Subject: [PATCH 45/57] chore: add changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f85ed6526b..1bf801607d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502) + ## [3.1.0.0.1] ### Added From 3a479225ea691af11bf8052cd27a73c9406873ad Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 16 Dec 2024 11:59:37 -0800 Subject: [PATCH 46/57] Removing unused workflow step (remains in bitcoin-tests.yml) --- .github/workflows/stacks-core-tests.yml | 49 ------------------------- 1 file 
changed, 49 deletions(-) diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index e7afd86c6c..068c4fb575 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -18,55 +18,6 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: - # Full genesis test with code coverage - full-genesis: - name: Full Genesis Test - runs-on: ubuntu-latest - strategy: - ## Continue with the test matrix even if we've had a failure - fail-fast: false - ## Run a maximum of 2 concurrent tests from the test matrix - max-parallel: 2 - matrix: - test-name: - - tests::neon_integrations::bitcoind_integration_test - steps: - ## Setup test environment - - name: Setup Test Environment - id: setup_tests - uses: stacks-network/actions/stacks-core/testenv@main - with: - genesis: true - btc-version: "25.0" - - ## Run test matrix using restored cache of archive file - ## - Test will timeout after env.TEST_TIMEOUT minutes - - name: Run Tests - id: run_tests - timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@main - with: - test-name: ${{ matrix.test-name }} - threads: 1 - archive-file: ~/genesis_archive.tar.zst - - ## Upload code coverage file - - name: Code Coverage - id: codecov - uses: stacks-network/actions/codecov@main - with: - test-name: large_genesis - filename: ./lcov.info - - - name: Status Output - run: | - echo "run_tests: ${{ steps.run_tests.outputs.status }}" - echo "codecov: ${{ steps.codecov.outputs.status }}" - - - name: Check Failures - if: steps.run_tests.outputs.status == 'failure' || steps.codecov.outputs.status == 'failure' - run: exit 1 - # Unit tests with code coverage unit-tests: name: Unit Tests From a50825c581acf221ffbd69a569084e95dd373c96 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Dec 2024 16:01:09 -0500 Subject: [PATCH 47/57] Missing changes from failed merge Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/event_dispatcher.rs | 2 -- testnet/stacks-node/src/tests/signer/mod.rs | 1 - testnet/stacks-node/src/tests/signer/v0.rs | 1 - 3 files changed, 4 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ebabba89a2..2f71838adb 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -73,8 +73,6 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; -use super::config::{EventKeyType, EventObserverConfig}; - #[cfg(any(test, feature = "testing"))] lazy_static! { /// Do not announce a signed/mined block to the network when set to true. 
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 61bbed9097..432b990667 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -63,7 +63,6 @@ use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon::{Counters, RunLoopCounter}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 242ab3b446..5641776b5b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -69,7 +69,6 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, From 319b95ea6d725d1c164b8ae5a9d4ec3c56ad0587 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 16 Dec 2024 16:32:30 -0500 Subject: [PATCH 48/57] fix: remove `full-genesis` from `needs` of "Check Tests" --- .github/workflows/stacks-core-tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 068c4fb575..457a2aaefd 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -137,7 +137,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - full-genesis - open-api-validation - core-contracts-clarinet-test steps: From c0a5e83903dbed0d3ef7958c95064de180cf57f4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 17 Dec 2024 12:41:57 -0500 Subject: [PATCH 49/57] Add clippy to CI Signed-off-by: Jacinta Ferrant --- .github/workflows/clippy.yml | 40 ++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/clippy.yml diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml new file mode 100644 index 0000000000..83cf240815 --- /dev/null +++ b/.github/workflows/clippy.yml @@ -0,0 +1,40 @@ +## Perform Clippy checks - currently set to defaults +## https://github.com/rust-lang/rust-clippy#usage +## https://rust-lang.github.io/rust-clippy/master/index.html +## +name: Clippy Checks + +# Only run when: +# - PRs are (re)opened against develop branch +on: + pull_request: + branches: + - develop + types: + - opened + - reopened + - synchronize + +jobs: + clippy_check: + name: Clippy Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: clippy + - name: Clippy + id: clippy + uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: -p libstackerdb -p stacks-signer -p pox-locking --no-deps --tests --all-features -- -D warnings \ No newline at end of file From 
906c01236fb51059d6ae070593966d378aae6d60 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 17 Dec 2024 12:55:49 -0500 Subject: [PATCH 50/57] Fix clippy warnings in stacks signer Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 2 +- stacks-signer/src/cli.rs | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index a80a51a6dd..462f3dc2d2 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -176,7 +176,7 @@ enum ProposedBy<'a> { CurrentSortition(&'a SortitionState), } -impl<'a> ProposedBy<'a> { +impl ProposedBy<'_> { pub fn state(&self) -> &SortitionState { match self { ProposedBy::LastSortition(x) => x, diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 4e9067498d..7b666d3762 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -41,11 +41,9 @@ use stacks_common::types::chainstate::StacksPrivateKey; extern crate alloc; -#[derive(Parser, Debug)] -#[command(author, version, about)] -#[command(long_version = VERSION_STRING.as_str())] - /// The CLI arguments for the stacks signer +#[derive(Parser, Debug)] +#[command(author, version, about, long_version = VERSION_STRING.as_str())] pub struct Cli { /// Subcommand action to take #[command(subcommand)] From 29baa895898fffb2e89b6b941a91abc68c7393c2 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 19 Dec 2024 11:56:44 -0700 Subject: [PATCH 51/57] chore: fix errors after merge --- stackslib/src/chainstate/nakamoto/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 71068e1519..7be4b2d89b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4207,11 +4207,13 @@ impl NakamotoChainState { applied_epoch_transition: bool, signers_updated: bool, coinbase_height: u64, + phantom_lockup_events: Vec, ) -> Result< ( StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4248,7 +4250,7 @@ impl NakamotoChainState { coinbase_height, }; - return Ok((epoch_receipt, clarity_commit, None)); + return Ok((epoch_receipt, clarity_commit, None, phantom_lockup_events)); } /// Append a Nakamoto Stacks block to the Stacks chain state. 
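As a reading aid for the hunks in this and the following patches: lockup events that cannot be attached to a coinbase receipt are now threaded back to the caller so they can be emitted through a phantom transaction, and PATCH 56/57 below restructures the attach step so the same vector is reused rather than cloned. The following is a minimal, self-contained sketch of that attach-or-carry pattern, using stand-in types (Receipt, String events) rather than the real stackslib types:

struct Receipt {
    is_coinbase: bool,
    events: Vec<String>,
}

/// Append lockup events to the coinbase receipt when the block has one;
/// whatever cannot be attached is handed back so the caller can emit it
/// via a phantom transaction.
fn attach_or_carry(receipts: &mut [Receipt], mut lockup_events: Vec<String>) -> Vec<String> {
    if let Some(first) = receipts.first_mut() {
        if first.is_coinbase {
            first.events.append(&mut lockup_events);
        }
    }
    lockup_events
}

fn main() {
    // A block with no coinbase: nothing is attached, so the events carry forward.
    let mut receipts = vec![Receipt { is_coinbase: false, events: Vec::new() }];
    let leftover = attach_or_carry(&mut receipts, vec!["stx-unlock".to_string()]);
    assert_eq!(leftover.len(), 1);
}
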
@@ -4631,6 +4633,7 @@ impl NakamotoChainState { applied_epoch_transition, signer_set_calc.is_some(), coinbase_height, + phantom_lockup_events, ); } @@ -4942,7 +4945,7 @@ impl NakamotoChainState { unlock_tx, events, Value::okay_true(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ); Some(unlock_receipt) } From 41921b9b4de41d5f306fdb335e6f9dd2d3beacd3 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 19 Dec 2024 12:33:30 -0700 Subject: [PATCH 52/57] fix: include block height in phantom tx memo for unique txids --- stackslib/src/chainstate/nakamoto/mod.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 7be4b2d89b..0694edf7a0 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2156,6 +2156,7 @@ impl NakamotoChainState { Self::generate_phantom_unlock_tx( phantom_unlock_events, &stacks_chain_state.config(), + next_ready_block.header.chain_length, ) { tx_receipts.push(unlock_receipt); @@ -4920,6 +4921,7 @@ impl NakamotoChainState { fn generate_phantom_unlock_tx( events: Vec, config: &ChainstateConfig, + stacks_block_height: u64, ) -> Option { if events.is_empty() { return None; @@ -4930,6 +4932,16 @@ impl NakamotoChainState { } else { TransactionVersion::Testnet }; + + // Make the txid unique -- the phantom tx payload should include something block-specific otherwise + // they will always have the same txid. In this case we use the block height in the memo. This also + // happens to give some indication of the purpose of this phantom tx, for anyone looking. + let memo = TokenTransferMemo({ + let str = format!("Block {} token unlocks", stacks_block_height); + let mut buf = [0u8; 34]; + buf[..str.len().min(34)].copy_from_slice(&str.as_bytes()[..]); + buf + }); let boot_code_address = boot_code_addr(config.mainnet); let boot_code_auth = boot_code_tx_auth(boot_code_address.clone()); let unlock_tx = StacksTransaction::new( @@ -4938,7 +4950,7 @@ impl NakamotoChainState { TransactionPayload::TokenTransfer( PrincipalData::Standard(boot_code_address.into()), 0, - TokenTransferMemo([0u8; 34]), + memo, ), ); let unlock_receipt = StacksTransactionReceipt::from_stx_transfer( From ce10a2b56f0ae8b12bb1f8574614f105f4cbefa1 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 19 Dec 2024 13:07:58 -0700 Subject: [PATCH 53/57] chore: fix test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d84aa5d578..13923a847a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -9577,7 +9577,7 @@ fn nakamoto_lockup_events() { let txid = event.get("txid").unwrap().as_str().unwrap(); assert_eq!( txid, - "0xcba511741b230bd85cb5b3b10d26e0b92695d4a83f95c260cad82a40cd764235" + "0x63dd5773338782755e4947a05a336539137dfe13b19a0eac5154306850aca8ef" ); } } From 76552d4439dfb82275b69bd7ebdf81c9eccd81b4 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 19 Dec 2024 14:16:55 -0700 Subject: [PATCH 54/57] chore: fix tests --- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 42f72d5165..ffdea5a7dd 100644 --- 
a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2903,7 +2903,7 @@ pub mod test { // Just update the expected value assert_eq!( genesis_root_hash.to_string(), - "c771616ff6acb710051238c9f4a3c48020a6d70cda637d34b89f2311a7e27886" + "0eb3076f0635ccdfcdc048afb8dea9048c5180a2e2b2952874af1d18f06321e8" ); } From 346bc93b4f817d36badfa427beb0fff0b87cb7b9 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 19 Dec 2024 15:00:19 -0700 Subject: [PATCH 55/57] fix: remove clone --- stackslib/src/chainstate/nakamoto/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0694edf7a0..edb10cd4d7 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2093,7 +2093,7 @@ impl NakamotoChainState { return Err(e); }; - let (receipt, clarity_commit, reward_set_data, phantom_unlock_events) = + let (mut receipt, clarity_commit, reward_set_data, phantom_unlock_events) = ok_opt.expect("FATAL: unreachable"); assert_eq!( @@ -2148,7 +2148,7 @@ impl NakamotoChainState { &receipt.header.anchored_header.block_hash() ); - let mut tx_receipts = receipt.tx_receipts.clone(); + let tx_receipts = &mut receipt.tx_receipts; if let Some(unlock_receipt) = // For the event dispatcher, attach any STXMintEvents that // could not be included in the block (e.g. because the From f01b42716f34aafebe4aaba8d7d92df5b7955c21 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 19 Dec 2024 15:19:33 -0700 Subject: [PATCH 56/57] chore: move another clone --- stackslib/src/chainstate/nakamoto/mod.rs | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index edb10cd4d7..929d8dfe90 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4545,20 +4545,17 @@ impl NakamotoChainState { Ok(lockup_events) => lockup_events, }; - // Track events that we couldn't attach to a coinbase receipt - let mut phantom_lockup_events = lockup_events.clone(); - // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + // If any, append lockups events to the coinbase receipt + if let Some(receipt) = tx_receipts.get_mut(0) { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction - if let Some(receipt) = tx_receipts.get_mut(0) { - if receipt.is_coinbase_tx() { - receipt.events.append(&mut lockup_events); - phantom_lockup_events.clear(); - } + if receipt.is_coinbase_tx() { + receipt.events.append(&mut lockup_events); } } - if phantom_lockup_events.len() > 0 { + + // If lockup_events still contains items, it means they weren't attached + if !lockup_events.is_empty() { info!("Unable to attach lockup events, block's first transaction is not a coinbase transaction. 
Will attach as a phantom tx."); } @@ -4634,7 +4631,7 @@ impl NakamotoChainState { applied_epoch_transition, signer_set_calc.is_some(), coinbase_height, - phantom_lockup_events, + lockup_events, ); } @@ -4752,7 +4749,7 @@ impl NakamotoChainState { epoch_receipt, clarity_commit, reward_set_data, - phantom_lockup_events, + lockup_events, )) } From 8e77ac0c1da88a3e01179418775b2df7165dfbd2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 20 Dec 2024 09:25:55 -0500 Subject: [PATCH 57/57] fix: remove DNS names from `peer_host` field We don't need to do DNS lookups for tests and we want to encourage people to run their own Bitcoin nodes anyway. --- stackslib/src/config/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 513c3ba3b6..42663372f6 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -181,7 +181,7 @@ impl ConfigFile { mode: Some("xenon".to_string()), rpc_port: Some(18332), peer_port: Some(18333), - peer_host: Some("bitcoind.testnet.stacks.co".to_string()), + peer_host: Some("0.0.0.0".to_string()), magic_bytes: Some("T2".into()), ..BurnchainConfigFile::default() }; @@ -227,9 +227,9 @@ impl ConfigFile { mode: Some("mainnet".to_string()), rpc_port: Some(8332), peer_port: Some(8333), - peer_host: Some("bitcoin.blockstack.com".to_string()), - username: Some("blockstack".to_string()), - password: Some("blockstacksystem".to_string()), + peer_host: Some("0.0.0.0".to_string()), + username: Some("bitcoin".to_string()), + password: Some("bitcoin".to_string()), magic_bytes: Some("X2".to_string()), ..BurnchainConfigFile::default() };