From 8fe3394ec7437c40cfd0ae491bf1f7348371cf45 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 9 Nov 2024 16:29:36 -0500 Subject: [PATCH 001/115] test: add test for tenure-extend upon failed miner --- testnet/stacks-node/src/tests/signer/v0.rs | 422 ++++++++++++++++++++- 1 file changed, 406 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5ac25f97bb..79f55d7e7b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -420,6 +420,22 @@ impl SignerTest { } } +fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) { + let blocks = test_observer::get_blocks(); + let tenure_change_tx = &blocks.last().unwrap(); + let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => { + assert_eq!(payload.cause, cause); + } + _ => panic!("Expected tenure change transaction, got {parsed:?}"), + }; +} + #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -5841,22 +5857,6 @@ fn continue_after_fast_block_no_sortition() { let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); // Some helper functions for verifying the blocks contain their expected transactions - let verify_last_block_contains_tenure_change_tx = |cause: TenureChangeCause| { - let blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = 
hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, cause); - } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; - }; - let verify_last_block_contains_transfer_tx = || { let blocks = test_observer::get_blocks(); let tenure_change_tx = &blocks.last().unwrap(); @@ -6868,3 +6868,393 @@ fn block_commit_delay() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that a miner will extend its tenure after the proceeding miner fails to mine a block. +/// - Miner 1 wins a tenure and mines normally +/// - Miner 2 wins a tenure but fails to mine a block +/// - Miner 1 extends its tenure +fn tenure_extend_after_failed_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 1; + let sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the 
rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = Duration::from_secs(30); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = 
btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf.miner.mining_key.unwrap(), + )); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf_node_2.miner.mining_key.unwrap(), + )); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + 
signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!("------------------------- Pause Block Proposals -------------------------"); + TEST_MINE_STALL.lock().unwrap().replace(true); + + // Unpause 
miner 2's block commits + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); + + // Ensure miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + info!("------------------------- Miner 2 Wins Tenure B, Mines No Blocks -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // assure we have a successful sortition that miner B won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Wait for Block Proposal Timeout -------------------------"); + sleep_ms( + signer_test.signer_configs[0] + .block_proposal_timeout + .as_millis() as u64 + * 2, + ); + + info!("------------------------- Miner 1 Extends Tenure A -------------------------"); + + // Re-enable block mining + TEST_MINE_STALL.lock().unwrap().replace(false); + + // wait for a tenure extend block from miner 1 to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for tenure extend block to be mined and processed"); + + 
verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + // Re-enable block commits for miner 2 + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(true); + + // Wait for block commit from miner 2 + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from miner 2"); + + info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }, + ) + 
.expect("Timed out waiting for final block to be mined and processed"); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} From 311ad504dd3449638a4ad5680e9a043e5fc9bc2f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 12 Nov 2024 15:49:06 -0500 Subject: [PATCH 002/115] feat: implement tenure-extend after bad sortition winner See #5361 --- .../stacks-node/src/nakamoto_node/relayer.rs | 125 ++++- testnet/stacks-node/src/tests/signer/v0.rs | 439 +++++++++++++++++- 2 files changed, 540 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 7c8dc6f2c5..805703264e 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -382,20 +382,50 @@ impl RelayerThread { /// parent block could be an epoch 2 block. In this case, the right thing to do is to wait for /// the next block-commit. 
pub(crate) fn choose_miner_directive( - config: &Config, - sortdb: &SortitionDB, + &self, sn: BlockSnapshot, won_sortition: bool, committed_index_hash: StacksBlockId, ) -> Option { + let (cur_stacks_tip_ch, cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + + let stacks_tip = StacksBlockId::new(&cur_stacks_tip_ch, &cur_stacks_tip_bh); + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &stacks_tip, + &cur_stacks_tip_ch, + ) + .expect( + "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}", + ) + .expect("Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}"); + let directive = if sn.sortition { Some( - if won_sortition || config.get_node_config(false).mock_mining { + if won_sortition || self.config.get_node_config(false).mock_mining { + info!("Relayer: Won sortition; begin tenure."); MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, } + } else if committed_index_hash + != highest_tenure_start_block_header.index_block_hash() + { + info!( + "Relayer: Winner of sortition {} did not commit to the correct parent tenure. Attempt to continue tenure.", + &sn.consensus_hash + ); + // We didn't win the sortition, but the miner that did win + // did not commit to the correct parent tenure. This means + // it will be unable to produce a valid block, so we should + // continue our tenure. + MinerDirective::ContinueTenure { + new_burn_view: sn.consensus_hash, + } } else { + info!("Relayer: Stop tenure"); MinerDirective::StopTenure }, ) @@ -404,16 +434,16 @@ impl RelayerThread { // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so // right now since this sortition has no winner. 
let (cur_stacks_tip_ch, _cur_stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cur_stacks_tip_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &cur_stacks_tip_ch) .expect("FATAL: failed to query sortiiton DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); let cur_epoch = - SortitionDB::get_stacks_epoch(sortdb.conn(), stacks_tip_sn.block_height) + SortitionDB::get_stacks_epoch(self.sortdb.conn(), stacks_tip_sn.block_height) .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no epoch defined for existing sortition"); @@ -424,6 +454,7 @@ impl RelayerThread { ); None } else { + info!("Relayer: No sortition; continue tenure."); Some(MinerDirective::ContinueTenure { new_burn_view: sn.consensus_hash, }) @@ -480,13 +511,7 @@ impl RelayerThread { return Ok(None); } - let directive_opt = Self::choose_miner_directive( - &self.config, - &self.sortdb, - sn, - won_sortition, - committed_index_hash, - ); + let directive_opt = self.choose_miner_directive(sn, won_sortition, committed_index_hash); Ok(directive_opt) } @@ -880,29 +905,79 @@ impl RelayerThread { }; let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); - // If we won the last sortition, then we should start a new tenure off of it. - let last_block_election_snapshot = { + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &canonical_stacks_tip, + &canonical_stacks_tip_ch, + ) + .map_err(|e| { + error!( + "Relayer: Failed to get tenure-start block header for stacks tip {canonical_stacks_tip}: {e:?}" + ); + NakamotoNodeError::ParentNotFound + })? 
+ .ok_or_else(|| { + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {canonical_stacks_tip}" + ); + NakamotoNodeError::ParentNotFound + })?; + let highest_tenure_bhh = + BlockHeaderHash(highest_tenure_start_block_header.index_block_hash().0); + + // If we won the last good sortition, then we should extend off of it. + let last_good_block_election_snapshot = { let ih = self.sortdb.index_handle(&burn_tip.sortition_id); - ih.get_last_snapshot_with_sortition(burn_tip.block_height) + info!( + "Relayer: Getting last snapshot with sortition for {}", + burn_tip.block_height + ); + + let sn = ih + .get_last_snapshot_with_sortition(burn_tip.block_height) .map_err(|e| { error!("Relayer: failed to get last snapshot with sortition: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + if sn.winning_stacks_block_hash != highest_tenure_bhh { + info!( + "Relayer: Sortition winner is not committed to the canonical tip; allowing last miner to extend"; + "burn_block_height" => burn_tip.block_height, + "consensus_hash" => %burn_tip.consensus_hash, + ); + + SortitionDB::get_block_snapshot_consensus( + self.sortdb.conn(), + &canonical_stacks_tip_ch, + ) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + } else { + sn + } }; - let won_last_sortition = last_block_election_snapshot.miner_pk_hash == Some(mining_pkh); - debug!( - "Relayer: Current burn block had no sortition. Checking for tenure continuation."; + let won_last_sortition = + last_good_block_election_snapshot.miner_pk_hash == Some(mining_pkh); + info!( + "Relayer: Current burn block had no sortition or a bad sortition. 
Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, - "last_block_election_snapshot.consensus_hash" => %last_block_election_snapshot.consensus_hash, - "last_block_election_snapshot.miner_pk_hash" => ?last_block_election_snapshot.miner_pk_hash, + "last_good_block_election_snapshot.consensus_hash" => %last_good_block_election_snapshot.consensus_hash, + "last_good_block_election_snapshot.miner_pk_hash" => ?last_good_block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, "burn_view_ch" => %new_burn_view, ); if !won_last_sortition { + info!("Relayer: Did not win the last sortition. Cannot continue tenure."); return Ok(()); } @@ -924,8 +999,12 @@ impl RelayerThread { if !won_canonical_block_snapshot { debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issue a new tenure change payload."); ( - StacksBlockId(last_block_election_snapshot.winning_stacks_block_hash.0), - last_block_election_snapshot, + StacksBlockId( + last_good_block_election_snapshot + .winning_stacks_block_hash + .0, + ), + last_good_block_election_snapshot, MinerReason::EmptyTenure, ) } else { diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 79f55d7e7b..318c642a46 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6871,7 +6871,7 @@ fn block_commit_delay() { #[test] #[ignore] -/// Test that a miner will extend its tenure after the proceeding miner fails to mine a block. +/// Test that a miner will extend its tenure after the succeding miner fails to mine a block. 
/// - Miner 1 wins a tenure and mines normally /// - Miner 2 wins a tenure but fails to mine a block /// - Miner 1 extends its tenure @@ -7258,3 +7258,440 @@ fn tenure_extend_after_failed_miner() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that a miner will extend its tenure after the succeding miner commits to the wrong block. +/// - Miner 1 wins a tenure and mines normally +/// - Miner 1 wins another tenure and mines normally, but miner 2 does not see any blocks from this tenure +/// - Miner 2 wins a tenure and is unable to mine a block +/// - Miner 1 extends its tenure and mines an additional block +/// - Miner 2 wins the next tenure and mines normally +fn tenure_extend_after_bad_commit() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 1; + let sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = 
SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = Duration::from_secs(30); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_skip_commit_op = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = 
format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf.miner.mining_key.unwrap(), + )); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf_node_2.miner.mining_key.unwrap(), + )); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + 
.btc_regtest_controller + .build_next_block(1); + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!("------------------------- Pause Block Proposals -------------------------"); + TEST_MINE_STALL.lock().unwrap().replace(true); + + // Unpause miner 1's block commits + let 
rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + rl1_skip_commit_op.set(false); + + // Ensure miner 1 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) + }) + .unwrap(); + + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Wins Tenure B -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // assure we have a successful sortition that miner B won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + info!("----------------- Miner 2 Submits Block Commit Before Any Blocks ------------------"); + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); + + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from miner 2"); + + // Re-pause block commits for miner 2 so that it cannot RBF its original commit + rl2_skip_commit_op.set(true); + + info!("----------------------------- Resume Block Production -----------------------------"); + + TEST_MINE_STALL.lock().unwrap().replace(false); + + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > 
nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!("--------------- Miner 2 Wins Tenure C With Old Block Commit ----------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .expect("Timed out waiting for burn block to be processed"); + + info!("------------------------- Miner 1 Extends Tenure B -------------------------"); + + // wait for a tenure extend block from miner 1 to be processed + // (miner 2's proposals will be rejected) + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for tenure extend block to be mined and processed"); + + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + 
submit_tx(&http_origin, &transfer_tx); + + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + // Re-enable block commits for miner 2 + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(true); + + // Wait for block commit from miner 2 + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from miner 2"); + + info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }, + ) + .expect("Timed out waiting for final block to be mined and processed"); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} From 2a4a09b13e8bd66f1f1ae1ae000dd51e45038944 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 13 Nov 2024 00:26:21 -0500 Subject: [PATCH 003/115] feat: make signer accept tenure extend on bad sortition With this change, the signer will accept a tenure extend from miner N-1 when miner N wins a sortition but commits to 
the wrong parent tenure. --- stacks-signer/src/chainstate.rs | 15 +++++++++++++++ stacks-signer/src/signerdb.rs | 9 +++++++++ testnet/stacks-node/src/tests/signer/v0.rs | 16 +++++++++------- 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index fa24c8b22e..aea2d93ef0 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -203,7 +203,22 @@ impl SortitionsView { "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } else if let Some(tip) = signer_db.get_canonical_tip()? { + // If this is a tenure change block, then the current sortition's parent tenure must be + // the canonical tip's tenure. If it's not, then the current tip may already be in this + // tenure. + if self.cur_sortition.parent_tenure_id != tip.block.header.consensus_hash + && self.cur_sortition.consensus_hash != tip.block.header.consensus_hash + { + warn!( + "Current sortition does not build off of canonical tip tenure, marking as invalid"; + "current_sortition_parent" => ?self.cur_sortition.parent_tenure_id, + "tip_consensus_hash" => ?tip.block.header.consensus_hash, + ); + self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } } + if let Some(last_sortition) = self.last_sortition.as_mut() { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { info!( diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1d2e650207..b65880f24e 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -600,6 +600,15 @@ impl SignerDb { try_deserialize(result) } + /// Return the canonical tip -- the last globally accepted block. 
+ pub fn get_canonical_tip(&self) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE json_extract(block_info, '$.state') = ?1 ORDER BY stacks_height DESC LIMIT 1"; + let args = params![&BlockState::GloballyAccepted.to_string()]; + let result: Option = query_row(&self.db, query, args)?; + + try_deserialize(result) + } + /// Insert or replace a burn block into the database pub fn insert_burn_block( &mut self, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 318c642a46..a2eabfa875 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6886,8 +6886,8 @@ fn tenure_extend_after_failed_miner() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let num_txs = 1; - let sender_nonce = 0; + let num_txs = 2; + let mut sender_nonce = 0; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -7100,6 +7100,7 @@ fn tenure_extend_after_failed_miner() { send_amt, ); submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; // wait for the new block to be processed wait_for(30, || { @@ -7278,8 +7279,8 @@ fn tenure_extend_after_bad_commit() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let num_txs = 1; - let sender_nonce = 0; + let num_txs = 2; + let mut sender_nonce = 0; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -7496,6 +7497,7 @@ fn tenure_extend_after_bad_commit() { send_amt, ); submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; // wait for the new block to be processed wait_for(30, || { @@ -7654,9 +7656,11 @@ fn tenure_extend_after_bad_commit() { }) .expect("Timed out waiting for block to be mined and processed"); + info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); + // Re-enable block commits for miner 2 let rl2_commits_before = 
rl2_commits.load(Ordering::SeqCst); - rl2_skip_commit_op.set(true); + rl2_skip_commit_op.set(false); // Wait for block commit from miner 2 wait_for(30, || { @@ -7664,8 +7668,6 @@ fn tenure_extend_after_bad_commit() { }) .expect("Timed out waiting for block commit from miner 2"); - info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); - let stacks_height_before = signer_test .stacks_client .get_peer_info() From a56a73c6835b03c8ad936b0cbe6642c309a91b3a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 12:03:57 -0500 Subject: [PATCH 004/115] test: add `tenure_extend_after_bad_commit` to yaml file --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 52f46fbc49..985143e9e2 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -126,6 +126,7 @@ jobs: - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::tenure_extend_after_bad_commit - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state From 6438551aef193d793bef7ad6078b3209fe040c51 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 14:23:53 -0500 Subject: [PATCH 005/115] refactor: move the `StackerDBChannel` into the `EventDispatcher` The previous design using a global singleton causes trouble in testing, when we have multiple miners running in different threads of the same process. 
--- testnet/stacks-node/src/event_dispatcher.rs | 22 ++++++++--------- .../stacks-node/src/nakamoto_node/miner.rs | 1 + .../src/nakamoto_node/sign_coordinator.rs | 24 +++++++++++++++---- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 88bfc8dae7..8144cd8ec5 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -18,7 +18,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use std::thread::sleep; use std::time::Duration; @@ -107,17 +107,8 @@ pub const PATH_BLOCK_PROCESSED: &str = "new_block"; pub const PATH_ATTACHMENT_PROCESSED: &str = "attachments/new"; pub const PATH_PROPOSAL_RESPONSE: &str = "proposal_response"; -pub static STACKER_DB_CHANNEL: StackerDBChannel = StackerDBChannel::new(); - /// This struct receives StackerDB event callbacks without registering -/// over the JSON/RPC interface. To ensure that any event observer -/// uses the same channel, we use a lazy_static global for the channel (this -/// implements a singleton using STACKER_DB_CHANNEL). -/// -/// This is in place because a Nakamoto miner needs to receive -/// StackerDB events. It could either poll the database (seems like a -/// bad idea) or listen for events. Registering for RPC callbacks -/// seems bad. So instead, it uses a singleton sync channel. +/// over the JSON/RPC interface. 
pub struct StackerDBChannel { sender_info: Mutex>, } @@ -923,6 +914,8 @@ pub struct EventDispatcher { /// Index into `registered_observers` that will receive block proposal events (Nakamoto and /// later) block_proposal_observers_lookup: HashSet, + /// Channel for sending StackerDB events to the miner coordinator + pub stackerdb_channel: Arc>, } /// This struct is used specifically for receiving proposal responses. @@ -1115,6 +1108,7 @@ impl Default for EventDispatcher { impl EventDispatcher { pub fn new() -> EventDispatcher { EventDispatcher { + stackerdb_channel: Arc::new(Mutex::new(StackerDBChannel::new())), registered_observers: vec![], contract_events_observers_lookup: HashMap::new(), assets_observers_lookup: HashMap::new(), @@ -1544,7 +1538,11 @@ impl EventDispatcher { let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false); - let interested_receiver = STACKER_DB_CHANNEL.is_active(&contract_id); + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel mutex"); + let interested_receiver = stackerdb_channel.is_active(&contract_id); if interested_observers.is_empty() && interested_receiver.is_none() { return; } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 411e4f3be8..fb35d60fc3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -557,6 +557,7 @@ impl BlockMinerThread { miner_privkey, &self.config, self.globals.should_keep_running.clone(), + self.event_dispatcher.stackerdb_channel.clone(), ) .map_err(|e| { NakamotoNodeError::SigningCoordinatorFailure(format!( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 14eeef20b9..2b1efcbfc5 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ 
b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Receiver; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::Duration; use hashbrown::{HashMap, HashSet}; @@ -43,7 +43,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use super::Error as NakamotoNodeError; -use crate::event_dispatcher::STACKER_DB_CHANNEL; +use crate::event_dispatcher::StackerDBChannel; use crate::neon::Counters; use crate::Config; @@ -68,11 +68,16 @@ pub struct SignCoordinator { total_weight: u32, keep_running: Arc, pub next_signer_bitvec: BitVec<4000>, + stackerdb_channel: Arc>, } impl Drop for SignCoordinator { fn drop(&mut self) { - STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect( + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock stackerdb channel"); + stackerdb_channel.replace_receiver(self.receiver.take().expect( "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", )); } @@ -87,6 +92,7 @@ impl SignCoordinator { message_key: StacksPrivateKey, config: &Config, keep_running: Arc, + stackerdb_channel: Arc>, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -150,7 +156,10 @@ impl SignCoordinator { use crate::tests::nakamoto_integrations::TEST_SIGNING; if TEST_SIGNING.lock().unwrap().is_some() { debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); - let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); if replaced_other { warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); } @@ -164,12 +173,16 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, keep_running, + stackerdb_channel, }; return Ok(sign_coordinator); } } - let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); if replaced_other { warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); } @@ -184,6 +197,7 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, keep_running, + stackerdb_channel, }) } From 4420c82c325d7464c5f64e60851c123195f3e0c3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 14:24:51 -0500 Subject: [PATCH 006/115] feat: add an index for block state --- stacks-signer/src/signerdb.rs | 44 +++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index b65880f24e..f4b40dbf9b 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -308,6 +308,10 @@ static CREATE_INDEXES_3: &str = r#" CREATE INDEX IF NOT EXISTS block_rejection_signer_addrs_on_block_signature_hash ON block_rejection_signer_addrs(signer_signature_hash); "#; +static CREATE_INDEXES_4: &str = r#" +CREATE INDEX IF NOT EXISTS blocks_state ON blocks ((json_extract(block_info, '$.state'))); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -345,6 +349,12 @@ static DROP_SCHEMA_2: &str = " DROP TABLE IF EXISTS blocks; DROP TABLE IF EXISTS db_config;"; +static DROP_SCHEMA_3: &str = " + DROP TABLE IF EXISTS burn_blocks; + DROP TABLE IF EXISTS signer_states; + DROP TABLE IF EXISTS blocks; + DROP TABLE IF EXISTS db_config;"; + static CREATE_BLOCK_SIGNATURES_TABLE: &str = r#" CREATE TABLE IF NOT EXISTS 
block_signatures ( -- The block sighash commits to all of the stacks and burnchain state as of its parent, @@ -405,9 +415,24 @@ static SCHEMA_3: &[&str] = &[ "INSERT INTO db_config (version) VALUES (3);", ]; +static SCHEMA_4: &[&str] = &[ + DROP_SCHEMA_3, + CREATE_DB_CONFIG, + CREATE_BURN_STATE_TABLE, + CREATE_BLOCKS_TABLE_2, + CREATE_SIGNER_STATE_TABLE, + CREATE_BLOCK_SIGNATURES_TABLE, + CREATE_BLOCK_REJECTION_SIGNER_ADDRS_TABLE, + CREATE_INDEXES_1, + CREATE_INDEXES_2, + CREATE_INDEXES_3, + CREATE_INDEXES_4, + "INSERT INTO db_config (version) VALUES (4);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 3; + pub const SCHEMA_VERSION: u32 = 4; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -479,6 +504,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 3 to schema 4 + fn schema_4_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 4 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_4.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). @@ -490,7 +529,8 @@ impl SignerDb { 0 => Self::schema_1_migration(&sql_tx)?, 1 => Self::schema_2_migration(&sql_tx)?, 2 => Self::schema_3_migration(&sql_tx)?, - 3 => break, + 3 => Self::schema_4_migration(&sql_tx)?, + 4 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. 
Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, From 3fa811620f3135c12880639809f84357212db3d3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 14:28:12 -0500 Subject: [PATCH 007/115] docs: update changelogs --- CHANGELOG.md | 1 + stacks-signer/CHANGELOG.md | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0470bab77b..3810b805da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Remove the panic for reporting DB deadlocks (just error and continue waiting) - Add index to `metadata_table` in Clarity DB on `blockhash` - Add `block_commit_delay_ms` to the config file to control the time to wait after seeing a new burn block, before submitting a block commit, to allow time for the first Nakamoto block of the new tenure to be mined, allowing this miner to avoid the need to RBF the block commit. +- If the winning miner of a sortition is committed to the wrong parent tenure, the previous miner can immediately tenure extend and continue mining since the winning miner would never be able to propose a valid block. 
(#5361) ## [3.0.0.0.1] diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 3183c0d5c3..2895f1161f 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Allow a miner to extend their tenure immediately if the winner of the next tenure has committed to the wrong parent tenure (#5361) + ## [3.0.0.0.1.0] ### Changed From df8f240daa0fa4fe4dacc07d63df3d4d03c70ec9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 15:31:12 -0500 Subject: [PATCH 008/115] chore: improve comment about checking the parent tenure --- stacks-signer/src/chainstate.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index aea2d93ef0..f469f3a882 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -204,11 +204,13 @@ impl SortitionsView { ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } else if let Some(tip) = signer_db.get_canonical_tip()? { - // If this is a tenure change block, then the current sortition's parent tenure must be - // the canonical tip's tenure. If it's not, then the current tip may already be in this - // tenure. - if self.cur_sortition.parent_tenure_id != tip.block.header.consensus_hash - && self.cur_sortition.consensus_hash != tip.block.header.consensus_hash + // Check if the current sortition is aligned with the expected tenure: + // - If the tip is in the current tenure, we are in the process of mining this tenure. + // - If the tip is not in the current tenure, then we’re starting a new tenure, + // and the current sortition's parent tenure must match the tenure of the tip. + // - Else the miner of the current sortition has committed to an incorrect parent tenure. 
+ if self.cur_sortition.consensus_hash != tip.block.header.consensus_hash + && self.cur_sortition.parent_tenure_id != tip.block.header.consensus_hash { warn!( "Current sortition does not build off of canonical tip tenure, marking as invalid"; From 44769cf0dd15d7450a802598180d4333c6e39b1d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 18:30:23 -0500 Subject: [PATCH 009/115] test: add unit test for `SignerDb::get_canonical_tip` --- stacks-signer/src/signerdb.rs | 41 +++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index f4b40dbf9b..28b771d145 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -1275,4 +1275,45 @@ mod tests { assert!(!block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); } + + #[test] + fn test_get_canonical_tip() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let (mut block_info_1, _block_proposal_1) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + + let (mut block_info_2, _block_proposal_2) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert!(db.get_canonical_tip().unwrap().is_none()); + + block_info_1 + .mark_globally_accepted() + .expect("Failed to mark block as globally accepted"); + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + + assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_1); + + block_info_2 + .mark_globally_accepted() + .expect("Failed to mark block as 
globally accepted"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_2); + } } From 4c7c5aab838a168683e66048f4485966dae4ec3e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 18:34:01 -0500 Subject: [PATCH 010/115] chore: remove unnecessary log --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 805703264e..0fe5efcdee 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -928,11 +928,6 @@ impl RelayerThread { // If we won the last good sortition, then we should extend off of it. let last_good_block_election_snapshot = { let ih = self.sortdb.index_handle(&burn_tip.sortition_id); - info!( - "Relayer: Getting last snapshot with sortition for {}", - burn_tip.block_height - ); - let sn = ih .get_last_snapshot_with_sortition(burn_tip.block_height) .map_err(|e| { From a9acfa048f846cf99c136c5ab01fc1b9c347ddda Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 18:39:32 -0500 Subject: [PATCH 011/115] feat: simplify signerdb migration --- stacks-signer/src/signerdb.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 28b771d145..5bd201540c 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -416,18 +416,8 @@ static SCHEMA_3: &[&str] = &[ ]; static SCHEMA_4: &[&str] = &[ - DROP_SCHEMA_3, - CREATE_DB_CONFIG, - CREATE_BURN_STATE_TABLE, - CREATE_BLOCKS_TABLE_2, - CREATE_SIGNER_STATE_TABLE, - CREATE_BLOCK_SIGNATURES_TABLE, - CREATE_BLOCK_REJECTION_SIGNER_ADDRS_TABLE, - CREATE_INDEXES_1, - CREATE_INDEXES_2, - CREATE_INDEXES_3, CREATE_INDEXES_4, - "INSERT INTO db_config (version) VALUES (4);", + "INSERT OR 
REPLACE INTO db_config (version) VALUES (4);", ]; impl SignerDb { @@ -452,7 +442,7 @@ impl SignerDb { return Ok(0); } let result = conn - .query_row("SELECT version FROM db_config LIMIT 1", [], |row| { + .query_row("SELECT MAX(version) FROM db_config LIMIT 1", [], |row| { row.get(0) }) .optional(); From 58fda005622d95e119c0909a3ee3658b0247c79e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 21:54:22 -0500 Subject: [PATCH 012/115] chore: cleanup unused --- stacks-signer/src/signerdb.rs | 6 ------ testnet/stacks-node/src/tests/epoch_25.rs | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5bd201540c..ff264f3cef 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -349,12 +349,6 @@ static DROP_SCHEMA_2: &str = " DROP TABLE IF EXISTS blocks; DROP TABLE IF EXISTS db_config;"; -static DROP_SCHEMA_3: &str = " - DROP TABLE IF EXISTS burn_blocks; - DROP TABLE IF EXISTS signer_states; - DROP TABLE IF EXISTS blocks; - DROP TABLE IF EXISTS db_config;"; - static CREATE_BLOCK_SIGNATURES_TABLE: &str = r#" CREATE TABLE IF NOT EXISTS block_signatures ( -- The block sighash commits to all of the stacks and burnchain state as of its parent, diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 34083fb22a..bc30e51528 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::{next_block_and, wait_for}; +use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, From 21788465fb012c57a692e523a6fb2a2dce3b5355 
Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 21:59:23 -0500 Subject: [PATCH 013/115] refactor: clean up `continue_tenure` --- .../stacks-node/src/nakamoto_node/relayer.rs | 232 +++++++++++------- 1 file changed, 138 insertions(+), 94 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 0fe5efcdee..d86dcc3fad 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -877,74 +877,113 @@ impl RelayerThread { Ok(()) } - fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { - if let Err(e) = self.stop_tenure() { - error!("Relayer: Failed to stop tenure: {e:?}"); - return Ok(()); - } - debug!("Relayer: successfully stopped tenure."); - // Check if we should undergo a tenure change to switch to the new burn view - let burn_tip = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for new burn view: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? - .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for new burn view"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; + fn get_burn_tip_snapshot( + &self, + new_burn_view: &ConsensusHash, + ) -> Result { + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), new_burn_view) + .map_err(|e| { + error!( + "Relayer: failed to get block snapshot for new burn view: {:?}", + e + ); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? 
+ .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for new burn view"); + NakamotoNodeError::SnapshotNotFoundForChainTip + }) + } - let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = + fn get_canonical_stacks_tip(&self) -> StacksBlockId { + let (ch, bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); - let canonical_stacks_tip = - StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); + StacksBlockId::new(&ch, &bh) + } + fn get_mining_key_pkh(&self) -> Option { let Some(ref mining_key) = self.config.miner.mining_key else { - return Ok(()); + return None; }; - let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); + Some(Hash160::from_node_public_key( + &StacksPublicKey::from_private(mining_key), + )) + } + fn get_highest_tenure_bhh( + &self, + tip_block_id: &StacksBlockId, + tip_ch: &ConsensusHash, + ) -> Result { let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), - &canonical_stacks_tip, - &canonical_stacks_tip_ch, + tip_block_id, + &tip_ch, ) .map_err(|e| { error!( - "Relayer: Failed to get tenure-start block header for stacks tip {canonical_stacks_tip}: {e:?}" + "Relayer: Failed to get tenure-start block header for stacks tip {tip_block_id}: {e:?}" ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { error!( - "Relayer: Failed to find tenure-start block header for stacks tip {canonical_stacks_tip}" + "Relayer: Failed to find tenure-start block header for stacks tip {tip_block_id}" ); NakamotoNodeError::ParentNotFound })?; - let highest_tenure_bhh = - BlockHeaderHash(highest_tenure_start_block_header.index_block_hash().0); + Ok(BlockHeaderHash( + highest_tenure_start_block_header.index_block_hash().0, + )) + } - // If we won the last good sortition, then we should extend off of it. 
- let last_good_block_election_snapshot = { - let ih = self.sortdb.index_handle(&burn_tip.sortition_id); - let sn = ih - .get_last_snapshot_with_sortition(burn_tip.block_height) - .map_err(|e| { - error!("Relayer: failed to get last snapshot with sortition: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - if sn.winning_stacks_block_hash != highest_tenure_bhh { - info!( - "Relayer: Sortition winner is not committed to the canonical tip; allowing last miner to extend"; - "burn_block_height" => burn_tip.block_height, - "consensus_hash" => %burn_tip.consensus_hash, - ); + fn determine_tenure_type( + &self, + canonical_snapshot: BlockSnapshot, + last_snapshot: BlockSnapshot, + new_burn_view: ConsensusHash, + mining_pkh: Hash160, + ) -> (StacksBlockId, BlockSnapshot, MinerReason) { + if canonical_snapshot.miner_pk_hash != Some(mining_pkh) { + debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issue a new tenure change payload."); + ( + StacksBlockId(last_snapshot.winning_stacks_block_hash.0), + last_snapshot, + MinerReason::EmptyTenure, + ) + } else { + debug!("Relayer: Successfully issued a tenure change payload. 
Issue a continue extend from the chain tip."); + ( + self.get_canonical_stacks_tip(), + canonical_snapshot, + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + }, + ) + } + } - SortitionDB::get_block_snapshot_consensus( - self.sortdb.conn(), - &canonical_stacks_tip_ch, - ) + fn get_last_good_block_snapshot( + &self, + burn_tip: &BlockSnapshot, + highest_tenure_bhh: &BlockHeaderHash, + canonical_stacks_tip_ch: &ConsensusHash, + ) -> Result { + let ih = self.sortdb.index_handle(&burn_tip.sortition_id); + let sn = ih + .get_last_snapshot_with_sortition(burn_tip.block_height) + .map_err(|e| { + error!("Relayer: failed to get last snapshot with sortition: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + if &sn.winning_stacks_block_hash != highest_tenure_bhh { + info!( + "Relayer: Sortition winner is not committed to the canonical tip; allowing last miner to extend"; + "burn_block_height" => burn_tip.block_height, + "consensus_hash" => %burn_tip.consensus_hash, + ); + + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), canonical_stacks_tip_ch) .map_err(|e| { error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip @@ -952,11 +991,47 @@ impl RelayerThread { .ok_or_else(|| { error!("Relayer: failed to get block snapshot for canonical tip"); NakamotoNodeError::SnapshotNotFoundForChainTip - })? - } else { - sn - } + }) + } else { + Ok(sn) + } + } + + fn get_block_snapshot(&self, ch: &ConsensusHash) -> Result { + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &ch) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? 
+ .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + }) + } + + fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { + if let Err(e) = self.stop_tenure() { + error!("Relayer: Failed to stop tenure: {e:?}"); + return Ok(()); + } + debug!("Relayer: successfully stopped tenure."); + + // Get the necessary snapshots and state + let burn_tip = self.get_burn_tip_snapshot(&new_burn_view)?; + let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); + let canonical_stacks_tip = + StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); + let Some(mining_pkh) = self.get_mining_key_pkh() else { + return Ok(()); }; + let highest_tenure_bhh = + self.get_highest_tenure_bhh(&canonical_stacks_tip, &canonical_stacks_tip_ch)?; + let last_good_block_election_snapshot = self.get_last_good_block_snapshot( + &burn_tip, + &highest_tenure_bhh, + &canonical_stacks_tip_ch, + )?; let won_last_sortition = last_good_block_election_snapshot.miner_pk_hash == Some(mining_pkh); @@ -976,54 +1051,23 @@ impl RelayerThread { return Ok(()); } - let canonical_block_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? - .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for canonical tip"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - - let won_canonical_block_snapshot = - canonical_block_snapshot.miner_pk_hash == Some(mining_pkh); - - let (parent_tenure_start, block_election_snapshot, reason) = - if !won_canonical_block_snapshot { - debug!("Relayer: Failed to issue a tenure change payload in our last tenure. 
Issue a new tenure change payload."); - ( - StacksBlockId( - last_good_block_election_snapshot - .winning_stacks_block_hash - .0, - ), - last_good_block_election_snapshot, - MinerReason::EmptyTenure, - ) - } else { - debug!("Relayer: Successfully issued a tenure change payload in its tenure. Issue a continue extend from the chain tip."); - ( - canonical_stacks_tip, //For tenure extend, we should be extending off the canonical tip - canonical_block_snapshot, - MinerReason::Extended { - burn_view_consensus_hash: new_burn_view, - }, - ) - }; - match self.start_new_tenure( + let canonical_snapshot = self.get_block_snapshot(&canonical_stacks_tip_ch)?; + let (parent_tenure_start, block_election_snapshot, reason) = self.determine_tenure_type( + canonical_snapshot, + last_good_block_election_snapshot, + new_burn_view, + mining_pkh, + ); + + if let Err(e) = self.start_new_tenure( parent_tenure_start, block_election_snapshot, burn_tip, reason, ) { - Ok(()) => { - debug!("Relayer: successfully started new tenure."); - } - Err(e) => { - error!("Relayer: Failed to start new tenure: {e:?}"); - } + error!("Relayer: Failed to start new tenure: {e:?}"); + } else { + debug!("Relayer: successfully started new tenure."); } Ok(()) } From d8140e063328fb8bb8e0f89e61831b6df69f8117 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 22:15:25 -0500 Subject: [PATCH 014/115] refactor: `last_block_contains_tenure_change_tx` --- testnet/stacks-node/src/tests/signer/v0.rs | 47 ++++++++-------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fc26e2b482..eaf57dffd7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -420,20 +420,25 @@ impl SignerTest { } } -fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) { +fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool { let 
blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); let tx = transactions.first().expect("No transactions in block"); let raw_tx = tx["raw_tx"].as_str().unwrap(); let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, cause); + TransactionPayload::TenureChange(payload) if payload.cause == cause => { + info!("Found tenure change transaction: {parsed:?}"); + true } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; + _ => false, + } +} + +fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) { + assert!(last_block_contains_tenure_change_tx(cause)); } #[test] @@ -2800,27 +2805,9 @@ fn empty_sortition_before_approval() { // Wait for a block with a tenure extend to be mined wait_for(60, || { - let blocks = test_observer::get_blocks(); - let last_block = blocks.last().unwrap(); - info!("Last block mined: {:?}", last_block); - for tx in last_block["transactions"].as_array().unwrap() { - let raw_tx = tx["raw_tx"].as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::TenureChange(payload) = &parsed.payload { - match payload.cause { - TenureChangeCause::Extended => { - info!("Found tenure extend block"); - return Ok(true); - } - TenureChangeCause::BlockFound => {} - } - }; - } - Ok(false) + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) }) .expect("Timed out waiting for tenure extend"); @@ -5858,8 +5845,8 @@ fn 
continue_after_fast_block_no_sortition() { // Some helper functions for verifying the blocks contain their expected transactions let verify_last_block_contains_transfer_tx = || { let blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); let tx = transactions.first().expect("No transactions in block"); let raw_tx = tx["raw_tx"].as_str().unwrap(); let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); From 54c88c6a253f40e85755c36f57e1999ea47ca4f7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 14 Nov 2024 22:24:33 -0500 Subject: [PATCH 015/115] test: additional checks requested in PR review --- testnet/stacks-node/src/tests/signer/v0.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index eaf57dffd7..5f5890183b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7702,7 +7702,7 @@ fn tenure_extend_after_bad_commit() { ) .unwrap(); - // assure we have a successful sortition that miner B won + // assure we have a successful sortition that miner 1 won let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); @@ -7756,6 +7756,11 @@ fn tenure_extend_after_bad_commit() { ) .expect("Timed out waiting for burn block to be processed"); + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + info!("------------------------- Miner 1 Extends Tenure B -------------------------"); // wait for a tenure extend block from miner 1 to 
be processed @@ -7844,6 +7849,12 @@ fn tenure_extend_after_bad_commit() { ) .expect("Timed out waiting for final block to be mined and processed"); + // assure we have a successful sortition that miner 2 won and it had a block found tenure change + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + info!("------------------------- Shutdown -------------------------"); rl2_coord_channels .lock() From ba2faf7035b9e3c472d21685121c75f139f02dd2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 15 Nov 2024 09:16:01 -0500 Subject: [PATCH 016/115] feat: add ability to disable tenure-extend for tests This is useful when checking the behavior during forking. --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 ++ .../stacks-node/src/nakamoto_node/relayer.rs | 8 +++++++- testnet/stacks-node/src/tests/signer/v0.rs | 18 +++++++++++++++++- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb35d60fc3..877eab88a1 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -62,6 +62,8 @@ pub static TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mut pub static TEST_BLOCK_ANNOUNCE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); #[cfg(test)] pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(test)] +pub static TEST_NO_TENURE_EXTEND: std::sync::Mutex> = std::sync::Mutex::new(None); /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? 
diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index d86dcc3fad..5aef6f2612 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -58,7 +58,7 @@ use super::{ BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; +use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective, TEST_NO_TENURE_EXTEND}; use crate::neon_node::{ fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, }; @@ -1016,6 +1016,12 @@ impl RelayerThread { } debug!("Relayer: successfully stopped tenure."); + #[cfg(test)] + if *TEST_NO_TENURE_EXTEND.lock().unwrap() == Some(true) { + info!("Relayer: TEST_NO_TENURE_EXTEND is set; skipping tenure extension."); + return Ok(()); + } + // Get the necessary snapshots and state let burn_tip = self.get_burn_tip_snapshot(&new_burn_view)?; let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5f5890183b..72c8e29edb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -67,7 +67,7 @@ use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{ - TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_NO_TENURE_EXTEND, }; use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; @@ -952,6 +952,10 @@ fn forked_tenure_testing( sleep_ms(1000); info!("------------------------- Reached Epoch 3.0 -------------------------"); + // Disable tenure extend so that miners will not tenure extend when the + // test is checking for fork 
behavior. + TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); + let naka_conf = signer_test.running_nodes.conf.clone(); let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -1283,6 +1287,10 @@ fn bitcoind_forking_test() { let pre_epoch_3_nonce = get_account(&http_origin, &miner_address).nonce; let pre_fork_tenures = 10; + // Disable tenure extend so that miners will not tenure extend when the + // test is checking for fork behavior. + TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); + for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); @@ -1924,6 +1932,10 @@ fn miner_forking() { "RL1 did not win the sortition" ); + // Disable tenure extend so that miners will not tenure extend when the + // test is checking for fork behavior. + TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); + info!( "------------------------- RL2 Wins Sortition With Outdated View -------------------------" ); @@ -4261,6 +4273,10 @@ fn partial_tenure_fork() { info!("------------------------- Reached Epoch 3.0 -------------------------"); + // Disable tenure extend so that miners will not tenure extend when the + // test is checking for fork behavior. 
+ TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); + // due to the random nature of mining sortitions, the way this test is structured // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the From 965f58ba294f965741079c83d69c974310df2edd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 15 Nov 2024 09:47:29 -0500 Subject: [PATCH 017/115] fix: fix import for test-only feature --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 5aef6f2612..ef73c252c4 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -58,7 +58,9 @@ use super::{ BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective, TEST_NO_TENURE_EXTEND}; +#[cfg(test)] +use crate::nakamoto_node::miner::TEST_NO_TENURE_EXTEND; +use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; use crate::neon_node::{ fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, }; From cd5e7cc7b084151ea796b34e10ba79cb743b72f1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 15 Nov 2024 09:56:44 -0500 Subject: [PATCH 018/115] refactor: add comments and improve naming --- .../stacks-node/src/nakamoto_node/relayer.rs | 38 +++++++++++++++---- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef73c252c4..c8228e5375 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -879,11 +879,12 @@ impl RelayerThread { Ok(()) } - fn get_burn_tip_snapshot( + /// Get a 
snapshot for an existing burn chain block given its consensus hash. + fn get_block_snapshot_consensus( &self, - new_burn_view: &ConsensusHash, + ch: &ConsensusHash, ) -> Result { - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), new_burn_view) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), ch) .map_err(|e| { error!( "Relayer: failed to get block snapshot for new burn view: {:?}", @@ -897,12 +898,14 @@ impl RelayerThread { }) } + /// Get the Stacks block ID for the canonical tip. fn get_canonical_stacks_tip(&self) -> StacksBlockId { let (ch, bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); StacksBlockId::new(&ch, &bh) } + /// Get the public key hash for the mining key. fn get_mining_key_pkh(&self) -> Option { let Some(ref mining_key) = self.config.miner.mining_key else { return None; @@ -912,15 +915,22 @@ impl RelayerThread { )) } - fn get_highest_tenure_bhh( + /// Get the tenure-start block header hash of a given consensus hash. + /// For Nakamoto blocks, this is the first block in the tenure identified by the consensus + /// hash. + /// For epoch2 blocks, this is simply the block whose winning sortition happened in the + /// sortition identified by the consensus hash. + /// + /// `tip_block_id` is the chain tip from which to perform the query. + fn get_tenure_bhh( &self, tip_block_id: &StacksBlockId, - tip_ch: &ConsensusHash, + ch: &ConsensusHash, ) -> Result { let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), tip_block_id, - &tip_ch, + &ch, ) .map_err(|e| { error!( @@ -939,6 +949,8 @@ impl RelayerThread { )) } + /// Determine the type of tenure change to issue based on whether this + /// miner successfully issued a tenure change in the last tenure. 
fn determine_tenure_type( &self, canonical_snapshot: BlockSnapshot, @@ -965,6 +977,9 @@ impl RelayerThread { } } + /// Get the block snapshot of the most recent sortition that committed to + /// the canonical tip. If the latest sortition did not commit to the + /// canonical tip, then the tip's tenure is the last good sortition. fn get_last_good_block_snapshot( &self, burn_tip: &BlockSnapshot, @@ -1011,6 +1026,13 @@ impl RelayerThread { }) } + /// Attempt to continue a miner's tenure into the next burn block. + /// This is allowed if the miner won the last good sortition and one of the + /// following conditions is met: + /// - There was no sortition in the latest burn block + /// - The winner of the latest sortition did not commit to the canonical tip + /// - The winner of the latest sortition did not mine any blocks within the + /// timeout period (not yet implemented) fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {e:?}"); @@ -1025,7 +1047,7 @@ impl RelayerThread { } // Get the necessary snapshots and state - let burn_tip = self.get_burn_tip_snapshot(&new_burn_view)?; + let burn_tip = self.get_block_snapshot_consensus(&new_burn_view)?; let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); let canonical_stacks_tip = @@ -1034,7 +1056,7 @@ impl RelayerThread { return Ok(()); }; let highest_tenure_bhh = - self.get_highest_tenure_bhh(&canonical_stacks_tip, &canonical_stacks_tip_ch)?; + self.get_tenure_bhh(&canonical_stacks_tip, &canonical_stacks_tip_ch)?; let last_good_block_election_snapshot = self.get_last_good_block_snapshot( &burn_tip, &highest_tenure_bhh, From 52e0d1f1775182aed25300585ec5430858a0038b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 15 Nov 2024 14:03:06 -0800 Subject: [PATCH 019/115] Add tenure_extend_timestamp to Block Response 
Reject and Accept messages Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 54 ++++++++++++++++--- stacks-signer/CHANGELOG.md | 2 + stacks-signer/src/client/stackerdb.rs | 1 + stacks-signer/src/tests/conf/signer-0.toml | 2 +- stacks-signer/src/v0/signer.rs | 26 ++++++++- .../src/nakamoto_node/sign_coordinator.rs | 1 + 6 files changed, 76 insertions(+), 10 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 618aa20937..e303888462 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -638,11 +638,16 @@ impl std::fmt::Display for BlockResponse { impl BlockResponse { /// Create a new accepted BlockResponse for the provided block signer signature hash and signature - pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self { + pub fn accepted( + signer_signature_hash: Sha512Trunc256Sum, + signature: MessageSignature, + tenure_extend_timestamp: u64, + ) -> Self { Self::Accepted(BlockAccepted { - signer_signature_hash: hash, - signature: sig, + signer_signature_hash, + signature, metadata: SignerMessageMetadata::default(), + tenure_extend_timestamp, }) } @@ -652,8 +657,15 @@ impl BlockResponse { reject_code: RejectCode, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { - Self::Rejected(BlockRejection::new(hash, reject_code, private_key, mainnet)) + Self::Rejected(BlockRejection::new( + hash, + reject_code, + private_key, + mainnet, + timestamp, + )) } } @@ -748,6 +760,8 @@ pub struct BlockAccepted { pub signature: MessageSignature, /// Signer message metadata pub metadata: SignerMessageMetadata, + /// The timestamp at which a tenure extend will be accepted by the responding signer + pub tenure_extend_timestamp: u64, } impl StacksMessageCodec for BlockAccepted { @@ -755,6 +769,7 @@ impl StacksMessageCodec for BlockAccepted { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; + 
write_next(fd, &self.tenure_extend_timestamp)?; Ok(()) } @@ -762,21 +777,28 @@ impl StacksMessageCodec for BlockAccepted { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; + let tenure_extend_timestamp = read_next::(fd).unwrap_or_default(); Ok(Self { signer_signature_hash, signature, metadata, + tenure_extend_timestamp, }) } } impl BlockAccepted { /// Create a new BlockAccepted for the provided block signer signature hash and signature - pub fn new(signer_signature_hash: Sha512Trunc256Sum, signature: MessageSignature) -> Self { + pub fn new( + signer_signature_hash: Sha512Trunc256Sum, + signature: MessageSignature, + tenure_extend_timestamp: u64, + ) -> Self { Self { signer_signature_hash, signature, metadata: SignerMessageMetadata::default(), + tenure_extend_timestamp, } } } @@ -796,6 +818,8 @@ pub struct BlockRejection { pub chain_id: u32, /// Signer message metadata pub metadata: SignerMessageMetadata, + /// The timestamp at which a tenure extend will be accepted by the responding signer + pub tenure_extend_timestamp: u64, } impl BlockRejection { @@ -805,6 +829,7 @@ impl BlockRejection { reason_code: RejectCode, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { let chain_id = if mainnet { CHAIN_ID_MAINNET @@ -818,6 +843,7 @@ impl BlockRejection { signature: MessageSignature::empty(), chain_id, metadata: SignerMessageMetadata::default(), + tenure_extend_timestamp: timestamp, }; rejection .sign(private_key) @@ -830,6 +856,7 @@ impl BlockRejection { reject: BlockValidateReject, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { let chain_id = if mainnet { CHAIN_ID_MAINNET @@ -843,6 +870,7 @@ impl BlockRejection { chain_id, signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + tenure_extend_timestamp: timestamp, }; rejection .sign(private_key) @@ -893,6 +921,7 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, 
&self.chain_id)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; + write_next(fd, &self.tenure_extend_timestamp)?; Ok(()) } @@ -906,6 +935,7 @@ impl StacksMessageCodec for BlockRejection { let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; + let tenure_extend_timestamp = read_next::(fd).unwrap_or_default(); Ok(Self { reason, reason_code, @@ -913,6 +943,7 @@ impl StacksMessageCodec for BlockRejection { chain_id, signature, metadata, + tenure_extend_timestamp, }) } } @@ -1046,6 +1077,7 @@ mod test { RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -1057,6 +1089,7 @@ mod test { RejectCode::ConnectivityIssues, &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -1070,6 +1103,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + tenure_extend_timestamp: thread_rng().next_u64(), }; let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); @@ -1082,6 +1116,7 @@ mod test { RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) @@ -1095,6 +1130,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + tenure_extend_timestamp: thread_rng().next_u64(), }; let 
signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); @@ -1258,6 +1294,7 @@ mod test { chain_id: CHAIN_ID_TESTNET, signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), + tenure_extend_timestamp: 0 })) ); @@ -1270,15 +1307,16 @@ mod test { .unwrap(), metadata: SignerMessageMetadata::empty(), signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + tenure_extend_timestamp: 0 })) ); } #[test] fn test_block_response_metadata() { - let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c64"; + let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c6400"; let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); - let block_accepted_hex = 
"010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c64"; + let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c6400"; let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); let block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); @@ -1296,6 +1334,7 @@ mod test { metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, + tenure_extend_timestamp: 0, })) ); @@ -1310,6 +1349,7 @@ mod test { server_version: "Hello world".to_string(), }, signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + tenure_extend_timestamp: 0 })) ); } diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 3183c0d5c3..2bba1db19f 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Add tenure extend timestamp to signer block responses + ## [3.0.0.0.1.0] ### Changed diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 117dd4814f..b1a50da2df 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -286,6 +286,7 @@ mod tests { chain_id: thread_rng().next_u32(), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::empty(), + tenure_extend_timestamp: thread_rng().next_u64(), }; let signer_message = 
SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/tests/conf/signer-0.toml b/stacks-signer/src/tests/conf/signer-0.toml index 19002c1914..26a9380dcb 100644 --- a/stacks-signer/src/tests/conf/signer-0.toml +++ b/stacks-signer/src/tests/conf/signer-0.toml @@ -1,6 +1,6 @@ stacks_private_key = "6a1fc1a3183018c6d79a4e11e154d2bdad2d89ac8bc1b0a021de8b4d28774fbb01" node_host = "127.0.0.1:20443" -endpoint = "localhost:30000" +endpoint = "[::1]:30000" network = "testnet" auth_password = "12345" db_path = ":memory:" diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 081d8b6a6b..ef0cf88eb6 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -299,7 +299,11 @@ impl Signer { .private_key .sign(block_info.signer_signature_hash().bits()) .expect("Failed to sign block"); - BlockResponse::accepted(block_info.signer_signature_hash(), signature) + BlockResponse::accepted( + block_info.signer_signature_hash(), + signature, + self.calculate_tenure_extend_timestamp(), + ) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); BlockResponse::rejected( @@ -307,6 +311,7 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), ) }; Some(response) @@ -409,6 +414,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), )) } // Block proposal is bad @@ -423,6 +429,7 @@ impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), )) } // Block proposal passed check, still don't know if valid @@ -439,6 +446,7 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), )) }; @@ -569,7 +577,11 @@ impl Signer { self.signer_db .insert_block(&block_info) 
.unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - let accepted = BlockAccepted::new(block_info.signer_signature_hash(), signature); + let accepted = BlockAccepted::new( + block_info.signer_signature_hash(), + signature, + self.calculate_tenure_extend_timestamp(), + ); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); Some(BlockResponse::Accepted(accepted)) @@ -623,6 +635,7 @@ impl Signer { block_validate_reject.clone(), &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), ); self.signer_db .insert_block(&block_info) @@ -720,6 +733,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), ); if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -865,6 +879,7 @@ impl Signer { signer_signature_hash: block_hash, signature, metadata, + .. } = accepted; debug!( "{self}: Received a block-accept signature: ({block_hash}, {signature}, {})", @@ -1101,6 +1116,7 @@ impl Signer { RejectCode::TestingDirective, &self.private_key, self.mainnet, + self.calculate_tenure_extend_timestamp(), )) } else { None @@ -1119,4 +1135,10 @@ impl Signer { warn!("{self}: Failed to send mock signature to stacker-db: {e:?}",); } } + + /// Calculate the tenure extend timestamp based on the tenure start and already consumed idle time + fn calculate_tenure_extend_timestamp(&self) -> u64 { + // TODO: udpate this to grab the idle time consumed against the tenure start time + get_epoch_time_secs() + } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 14eeef20b9..0785f64818 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -458,6 +458,7 @@ impl SignCoordinator { signer_signature_hash: 
response_hash, signature, metadata, + tenure_extend_timestamp: _, // TOOD: utilize this info } = accepted; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { From 9a327c4c6fdaaa5980fd9f79f96e6a9464f56644 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 15 Nov 2024 14:06:42 -0800 Subject: [PATCH 020/115] Change to use u64::MAX in default value of tenure_extend_timestamp Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index e303888462..c6af0b4860 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -777,7 +777,7 @@ impl StacksMessageCodec for BlockAccepted { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; - let tenure_extend_timestamp = read_next::(fd).unwrap_or_default(); + let tenure_extend_timestamp = read_next::(fd).unwrap_or(u64::MAX); Ok(Self { signer_signature_hash, signature, @@ -935,7 +935,7 @@ impl StacksMessageCodec for BlockRejection { let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; - let tenure_extend_timestamp = read_next::(fd).unwrap_or_default(); + let tenure_extend_timestamp = read_next::(fd).unwrap_or(u64::MAX); Ok(Self { reason, reason_code, @@ -1294,7 +1294,7 @@ mod test { chain_id: CHAIN_ID_TESTNET, signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), - tenure_extend_timestamp: 0 + tenure_extend_timestamp: u64::MAX })) ); @@ -1307,7 +1307,7 @@ mod test { .unwrap(), metadata: SignerMessageMetadata::empty(), signature: 
MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), - tenure_extend_timestamp: 0 + tenure_extend_timestamp: u64::MAX })) ); } From 9b4b89ccd0a647672cef35fe8568273d488bd76d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 15 Nov 2024 15:50:17 -0800 Subject: [PATCH 021/115] Fix failing test Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index c6af0b4860..f900ea4386 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -1334,7 +1334,7 @@ mod test { metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, - tenure_extend_timestamp: 0, + tenure_extend_timestamp: u64::MAX, })) ); @@ -1349,7 +1349,7 @@ mod test { server_version: "Hello world".to_string(), }, signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), - tenure_extend_timestamp: 0 + tenure_extend_timestamp: u64::MAX })) ); } From 4de3da9eb55d4aa07814b9f4154a044d629b3559 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 15 Nov 2024 16:28:32 -0800 Subject: [PATCH 022/115] Add timestamp calculation to all block responses Signed-off-by: Jacinta Ferrant --- stacks-signer/CHANGELOG.md | 1 + stacks-signer/src/chainstate.rs | 3 + stacks-signer/src/client/mod.rs | 1 + stacks-signer/src/config.rs | 14 ++ stacks-signer/src/runloop.rs | 1 + stacks-signer/src/signerdb.rs | 154 ++++++++++++++++++ stacks-signer/src/tests/chainstate.rs | 1 + stacks-signer/src/v0/signer.rs | 106 +++++++++--- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 5 + testnet/stacks-node/src/tests/signer/v0.rs | 2 + 11 files changed, 266 insertions(+), 24 
deletions(-) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 2bba1db19f..38a4508320 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Add tenure extend timestamp to signer block responses +- Added tenure_idle_timeout_secs configuration option for determining when a tenure extend will be accepted ## [3.0.0.0.1.0] diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index fa24c8b22e..66bf173941 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -122,6 +122,8 @@ pub struct ProposalEvalConfig { /// Time to wait for the last block of a tenure to be globally accepted or rejected before considering /// a new miner's block at the same height as valid. pub tenure_last_block_proposal_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -130,6 +132,7 @@ impl From<&SignerConfig> for ProposalEvalConfig { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, tenure_last_block_proposal_timeout: value.tenure_last_block_proposal_timeout, + tenure_idle_timeout: value.tenure_idle_timeout, } } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 37706368dc..ba55bd9810 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -413,6 +413,7 @@ pub(crate) mod tests { block_proposal_timeout: config.block_proposal_timeout, tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: config.block_proposal_validation_timeout, + tenure_idle_timeout: config.tenure_idle_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 
57c90ab0eb..1ea0415d82 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -38,6 +38,7 @@ const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; +const TENURE_IDLE_TIMEOUT_SECS: u64 = 300; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -135,6 +136,8 @@ pub struct SignerConfig { pub tenure_last_block_proposal_timeout: Duration, /// How much time to wait for a block proposal validation response before marking the block invalid pub block_proposal_validation_timeout: Duration, + /// How much idle tie must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } /// The parsed configuration for the signer @@ -171,6 +174,8 @@ pub struct GlobalConfig { /// How long to wait for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } /// Internal struct for loading up the config file @@ -206,6 +211,8 @@ struct RawConfigFile { /// How long to wait (in millisecs) for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout_ms: Option, + /// How much idle time (in seconds) must pass before a tenure extend is allowed + pub tenure_idle_timeout_secs: Option, } impl RawConfigFile { @@ -297,6 +304,12 @@ impl TryFrom for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), ); + let tenure_idle_timeout = Duration::from_secs( + raw_data + .tenure_idle_timeout_secs + .unwrap_or(TENURE_IDLE_TIMEOUT_SECS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -312,6 +325,7 @@ 
impl TryFrom for GlobalConfig { chain_id: raw_data.chain_id, tenure_last_block_proposal_timeout, block_proposal_validation_timeout, + tenure_idle_timeout, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index c8f6041478..11faadf871 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -285,6 +285,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo block_proposal_timeout: self.config.block_proposal_timeout, tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, + tenure_idle_timeout: self.config.tenure_idle_timeout, })) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1d2e650207..4ec701172e 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -159,6 +159,8 @@ pub struct BlockInfo { pub state: BlockState, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, + /// Time at which the proposal was processed (epoch time in seconds) + pub processed_time: Option, } impl From for BlockInfo { @@ -175,6 +177,7 @@ impl From for BlockInfo { signed_group: None, ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, + processed_time: None, } } } @@ -190,6 +193,7 @@ impl BlockInfo { } else { self.signed_self.get_or_insert(get_epoch_time_secs()); } + self.processed_time = Some(get_epoch_time_secs()); Ok(()) } @@ -809,6 +813,20 @@ impl SignerDb { BlockState::try_from(state.as_str()).map_err(|_| DBError::Corruption)?, )) } + + /// Return the all globally accepted block in a tenure (identified by its consensus hash). 
+ pub fn get_globally_accepted_blocks( + &self, + tenure: &ConsensusHash, + ) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2"; + let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; + let result: Vec = query_rows(&self.db, query, args)?; + result + .iter() + .map(|info| serde_json::from_str(info).map_err(DBError::from)) + .collect() + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -1185,6 +1203,7 @@ mod tests { 12345 ); } + #[test] fn state_machine() { let (mut block, _) = create_block(); @@ -1226,4 +1245,139 @@ mod tests { assert!(!block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); } + + #[test] + fn get_accepted_blocks() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let consensus_hash_1 = ConsensusHash([0x01; 20]); + let consensus_hash_2 = ConsensusHash([0x02; 20]); + let consensus_hash_3 = ConsensusHash([0x03; 20]); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + let (mut block_info_3, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 3; + }); + let (mut block_info_4, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = 
MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 4; + }); + block_info_1.mark_globally_accepted().unwrap(); + block_info_2.mark_locally_accepted(false).unwrap(); + block_info_3.mark_locally_accepted(false).unwrap(); + block_info_4.mark_globally_accepted().unwrap(); + + db.insert_block(&block_info_1).unwrap(); + db.insert_block(&block_info_2).unwrap(); + db.insert_block(&block_info_3).unwrap(); + db.insert_block(&block_info_4).unwrap(); + + // Verify tenure consensus_hash_1 + let block_info = db + .get_last_accepted_block(&consensus_hash_1) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_3); + let block_info = db + .get_last_globally_accepted_block(&consensus_hash_1) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_1); + + // Verify tenure consensus_hash_2 + let block_info = db + .get_last_accepted_block(&consensus_hash_2) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_4); + let block_info = db + .get_last_globally_accepted_block(&consensus_hash_2) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_4); + + // Verify tenure consensus_hash_3 + assert!(db + .get_last_accepted_block(&consensus_hash_3) + .unwrap() + .is_none()); + assert!(db + .get_last_globally_accepted_block(&consensus_hash_3) + .unwrap() + .is_none()); + } + + #[test] + fn get_all_globally_accepted_blocks() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let consensus_hash_1 = ConsensusHash([0x01; 20]); + let consensus_hash_2 = ConsensusHash([0x02; 20]); + let consensus_hash_3 = ConsensusHash([0x03; 20]); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = 
consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + let (mut block_info_3, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 3; + }); + let (mut block_info_4, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 4; + }); + block_info_1.mark_globally_accepted().unwrap(); + block_info_2.mark_locally_accepted(false).unwrap(); + block_info_3.mark_globally_accepted().unwrap(); + block_info_4.mark_globally_accepted().unwrap(); + + db.insert_block(&block_info_1).unwrap(); + db.insert_block(&block_info_2).unwrap(); + db.insert_block(&block_info_3).unwrap(); + db.insert_block(&block_info_4).unwrap(); + + // Verify tenure consensus_hash_1 + let block_infos = db.get_globally_accepted_blocks(&consensus_hash_1).unwrap(); + assert_eq!(block_infos.len(), 2); + assert!(block_infos.contains(&block_info_1)); + assert!(block_infos.contains(&block_info_3)); + + // Verify tenure consensus_hash_2 + let block_infos = db.get_globally_accepted_blocks(&consensus_hash_2).unwrap(); + assert_eq!(block_infos.len(), 1); + assert!(block_infos.contains(&block_info_4)); + + // Verify tenure consensus_hash_3 + assert!(db + .get_globally_accepted_blocks(&consensus_hash_3) + .unwrap() + .is_empty()); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index bec9f1258d..72fdf9c629 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -90,6 +90,7 @@ fn setup_test_environment( first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), 
tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ef0cf88eb6..9e421f9994 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -133,7 +133,7 @@ impl SignerTrait for Signer { if event_parity == Some(other_signer_parity) { return; } - self.check_submitted_block_proposal(); + self.check_submitted_block_proposal(stacks_client, sortition_state); debug!("{self}: Processing event: {event:?}"); let Some(event) = event else { // No event. Do nothing. @@ -143,7 +143,11 @@ impl SignerTrait for Signer { match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response(stacks_client, block_validate_response) + self.handle_block_validate_response( + stacks_client, + block_validate_response, + sortition_state, + ) } SignerEvent::SignerMessages(_signer_set, messages) => { debug!( @@ -291,7 +295,12 @@ impl Signer { /// Determine this signers response to a proposed block /// Returns a BlockResponse if we have already validated the block /// Returns None otherwise - fn determine_response(&self, block_info: &BlockInfo) -> Option { + fn determine_response( + &self, + block_info: &BlockInfo, + stacks_client: &StacksClient, + sortition_state: &mut Option, + ) -> Option { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); @@ -302,7 +311,7 @@ impl Signer { BlockResponse::accepted( block_info.signer_signature_hash(), signature, - self.calculate_tenure_extend_timestamp(), + self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), ) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); @@ -311,7 +320,7 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, 
self.mainnet, - self.calculate_tenure_extend_timestamp(), + self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), ) }; Some(response) @@ -343,7 +352,9 @@ impl Signer { .block_lookup(self.reward_cycle, &signer_signature_hash) .expect("Failed to connect to signer DB") { - let Some(block_response) = self.determine_response(&block_info) else { + let Some(block_response) = + self.determine_response(&block_info, stacks_client, sortition_state) + else { // We are still waiting for a response for this block. Do nothing. debug!("{self}: Received a block proposal for a block we are already validating."; "signer_sighash" => %signer_signature_hash, @@ -392,6 +403,8 @@ impl Signer { .ok(); } + let tenure_extend_timestamp = + self.calculate_tenure_extend_timestamp(stacks_client, sortition_state); // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { match sortition_state.check_proposal( @@ -414,7 +427,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(), + tenure_extend_timestamp, )) } // Block proposal is bad @@ -429,7 +442,7 @@ impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(), + tenure_extend_timestamp, )) } // Block proposal passed check, still don't know if valid @@ -446,7 +459,7 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(), + tenure_extend_timestamp, )) }; @@ -474,7 +487,7 @@ impl Signer { } } else { // Just in case check if the last block validation submission timed out. - self.check_submitted_block_proposal(); + self.check_submitted_block_proposal(stacks_client, sortition_state); if self.submitted_block_proposal.is_none() { // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. 
info!( @@ -528,6 +541,7 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_ok: &BlockValidateOk, + sortition_state: &mut Option, ) -> Option { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; @@ -580,7 +594,7 @@ impl Signer { let accepted = BlockAccepted::new( block_info.signer_signature_hash(), signature, - self.calculate_tenure_extend_timestamp(), + self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), ); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); @@ -590,7 +604,9 @@ impl Signer { /// Handle the block validate reject response. Returns our block response if we have one fn handle_block_validate_reject( &mut self, + stacks_client: &StacksClient, block_validate_reject: &BlockValidateReject, + sortition_state: &mut Option, ) -> Option { crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; @@ -635,7 +651,7 @@ impl Signer { block_validate_reject.clone(), &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(), + self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), ); self.signer_db .insert_block(&block_info) @@ -649,15 +665,19 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, + sortition_state: &mut Option, ) { info!("{self}: Received a block validate response: {block_validate_response:?}"); let block_response = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { - self.handle_block_validate_ok(stacks_client, block_validate_ok) - } - BlockValidateResponse::Reject(block_validate_reject) => { - self.handle_block_validate_reject(block_validate_reject) + self.handle_block_validate_ok(stacks_client, block_validate_ok, sortition_state) } + 
BlockValidateResponse::Reject(block_validate_reject) => self + .handle_block_validate_reject( + stacks_client, + block_validate_reject, + sortition_state, + ), }; let Some(response) = block_response else { return; @@ -682,7 +702,11 @@ impl Signer { /// Check the current tracked submitted block proposal to see if it has timed out. /// Broadcasts a rejection and marks the block locally rejected if it has. - fn check_submitted_block_proposal(&mut self) { + fn check_submitted_block_proposal( + &mut self, + stacks_client: &StacksClient, + sortition_state: &mut Option, + ) { let Some((block_proposal, block_submission)) = self.submitted_block_proposal.take() else { // Nothing to check. return; @@ -733,7 +757,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(), + self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), ); if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -1116,7 +1140,7 @@ impl Signer { RejectCode::TestingDirective, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(), + u64::MAX, )) } else { None @@ -1136,9 +1160,45 @@ impl Signer { } } - /// Calculate the tenure extend timestamp based on the tenure start and already consumed idle time - fn calculate_tenure_extend_timestamp(&self) -> u64 { - // TODO: udpate this to grab the idle time consumed against the tenure start time - get_epoch_time_secs() + /// Calculate the tenure extend timestamp based on the tenure start and already consumed idle time. 
+ fn calculate_tenure_extend_timestamp( + &self, + stacks_client: &StacksClient, + sortition_state: &mut Option, + ) -> u64 { + if sortition_state.is_none() { + *sortition_state = + SortitionsView::fetch_view(self.proposal_config.clone(), stacks_client) + .inspect_err(|e| { + warn!( + "{self}: Failed to update sortition view: {e:?}"; + ) + }) + .ok(); + } + if let Some(sortition_state) = sortition_state { + let tenure_process_time = self + .signer_db + .get_globally_accepted_blocks(&sortition_state.cur_sortition.consensus_hash) + .unwrap_or_default() + .iter() + .map(|block| { + if let Some(processed_time) = block.processed_time { + processed_time.saturating_sub(block.proposed_time) + } else { + 0 + } + }) + .sum::(); + + sortition_state + .cur_sortition + .burn_header_timestamp + .saturating_add(self.proposal_config.tenure_idle_timeout.as_secs()) + .saturating_add(tenure_process_time) + } else { + warn!("{self}: Failed to determine tenure extend timestamp. Using default u64::MAX"); + u64::MAX + } } } diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 34083fb22a..bc30e51528 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::{next_block_and, wait_for}; +use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6ae34fce42..7e1c94ed76 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6369,6 +6369,7 
@@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6458,6 +6459,7 @@ fn signer_chainstate() { signed_group: None, ext: ExtraBlockInfo::None, state: BlockState::Unprocessed, + processed_time: None, }) .unwrap(); @@ -6508,6 +6510,7 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6546,6 +6549,7 @@ fn signer_chainstate() { signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, state: BlockState::GloballyAccepted, + processed_time: Some(get_epoch_time_secs()), }) .unwrap(); @@ -6586,6 +6590,7 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a7ade631aa..2f689e25ee 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -457,6 +457,7 @@ fn block_proposal_rejection() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + 
tenure_idle_timeout: Duration::from_secs(300), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), @@ -6949,6 +6950,7 @@ fn block_validation_response_timeout() { first_proposal_burn_block_timing: Duration::from_secs(0), tenure_last_block_proposal_timeout: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(100), + tenure_idle_timeout: Duration::from_secs(300), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), From 91be5ccbd2234b720811495a33d9e7bf8a93f07c Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sat, 16 Nov 2024 15:56:59 -0800 Subject: [PATCH 023/115] wip: integration tests for tenure extends --- testnet/stacks-node/src/tests/signer/v0.rs | 246 ++++++++++++++++++++- 1 file changed, 245 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2f689e25ee..d4fa3c35d3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -420,6 +420,23 @@ impl SignerTest { } } +fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool { + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = 
tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) if payload.cause == cause => { + info!("Found tenure change transaction: {parsed:?}"); + true + } + _ => false, + } +} + #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -2483,6 +2500,233 @@ fn signers_broadcast_signed_blocks() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction after the idle timeout is reached. +fn tenure_extend_after_idle() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + // Verify that the block was mined + 
wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Verify that Nakamoto blocks that don't modify the tenure's execution cost +/// don't modify the idle timeout. +fn stx_transfers_dont_effect_idle_timeout() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + 
.running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + info!("---- Nakamoto booted, starting test ----"; + "info_height" => info_before.stacks_tip_height, + "blocks_before" => blocks_before, + ); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + info!("---- Verifying that the block was mined ----"); + // Verify that the block was mined + wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + + info!("---- Getting current idle timeout ----"); + + let reward_cycle = signer_test.get_current_reward_cycle(); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let last_block_hash = get_last_block_hash(); + + let slot_id = 0_u32; + + let get_last_block_response = |slot_id: u32| { + let mut stackerdb = StackerDB::new( + &naka_conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. 
+ ); + let latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[slot_id], + ) + .expect("Failed to get message from stackerdb"); + let latest_msg = latest_msgs.last().unwrap(); + let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = latest_msg else { + panic!("Latest message from slot #{slot_id} isn't a block acceptance"); + }; + accepted.clone() + }; + + let latest_acceptance = get_last_block_response(slot_id); + assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); + + info!( + "---- Last idle timeout: {} ----", + latest_acceptance.tenure_extend_timestamp + ); + + // Now, mine a few nakamoto blocks with just transfers + + let mut sender_nonce = 0; + + let mut last_acceptance = latest_acceptance; + + for i in 0..num_txs { + info!("---- Mining interim block {} ----", i + 1); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + wait_for(30, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for nakamoto block to be mined"); + + let latest_acceptance = get_last_block_response(slot_id); + let last_block_hash = get_last_block_hash(); + + assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); + // Because the block only contains transfers, the idle timeout should not have changed + assert_eq!( + last_acceptance.tenure_extend_timestamp, + latest_acceptance.tenure_extend_timestamp + ); + + last_acceptance = latest_acceptance; + } + + info!("---- Waiting for a tenure extend ----"); + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behaviour of signers when a 
sortition is empty. Specifically: From 012ed87f056e950f35207c72b40f321e0a455f45 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 18 Nov 2024 09:30:05 -0500 Subject: [PATCH 024/115] feat: add validation time to block proposal response --- stackslib/src/net/api/postblock_proposal.rs | 6 ++- .../src/net/api/tests/postblock_proposal.rs | 48 ++++++++++++++----- 2 files changed, 41 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index b67b6166aa..bc1d4afcbe 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -145,6 +145,7 @@ pub struct BlockValidateOk { pub signer_signature_hash: Sha512Trunc256Sum, pub cost: ExecutionCost, pub size: u64, + pub validation_time_ms: u128, } /// This enum is used for serializing the response to block @@ -544,6 +545,8 @@ impl NakamotoBlockProposal { }); } + let validation_time_ms = time_elapsed(); + info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, @@ -552,7 +555,7 @@ impl NakamotoBlockProposal { "parent_stacks_block_id" => %block.header.parent_block_id, "block_size" => size, "execution_cost" => %cost, - "validation_time_ms" => time_elapsed(), + "validation_time_ms" => validation_time_ms, "tx_fees_microstacks" => block.txs.iter().fold(0, |agg: u64, tx| { agg.saturating_add(tx.get_tx_fee()) }) @@ -562,6 +565,7 @@ impl NakamotoBlockProposal { signer_signature_hash: block.header.signer_signature_hash(), cost, size, + validation_time_ms, }) } } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 4f553efd21..7ce3828cf6 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -238,7 +238,7 @@ fn test_try_make_response() { let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); - let mut block = { + let mut 
good_block = { let chainstate = rpc_test.peer_1.chainstate(); let parent_stacks_header = NakamotoChainState::get_block_header(chainstate.db(), &stacks_tip) @@ -313,12 +313,12 @@ fn test_try_make_response() { }; // Increment the timestamp by 1 to ensure it is different from the previous block - block.header.timestamp += 1; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + good_block.header.timestamp += 1; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut good_block); // post the valid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: good_block.clone(), chain_id: 0x80000000, }; @@ -333,12 +333,16 @@ fn test_try_make_response() { requests.push(request); // Set the timestamp to a value in the past - block.header.timestamp -= 10000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + let mut early_time_block = good_block.clone(); + early_time_block.header.timestamp -= 10000; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut early_time_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: early_time_block, chain_id: 0x80000000, }; @@ -353,12 +357,16 @@ fn test_try_make_response() { requests.push(request); // Set the timestamp to a value in the future - block.header.timestamp += 20000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + let mut late_time_block = good_block.clone(); + late_time_block.header.timestamp += 20000; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut late_time_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: late_time_block, chain_id: 0x80000000, }; @@ -380,7 +388,7 @@ fn test_try_make_response() { let response = responses.remove(0); - // Wait for the results to be non-empty + // Wait for the results of all 3 requests loop { if proposal_observer .lock() @@ -401,7 +409,23 @@ fn test_try_make_response() { let mut results = 
observer.results.lock().unwrap(); let result = results.remove(0); - assert!(result.is_ok()); + match result { + Ok(postblock_proposal::BlockValidateOk { + signer_signature_hash, + cost, + size, + validation_time_ms, + }) => { + assert_eq!( + signer_signature_hash, + good_block.header.signer_signature_hash() + ); + assert_eq!(cost, ExecutionCost::zero()); + assert_eq!(size, 180); + assert!(validation_time_ms > 0 && validation_time_ms < 60000); + } + _ => panic!("expected ok"), + } let result = results.remove(0); match result { From de7b67739959c8aa89864810140c176ba1dfd545 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 18 Nov 2024 10:17:45 -0500 Subject: [PATCH 025/115] chore: make `validation_time_ms` a `u64` --- stackslib/src/net/api/postblock_proposal.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index bc1d4afcbe..38843174b0 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -145,7 +145,7 @@ pub struct BlockValidateOk { pub signer_signature_hash: Sha512Trunc256Sum, pub cost: ExecutionCost, pub size: u64, - pub validation_time_ms: u128, + pub validation_time_ms: u64, } /// This enum is used for serializing the response to block @@ -357,7 +357,12 @@ impl NakamotoBlockProposal { } let ts_start = get_epoch_time_ms(); // Measure time from start of function - let time_elapsed = || get_epoch_time_ms().saturating_sub(ts_start); + let time_elapsed = || { + get_epoch_time_ms() + .saturating_sub(ts_start) + .try_into() + .unwrap_or(u64::MAX) + }; let mainnet = self.chain_id == CHAIN_ID_MAINNET; if self.chain_id != chainstate.chain_id || mainnet != chainstate.mainnet { From 510a12fbdb681212c1e67977703c7c57bada4270 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 10:45:25 -0800 Subject: [PATCH 026/115] Use timestamp of known globally accepted blocks to determine tenure 
start Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 2 +- stacks-signer/src/signerdb.rs | 15 ++--- stacks-signer/src/v0/signer.rs | 64 ++++++++++++------- .../src/tests/nakamoto_integrations.rs | 4 +- 4 files changed, 50 insertions(+), 35 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 1ea0415d82..18412bc5f2 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -136,7 +136,7 @@ pub struct SignerConfig { pub tenure_last_block_proposal_timeout: Duration, /// How much time to wait for a block proposal validation response before marking the block invalid pub block_proposal_validation_timeout: Duration, - /// How much idle tie must pass before allowing a tenure extend + /// How much idle time must pass before allowing a tenure extend pub tenure_idle_timeout: Duration, } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 4ec701172e..cf93b7354a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -157,10 +157,10 @@ pub struct BlockInfo { pub signed_group: Option, /// The block state relative to the signer's view of the stacks blockchain pub state: BlockState, + /// Amount of validation time in milliseconds + pub validation_time_ms: Option, /// Extra data specific to v0, v1, etc. 
pub ext: ExtraBlockInfo, - /// Time at which the proposal was processed (epoch time in seconds) - pub processed_time: Option, } impl From for BlockInfo { @@ -177,7 +177,7 @@ impl From for BlockInfo { signed_group: None, ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, - processed_time: None, + validation_time_ms: None, } } } @@ -193,7 +193,6 @@ impl BlockInfo { } else { self.signed_self.get_or_insert(get_epoch_time_secs()); } - self.processed_time = Some(get_epoch_time_secs()); Ok(()) } @@ -819,7 +818,7 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC"; let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; let result: Vec = query_rows(&self.db, query, args)?; result @@ -1365,14 +1364,12 @@ mod tests { // Verify tenure consensus_hash_1 let block_infos = db.get_globally_accepted_blocks(&consensus_hash_1).unwrap(); - assert_eq!(block_infos.len(), 2); - assert!(block_infos.contains(&block_info_1)); - assert!(block_infos.contains(&block_info_3)); + assert_eq!(block_infos, vec![block_info_3, block_info_1]); // Verify tenure consensus_hash_2 let block_infos = db.get_globally_accepted_blocks(&consensus_hash_2).unwrap(); assert_eq!(block_infos.len(), 1); - assert!(block_infos.contains(&block_info_4)); + assert_eq!(block_infos, vec![block_info_4]); // Verify tenure consensus_hash_3 assert!(db diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 9e421f9994..53512ee426 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -18,6 +18,7 @@ use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use 
blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -583,6 +584,9 @@ impl Signer { warn!("{self}: Failed to mark block as locally accepted: {e:?}",); return None; } + // Record the block validation time + block_info.validation_time_ms = Some(block_validate_ok.validation_time_ms); + let signature = self .private_key .sign(&signer_signature_hash.0) @@ -1176,29 +1180,43 @@ impl Signer { }) .ok(); } - if let Some(sortition_state) = sortition_state { - let tenure_process_time = self - .signer_db - .get_globally_accepted_blocks(&sortition_state.cur_sortition.consensus_hash) - .unwrap_or_default() - .iter() - .map(|block| { - if let Some(processed_time) = block.processed_time { - processed_time.saturating_sub(block.proposed_time) - } else { - 0 - } - }) - .sum::(); - - sortition_state - .cur_sortition - .burn_header_timestamp - .saturating_add(self.proposal_config.tenure_idle_timeout.as_secs()) - .saturating_add(tenure_process_time) - } else { - warn!("{self}: Failed to determine tenure extend timestamp. Using default u64::MAX"); - u64::MAX + let Some(sortition_state) = sortition_state else { + warn!("{self}: No sortition state known. Unable to determine tenure extend timestamp for current tenure."); + return get_epoch_time_secs() + .saturating_add(self.proposal_config.tenure_idle_timeout.as_secs()); + }; + // We do not know our tenure start timestamp until we find the last processed tenure change transaction. + // We may not even have it in our database, in which case, we should use the oldest known block in this tenure. + // If we have no blocks known for this tenure, we will assume it has only just started and calculate + // our tenure extend timestamp based on the epoch time in secs. 
+ let mut tenure_start_timestamp = None; + let mut tenure_process_time_ms = 0; + // Note that the globally accepted blocks are already returned in descending order of stacks height, therefore by newest block to oldest block + for block_info in self + .signer_db + .get_globally_accepted_blocks(&sortition_state.cur_sortition.consensus_hash) + .unwrap_or_default() + .iter() + { + // Always use the oldest block as our tenure start timestamp + tenure_start_timestamp = Some(block_info.proposed_time); + tenure_process_time_ms += block_info.validation_time_ms.unwrap_or(0); + + if block_info + .block + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false) + { + // Tenure change found. No more blocks should count towards this tenure's processing time. + break; + } } + + tenure_start_timestamp + .unwrap_or(get_epoch_time_secs()) + .saturating_add(self.proposal_config.tenure_idle_timeout.as_secs()) + .saturating_sub(tenure_process_time_ms / 1000) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7e1c94ed76..aa90b3cfaf 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6459,7 +6459,7 @@ fn signer_chainstate() { signed_group: None, ext: ExtraBlockInfo::None, state: BlockState::Unprocessed, - processed_time: None, + validation_time_ms: None, }) .unwrap(); @@ -6549,7 +6549,7 @@ fn signer_chainstate() { signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, state: BlockState::GloballyAccepted, - processed_time: Some(get_epoch_time_secs()), + validation_time_ms: Some(1000), }) .unwrap(); From 132b759a994a85fec12dacfee0692b331594563d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 10:51:00 -0800 Subject: [PATCH 027/115] Do not use sortition state when responding to a specific block proposal Signed-off-by: Jacinta Ferrant --- 
stacks-signer/src/v0/signer.rs | 95 ++++++++++------------------------ 1 file changed, 28 insertions(+), 67 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53512ee426..9c9b51dfc0 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -22,7 +22,7 @@ use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use clarity::types::chainstate::StacksPrivateKey; +use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey}; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; @@ -134,7 +134,7 @@ impl SignerTrait for Signer { if event_parity == Some(other_signer_parity) { return; } - self.check_submitted_block_proposal(stacks_client, sortition_state); + self.check_submitted_block_proposal(); debug!("{self}: Processing event: {event:?}"); let Some(event) = event else { // No event. Do nothing. 
@@ -144,11 +144,7 @@ impl SignerTrait for Signer { match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - sortition_state, - ) + self.handle_block_validate_response(stacks_client, block_validate_response) } SignerEvent::SignerMessages(_signer_set, messages) => { debug!( @@ -296,12 +292,7 @@ impl Signer { /// Determine this signers response to a proposed block /// Returns a BlockResponse if we have already validated the block /// Returns None otherwise - fn determine_response( - &self, - block_info: &BlockInfo, - stacks_client: &StacksClient, - sortition_state: &mut Option, - ) -> Option { + fn determine_response(&self, block_info: &BlockInfo) -> Option { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); @@ -312,7 +303,7 @@ impl Signer { BlockResponse::accepted( block_info.signer_signature_hash(), signature, - self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), + self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), ) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); @@ -321,7 +312,7 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), + self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), ) }; Some(response) @@ -353,9 +344,7 @@ impl Signer { .block_lookup(self.reward_cycle, &signer_signature_hash) .expect("Failed to connect to signer DB") { - let Some(block_response) = - self.determine_response(&block_info, stacks_client, sortition_state) - else { + let Some(block_response) = self.determine_response(&block_info) else { // We are still waiting for a response for this block. Do nothing. 
debug!("{self}: Received a block proposal for a block we are already validating."; "signer_sighash" => %signer_signature_hash, @@ -404,8 +393,6 @@ impl Signer { .ok(); } - let tenure_extend_timestamp = - self.calculate_tenure_extend_timestamp(stacks_client, sortition_state); // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { match sortition_state.check_proposal( @@ -428,7 +415,9 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - tenure_extend_timestamp, + self.calculate_tenure_extend_timestamp( + &block_proposal.block.header.consensus_hash, + ), )) } // Block proposal is bad @@ -443,7 +432,9 @@ impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, - tenure_extend_timestamp, + self.calculate_tenure_extend_timestamp( + &block_proposal.block.header.consensus_hash, + ), )) } // Block proposal passed check, still don't know if valid @@ -460,7 +451,7 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, - tenure_extend_timestamp, + self.calculate_tenure_extend_timestamp(&block_proposal.block.header.consensus_hash), )) }; @@ -488,7 +479,7 @@ impl Signer { } } else { // Just in case check if the last block validation submission timed out. - self.check_submitted_block_proposal(stacks_client, sortition_state); + self.check_submitted_block_proposal(); if self.submitted_block_proposal.is_none() { // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. 
info!( @@ -542,7 +533,6 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_ok: &BlockValidateOk, - sortition_state: &mut Option, ) -> Option { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; @@ -598,7 +588,7 @@ impl Signer { let accepted = BlockAccepted::new( block_info.signer_signature_hash(), signature, - self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), + self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), ); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); @@ -608,9 +598,7 @@ impl Signer { /// Handle the block validate reject response. Returns our block response if we have one fn handle_block_validate_reject( &mut self, - stacks_client: &StacksClient, block_validate_reject: &BlockValidateReject, - sortition_state: &mut Option, ) -> Option { crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; @@ -655,7 +643,7 @@ impl Signer { block_validate_reject.clone(), &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), + self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), ); self.signer_db .insert_block(&block_info) @@ -669,19 +657,15 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, - sortition_state: &mut Option, ) { info!("{self}: Received a block validate response: {block_validate_response:?}"); let block_response = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { - self.handle_block_validate_ok(stacks_client, block_validate_ok, sortition_state) + self.handle_block_validate_ok(stacks_client, block_validate_ok) + } + BlockValidateResponse::Reject(block_validate_reject) => { + 
self.handle_block_validate_reject(block_validate_reject) } - BlockValidateResponse::Reject(block_validate_reject) => self - .handle_block_validate_reject( - stacks_client, - block_validate_reject, - sortition_state, - ), }; let Some(response) = block_response else { return; @@ -706,11 +690,7 @@ impl Signer { /// Check the current tracked submitted block proposal to see if it has timed out. /// Broadcasts a rejection and marks the block locally rejected if it has. - fn check_submitted_block_proposal( - &mut self, - stacks_client: &StacksClient, - sortition_state: &mut Option, - ) { + fn check_submitted_block_proposal(&mut self) { let Some((block_proposal, block_submission)) = self.submitted_block_proposal.take() else { // Nothing to check. return; @@ -761,7 +741,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(stacks_client, sortition_state), + self.calculate_tenure_extend_timestamp(&block_proposal.block.header.consensus_hash), ); if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -1165,36 +1145,17 @@ impl Signer { } /// Calculate the tenure extend timestamp based on the tenure start and already consumed idle time. - fn calculate_tenure_extend_timestamp( - &self, - stacks_client: &StacksClient, - sortition_state: &mut Option, - ) -> u64 { - if sortition_state.is_none() { - *sortition_state = - SortitionsView::fetch_view(self.proposal_config.clone(), stacks_client) - .inspect_err(|e| { - warn!( - "{self}: Failed to update sortition view: {e:?}"; - ) - }) - .ok(); - } - let Some(sortition_state) = sortition_state else { - warn!("{self}: No sortition state known. 
Unable to determine tenure extend timestamp for current tenure."); - return get_epoch_time_secs() - .saturating_add(self.proposal_config.tenure_idle_timeout.as_secs()); - }; - // We do not know our tenure start timestamp until we find the last processed tenure change transaction. - // We may not even have it in our database, in which case, we should use the oldest known block in this tenure. - // If we have no blocks known for this tenure, we will assume it has only just started and calculate + fn calculate_tenure_extend_timestamp(&self, consensus_hash: &ConsensusHash) -> u64 { + // We do not know our tenure start timestamp until we find the last processed tenure change transaction for the given consensus hash. + // We may not even have it in our database, in which case, we should use the oldest known block in the tenure. + // If we have no blocks known for this tenure, we will assume it has only JUST started and calculate // our tenure extend timestamp based on the epoch time in secs. let mut tenure_start_timestamp = None; let mut tenure_process_time_ms = 0; // Note that the globally accepted blocks are already returned in descending order of stacks height, therefore by newest block to oldest block for block_info in self .signer_db - .get_globally_accepted_blocks(&sortition_state.cur_sortition.consensus_hash) + .get_globally_accepted_blocks(consensus_hash) .unwrap_or_default() .iter() { From ca8f4e3fca6c70c28d2b226823b5e68987ccb240 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 10:55:48 -0800 Subject: [PATCH 028/115] Update testing directive rejection to use correct tenure extend timestamp Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 9c9b51dfc0..e28703f56f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -1124,7 +1124,7 @@ impl Signer { 
RejectCode::TestingDirective, &self.private_key, self.mainnet, - u64::MAX, + self.calculate_tenure_extend_timestamp(&block_proposal.block.header.consensus_hash), )) } else { None From 32d7808a0cbca316387b6895f346ad0223dfa682 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 10:58:13 -0800 Subject: [PATCH 029/115] Fix comment on validaiton_time_ms Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index cf93b7354a..bc163ee686 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -157,7 +157,7 @@ pub struct BlockInfo { pub signed_group: Option, /// The block state relative to the signer's view of the stacks blockchain pub state: BlockState, - /// Amount of validation time in milliseconds + /// Consumed processing time in milliseconds to validate this block pub validation_time_ms: Option, /// Extra data specific to v0, v1, etc. 
pub ext: ExtraBlockInfo, From 562982c322ddc460d3ad9c7ee5a4c2981ba308c1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 12:37:20 -0800 Subject: [PATCH 030/115] Use tenure extend timestamp to determine if enough time has passed for a tenure extend Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 15 +++--- stacks-signer/src/signerdb.rs | 46 ++++++++++++++++++- stacks-signer/src/v0/signer.rs | 81 ++++++++++++++------------------- 3 files changed, 84 insertions(+), 58 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 66bf173941..5a6aa04200 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -337,9 +337,14 @@ impl SortitionsView { // in tenure extends, we need to check: // (1) if this is the most recent sortition, an extend is allowed if it changes the burnchain view // (2) if this is the most recent sortition, an extend is allowed if enough time has passed to refresh the block limit + let sortition_consensus_hash = proposed_by.state().consensus_hash; let changed_burn_view = - tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; - let enough_time_passed = Self::tenure_time_passed_block_lim()?; + tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; + let enough_time_passed = get_epoch_time_secs() + > signer_db.get_tenure_extend_timestamp( + self.config.tenure_idle_timeout, + &sortition_consensus_hash, + ); if !changed_burn_view && !enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; @@ -658,12 +663,6 @@ impl SortitionsView { } } - /// Has the current tenure lasted long enough to extend the block limit? 
- pub fn tenure_time_passed_block_lim() -> Result { - // TODO - Ok(false) - } - /// Fetch a new view of the recent sortitions pub fn fetch_view( config: ProposalEvalConfig, diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index bc163ee686..7e3659ff12 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -16,9 +16,10 @@ use std::fmt::Display; use std::path::Path; -use std::time::SystemTime; +use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, @@ -814,7 +815,7 @@ impl SignerDb { } /// Return the all globally accepted block in a tenure (identified by its consensus hash). - pub fn get_globally_accepted_blocks( + fn get_globally_accepted_blocks( &self, tenure: &ConsensusHash, ) -> Result, DBError> { @@ -826,6 +827,47 @@ impl SignerDb { .map(|info| serde_json::from_str(info).map_err(DBError::from)) .collect() } + + /// Compute the tenure extend timestamp based on the tenure start and already consumed idle time of the + /// globally accepted blocks of the provided tenure (identified by the cosnensus hash) + pub fn get_tenure_extend_timestamp( + &self, + tenure_idle_timeout: Duration, + consensus_hash: &ConsensusHash, + ) -> u64 { + // We do not know our tenure start timestamp until we find the last processed tenure change transaction for the given consensus hash. + // We may not even have it in our database, in which case, we should use the oldest known block in the tenure. + // If we have no blocks known for this tenure, we will assume it has only JUST started and calculate + // our tenure extend timestamp based on the epoch time in secs. 
+ let mut tenure_start_timestamp = None; + let mut tenure_process_time_ms = 0; + // Note that the globally accepted blocks are already returned in descending order of stacks height, therefore by newest block to oldest block + for block_info in self + .get_globally_accepted_blocks(consensus_hash) + .unwrap_or_default() + .iter() + { + // Always use the oldest block as our tenure start timestamp + tenure_start_timestamp = Some(block_info.proposed_time); + tenure_process_time_ms += block_info.validation_time_ms.unwrap_or(0); + + if block_info + .block + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false) + { + // Tenure change found. No more blocks should count towards this tenure's processing time. + break; + } + } + + tenure_start_timestamp + .unwrap_or(get_epoch_time_secs()) + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_sub(tenure_process_time_ms / 1000) + } } fn try_deserialize(s: Option) -> Result, DBError> diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e28703f56f..d6ae24b83d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -18,11 +18,10 @@ use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey}; +use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; @@ -303,7 +302,10 @@ impl Signer { BlockResponse::accepted( block_info.signer_signature_hash(), signature, - self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), + 
self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block.header.consensus_hash, + ), ) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); @@ -312,7 +314,10 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block.header.consensus_hash, + ), ) }; Some(response) @@ -415,7 +420,8 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp( + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), )) @@ -432,7 +438,8 @@ impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp( + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), )) @@ -451,7 +458,10 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(&block_proposal.block.header.consensus_hash), + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block.header.consensus_hash, + ), )) }; @@ -588,7 +598,10 @@ impl Signer { let accepted = BlockAccepted::new( block_info.signer_signature_hash(), signature, - self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block.header.consensus_hash, + ), ); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); @@ -643,7 +656,10 @@ impl Signer { block_validate_reject.clone(), &self.private_key, 
self.mainnet, - self.calculate_tenure_extend_timestamp(&block_info.block.header.consensus_hash), + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block.header.consensus_hash, + ), ); self.signer_db .insert_block(&block_info) @@ -741,7 +757,10 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(&block_proposal.block.header.consensus_hash), + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block.header.consensus_hash, + ), ); if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -1124,7 +1143,10 @@ impl Signer { RejectCode::TestingDirective, &self.private_key, self.mainnet, - self.calculate_tenure_extend_timestamp(&block_proposal.block.header.consensus_hash), + self.signer_db.get_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block.header.consensus_hash, + ), )) } else { None @@ -1143,41 +1165,4 @@ impl Signer { warn!("{self}: Failed to send mock signature to stacker-db: {e:?}",); } } - - /// Calculate the tenure extend timestamp based on the tenure start and already consumed idle time. - fn calculate_tenure_extend_timestamp(&self, consensus_hash: &ConsensusHash) -> u64 { - // We do not know our tenure start timestamp until we find the last processed tenure change transaction for the given consensus hash. - // We may not even have it in our database, in which case, we should use the oldest known block in the tenure. - // If we have no blocks known for this tenure, we will assume it has only JUST started and calculate - // our tenure extend timestamp based on the epoch time in secs. 
- let mut tenure_start_timestamp = None; - let mut tenure_process_time_ms = 0; - // Note that the globally accepted blocks are already returned in descending order of stacks height, therefore by newest block to oldest block - for block_info in self - .signer_db - .get_globally_accepted_blocks(consensus_hash) - .unwrap_or_default() - .iter() - { - // Always use the oldest block as our tenure start timestamp - tenure_start_timestamp = Some(block_info.proposed_time); - tenure_process_time_ms += block_info.validation_time_ms.unwrap_or(0); - - if block_info - .block - .txs - .first() - .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) - .unwrap_or(false) - { - // Tenure change found. No more blocks should count towards this tenure's processing time. - break; - } - } - - tenure_start_timestamp - .unwrap_or(get_epoch_time_secs()) - .saturating_add(self.proposal_config.tenure_idle_timeout.as_secs()) - .saturating_sub(tenure_process_time_ms / 1000) - } } From 89eaa8545179aa743bbedc069a28285588254046 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 12:55:53 -0800 Subject: [PATCH 031/115] Test: add extra logging for help with CI Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2f689e25ee..2952fcbae3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -510,10 +510,16 @@ fn block_proposal_rejection() { { if signer_signature_hash == block_signer_signature_hash_1 { found_signer_signature_hash_1 = true; - assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + assert!( + matches!(reason_code, RejectCode::SortitionViewMismatch), + "Expected sortition view mismatch rejection. 
Got: {reason_code}" + ); } else if signer_signature_hash == block_signer_signature_hash_2 { found_signer_signature_hash_2 = true; - assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + assert!( + matches!(reason_code, RejectCode::ValidationFailed(_)), + "Expected validation failed rejection. Got: {reason_code}" + ); } else { continue; } From 9eeb3e85f9890b8176b112bdcf4af154a8ff5c47 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 18 Nov 2024 14:29:13 -0800 Subject: [PATCH 032/115] CRC: add comment about get globally accepted blocks in descending order Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 7e3659ff12..3ad886cd22 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -814,7 +814,7 @@ impl SignerDb { )) } - /// Return the all globally accepted block in a tenure (identified by its consensus hash). 
+ /// Return the all globally accepted block in a tenure (identified by its consensus hash) in stacks height descending order fn get_globally_accepted_blocks( &self, tenure: &ConsensusHash, From 1665a1a082a79c262bee8f177f9e8a8161df4477 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 19 Nov 2024 10:33:25 -0800 Subject: [PATCH 033/115] CRC: fix typo in comment Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 3ad886cd22..c7bd9f9a6a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -829,7 +829,7 @@ impl SignerDb { } /// Compute the tenure extend timestamp based on the tenure start and already consumed idle time of the - /// globally accepted blocks of the provided tenure (identified by the cosnensus hash) + /// globally accepted blocks of the provided tenure (identified by the consensus hash) pub fn get_tenure_extend_timestamp( &self, tenure_idle_timeout: Duration, From 4b77c10f01b826bab3bb07ebfedef0f67dbfb11a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 19 Nov 2024 10:33:38 -0800 Subject: [PATCH 034/115] Miner forking test fix attempt Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2952fcbae3..e3d7ba98b5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1866,10 +1866,15 @@ fn miner_forking() { info!("Flushing any pending commits to enable custom winner selection"); let burn_height_before = get_burn_height(); + let blocks_before = test_observer::get_blocks().len(); + let nakamoto_blocks_count_before = get_nakamoto_headers(&conf).len(); next_block_and( &mut 
signer_test.running_nodes.btc_regtest_controller, 30, - || Ok(get_burn_height() > burn_height_before), + || { + Ok(get_burn_height() > burn_height_before + && test_observer::get_blocks().len() > blocks_before) + }, ) .unwrap(); @@ -2047,11 +2052,14 @@ fn miner_forking() { }) .expect("Timed out waiting for miner 1 to RBF its old commit op"); + let blocks_before = test_observer::get_blocks().len(); info!("Mine RL1 Tenure"); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_blocks().len() > blocks_before), + ) + .unwrap(); // fetch the current sortition info let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); @@ -2090,14 +2098,16 @@ fn miner_forking() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + info!("Nakamoto blocks count before test: {nakamoto_blocks_count_before}, Nakamoto blocks count now: {nakamoto_blocks_count}"); assert_eq!(peer_1_height, peer_2_height); let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_blocks_count).unwrap() - 1, // subtract 1 for the first Nakamoto block + u64::try_from(nakamoto_blocks_count - nakamoto_blocks_count_before).unwrap(), // subtract 1 for the first Nakamoto block "There should be no forks in this test" ); From 3519c7faec5c1a978af1a83d8bfc9c5e71f5d49b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 20 Nov 2024 07:36:01 -0800 Subject: [PATCH 035/115] feat: add artificial delay to block validation processing time --- stackslib/src/net/api/postblock_proposal.rs | 14 ++++++++++++++ 
testnet/stacks-node/src/tests/signer/v0.rs | 7 ++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 38843174b0..7a04a095de 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -16,6 +16,8 @@ use std::io::{Read, Write}; use std::thread::{self, JoinHandle, Thread}; +#[cfg(any(test, feature = "testing"))] +use std::time::Duration; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -65,6 +67,10 @@ use crate::util_lib::db::Error as DBError; #[cfg(any(test, feature = "testing"))] pub static TEST_VALIDATE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Artificial delay to add to block validation. +pub static TEST_VALIDATE_DELAY_DURATION_SECS: std::sync::Mutex> = + std::sync::Mutex::new(None); // This enum is used to supply a `reason_code` for validation // rejection responses. 
This is serialized as an enum with string @@ -364,6 +370,14 @@ impl NakamotoBlockProposal { .unwrap_or(u64::MAX) }; + #[cfg(any(test, feature = "testing"))] + { + if let Some(delay) = *TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap() { + warn!("Sleeping for {} seconds to simulate slow processing", delay); + thread::sleep(Duration::from_secs(delay)); + } + } + let mainnet = self.chain_id == CHAIN_ID_MAINNET; if self.chain_id != chainstate.chain_id || mainnet != chainstate.mainnet { warn!( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index cfe58cfdc2..ca90404180 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -39,7 +39,9 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; -use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; +use stacks::net::api::postblock_proposal::{ + ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, TEST_VALIDATE_STALL, +}; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; @@ -2623,6 +2625,9 @@ fn stx_transfers_dont_effect_idle_timeout() { signer_test.boot_to_epoch_3(); + // Add a delay to the block validation process + TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(5); + let info_before = get_chain_info(&signer_test.running_nodes.conf); let blocks_before = signer_test .running_nodes From 2c28e16d1b3e9ccc05f53cd4cbb39d73371f924a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 20 Nov 2024 15:04:33 -0800 Subject: [PATCH 036/115] Ignore bootcode contract call and stx transfer only blocks in processing time calculation time Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 17 
+++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index b5195d68cf..b2c3fed343 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -19,6 +19,7 @@ use std::path::Path; use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::address::StacksAddressExtensions; use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, @@ -856,7 +857,7 @@ impl SignerDb { // If we have no blocks known for this tenure, we will assume it has only JUST started and calculate // our tenure extend timestamp based on the epoch time in secs. let mut tenure_start_timestamp = None; - let mut tenure_process_time_ms = 0; + let mut tenure_process_time_ms = 0_u64; // Note that the globally accepted blocks are already returned in descending order of stacks height, therefore by newest block to oldest block for block_info in self .get_globally_accepted_blocks(consensus_hash) @@ -865,7 +866,19 @@ impl SignerDb { { // Always use the oldest block as our tenure start timestamp tenure_start_timestamp = Some(block_info.proposed_time); - tenure_process_time_ms += block_info.validation_time_ms.unwrap_or(0); + let non_bootcode_contract_call_block = block_info.block.txs.iter().any(|tx| { + // We only care about blocks that contain a non bootcode contract call + match &tx.payload { + TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), + TransactionPayload::SmartContract(..) 
=> true, + _ => false, + } + }); + + if non_bootcode_contract_call_block { + tenure_process_time_ms = tenure_process_time_ms + .saturating_add(block_info.validation_time_ms.unwrap_or(0)); + } if block_info .block From 4311015b2e6d2a7d56bf6dfb8f89501804f82c72 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 20 Nov 2024 17:05:41 -0800 Subject: [PATCH 037/115] feat: use versioned signer message data --- libsigner/src/v0/messages.rs | 203 +++++++++++++++--- stacks-signer/src/client/stackerdb.rs | 5 +- .../src/nakamoto_node/sign_coordinator.rs | 2 +- 3 files changed, 181 insertions(+), 29 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f900ea4386..0c88b73de3 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -647,7 +647,7 @@ impl BlockResponse { signer_signature_hash, signature, metadata: SignerMessageMetadata::default(), - tenure_extend_timestamp, + response_data: BlockResponseData::new(tenure_extend_timestamp), }) } @@ -751,6 +751,80 @@ impl SignerMessageMetadata { } } +/// The latest version of the block response data +pub const BLOCK_RESPONSE_DATA_VERSION: u8 = 2; + +/// Versioned, backwards-compatible struct for block response data +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockResponseData { + /// The version of the block response data + pub version: u8, + /// The block response data + pub tenure_extend_timestamp: u64, + /// The unknown block response data bytes + pub unknown_bytes: Vec, +} + +impl BlockResponseData { + /// Create a new BlockResponseData for the provided tenure extend timestamp and unknown bytes + pub fn new(tenure_extend_timestamp: u64) -> Self { + Self { + version: BLOCK_RESPONSE_DATA_VERSION, + tenure_extend_timestamp, + unknown_bytes: vec![], + } + } + + /// Create an empty BlockResponseData + pub fn empty() -> Self { + Self::new(u64::MAX) + } + + /// Serialize the "inner" block response data. 
Used to determine the bytes length of the serialized block response data + fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.tenure_extend_timestamp)?; + // write_next(fd, &self.unknown_bytes)?; + fd.write_all(&self.unknown_bytes) + .map_err(CodecError::WriteError)?; + Ok(()) + } +} + +impl StacksMessageCodec for BlockResponseData { + /// Serialize the block response data. + /// When creating a new version of the block response data, we are only ever + /// appending new bytes to the end of the struct. When serializing, we use + /// `bytes_len` to ensure that older versions of the code can read through the + /// end of the serialized bytes. + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.version)?; + let mut inner_bytes = vec![]; + self.inner_consensus_serialize(&mut inner_bytes)?; + let bytes_len = inner_bytes.len() as u32; + write_next(fd, &bytes_len)?; + fd.write_all(&inner_bytes).map_err(CodecError::WriteError)?; + Ok(()) + } + + /// Deserialize the block response data in a backwards-compatible manner. + /// When creating a new version of the block response data, we are only ever + /// appending new bytes to the end of the struct. When deserializing, we use + /// `bytes_len` to ensure that we read through the end of the serialized bytes. 
+ fn consensus_deserialize(fd: &mut R) -> Result { + let Ok(version) = read_next(fd) else { + return Ok(Self::empty()); + }; + let inner_bytes = read_next::, _>(fd)?; + let mut inner_reader = inner_bytes.as_slice(); + let tenure_extend_timestamp = read_next(&mut inner_reader)?; + Ok(Self { + version, + tenure_extend_timestamp, + unknown_bytes: inner_reader.to_vec(), + }) + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockAccepted { @@ -760,8 +834,8 @@ pub struct BlockAccepted { pub signature: MessageSignature, /// Signer message metadata pub metadata: SignerMessageMetadata, - /// The timestamp at which a tenure extend will be accepted by the responding signer - pub tenure_extend_timestamp: u64, + /// Extra versioned block response data + pub response_data: BlockResponseData, } impl StacksMessageCodec for BlockAccepted { @@ -769,7 +843,7 @@ impl StacksMessageCodec for BlockAccepted { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; - write_next(fd, &self.tenure_extend_timestamp)?; + write_next(fd, &self.response_data)?; Ok(()) } @@ -777,12 +851,12 @@ impl StacksMessageCodec for BlockAccepted { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; - let tenure_extend_timestamp = read_next::(fd).unwrap_or(u64::MAX); + let response_data = read_next::(fd)?; Ok(Self { signer_signature_hash, signature, metadata, - tenure_extend_timestamp, + response_data, }) } } @@ -798,7 +872,7 @@ impl BlockAccepted { signer_signature_hash, signature, metadata: SignerMessageMetadata::default(), - tenure_extend_timestamp, + response_data: BlockResponseData::new(tenure_extend_timestamp), } } } @@ -818,8 +892,8 @@ pub struct BlockRejection { pub chain_id: u32, /// Signer message metadata pub metadata: SignerMessageMetadata, - /// The timestamp at which a tenure extend will be 
accepted by the responding signer - pub tenure_extend_timestamp: u64, + /// Extra versioned block response data + pub response_data: BlockResponseData, } impl BlockRejection { @@ -843,7 +917,7 @@ impl BlockRejection { signature: MessageSignature::empty(), chain_id, metadata: SignerMessageMetadata::default(), - tenure_extend_timestamp: timestamp, + response_data: BlockResponseData::new(timestamp), }; rejection .sign(private_key) @@ -870,7 +944,7 @@ impl BlockRejection { chain_id, signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), - tenure_extend_timestamp: timestamp, + response_data: BlockResponseData::new(timestamp), }; rejection .sign(private_key) @@ -921,7 +995,7 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; - write_next(fd, &self.tenure_extend_timestamp)?; + write_next(fd, &self.response_data)?; Ok(()) } @@ -935,7 +1009,7 @@ impl StacksMessageCodec for BlockRejection { let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; - let tenure_extend_timestamp = read_next::(fd).unwrap_or(u64::MAX); + let response_data = read_next::(fd)?; Ok(Self { reason, reason_code, @@ -943,7 +1017,7 @@ impl StacksMessageCodec for BlockRejection { chain_id, signature, metadata, - tenure_extend_timestamp, + response_data, }) } } @@ -1103,7 +1177,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), - tenure_extend_timestamp: thread_rng().next_u64(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); @@ -1130,7 +1204,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), - tenure_extend_timestamp: 
thread_rng().next_u64(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); @@ -1277,9 +1351,9 @@ mod test { #[test] fn test_backwards_compatibility() { let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3"; - let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_rejected_bytes = hex_bytes(block_rejected_hex).unwrap(); let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8"; - let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_accepted_bytes = hex_bytes(block_accepted_hex).unwrap(); let block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); let block_accepted = read_next::(&mut &block_accepted_bytes[..]) @@ -1294,7 +1368,7 @@ mod test { chain_id: CHAIN_ID_TESTNET, signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), - tenure_extend_timestamp: u64::MAX + response_data: BlockResponseData::new(u64::MAX) })) ); @@ -1307,17 +1381,17 @@ mod test { .unwrap(), metadata: SignerMessageMetadata::empty(), signature: 
MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), - tenure_extend_timestamp: u64::MAX + response_data: BlockResponseData::new(u64::MAX) })) ); } #[test] fn test_block_response_metadata() { - let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c6400"; - let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); - let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c6400"; - let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c64"; + let block_rejected_bytes = hex_bytes(block_rejected_hex).unwrap(); + let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c64"; + let block_accepted_bytes = hex_bytes(block_accepted_hex).unwrap(); let 
block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); let block_accepted = read_next::(&mut &block_accepted_bytes[..]) @@ -1334,7 +1408,7 @@ mod test { metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, - tenure_extend_timestamp: u64::MAX, + response_data: BlockResponseData::new(u64::MAX), })) ); @@ -1349,7 +1423,7 @@ mod test { server_version: "Hello world".to_string(), }, signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), - tenure_extend_timestamp: u64::MAX + response_data: BlockResponseData::empty(), })) ); } @@ -1362,4 +1436,81 @@ mod test { .expect("Failed to deserialize SignerMessageMetadata"); assert_eq!(deserialized_metadata, SignerMessageMetadata::empty()); } + + #[test] + fn block_response_data_serialization() { + let mut response_data = BlockResponseData::new(2); + response_data.unknown_bytes = vec![1, 2, 3, 4]; + let mut bytes = vec![]; + response_data.consensus_serialize(&mut bytes).unwrap(); + // 1 byte version + 4 bytes (bytes_len) + 8 bytes tenure_extend_timestamp + 4 bytes unknown_bytes + assert_eq!(bytes.len(), 17); + let deserialized_data = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(response_data, deserialized_data); + + let response_data = BlockResponseData::new(2); + let mut bytes = vec![]; + response_data.consensus_serialize(&mut bytes).unwrap(); + // 1 byte version + 4 bytes (bytes_len) + 8 bytes tenure_extend_timestamp + 0 bytes unknown_bytes + assert_eq!(bytes.len(), 13); + let deserialized_data = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(response_data, deserialized_data); + } + + /// Mock struct for testing "future proofing" of the block response data + pub struct NewerBlockResponseData { + pub version: u8, + pub 
tenure_extend_timestamp: u64, + pub some_other_field: u64, + pub yet_another_field: u64, + } + + impl NewerBlockResponseData { + pub fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.tenure_extend_timestamp)?; + write_next(fd, &self.some_other_field)?; + write_next(fd, &self.yet_another_field)?; + Ok(()) + } + + pub fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.version)?; + let mut inner_bytes = vec![]; + self.inner_consensus_serialize(&mut inner_bytes)?; + let bytes_len = inner_bytes.len() as u32; + write_next(fd, &bytes_len)?; + fd.write_all(&inner_bytes).map_err(CodecError::WriteError)?; + Ok(()) + } + } + + #[test] + fn test_newer_block_response_data() { + let new_response_data = NewerBlockResponseData { + version: 11, + tenure_extend_timestamp: 2, + some_other_field: 3, + yet_another_field: 4, + }; + + let mut bytes = vec![]; + new_response_data.consensus_serialize(&mut bytes).unwrap(); + let mut reader = bytes.as_slice(); + let deserialized_data = read_next::(&mut reader) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(reader.len(), 0, "Expected bytes to be fully consumed"); + assert_eq!(deserialized_data.version, 11); + assert_eq!(deserialized_data.tenure_extend_timestamp, 2); + // two extra u64s: + assert_eq!(deserialized_data.unknown_bytes.len(), 16); + + // BlockResponseData with unknown bytes can serialize/deserialize back to itself + let mut bytes = vec![]; + deserialized_data.consensus_serialize(&mut bytes).unwrap(); + let deserialized_data_2 = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(deserialized_data, deserialized_data_2); + } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index b1a50da2df..934686d1c2 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -236,7 +236,8 @@ mod tests { use 
clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, RejectCode, SignerMessage, SignerMessageMetadata, + BlockRejection, BlockResponse, BlockResponseData, RejectCode, SignerMessage, + SignerMessageMetadata, }; use rand::{thread_rng, RngCore}; @@ -286,7 +287,7 @@ mod tests { chain_id: thread_rng().next_u32(), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::empty(), - tenure_extend_timestamp: thread_rng().next_u64(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 0785f64818..75359673b2 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -458,7 +458,7 @@ impl SignCoordinator { signer_signature_hash: response_hash, signature, metadata, - tenure_extend_timestamp: _, // TOOD: utilize this info + response_data: _, // TOOD: utilize this info } = accepted; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { From b6cf2388ea0f17e29608346de7ddf4cae08b7094 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 21 Nov 2024 08:44:50 -0800 Subject: [PATCH 038/115] CRC: small optimization to 0 out the validation time of ignored blocks Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 17 +++-------------- stacks-signer/src/v0/signer.rs | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index b2c3fed343..f30c0124c1 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -19,7 +19,6 @@ use std::path::Path; 
use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::address::StacksAddressExtensions; use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, @@ -866,19 +865,9 @@ impl SignerDb { { // Always use the oldest block as our tenure start timestamp tenure_start_timestamp = Some(block_info.proposed_time); - let non_bootcode_contract_call_block = block_info.block.txs.iter().any(|tx| { - // We only care about blocks that contain a non bootcode contract call - match &tx.payload { - TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), - TransactionPayload::SmartContract(..) => true, - _ => false, - } - }); - - if non_bootcode_contract_call_block { - tenure_process_time_ms = tenure_process_time_ms - .saturating_add(block_info.validation_time_ms.unwrap_or(0)); - } + + tenure_process_time_ms = + tenure_process_time_ms.saturating_add(block_info.validation_time_ms.unwrap_or(0)); if block_info .block diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 356f2cbedc..ea4a3d812d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -18,6 +18,8 @@ use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::chainstate::stacks::address::StacksAddressExtensions; +use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -587,7 +589,20 @@ impl Signer { block_info.signed_self.get_or_insert(get_epoch_time_secs()); } // Record the block validation time - block_info.validation_time_ms = Some(block_validate_ok.validation_time_ms); + let non_bootcode_contract_call_block = 
block_info.block.txs.iter().any(|tx| { + // We only care about blocks that contain a non bootcode contract call + match &tx.payload { + TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), + TransactionPayload::SmartContract(..) => true, + _ => false, + } + }); + if non_bootcode_contract_call_block { + block_info.validation_time_ms = Some(block_validate_ok.validation_time_ms); + } else { + // Ignore purely boot code and stx transfers when calculating the processing/validation time + block_info.validation_time_ms = Some(0); + } let signature = self .private_key From f0228c9ff5fce2c00ba490b92c51c8466dc4ee64 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 21 Nov 2024 15:20:55 -0500 Subject: [PATCH 039/115] feat: move sign coordinator logic into its own thread `SignerDBListener` struct is for a new thread that is always processing StackerDB messages from the signers during a mining tenure. `SignerCoordinator` is the interface that the miner uses with the `SignerDBListener`, to propose a block and wait for signatures. 
--- testnet/stacks-node/src/nakamoto_node.rs | 3 +- .../stacks-node/src/nakamoto_node/miner.rs | 119 ++-- .../src/nakamoto_node/sign_coordinator.rs | 616 ------------------ .../src/nakamoto_node/signer_coordinator.rs | 371 +++++++++++ .../src/nakamoto_node/signerdb_listener.rs | 391 +++++++++++ testnet/stacks-node/src/neon_node.rs | 6 +- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 7 files changed, 819 insertions(+), 689 deletions(-) delete mode 100644 testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index edaf12e98b..090170837a 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -42,7 +42,8 @@ use crate::run_loop::RegisteredKey; pub mod miner; pub mod peer; pub mod relayer; -pub mod sign_coordinator; +pub mod signer_coordinator; +pub mod signerdb_listener; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 877eab88a1..d51cccb4ba 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -41,14 +41,13 @@ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::get_epoch_time_secs; -use stacks::util::secp256k1::MessageSignature; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; use super::relayer::RelayerThread; -use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; +use 
crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; use crate::neon_node; use crate::run_loop::nakamoto::Globals; @@ -291,6 +290,37 @@ impl BlockMinerThread { .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; let mut last_block_rejected = false; + let reward_set = self.load_signer_set()?; + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let sortdb = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + // Start the signer coordinator + let mut coordinator = SignerCoordinator::new( + self.event_dispatcher.stackerdb_channel.clone(), + self.globals.should_keep_running.clone(), + &reward_set, + &burn_tip, + &self.burnchain, + miner_privkey, + &self.config, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + })?; + // now, actually run this tenure loop { #[cfg(test)] @@ -371,9 +401,22 @@ impl BlockMinerThread { if let Some(mut new_block) = new_block { Self::fault_injection_block_broadcast_stall(&new_block); - let (reward_set, signer_signature) = match self - .gather_signatures(&mut new_block, &mut stackerdbs) - { + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! 
{e:?}" + )) + })?; + let signer_signature = match coordinator.propose_block( + &mut new_block, + &self.burn_block, + &self.burnchain, + &sortdb, + &mut chain_state, + &mut stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + ) { Ok(x) => x, Err(e) => match e { NakamotoNodeError::StacksTipChanged => { @@ -413,6 +456,8 @@ impl BlockMinerThread { }; last_block_rejected = false; + let reward_set = self.load_signer_set()?; + new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { warn!("Error accepting own block: {e:?}. Will try mining again."); @@ -526,68 +571,6 @@ impl BlockMinerThread { Ok(reward_set) } - /// Gather a list of signatures from the signers for the block - fn gather_signatures( - &mut self, - new_block: &mut NakamotoBlock, - stackerdbs: &mut StackerDBs, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open sortition DB. Cannot mine! {e:?}" - )) - })?; - - let reward_set = self.load_signer_set()?; - - if self.config.get_node_config(false).mock_mining { - return Ok((reward_set, Vec::new())); - } - - let mut coordinator = SignCoordinator::new( - &reward_set, - miner_privkey, - &self.config, - self.globals.should_keep_running.clone(), - self.event_dispatcher.stackerdb_channel.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! 
{e:?}" - )) - })?; - - let mut chain_state = - neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! {e:?}" - )) - })?; - - let signature = coordinator.run_sign_v0( - new_block, - &self.burn_block, - &self.burnchain, - &sort_db, - &mut chain_state, - stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - )?; - - Ok((reward_set, signature)) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { @@ -706,7 +689,7 @@ impl BlockMinerThread { let miners_contract_id = boot_code_id(MINERS_NAME, chain_state.mainnet); let mut miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( miner_privkey, &sort_db, &self.burn_block, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs deleted file mode 100644 index 0d52f9d14a..0000000000 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ /dev/null @@ -1,616 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::collections::BTreeMap; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::Receiver; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::{ - BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, -}; -use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; -use stacks::burnchains::Burnchain; -use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; -use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::libstackerdb::StackerDBChunkData; -use stacks::net::stackerdb::StackerDBs; -use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; -use stacks::util::secp256k1::MessageSignature; -use stacks::util_lib::boot::boot_code_id; -use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; - -use super::Error as NakamotoNodeError; -use crate::event_dispatcher::StackerDBChannel; -use crate::neon::Counters; -use crate::Config; - -/// Fault injection flag to prevent the miner from seeing enough signer signatures. -/// Used to test that the signers will broadcast a block if it gets enough signatures -#[cfg(test)] -pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); - -/// How long should the coordinator poll on the event receiver before -/// waking up to check timeouts? 
-static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); - -/// The `SignCoordinator` struct sole function is to serve as the coordinator for Nakamoto block signing. -/// This struct is used by Nakamoto miners to act as the coordinator for the blocks they produce. -pub struct SignCoordinator { - receiver: Option>, - message_key: StacksPrivateKey, - is_mainnet: bool, - miners_session: StackerDBSession, - signer_entries: HashMap, - weight_threshold: u32, - total_weight: u32, - keep_running: Arc, - pub next_signer_bitvec: BitVec<4000>, - stackerdb_channel: Arc>, -} - -impl Drop for SignCoordinator { - fn drop(&mut self) { - let stackerdb_channel = self - .stackerdb_channel - .lock() - .expect("FATAL: failed to lock stackerdb channel"); - stackerdb_channel.replace_receiver(self.receiver.take().expect( - "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", - )); - } -} - -impl SignCoordinator { - /// * `reward_set` - the active reward set data, used to construct the signer - /// set parameters. 
- /// * `aggregate_public_key` - the active aggregate key for this cycle - pub fn new( - reward_set: &RewardSet, - message_key: StacksPrivateKey, - config: &Config, - keep_running: Arc, - stackerdb_channel: Arc>, - ) -> Result { - let is_mainnet = config.is_mainnet(); - let Some(ref reward_set_signers) = reward_set.signers else { - error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {reward_set:?}"); - return Err(ChainstateError::NoRegisteredSigners(0)); - }; - - let signer_entries = SignerEntries::parse(is_mainnet, reward_set_signers).map_err(|e| { - ChainstateError::InvalidStacksBlock(format!( - "Failed to parse NakamotoSignerEntries: {e:?}" - )) - })?; - let rpc_socket = config - .node - .get_rpc_loopback() - .ok_or_else(|| ChainstateError::MinerAborted)?; - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); - - let next_signer_bitvec: BitVec<4000> = BitVec::zeros( - reward_set_signers - .clone() - .len() - .try_into() - .expect("FATAL: signer set length greater than u16"), - ) - .expect("FATAL: unable to construct initial bitvec for signer set"); - - debug!( - "Initializing miner/coordinator"; - "num_signers" => signer_entries.signer_pks.len(), - "signer_public_keys" => ?signer_entries.signer_pks, - ); - - let total_weight = reward_set.total_signing_weight().map_err(|e| { - warn!("Failed to calculate total weight for the reward set: {e:?}"); - ChainstateError::NoRegisteredSigners(0) - })?; - - let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; - - let signer_public_keys = reward_set_signers - .iter() - .cloned() - .enumerate() - .map(|(idx, signer)| { - let Ok(slot_id) = u32::try_from(idx) else { - return Err(ChainstateError::InvalidStacksBlock( - "Signer index exceeds u32".into(), - )); - }; - Ok((slot_id, signer)) - }) - .collect::, ChainstateError>>()?; - #[cfg(test)] - { 
- // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - use crate::tests::nakamoto_integrations::TEST_SIGNING; - if TEST_SIGNING.lock().unwrap().is_some() { - debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); - let (receiver, replaced_other) = stackerdb_channel - .lock() - .expect("FATAL: failed to lock StackerDB channel") - .register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); - } - let sign_coordinator = Self { - message_key, - receiver: Some(receiver), - is_mainnet, - miners_session, - next_signer_bitvec, - signer_entries: signer_public_keys, - weight_threshold: threshold, - total_weight, - keep_running, - stackerdb_channel, - }; - return Ok(sign_coordinator); - } - } - - let (receiver, replaced_other) = stackerdb_channel - .lock() - .expect("FATAL: failed to lock StackerDB channel") - .register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); - } - - Ok(Self { - receiver: Some(receiver), - message_key, - is_mainnet, - miners_session, - next_signer_bitvec, - signer_entries: signer_public_keys, - weight_threshold: threshold, - total_weight, - keep_running, - stackerdb_channel, - }) - } - - /// Send a message over the miners contract using a `StacksPrivateKey` - #[allow(clippy::too_many_arguments)] - pub fn send_miners_message( - miner_sk: &StacksPrivateKey, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: M, - miner_slot_id: MinerSlotID, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - election_sortition: &ConsensusHash, - ) -> Result<(), String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) - .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? - else { - return Err("No slot for miner".into()); - }; - - let slot_id = slot_range - .start - .saturating_add(miner_slot_id.to_u8().into()); - if !slot_range.contains(&slot_id) { - return Err("Not enough slots for miner messages".into()); - } - // Get the LAST slot version number written to the DB. If not found, use 0. - // Add 1 to get the NEXT version number - // Note: we already check above for the slot's existence - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let slot_version = stackerdbs - .get_slot_version(&miners_contract_id, slot_id) - .map_err(|e| format!("Failed to read slot version: {e:?}"))? - .unwrap_or(0) - .saturating_add(1); - let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); - chunk - .sign(miner_sk) - .map_err(|_| "Failed to sign StackerDB chunk")?; - - match miners_session.put_chunk(&chunk) { - Ok(ack) => { - if ack.accepted { - debug!("Wrote message to stackerdb: {ack:?}"); - Ok(()) - } else { - Err(format!("{ack:?}")) - } - } - Err(e) => Err(format!("{e:?}")), - } - } - - /// Do we ignore signer signatures? 
- #[cfg(test)] - fn fault_injection_ignore_signatures() -> bool { - if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { - return true; - } - false - } - - #[cfg(not(test))] - fn fault_injection_ignore_signatures() -> bool { - false - } - - /// Check if the tenure needs to change - fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { - let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - - if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { - info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); - true - } else { - false - } - } - - /// Start gathering signatures for a Nakamoto block. - /// This function begins by sending a `BlockProposal` message - /// to the signers, and then waits for the signers to respond - /// with their signatures. It does so in two ways, concurrently: - /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. - /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. - // Mutants skip here: this function is covered via integration tests, - // which the mutation testing does not see. 
- #[cfg_attr(test, mutants::skip)] - #[allow(clippy::too_many_arguments)] - pub fn run_sign_v0( - &mut self, - block: &NakamotoBlock, - burn_tip: &BlockSnapshot, - burnchain: &Burnchain, - sortdb: &SortitionDB, - chain_state: &mut StacksChainState, - stackerdbs: &StackerDBs, - counters: &Counters, - election_sortition: &ConsensusHash, - ) -> Result, NakamotoNodeError> { - let reward_cycle_id = burnchain - .block_height_to_reward_cycle(burn_tip.block_height) - .expect("FATAL: tried to initialize coordinator before first burn block height"); - - let block_proposal = BlockProposal { - block: block.clone(), - burn_height: burn_tip.block_height, - reward_cycle: reward_cycle_id, - }; - - let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); - debug!("Sending block proposal message to signers"; - "signer_signature_hash" => %block.header.signer_signature_hash(), - ); - Self::send_miners_message::( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - block_proposal_message, - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortition, - ) - .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; - counters.bump_naka_proposed_blocks(); - - #[cfg(test)] - { - info!( - "SignCoordinator: sent block proposal to .miners, waiting for test signing channel" - ); - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. 
- if let Some(signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signatures); - } - } - - let Some(ref mut receiver) = self.receiver else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the StackerDB event receiver".into(), - )); - }; - - let mut total_weight_signed: u32 = 0; - let mut total_reject_weight: u32 = 0; - let mut responded_signers = HashSet::new(); - let mut gathered_signatures = BTreeMap::new(); - - info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; - "threshold" => self.weight_threshold, - ); - - loop { - // look in the nakamoto staging db -- a block can only get stored there if it has - // enough signing weight to clear the threshold - if let Ok(Some((stored_block, _sz))) = chain_state - .nakamoto_blocks_db() - .get_nakamoto_block(&block.block_id()) - .map_err(|e| { - warn!( - "Failed to query chainstate for block {}: {e:?}", - &block.block_id() - ); - e - }) - { - debug!("SignCoordinator: Found signatures in relayed block"); - counters.bump_naka_signer_pushed_blocks(); - return Ok(stored_block.header.signer_signature); - } - - if Self::check_burn_tip_changed(sortdb, burn_tip) { - debug!("SignCoordinator: Exiting due to new burnchain tip"); - return Err(NakamotoNodeError::BurnchainTipChanged); - } - - // one of two things can happen: - // * we get enough signatures from stackerdb from the signers, OR - // * we see our block get processed in our chainstate (meaning, the signers broadcasted - // the block and our node got it and processed it) - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), 
- )) - } - }; - - // was the node asked to stop? - if !self.keep_running.load(Ordering::SeqCst) { - info!("SignerCoordinator: received node exit request. Aborting"); - return Err(NakamotoNodeError::ChannelClosed); - } - - // check to see if this event we got is a signer event - let is_signer_event = - event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); - - if !is_signer_event { - debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); - continue; - } - - let modified_slots = &event.modified_slots.clone(); - - let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { - warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); - }) else { - continue; - }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("Received signer event other than a signer message. Ignoring."); - continue; - }; - if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. Ignoring."); - continue; - }; - let slot_ids = modified_slots - .iter() - .map(|chunk| chunk.slot_id) - .collect::>(); - - debug!("SignCoordinator: Received messages from signers"; - "count" => messages.len(), - "slot_ids" => ?slot_ids, - "threshold" => self.weight_threshold - ); - - for (message, slot_id) in messages.into_iter().zip(slot_ids) { - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); - }; - let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) - else { - return Err(NakamotoNodeError::SignerSignatureError( - "Failed to parse signer public key".into(), - )); - }; - - if responded_signers.contains(&signer_pubkey) { - debug!( - "Signer {slot_id} already responded for block {}. 
Ignoring {message:?}.", block.header.signer_signature_hash(); - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - match message { - SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { - let BlockAccepted { - signer_signature_hash: response_hash, - signature, - metadata, - tenure_extend_timestamp: _, // TODO: utilize this info - } = accepted; - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != response_hash { - warn!( - "Processed signature for a different block. Will try to continue."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "response_hash" => %response_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash, - "server_version" => %metadata.server_version - ); - continue; - } - debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); - let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) - else { - warn!("Got invalid signature from a signer. Ignoring."); - continue; - }; - if !valid_sig { - warn!( - "Processed signature but didn't validate over the expected block. 
Ignoring"; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - - if Self::fault_injection_ignore_signatures() { - warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } - - info!("SignCoordinator: Signature Added to block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id(), - "server_version" => metadata.server_version, - ); - gathered_signatures.insert(slot_id, signature); - responded_signers.insert(signer_pubkey); - } - SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != rejected_data.signer_signature_hash { - warn!( - "Processed rejection for a different block. 
Will try to continue."; - "block_signer_signature_hash" => %block_sighash, - "rejected_data.signer_signature_hash" => %rejected_data.signer_signature_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - ); - continue; - } - let rejected_pubkey = match rejected_data.recover_public_key() { - Ok(rejected_pubkey) => { - if rejected_pubkey != signer_pubkey { - warn!("Recovered public key from rejected data does not match signer's public key. Ignoring."); - continue; - } - rejected_pubkey - } - Err(e) => { - warn!("Failed to recover public key from rejected data: {e:?}. Ignoring."); - continue; - } - }; - responded_signers.insert(rejected_pubkey); - debug!( - "Signer {slot_id} rejected our block {}/{}", - &block.header.consensus_hash, - &block.header.block_hash() - ); - total_reject_weight = total_reject_weight - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); - - if total_reject_weight.saturating_add(self.weight_threshold) - > self.total_weight - { - debug!( - "{total_reject_weight}/{} signers vote to reject our block {}/{}", - self.total_weight, - &block.header.consensus_hash, - &block.header.block_hash() - ); - counters.bump_naka_rejected_blocks(); - return Err(NakamotoNodeError::SignersRejected); - } - continue; - } - SignerMessageV0::BlockProposal(_) => { - debug!("Received block proposal message. Ignoring."); - continue; - } - SignerMessageV0::BlockPushed(_) => { - debug!("Received block pushed message. Ignoring."); - continue; - } - SignerMessageV0::MockSignature(_) - | SignerMessageV0::MockProposal(_) - | SignerMessageV0::MockBlock(_) => { - debug!("Received mock message. Ignoring."); - continue; - } - }; - } - // After gathering all signatures, return them if we've hit the threshold - if total_weight_signed >= self.weight_threshold { - info!("SignCoordinator: Received enough signatures. 
Continuing."; - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - return Ok(gathered_signatures.values().cloned().collect()); - } - } - } -} diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs new file mode 100644 index 0000000000..f461ce93ec --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -0,0 +1,371 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::BTreeMap; +use std::sync::atomic::AtomicBool; +use std::sync::{Arc, Condvar, Mutex}; +use std::thread::JoinHandle; + +use hashbrown::{HashMap, HashSet}; +use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::codec::StacksMessageCodec; +use stacks::libstackerdb::StackerDBChunkData; +use stacks::net::stackerdb::StackerDBs; +use stacks::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use stacks::util::hash::Sha512Trunc256Sum; +use stacks::util::secp256k1::MessageSignature; +use stacks::util_lib::boot::boot_code_id; + +use super::signerdb_listener::{SignerDBListener, TimestampInfo}; +use super::Error as NakamotoNodeError; +use crate::event_dispatcher::StackerDBChannel; +use crate::nakamoto_node::signerdb_listener::BlockStatus; +use crate::neon::Counters; +use crate::Config; + +/// Helper function to determine if we should wait for more signatures +fn should_wait(status: Option<&BlockStatus>, weight_threshold: u32, total_weight: u32) -> bool { + match status { + Some(status) => { + status.total_weight_signed < weight_threshold + && status.total_reject_weight.saturating_add(weight_threshold) <= total_weight + } + None => true, + } +} + +/// The state of the signer database listener, used by the miner thread to +/// interact with the signer listener. +pub struct SignerCoordinator { + /// The private key used to sign messages from the miner + message_key: StacksPrivateKey, + /// Is this mainnet? 
+ is_mainnet: bool, + /// The session for writing to the miners contract in the stackerdb + miners_session: StackerDBSession, + /// The total weight of all signers + total_weight: u32, + /// The weight threshold for block approval + weight_threshold: u32, + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: BlockStatus + blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + signer_idle_timestamps: Arc>>, + /// Handle for the signer DB listener thread + listener_thread: Option>, +} + +impl SignerCoordinator { + /// Create a new `SignerCoordinator` instance. + /// This will spawn a new thread to listen for messages from the signer DB. + pub fn new( + stackerdb_channel: Arc>, + keep_running: Arc, + reward_set: &RewardSet, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + message_key: StacksPrivateKey, + config: &Config, + ) -> Result { + // Create the signer DB listener + let mut listener = SignerDBListener::new( + stackerdb_channel, + keep_running.clone(), + reward_set, + burn_tip, + burnchain, + )?; + let is_mainnet = config.is_mainnet(); + let rpc_socket = config + .node + .get_rpc_loopback() + .ok_or_else(|| ChainstateError::MinerAborted)?; + let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); + let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + + let mut sc = Self { + message_key, + is_mainnet, + miners_session, + total_weight: listener.total_weight, + weight_threshold: listener.weight_threshold, + blocks: listener.blocks.clone(), + signer_idle_timestamps: listener.signer_idle_timestamps.clone(), + listener_thread: None, + }; + + // Spawn the signer DB listener thread + let listener_thread = std::thread::Builder::new() + .name("signerdb_listener".to_string()) + .spawn(move || { + if let Err(e) = 
listener.run() { + error!("SignerDBListener: failed to run: {e:?}"); + } + }) + .map_err(|e| { + error!("Failed to spawn signerdb_listener thread: {e:?}"); + ChainstateError::MinerAborted + })?; + + sc.listener_thread = Some(listener_thread); + + Ok(sc) + } + + /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] + pub fn send_miners_message( + miner_sk: &StacksPrivateKey, + sortdb: &SortitionDB, + tip: &BlockSnapshot, + stackerdbs: &StackerDBs, + message: M, + miner_slot_id: MinerSlotID, + is_mainnet: bool, + miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, + ) -> Result<(), String> { + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) + .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? + else { + return Err("No slot for miner".into()); + }; + + let slot_id = slot_range + .start + .saturating_add(miner_slot_id.to_u8().into()); + if !slot_range.contains(&slot_id) { + return Err("Not enough slots for miner messages".into()); + } + // Get the LAST slot version number written to the DB. If not found, use 0. + // Add 1 to get the NEXT version number + // Note: we already check above for the slot's existence + let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); + let slot_version = stackerdbs + .get_slot_version(&miners_contract_id, slot_id) + .map_err(|e| format!("Failed to read slot version: {e:?}"))? + .unwrap_or(0) + .saturating_add(1); + let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); + chunk + .sign(miner_sk) + .map_err(|_| "Failed to sign StackerDB chunk")?; + + match miners_session.put_chunk(&chunk) { + Ok(ack) => { + if ack.accepted { + debug!("Wrote message to stackerdb: {ack:?}"); + Ok(()) + } else { + Err(format!("{ack:?}")) + } + } + Err(e) => Err(format!("{e:?}")), + } + } + + /// Propose a Nakamoto block and gather signatures for it. 
+ /// This function begins by sending a `BlockProposal` message to the + /// signers, and then it waits for the signers to respond with their + /// signatures. It does so in two ways, concurrently: + /// * It waits for the signer DB listener to collect enough signatures to + /// accept or reject the block + /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. + // Mutants skip here: this function is covered via integration tests, + // which the mutation testing does not see. + #[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] + pub fn propose_block( + &mut self, + block: &NakamotoBlock, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + sortdb: &SortitionDB, + chain_state: &mut StacksChainState, + stackerdbs: &StackerDBs, + counters: &Counters, + election_sortition: &ConsensusHash, + ) -> Result, NakamotoNodeError> { + // Add this block to the block status map + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + let block_status = BlockStatus { + responded_signers: HashSet::new(), + gathered_signatures: BTreeMap::new(), + total_weight_signed: 0, + total_reject_weight: 0, + }; + blocks.insert(block.header.signer_signature_hash(), block_status); + + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + + let block_proposal = BlockProposal { + block: block.clone(), + burn_height: burn_tip.block_height, + reward_cycle: reward_cycle_id, + }; + + let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => %block.header.signer_signature_hash(), + ); + 
Self::send_miners_message::( + &self.message_key, + sortdb, + burn_tip, + stackerdbs, + block_proposal_message, + MinerSlotID::BlockProposal, + self.is_mainnet, + &mut self.miners_session, + election_sortition, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + + #[cfg(test)] + { + info!( + "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } + } + + self.get_block_status(&block.header.signer_signature_hash(), chain_state, counters) + } + + /// Get the block status for a given block hash. + /// If we have not yet received enough signatures for this block, this + /// method will block until we do. + fn get_block_status( + &self, + block_hash: &Sha512Trunc256Sum, + chain_state: &mut StacksChainState, + counters: &Counters, + ) -> Result, NakamotoNodeError> { + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + // TODO: integrate this check into the waiting for the condvar + // Look in the nakamoto staging db -- a block can only get stored there + // if it has enough signing weight to clear the threshold. 
+ // if let Ok(Some((stored_block, _sz))) = chain_state + // .nakamoto_blocks_db() + // .get_nakamoto_block(&block.block_id()) + // .map_err(|e| { + // warn!( + // "Failed to query chainstate for block {}: {e:?}", + // &block.block_id() + // ); + // e + // }) + // { + // debug!("SignCoordinator: Found signatures in relayed block"); + // counters.bump_naka_signer_pushed_blocks(); + // return Ok(stored_block.header.signer_signature); + // } + + // if Self::check_burn_tip_changed(sortdb, burn_tip) { + // debug!("SignCoordinator: Exiting due to new burnchain tip"); + // return Err(NakamotoNodeError::BurnchainTipChanged); + // } + + blocks = cvar + .wait_while(blocks, |map| { + should_wait( + map.get(block_hash), + self.weight_threshold, + self.total_weight, + ) + }) + .expect("FATAL: failed to wait on block status"); + let block_status = blocks.get(block_hash).cloned().ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Block unexpectedly missing from map".into(), + ) + })?; + if block_status + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + info!( + "{}/{} signers vote to reject block", + block_status.total_reject_weight, self.total_weight; + "stacks_block_hash" => %block_hash, + ); + counters.bump_naka_rejected_blocks(); + Err(NakamotoNodeError::SignersRejected) + } else if block_status.total_weight_signed >= self.weight_threshold { + info!("Received enough signatures, block accepted"; + "stacks_block_hash" => %block_hash, + ); + Ok(block_status.gathered_signatures.values().cloned().collect()) + } else { + info!("Unblocked without reaching the threshold, likely due to an interruption"; + "stacks_block_hash" => %block_hash, + ); + Err(NakamotoNodeError::ChannelClosed) + } + } + + /// Get the timestamp at which at least 70% of the signing power should be + /// willing to accept a time-based tenure extension. 
+ pub fn get_tenure_extend_timestamp(&self) -> u64 { + let signer_idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock signer idle timestamps"); + let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); + idle_timestamps.sort_by_key(|info| info.timestamp); + let mut weight_sum = 0; + for info in idle_timestamps { + weight_sum += info.weight; + if weight_sum >= self.weight_threshold { + return info.timestamp; + } + } + + // We don't have enough information to reach a 70% threshold at any + // time, so return u64::MAX to indicate that we should not extend the + // tenure. + u64::MAX + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs new file mode 100644 index 0000000000..89769b3e3e --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs @@ -0,0 +1,391 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::BTreeMap; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc::Receiver; +use std::sync::{Arc, Condvar, Mutex}; +use std::time::Duration; + +use hashbrown::{HashMap, HashSet}; +use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage as SignerMessageV0}; +use libsigner::SignerEvent; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::nakamoto::NakamotoBlockHeader; +use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, SIGNERS_NAME}; +use stacks::chainstate::stacks::events::StackerDBChunksEvent; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::types::chainstate::StacksPublicKey; +use stacks::types::PublicKey; +use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; +use stacks::util::secp256k1::MessageSignature; + +use super::Error as NakamotoNodeError; +use crate::event_dispatcher::StackerDBChannel; + +/// Fault injection flag to prevent the miner from seeing enough signer signatures. +/// Used to test that the signers will broadcast a block if it gets enough signatures +#[cfg(test)] +pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); + +/// How long should the coordinator poll on the event receiver before +/// waking up to check timeouts? +pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); + +#[derive(Debug, Clone)] +pub(crate) struct BlockStatus { + pub responded_signers: HashSet, + pub gathered_signatures: BTreeMap, + pub total_weight_signed: u32, + pub total_reject_weight: u32, +} + +#[derive(Debug, Clone)] +pub(crate) struct TimestampInfo { + pub timestamp: u64, + pub weight: u32, +} + +/// The listener for the signer database, which listens for messages from the +/// signers and tracks the state of block signatures and idle timestamps. 
+#[derive(Debug)] +pub struct SignerDBListener { + /// Channel to receive StackerDB events + receiver: Receiver, + /// Flag to shut the listener down + keep_running: Arc, + /// The signer set for this tenure (0 or 1) + signer_set: u32, + /// The total weight of all signers + pub(crate) total_weight: u32, + /// The weight threshold for block approval + pub(crate) weight_threshold: u32, + /// The signer entries for this tenure (keyed by slot_id) + signer_entries: HashMap, + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: BlockStatus + pub(crate) blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + pub(crate) signer_idle_timestamps: Arc>>, +} + +impl SignerDBListener { + pub fn new( + stackerdb_channel: Arc>, + keep_running: Arc, + reward_set: &RewardSet, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + ) -> Result { + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); + if replaced_other { + warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); + } + + let total_weight = reward_set.total_signing_weight().map_err(|e| { + warn!("Failed to calculate total weight for the reward set: {e:?}"); + ChainstateError::NoRegisteredSigners(0) + })?; + + let weight_threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; + + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + let signer_set = + u32::try_from(reward_cycle_id % 2).expect("FATAL: reward cycle id % 2 exceeds u32"); + + let Some(ref reward_set_signers) = reward_set.signers else { + error!("Could not initialize signing coordinator for reward set without signer"); + debug!("reward set: {reward_set:?}"); + return Err(ChainstateError::NoRegisteredSigners(0)); + }; + + let signer_entries = reward_set_signers + .iter() + .cloned() + .enumerate() + .map(|(idx, signer)| { + let Ok(slot_id) = u32::try_from(idx) else { + return Err(ChainstateError::InvalidStacksBlock( + "Signer index exceeds u32".into(), + )); + }; + Ok((slot_id, signer)) + }) + .collect::, ChainstateError>>()?; + + Ok(Self { + receiver, + keep_running, + signer_set, + total_weight, + weight_threshold, + signer_entries, + blocks: Arc::new((Mutex::new(HashMap::new()), Condvar::new())), + signer_idle_timestamps: Arc::new(Mutex::new(HashMap::new())), + }) + } + + /// Run the signer database listener. 
+ pub fn run(&mut self) -> Result<(), NakamotoNodeError> { + info!("SignerDBListener: Starting up"); + loop { + let event = match self.receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + warn!("SignerDBListener: StackerDB event receiver disconnected"); + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )); + } + }; + + // was the miner asked to stop? + if !self.keep_running.load(Ordering::SeqCst) { + info!("SignerDBListener: received miner exit request. Aborting"); + return Err(NakamotoNodeError::ChannelClosed); + } + + // check to see if this event we got is a signer event + let is_signer_event = + event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + + if !is_signer_event { + debug!("SignerDBListener: Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + continue; + } + + let modified_slots = &event.modified_slots.clone(); + + let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { + warn!("SignerDBListener: Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); + }) else { + continue; + }; + let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + debug!("SignerDBListener: Received signer event other than a signer message. Ignoring."); + continue; + }; + if signer_set != self.signer_set { + debug!("SignerDBListener: Received signer event for other reward cycle. 
Ignoring."); + continue; + }; + let slot_ids = modified_slots + .iter() + .map(|chunk| chunk.slot_id) + .collect::>(); + + debug!("SignerDBListener: Received messages from signers"; + "count" => messages.len(), + "slot_ids" => ?slot_ids, + ); + + for (message, slot_id) in messages.into_iter().zip(slot_ids) { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + + match message { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: block_sighash, + signature, + metadata, + tenure_extend_timestamp, // TODO: utilize this info + } = accepted; + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let block = match blocks.get_mut(&block_sighash) { + Some(block) => block, + None => { + info!( + "SignerDBListener: Received signature for block that we did not request. Ignoring."; + "signature" => %signature, + "block_signer_sighash" => %block_sighash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; + } + }; + + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!( + "SignerDBListener: Got invalid signature from a signer. Ignoring." + ); + continue; + }; + if !valid_sig { + warn!( + "SignerDBListener: Processed signature but didn't validate over the expected block. 
Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + + if Self::fault_injection_ignore_signatures() { + warn!("SignerDBListener: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + ); + continue; + } + + if !block.gathered_signatures.contains_key(&slot_id) { + block.total_weight_signed = block + .total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + + info!("SignerDBListener: Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + "tenure_extend_timestamp" => tenure_extend_timestamp, + "server_version" => metadata.server_version, + ); + block.gathered_signatures.insert(slot_id, signature); + block.responded_signers.insert(signer_pubkey); + + if block.total_weight_signed >= self.weight_threshold { + // Signal to anyone waiting on this block that we have enough signatures + cvar.notify_all(); + } + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let block = match blocks.get_mut(&rejected_data.signer_signature_hash) { + Some(block) => block, + None => { + info!( + "SignerDBListener: Received rejection for block that we did not request. 
Ignoring."; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; + } + }; + + let rejected_pubkey = match rejected_data.recover_public_key() { + Ok(rejected_pubkey) => { + if rejected_pubkey != signer_pubkey { + warn!("SignerDBListener: Recovered public key from rejected data does not match signer's public key. Ignoring."); + continue; + } + rejected_pubkey + } + Err(e) => { + warn!("SignerDBListener: Failed to recover public key from rejected data: {e:?}. Ignoring."); + continue; + } + }; + block.responded_signers.insert(rejected_pubkey); + block.total_reject_weight = block + .total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + info!("SignerDBListener: Signer rejected block"; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_pubkey" => rejected_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %rejected_data.signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + "reason" => rejected_data.reason, + "reason_code" => %rejected_data.reason_code, + "tenure_extend_timestamp" => rejected_data.tenure_extend_timestamp, + "server_version" => rejected_data.metadata.server_version, + ); + + if block + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + // Signal to anyone waiting on this block that we have enough rejections + cvar.notify_all(); + } + + continue; + } + SignerMessageV0::BlockProposal(_) => { + debug!("Received block proposal message. Ignoring."); + continue; + } + SignerMessageV0::BlockPushed(_) => { + debug!("Received block pushed message. Ignoring."); + continue; + } + SignerMessageV0::MockSignature(_) + | SignerMessageV0::MockProposal(_) + | SignerMessageV0::MockBlock(_) => { + debug!("Received mock message. 
Ignoring."); + continue; + } + }; + } + } + } + + /// Do we ignore signer signatures? + #[cfg(test)] + fn fault_injection_ignore_signatures() -> bool { + if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { + return true; + } + false + } + + #[cfg(not(test))] + fn fault_injection_ignore_signatures() -> bool { + false + } +} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b688db100d..1639f93c43 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -223,7 +223,7 @@ use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError} use crate::chain_data::MinerStats; use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; -use crate::nakamoto_node::sign_coordinator::SignCoordinator; +use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -2364,7 +2364,7 @@ impl BlockMinerThread { let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2392,7 +2392,7 @@ impl BlockMinerThread { }; info!("Sending mock block to stackerdb: {mock_block:?}"); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fd8d6c1096..fc7e31bea6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -68,7 +68,7 @@ use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_NO_TENURE_EXTEND, }; -use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; +use 
crate::nakamoto_node::signerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ From e13ba06264114f0528550b79369f07b1e5908637 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 21 Nov 2024 15:09:17 -0800 Subject: [PATCH 040/115] make a smaller table for easier retrieval of the timestamp calculation Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 2 +- stacks-signer/src/lib.rs | 2 + stacks-signer/src/runloop.rs | 8 + stacks-signer/src/signerdb.rs | 470 +++++++++++++++++++++++++------- stacks-signer/src/v0/signer.rs | 28 +- 5 files changed, 408 insertions(+), 102 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 5a6aa04200..779f985d8a 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -341,7 +341,7 @@ impl SortitionsView { let changed_burn_view = tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; let enough_time_passed = get_epoch_time_secs() - > signer_db.get_tenure_extend_timestamp( + > signer_db.calculate_tenure_extend_timestamp( self.config.tenure_idle_timeout, &sortition_consensus_hash, ); diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 244675c65c..d796f7582f 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -76,6 +76,8 @@ pub trait Signer: Debug + Display { ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; + /// Cleanup signer stale data + fn cleanup_stale_data(&mut self, current_reward_cycle: u64); } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 11faadf871..2850c1354c 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -377,6 +377,14 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo "is_in_next_prepare_phase" => 
is_in_next_prepare_phase, ); + if reward_cycle_before_refresh != current_reward_cycle { + for signer in self.stacks_signers.values_mut() { + if let ConfiguredSigner::RegisteredSigner(signer) = signer { + signer.cleanup_stale_data(current_reward_cycle); + } + } + } + // Check if we need to refresh the signers: // need to refresh the current signer if we are not configured for the current reward cycle // need to refresh the next signer if we're not configured for the next reward cycle, and we're in the prepare phase diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index f30c0124c1..deb2136e99 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -160,12 +160,20 @@ pub struct BlockInfo { pub state: BlockState, /// Consumed processing time in milliseconds to validate this block pub validation_time_ms: Option, + /// Wether the block is a tenure change block + pub tenure_change: bool, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, } impl From for BlockInfo { fn from(value: BlockProposal) -> Self { + let tenure_change = value + .block + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false); Self { block: value.block, burn_block_height: value.burn_height, @@ -179,6 +187,7 @@ impl From for BlockInfo { ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, validation_time_ms: None, + tenure_change, } } } @@ -328,6 +337,11 @@ static CREATE_INDEXES_3: &str = r#" CREATE INDEX IF NOT EXISTS block_rejection_signer_addrs_on_block_signature_hash ON block_rejection_signer_addrs(signer_signature_hash); "#; +static CREATE_INDEXES_4: &str = r#" +CREATE INDEX IF NOT EXISTS tenure_blocks_on_consensus_hash ON tenure_blocks(consensus_hash); +CREATE INDEX IF NOT EXISTS tenure_blocks_on_reward_cycle ON tenure_blocks(reward_cycle); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ 
-372,7 +386,7 @@ CREATE TABLE IF NOT EXISTS block_signatures ( -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, -- and stacks forks. signer_signature_hash TEXT NOT NULL, - -- signtaure itself + -- signature itself signature TEXT NOT NULL, PRIMARY KEY (signature) ) STRICT;"#; @@ -389,6 +403,40 @@ CREATE TABLE IF NOT EXISTS block_rejection_signer_addrs ( PRIMARY KEY (signer_addr) ) STRICT;"#; +// A lighter blocks table to aid in calculating tenure processing times +// Will only track the most recent tenure blocks +static CREATE_TENURE_BLOCKS_TABLE: &str = r#" +CREATE TABLE IF NOT EXISTS tenure_blocks ( + signer_signature_hash TEXT NOT NULL PRIMARY KEY, + reward_cycle INTEGER NOT NULL, + consensus_hash TEXT NOT NULL, + proposed_time INTEGER NOT NULL, + validation_time_ms INTEGER NOT NULL, + stacks_height INTEGER NOT NULL, + tenure_change INTEGER NOT NULL +) STRICT;"#; + +static MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS: &str = r#" + INSERT INTO tenure_blocks ( + signer_signature_hash, + reward_cycle, + consensus_hash, + proposed_time, + validation_time_ms, + stacks_height, + tenure_change +) +SELECT + signer_signature_hash, + reward_cycle, + consensus_hash, + json_extract(block_info, '$.proposed_time') AS proposed_time, + COALESCE(json_extract(block_info, '$.validation_time_ms'), 0) AS validation_time_ms, + stacks_height, + json_extract(block_info, '$.tenure_change') AS tenure_change +FROM blocks +WHERE json_extract(block_info, '$.state') = 'GloballyAccepted';"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -425,9 +473,16 @@ static SCHEMA_3: &[&str] = &[ "INSERT INTO db_config (version) VALUES (3);", ]; +static SCHEMA_4: &[&str] = &[ + CREATE_TENURE_BLOCKS_TABLE, + CREATE_INDEXES_4, + MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS, + "INSERT INTO db_config (version) VALUES (4);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. 
- pub const SCHEMA_VERSION: u32 = 3; + pub const SCHEMA_VERSION: u32 = 4; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -447,7 +502,7 @@ impl SignerDb { return Ok(0); } let result = conn - .query_row("SELECT version FROM db_config LIMIT 1", [], |row| { + .query_row("SELECT MAX(version) FROM db_config LIMIT 1", [], |row| { row.get(0) }) .optional(); @@ -499,6 +554,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 3 to schema 4 + fn schema_4_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 4 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_4.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). @@ -510,7 +579,8 @@ impl SignerDb { 0 => Self::schema_1_migration(&sql_tx)?, 1 => Self::schema_2_migration(&sql_tx)?, 2 => Self::schema_3_migration(&sql_tx)?, - 3 => break, + 3 => Self::schema_4_migration(&sql_tx)?, + 4 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. 
Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -682,18 +752,28 @@ impl SignerDb { "broadcasted" => ?broadcasted, "vote" => vote ); - self.db - .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", - params![ - u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, - signed_over, - &broadcasted, - u64_to_sql(block_info.block.header.chain_length)?, - block_info.block.header.consensus_hash.to_hex(), - ], - )?; - + let sql_tx = tx_begin_immediate(&mut self.db)?; + sql_tx.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", params![ + u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, + signed_over, + &broadcasted, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.block.header.consensus_hash.to_hex(), + ])?; + + if block_info.state == BlockState::GloballyAccepted { + // We only insert globally accepted blocks per consensus hash into our reduced table for easy processing time calculations + sql_tx.execute("INSERT OR REPLACE INTO tenure_blocks (signer_signature_hash, reward_cycle, consensus_hash, proposed_time, validation_time_ms, stacks_height, tenure_change) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", params![ + hash.to_string(), + u64_to_sql(block_info.reward_cycle)?, + block_info.block.header.consensus_hash.to_hex(), + u64_to_sql(block_info.proposed_time)?, + u64_to_sql(block_info.validation_time_ms.unwrap_or(0))?, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.tenure_change + ])?; + } + sql_tx.commit()?; Ok(()) } @@ -830,60 +910,53 @@ impl SignerDb { )) } - /// Return the all globally accepted 
block in a tenure (identified by its consensus hash) in stacks height descending order - fn get_globally_accepted_blocks( - &self, - tenure: &ConsensusHash, - ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC"; - let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; - let result: Vec = query_rows(&self.db, query, args)?; - result - .iter() - .map(|info| serde_json::from_str(info).map_err(DBError::from)) - .collect() + /// Cleanup stale data by removing anything equal to or older than the provided reward cycle + pub fn cleanup_stale_data(&mut self, reward_cycle: u64) -> Result<(), DBError> { + self.db.execute( + "DELETE FROM tenure_blocks WHERE reward_cycle <= ?", + params![u64_to_sql(reward_cycle)?], + )?; + Ok(()) } - /// Compute the tenure extend timestamp based on the tenure start and already consumed idle time of the - /// globally accepted blocks of the provided tenure (identified by the consensus hash) - pub fn get_tenure_extend_timestamp( - &self, - tenure_idle_timeout: Duration, - consensus_hash: &ConsensusHash, - ) -> u64 { - // We do not know our tenure start timestamp until we find the last processed tenure change transaction for the given consensus hash. - // We may not even have it in our database, in which case, we should use the oldest known block in the tenure. - // If we have no blocks known for this tenure, we will assume it has only JUST started and calculate - // our tenure extend timestamp based on the epoch time in secs. 
- let mut tenure_start_timestamp = None; - let mut tenure_process_time_ms = 0_u64; - // Note that the globally accepted blocks are already returned in descending order of stacks height, therefore by newest block to oldest block - for block_info in self - .get_globally_accepted_blocks(consensus_hash) - .unwrap_or_default() - .iter() - { - // Always use the oldest block as our tenure start timestamp - tenure_start_timestamp = Some(block_info.proposed_time); - - tenure_process_time_ms = - tenure_process_time_ms.saturating_add(block_info.validation_time_ms.unwrap_or(0)); - - if block_info - .block - .txs - .first() - .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) - .unwrap_or(false) - { - // Tenure change found. No more blocks should count towards this tenure's processing time. + /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (idenfitied by consensus_hash). + fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> { + let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM tenure_blocks WHERE consensus_hash = ?1 ORDER BY stacks_height DESC"; + let args = params![tenure]; + let mut stmt = self.db.prepare(query)?; + let rows = stmt.query_map(args, |row| { + let tenure_change_block: u64 = row.get(0)?; + let proposed_time: u64 = row.get(1)?; + let validation_time_ms: u64 = row.get(2)?; + Ok((tenure_change_block > 0, proposed_time, validation_time_ms)) + })?; + let mut tenure_processing_time_ms = 0_u64; + let mut tenure_start_time = None; + for row in rows { + let (tenure_change_block, proposed_time, validation_time_ms) = row?; + tenure_processing_time_ms = + tenure_processing_time_ms.saturating_add(validation_time_ms); + tenure_start_time = Some(proposed_time); + if tenure_change_block { break; } } + Ok(( + tenure_start_time.unwrap_or(get_epoch_time_secs()), + tenure_processing_time_ms, + )) + } - tenure_start_timestamp - 
.unwrap_or(get_epoch_time_secs()) - .saturating_add(tenure_idle_timeout.as_secs()) + /// Calculate the tenure extend timestamp + pub fn calculate_tenure_extend_timestamp( + &self, + tenure_idle_timeout: Duration, + tenure: &ConsensusHash, + ) -> u64 { + let tenure_idle_timeout_secs = tenure_idle_timeout.as_secs(); + let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(tenure).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); + tenure_start_time + .saturating_add(tenure_idle_timeout_secs) .saturating_sub(tenure_process_time_ms / 1000) } } @@ -1381,60 +1454,275 @@ mod tests { .is_none()); } - #[test] - fn get_all_globally_accepted_blocks() { - let db_path = tmp_db_path(); - let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + fn generate_tenure_blocks() -> Vec { let consensus_hash_1 = ConsensusHash([0x01; 20]); let consensus_hash_2 = ConsensusHash([0x02; 20]); - let consensus_hash_3 = ConsensusHash([0x03; 20]); let (mut block_info_1, _block_proposal) = create_block_override(|b| { b.block.header.consensus_hash = consensus_hash_1; b.block.header.miner_signature = MessageSignature([0x01; 65]); b.block.header.chain_length = 1; b.burn_height = 1; + b.reward_cycle = 1; }); + block_info_1.state = BlockState::GloballyAccepted; + block_info_1.tenure_change = true; + block_info_1.validation_time_ms = Some(1000); + block_info_1.proposed_time = get_epoch_time_secs() + 500; + let (mut block_info_2, _block_proposal) = create_block_override(|b| { b.block.header.consensus_hash = consensus_hash_1; b.block.header.miner_signature = MessageSignature([0x02; 65]); b.block.header.chain_length = 2; b.burn_height = 2; + b.reward_cycle = 1; }); + block_info_2.state = BlockState::GloballyAccepted; + block_info_2.validation_time_ms = Some(2000); + block_info_2.proposed_time = block_info_1.proposed_time + 5; + let (mut 
block_info_3, _block_proposal) = create_block_override(|b| { b.block.header.consensus_hash = consensus_hash_1; b.block.header.miner_signature = MessageSignature([0x03; 65]); b.block.header.chain_length = 3; - b.burn_height = 3; + b.burn_height = 2; + b.reward_cycle = 2; }); + block_info_3.state = BlockState::GloballyAccepted; + block_info_3.tenure_change = true; + block_info_3.validation_time_ms = Some(5000); + block_info_3.proposed_time = block_info_1.proposed_time + 10; + let (mut block_info_4, _block_proposal) = create_block_override(|b| { - b.block.header.consensus_hash = consensus_hash_2; - b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x04; 65]); b.block.header.chain_length = 3; - b.burn_height = 4; + b.burn_height = 2; + b.reward_cycle = 2; }); - block_info_1.mark_globally_accepted().unwrap(); - block_info_2.mark_locally_accepted(false).unwrap(); - block_info_3.mark_globally_accepted().unwrap(); - block_info_4.mark_globally_accepted().unwrap(); + block_info_4.state = BlockState::LocallyAccepted; + block_info_4.validation_time_ms = Some(9000); + block_info_4.proposed_time = block_info_1.proposed_time + 15; - db.insert_block(&block_info_1).unwrap(); - db.insert_block(&block_info_2).unwrap(); - db.insert_block(&block_info_3).unwrap(); - db.insert_block(&block_info_4).unwrap(); + let (mut block_info_5, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x05; 65]); + b.block.header.chain_length = 4; + b.burn_height = 3; + b.reward_cycle = 3; + }); + block_info_5.state = BlockState::GloballyAccepted; + block_info_5.validation_time_ms = Some(20000); + block_info_5.proposed_time = block_info_1.proposed_time + 20; + + vec![ + block_info_1, + block_info_2, + block_info_3, + block_info_4, + block_info_5, + ] + } + + #[test] + fn tenure_times() { + 
let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let block_infos = generate_tenure_blocks(); + let consensus_hash_1 = block_infos[0].block.header.consensus_hash; + let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; + let consensus_hash_3 = ConsensusHash([0x03; 20]); + + db.insert_block(&block_infos[0]).unwrap(); + db.insert_block(&block_infos[1]).unwrap(); // Verify tenure consensus_hash_1 - let block_infos = db.get_globally_accepted_blocks(&consensus_hash_1).unwrap(); - assert_eq!(block_infos, vec![block_info_3, block_info_1]); + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, block_infos[0].proposed_time); + assert_eq!(processing_time, 3000); + + db.insert_block(&block_infos[2]).unwrap(); + db.insert_block(&block_infos[3]).unwrap(); + + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, block_infos[2].proposed_time); + assert_eq!(processing_time, 5000); + + db.insert_block(&block_infos[4]).unwrap(); // Verify tenure consensus_hash_2 - let block_infos = db.get_globally_accepted_blocks(&consensus_hash_2).unwrap(); - assert_eq!(block_infos.len(), 1); - assert_eq!(block_infos, vec![block_info_4]); + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_2).unwrap(); + assert_eq!(start_time, block_infos[4].proposed_time); + assert_eq!(processing_time, 20000); + + // Verify tenure consensus_hash_3 (unknown hash) + let (start_time, validation_time) = db.get_tenure_times(&consensus_hash_3).unwrap(); + assert!(start_time < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); + assert_eq!(validation_time, 0); + } - // Verify tenure consensus_hash_3 - assert!(db - .get_globally_accepted_blocks(&consensus_hash_3) - .unwrap() - .is_empty()); + #[test] + fn 
tenure_extend_timestamp() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let block_infos = generate_tenure_blocks(); + let consensus_hash_1 = block_infos[0].block.header.consensus_hash; + let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; + let consensus_hash_3 = ConsensusHash([0x03; 20]); + + db.insert_block(&block_infos[0]).unwrap(); + db.insert_block(&block_infos[1]).unwrap(); + + let tenure_idle_timeout = Duration::from_secs(10); + // Verify tenure consensus_hash_1 + let timestamp_hash_1_before = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_1); + assert_eq!( + timestamp_hash_1_before, + block_infos[0] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_sub(3) + ); + + db.insert_block(&block_infos[2]).unwrap(); + db.insert_block(&block_infos[3]).unwrap(); + + let timestamp_hash_1_after = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_1); + assert_eq!( + timestamp_hash_1_after, + block_infos[2] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_sub(5) + ); + + db.insert_block(&block_infos[4]).unwrap(); + + // Verify tenure consensus_hash_2 + let timestamp_hash_2 = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_2); + assert_eq!( + timestamp_hash_2, + block_infos[4] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_sub(20) + ); + + // Verify tenure consensus_hash_3 (unknown hash) + let timestamp_hash_3 = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_3); + assert!( + timestamp_hash_3.saturating_sub(tenure_idle_timeout.as_secs()) + < block_infos[0].proposed_time + ); + } + + #[test] + fn tenure_blocks_migration() { + let db_path = tmp_db_path(); + let db = SignerDb::new(db_path).expect("Failed to create signer db"); + let block_infos = generate_tenure_blocks(); + let 
consensus_hash_1 = block_infos[0].block.header.consensus_hash; + let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; + let consensus_hash_3 = ConsensusHash([0x03; 20]); + + // Manually insert to make sure the migration works as expected! It should ignore any blocks that are locally accepted + let insert_sql = "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"; + + for block_info in block_infos.iter() { + let block_json = + serde_json::to_string(&block_info).expect("Unable to serialize block info"); + db.db + .execute( + insert_sql, + params![ + u64_to_sql(block_info.reward_cycle).unwrap(), + u64_to_sql(block_info.burn_block_height).unwrap(), + block_info.signer_signature_hash().to_string(), + block_json, + block_info.signed_over, + Some(true), + u64_to_sql(block_info.block.header.chain_length).unwrap(), + block_info.block.header.consensus_hash.to_hex(), + ], + ) + .unwrap(); + } + + let (tenure_start, validation_time_ms) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert!(tenure_start < block_infos[0].proposed_time); + assert_eq!(validation_time_ms, 0); + let (tenure_start, validation_time_ms) = db.get_tenure_times(&consensus_hash_2).unwrap(); + assert!(tenure_start < block_infos[0].proposed_time); + assert_eq!(validation_time_ms, 0); + let (tenure_start, validation_time_ms) = db.get_tenure_times(&consensus_hash_3).unwrap(); + assert!(tenure_start < block_infos[0].proposed_time); + assert_eq!(validation_time_ms, 0); + + db.db + .execute_batch(MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS) + .unwrap(); + + // Verify tenure consensus_hash_1 + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, block_infos[2].proposed_time); + assert_eq!(processing_time, 5000); + + // Verify tenure consensus_hash_2 + let (start_time, 
processing_time) = db.get_tenure_times(&consensus_hash_2).unwrap(); + assert_eq!(start_time, block_infos[4].proposed_time); + assert_eq!(processing_time, 20000); + + // Verify tenure consensus_hash_3 (uknown hash) + let (start_time, validation_time) = db.get_tenure_times(&consensus_hash_3).unwrap(); + assert!(start_time < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); + assert_eq!(validation_time, 0); + } + + #[test] + fn cleanup() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let block_infos = generate_tenure_blocks(); + let consensus_hash_1 = block_infos[0].block.header.consensus_hash; + let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; + + for block_info in &block_infos { + db.insert_block(block_info).unwrap(); + } + + // Verify this does nothing. All data is still there. + db.cleanup_stale_data(block_infos[0].reward_cycle - 1) + .unwrap(); + + // Verify tenure consensus_hash_1 + let (start_time_1, processing_time_1) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time_1, block_infos[2].proposed_time); + assert_eq!(processing_time_1, 5000); + + // Verify tenure consensus_hash_2 + let (start_time_2, processing_time_2) = db.get_tenure_times(&consensus_hash_2).unwrap(); + assert_eq!(start_time_2, block_infos[4].proposed_time); + assert_eq!(processing_time_2, 20000); + + // Verify this deletes some data + db.cleanup_stale_data(block_infos[2].reward_cycle).unwrap(); + + // Verify tenure consensus_hash_1 AFTER deletion has updated correctly. 
+ let (start_time_1_after, processing_time_1_after) = + db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_ne!(start_time_1_after, start_time_1); + assert_ne!(processing_time_1_after, processing_time_1); + assert!(start_time_1_after < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); + assert_eq!(processing_time_1_after, 0); + + // Verify tenure consensus_hash_2 AFTER deletion has not updated. + let (start_time_2_after, processing_time_2_after) = + db.get_tenure_times(&consensus_hash_2).unwrap(); + assert_eq!(start_time_2_after, start_time_2); + assert_eq!(processing_time_2_after, processing_time_2); } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ea4a3d812d..f40025df37 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -260,6 +260,14 @@ impl SignerTrait for Signer { true }) } + + fn cleanup_stale_data(&mut self, current_reward_cycle: u64) { + // We currently delete only data older than 2 reward cycles ago. 
+ let _ = self + .signer_db + .cleanup_stale_data(current_reward_cycle.saturating_sub(2)) + .inspect_err(|e| error!("self: Failed to cleanup stale signerdb data: {e:?}")); + } } impl From for Signer { @@ -294,7 +302,7 @@ impl Signer { /// Determine this signers response to a proposed block /// Returns a BlockResponse if we have already validated the block /// Returns None otherwise - fn determine_response(&self, block_info: &BlockInfo) -> Option { + fn determine_response(&mut self, block_info: &BlockInfo) -> Option { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); @@ -305,7 +313,7 @@ impl Signer { BlockResponse::accepted( block_info.signer_signature_hash(), signature, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_info.block.header.consensus_hash, ), @@ -317,7 +325,7 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_info.block.header.consensus_hash, ), @@ -423,7 +431,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), @@ -441,7 +449,7 @@ impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), @@ -461,7 +469,7 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( 
self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), @@ -615,7 +623,7 @@ impl Signer { let accepted = BlockAccepted::new( block_info.signer_signature_hash(), signature, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_info.block.header.consensus_hash, ), @@ -673,7 +681,7 @@ impl Signer { block_validate_reject.clone(), &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_info.block.header.consensus_hash, ), @@ -774,7 +782,7 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), @@ -1160,7 +1168,7 @@ impl Signer { RejectCode::TestingDirective, &self.private_key, self.mainnet, - self.signer_db.get_tenure_extend_timestamp( + self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, &block_proposal.block.header.consensus_hash, ), From f3051470cc4a4af5259713aa669379aa140fca47 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 21 Nov 2024 15:21:16 -0800 Subject: [PATCH 041/115] Only migrate the most recent reward cycle data (from the current and previous) into tenure_blocks Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index deb2136e99..0b9f80f0e8 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -435,7 +435,10 @@ SELECT stacks_height, json_extract(block_info, '$.tenure_change') AS tenure_change FROM blocks -WHERE json_extract(block_info, '$.state') = 
'GloballyAccepted';"#; +WHERE json_extract(block_info, '$.state') = 'GloballyAccepted' + AND reward_cycle + 2 > ( + SELECT MAX(reward_cycle) FROM blocks + );"#; static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, @@ -1624,12 +1627,24 @@ mod tests { fn tenure_blocks_migration() { let db_path = tmp_db_path(); let db = SignerDb::new(db_path).expect("Failed to create signer db"); - let block_infos = generate_tenure_blocks(); + let mut block_infos = generate_tenure_blocks(); let consensus_hash_1 = block_infos[0].block.header.consensus_hash; let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; let consensus_hash_3 = ConsensusHash([0x03; 20]); + // Let's try to migrate over something that is older than the max reward cycle in our list + let (mut old_block_info, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = block_infos[4].block.header.consensus_hash; + b.block.header.miner_signature = MessageSignature([0x06; 65]); + b.block.header.chain_length = 5; + b.burn_height = 3; + b.reward_cycle = block_infos[4].reward_cycle - 2; + }); + old_block_info.state = BlockState::GloballyAccepted; + old_block_info.validation_time_ms = Some(20000); + old_block_info.proposed_time = block_infos[4].proposed_time + 5; + block_infos.push(old_block_info); - // Manually insert to make sure the migration works as expected! It should ignore any blocks that are locally accepted + // Manually insert to make sure the migration works as expected! 
It should ignore any blocks that are locally accepted or are more than 2 reward cycles older than the max reward cycle let insert_sql = "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"; for block_info in block_infos.iter() { From 71dd8560799789ceb1943112f8dd1d7c4e5b14eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 21 Nov 2024 15:36:23 -0800 Subject: [PATCH 042/115] Make tenure_blocks insertion a trigger for blocks Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 122 +++++++++------------------------- 1 file changed, 31 insertions(+), 91 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 0b9f80f0e8..d6b70cc668 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -416,8 +416,10 @@ CREATE TABLE IF NOT EXISTS tenure_blocks ( tenure_change INTEGER NOT NULL ) STRICT;"#; +// Migration logic necessary to move from blocks into tenure_blocks table +// It will only migrate globally accepted blocks that are less than 2 reward cycles old. 
static MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS: &str = r#" - INSERT INTO tenure_blocks ( +INSERT INTO tenure_blocks ( signer_signature_hash, reward_cycle, consensus_hash, @@ -440,6 +442,32 @@ WHERE json_extract(block_info, '$.state') = 'GloballyAccepted' SELECT MAX(reward_cycle) FROM blocks );"#; +static CREATE_TENURE_BLOCKS_ON_BLOCKS_TRIGGER: &str = r#" +CREATE TRIGGER insert_into_tenure_blocks +AFTER INSERT ON blocks +FOR EACH ROW +WHEN json_extract(NEW.block_info, '$.state') = 'GloballyAccepted' +BEGIN + INSERT OR REPLACE INTO tenure_blocks ( + signer_signature_hash, + reward_cycle, + consensus_hash, + proposed_time, + validation_time_ms, + stacks_height, + tenure_change + ) + VALUES ( + NEW.signer_signature_hash, + NEW.reward_cycle, + NEW.consensus_hash, + json_extract(NEW.block_info, '$.proposed_time'), + COALESCE(json_extract(NEW.block_info, '$.validation_time_ms'), 0), + NEW.stacks_height, + json_extract(NEW.block_info, '$.tenure_change') + ); +END;"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -478,6 +506,7 @@ static SCHEMA_3: &[&str] = &[ static SCHEMA_4: &[&str] = &[ CREATE_TENURE_BLOCKS_TABLE, + CREATE_TENURE_BLOCKS_ON_BLOCKS_TRIGGER, CREATE_INDEXES_4, MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS, "INSERT INTO db_config (version) VALUES (4);", @@ -755,28 +784,13 @@ impl SignerDb { "broadcasted" => ?broadcasted, "vote" => vote ); - let sql_tx = tx_begin_immediate(&mut self.db)?; - sql_tx.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", params![ + self.db.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", params![ u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, 
hash.to_string(), block_json, signed_over, &broadcasted, u64_to_sql(block_info.block.header.chain_length)?, block_info.block.header.consensus_hash.to_hex(), ])?; - - if block_info.state == BlockState::GloballyAccepted { - // We only insert globally accepted blocks per consensus hash into our reduced table for easy processing time calculations - sql_tx.execute("INSERT OR REPLACE INTO tenure_blocks (signer_signature_hash, reward_cycle, consensus_hash, proposed_time, validation_time_ms, stacks_height, tenure_change) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", params![ - hash.to_string(), - u64_to_sql(block_info.reward_cycle)?, - block_info.block.header.consensus_hash.to_hex(), - u64_to_sql(block_info.proposed_time)?, - u64_to_sql(block_info.validation_time_ms.unwrap_or(0))?, - u64_to_sql(block_info.block.header.chain_length)?, - block_info.tenure_change - ])?; - } - sql_tx.commit()?; Ok(()) } @@ -1623,80 +1637,6 @@ mod tests { ); } - #[test] - fn tenure_blocks_migration() { - let db_path = tmp_db_path(); - let db = SignerDb::new(db_path).expect("Failed to create signer db"); - let mut block_infos = generate_tenure_blocks(); - let consensus_hash_1 = block_infos[0].block.header.consensus_hash; - let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; - let consensus_hash_3 = ConsensusHash([0x03; 20]); - // Let's try to migrate over something that is older than the max reward cycle in our list - let (mut old_block_info, _block_proposal) = create_block_override(|b| { - b.block.header.consensus_hash = block_infos[4].block.header.consensus_hash; - b.block.header.miner_signature = MessageSignature([0x06; 65]); - b.block.header.chain_length = 5; - b.burn_height = 3; - b.reward_cycle = block_infos[4].reward_cycle - 2; - }); - old_block_info.state = BlockState::GloballyAccepted; - old_block_info.validation_time_ms = Some(20000); - old_block_info.proposed_time = block_infos[4].proposed_time + 5; - block_infos.push(old_block_info); - - // Manually insert to make 
sure the migration works as expected! It should ignore any blocks that are locally accepted or are more than 2 reward cycles older than the max reward cycle - let insert_sql = "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"; - - for block_info in block_infos.iter() { - let block_json = - serde_json::to_string(&block_info).expect("Unable to serialize block info"); - db.db - .execute( - insert_sql, - params![ - u64_to_sql(block_info.reward_cycle).unwrap(), - u64_to_sql(block_info.burn_block_height).unwrap(), - block_info.signer_signature_hash().to_string(), - block_json, - block_info.signed_over, - Some(true), - u64_to_sql(block_info.block.header.chain_length).unwrap(), - block_info.block.header.consensus_hash.to_hex(), - ], - ) - .unwrap(); - } - - let (tenure_start, validation_time_ms) = db.get_tenure_times(&consensus_hash_1).unwrap(); - assert!(tenure_start < block_infos[0].proposed_time); - assert_eq!(validation_time_ms, 0); - let (tenure_start, validation_time_ms) = db.get_tenure_times(&consensus_hash_2).unwrap(); - assert!(tenure_start < block_infos[0].proposed_time); - assert_eq!(validation_time_ms, 0); - let (tenure_start, validation_time_ms) = db.get_tenure_times(&consensus_hash_3).unwrap(); - assert!(tenure_start < block_infos[0].proposed_time); - assert_eq!(validation_time_ms, 0); - - db.db - .execute_batch(MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS) - .unwrap(); - - // Verify tenure consensus_hash_1 - let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); - assert_eq!(start_time, block_infos[2].proposed_time); - assert_eq!(processing_time, 5000); - - // Verify tenure consensus_hash_2 - let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_2).unwrap(); - assert_eq!(start_time, block_infos[4].proposed_time); - assert_eq!(processing_time, 20000); - - // 
Verify tenure consensus_hash_3 (uknown hash) - let (start_time, validation_time) = db.get_tenure_times(&consensus_hash_3).unwrap(); - assert!(start_time < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); - assert_eq!(validation_time, 0); - } - #[test] fn cleanup() { let db_path = tmp_db_path(); From 4896d70cd651f62a06e13604f224c4987f96fd30 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 21 Nov 2024 17:08:40 -0800 Subject: [PATCH 043/115] fix: max possible size for deserializing block response data --- libsigner/src/v0/messages.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 0c88b73de3..2a7d55a5ab 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -73,6 +73,9 @@ use crate::{ SignerMessage as SignerMessageTrait, VERSION_STRING, }; +/// Maximum size of the [BlockResponseData] serialized bytes +pub const BLOCK_RESPONSE_DATA_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB + define_u8_enum!( /// Enum representing the stackerdb message identifier: this is /// the contract index in the signers contracts (i.e., X in signers-0-X) @@ -761,7 +764,8 @@ pub struct BlockResponseData { pub version: u8, /// The block response data pub tenure_extend_timestamp: u64, - /// The unknown block response data bytes + /// When deserializing future versions, + /// there may be extra bytes that we don't know about pub unknown_bytes: Vec, } @@ -800,9 +804,7 @@ impl StacksMessageCodec for BlockResponseData { write_next(fd, &self.version)?; let mut inner_bytes = vec![]; self.inner_consensus_serialize(&mut inner_bytes)?; - let bytes_len = inner_bytes.len() as u32; - write_next(fd, &bytes_len)?; - fd.write_all(&inner_bytes).map_err(CodecError::WriteError)?; + write_next(fd, &inner_bytes)?; Ok(()) } @@ -814,7 +816,7 @@ impl StacksMessageCodec for BlockResponseData { let 
Ok(version) = read_next(fd) else { return Ok(Self::empty()); }; - let inner_bytes = read_next::, _>(fd)?; + let inner_bytes: Vec = read_next_at_most(fd, BLOCK_RESPONSE_DATA_MAX_SIZE)?; let mut inner_reader = inner_bytes.as_slice(); let tenure_extend_timestamp = read_next(&mut inner_reader)?; Ok(Self { From 38e3ef0b62da1c963e89f01c5aab4cede1ee99f0 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 21 Nov 2024 17:09:03 -0800 Subject: [PATCH 044/115] feat: unit test using older version of BlockAccepted to verify deserialization --- libsigner/src/v0/messages.rs | 56 ++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 2a7d55a5ab..fd639a91f4 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -1515,4 +1515,60 @@ mod test { .expect("Failed to deserialize BlockResponseData"); assert_eq!(deserialized_data, deserialized_data_2); } + + /// Test using an older version of BlockAccepted to verify that we can deserialize + /// future versions + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + pub struct BlockAcceptedOld { + /// The signer signature hash of the block that was accepted + pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the acceptance + pub signature: MessageSignature, + /// Signer message metadata + pub metadata: SignerMessageMetadata, + } + + impl StacksMessageCodec for BlockAcceptedOld { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signer_signature_hash = read_next::(fd)?; + let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; + Ok(Self { + signer_signature_hash, + signature, + metadata, + }) + } + } + + #[test] + fn block_accepted_old_version_can_deserialize() 
{ + let block_accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum::from_hex("11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19").unwrap(), + metadata: SignerMessageMetadata::default(), + signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::new(u64::MAX) + }; + + let mut bytes = vec![]; + block_accepted.consensus_serialize(&mut bytes).unwrap(); + + // Ensure the old version can deserialize + let block_accepted_old = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockAcceptedOld"); + assert_eq!( + block_accepted.signer_signature_hash, + block_accepted_old.signer_signature_hash + ); + assert_eq!(block_accepted.signature, block_accepted_old.signature); + assert_eq!(block_accepted.metadata, block_accepted_old.metadata); + } } From c8a0f95baa576c3e4f98ac780f5782be49f05e9a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 22 Nov 2024 10:12:13 -0800 Subject: [PATCH 045/115] CRC: fix broken test build and add is_zero helper fn and check for execution cost of block validation Signed-off-by: Jacinta Ferrant --- clarity/src/vm/costs/mod.rs | 8 +++++++ stacks-signer/src/v0/signer.rs | 21 +++++-------------- .../src/tests/nakamoto_integrations.rs | 12 +++++++++++ 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index b3ee746fcf..897927bc6d 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -1327,6 +1327,14 @@ impl ExecutionCost { read_length: first.read_length.max(second.read_length), } } + + pub fn is_zero(&self) -> bool { + self.write_length == 0 + && self.write_count == 0 + && self.read_length == 0 + && self.read_count == 0 + && self.runtime == 0 + } } // ONLY WORKS IF INPUT IS u64 diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs 
index f40025df37..05174e258e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -18,8 +18,6 @@ use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use blockstack_lib::chainstate::stacks::address::StacksAddressExtensions; -use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -596,21 +594,12 @@ impl Signer { } block_info.signed_self.get_or_insert(get_epoch_time_secs()); } - // Record the block validation time - let non_bootcode_contract_call_block = block_info.block.txs.iter().any(|tx| { - // We only care about blocks that contain a non bootcode contract call - match &tx.payload { - TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), - TransactionPayload::SmartContract(..) => true, - _ => false, - } - }); - if non_bootcode_contract_call_block { - block_info.validation_time_ms = Some(block_validate_ok.validation_time_ms); + // Record the block validation time but do not consider stx transfers or boot contract calls + if block_validate_ok.cost.is_zero() { + 0 } else { - // Ignore purely boot code and stx transfers when calculating the processing/validation time - block_info.validation_time_ms = Some(0); - } + block_validate_ok.validation_time_ms + }; let signature = self .private_key diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 936598fd06..709a5d62c7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6460,6 +6460,12 @@ fn signer_chainstate() { ext: ExtraBlockInfo::None, state: BlockState::Unprocessed, validation_time_ms: None, + tenure_change: proposal + .0 + .txs + .first() + .map(|tx| matches!(tx.payload, 
TransactionPayload::TenureChange(_))) + .unwrap_or(false), }) .unwrap(); @@ -6550,6 +6556,12 @@ fn signer_chainstate() { ext: ExtraBlockInfo::None, state: BlockState::GloballyAccepted, validation_time_ms: Some(1000), + tenure_change: proposal_interim + .0 + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false), }) .unwrap(); From 6a848e3143de57e5c6b584eea10e11137050f035 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 22 Nov 2024 12:39:10 -0800 Subject: [PATCH 046/115] chore: add wip tests to bitcoin-tests --- .github/workflows/bitcoin-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 12094e88ee..92ff5eb8fd 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -122,6 +122,8 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend + - tests::signer::v0::tenure_extend_after_idle + - tests::signer::v0::stx_transfers_dont_effect_idle_timeout - tests::signer::v0::multiple_miners_with_custom_chain_id - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition From 04270d7522a86977148e9edb522952cf5dbe57cf Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 22 Nov 2024 15:43:28 -0500 Subject: [PATCH 047/115] feat: add timeout and additional checks in `get_block_status` --- .../src/nakamoto_node/signer_coordinator.rs | 174 +++++++++++------- .../src/nakamoto_node/signerdb_listener.rs | 26 ++- 2 files changed, 129 insertions(+), 71 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f461ce93ec..5588bfaf3e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ 
-31,24 +31,28 @@ use stacks::chainstate::stacks::Error as ChainstateError; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; -use stacks::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; -use super::signerdb_listener::{SignerDBListener, TimestampInfo}; +use super::signerdb_listener::{SignerDBListener, TimestampInfo, EVENT_RECEIVER_POLL}; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; use crate::nakamoto_node::signerdb_listener::BlockStatus; use crate::neon::Counters; use crate::Config; -/// Helper function to determine if we should wait for more signatures -fn should_wait(status: Option<&BlockStatus>, weight_threshold: u32, total_weight: u32) -> bool { +/// Helper function to determine if signer threshold has been reached for a block +fn is_threshold_reached( + status: Option<&BlockStatus>, + weight_threshold: u32, + total_weight: u32, +) -> bool { match status { Some(status) => { - status.total_weight_signed < weight_threshold - && status.total_reject_weight.saturating_add(weight_threshold) <= total_weight + status.total_weight_signed >= weight_threshold + || status.total_reject_weight.saturating_add(weight_threshold) > total_weight } None => true, } @@ -268,81 +272,104 @@ impl SignerCoordinator { } } - self.get_block_status(&block.header.signer_signature_hash(), chain_state, counters) + self.get_block_status( + &block.header.signer_signature_hash(), + &block.block_id(), + chain_state, + sortdb, + burn_tip, + counters, + ) } /// Get the block status for a given block hash. /// If we have not yet received enough signatures for this block, this - /// method will block until we do. + /// method will block until we do. 
If this block shows up in the staging DB + /// before we have enough signatures, we will return the signatures from + /// there. If a new burnchain tip is detected, we will return an error. fn get_block_status( &self, - block_hash: &Sha512Trunc256Sum, + block_signer_sighash: &Sha512Trunc256Sum, + block_id: &StacksBlockId, chain_state: &mut StacksChainState, + sortdb: &SortitionDB, + burn_tip: &BlockSnapshot, counters: &Counters, ) -> Result, NakamotoNodeError> { let (lock, cvar) = &*self.blocks; let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - // TODO: integrate this check into the waiting for the condvar - // Look in the nakamoto staging db -- a block can only get stored there - // if it has enough signing weight to clear the threshold. - // if let Ok(Some((stored_block, _sz))) = chain_state - // .nakamoto_blocks_db() - // .get_nakamoto_block(&block.block_id()) - // .map_err(|e| { - // warn!( - // "Failed to query chainstate for block {}: {e:?}", - // &block.block_id() - // ); - // e - // }) - // { - // debug!("SignCoordinator: Found signatures in relayed block"); - // counters.bump_naka_signer_pushed_blocks(); - // return Ok(stored_block.header.signer_signature); - // } + loop { + let (guard, timeout_result) = cvar + .wait_timeout_while(blocks, EVENT_RECEIVER_POLL, |map| { + !is_threshold_reached( + map.get(block_signer_sighash), + self.weight_threshold, + self.total_weight, + ) + }) + .expect("FATAL: failed to wait on block status cond var"); + blocks = guard; - // if Self::check_burn_tip_changed(sortdb, burn_tip) { - // debug!("SignCoordinator: Exiting due to new burnchain tip"); - // return Err(NakamotoNodeError::BurnchainTipChanged); - // } + // If we just received a timeout, we should check if the burnchain + // tip has changed or if we received this signed block already in + // the staging db. 
+ if timeout_result.timed_out() { + // Look in the nakamoto staging db -- a block can only get stored there + // if it has enough signing weight to clear the threshold. + if let Ok(Some((stored_block, _sz))) = chain_state + .nakamoto_blocks_db() + .get_nakamoto_block(block_id) + .map_err(|e| { + warn!( + "Failed to query chainstate for block: {e:?}"; + "block_id" => %block_id, + "block_signer_sighash" => %block_signer_sighash, + ); + e + }) + { + debug!("SignCoordinator: Found signatures in relayed block"); + counters.bump_naka_signer_pushed_blocks(); + return Ok(stored_block.header.signer_signature); + } - blocks = cvar - .wait_while(blocks, |map| { - should_wait( - map.get(block_hash), - self.weight_threshold, - self.total_weight, - ) - }) - .expect("FATAL: failed to wait on block status"); - let block_status = blocks.get(block_hash).cloned().ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Block unexpectedly missing from map".into(), - ) - })?; - if block_status - .total_reject_weight - .saturating_add(self.weight_threshold) - > self.total_weight - { - info!( - "{}/{} signers vote to reject block", - block_status.total_reject_weight, self.total_weight; - "stacks_block_hash" => %block_hash, - ); - counters.bump_naka_rejected_blocks(); - Err(NakamotoNodeError::SignersRejected) - } else if block_status.total_weight_signed >= self.weight_threshold { - info!("Received enough signatures, block accepted"; - "stacks_block_hash" => %block_hash, - ); - Ok(block_status.gathered_signatures.values().cloned().collect()) - } else { - info!("Unblocked without reaching the threshold, likely due to an interruption"; - "stacks_block_hash" => %block_hash, - ); - Err(NakamotoNodeError::ChannelClosed) + if Self::check_burn_tip_changed(sortdb, burn_tip) { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + } + // Else, we have received enough signatures to proceed + else { + let block_status = 
blocks.get(block_signer_sighash).ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Block unexpectedly missing from map".into(), + ) + })?; + + if block_status + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + info!( + "{}/{} signers vote to reject block", + block_status.total_reject_weight, self.total_weight; + "block_signer_sighash" => %block_signer_sighash, + ); + counters.bump_naka_rejected_blocks(); + return Err(NakamotoNodeError::SignersRejected); + } else if block_status.total_weight_signed >= self.weight_threshold { + info!("Received enough signatures, block accepted"; + "block_signer_sighash" => %block_signer_sighash, + ); + return Ok(block_status.gathered_signatures.values().cloned().collect()); + } else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Unblocked without reaching the threshold".into(), + )); + } + } } } @@ -368,4 +395,17 @@ impl SignerCoordinator { // tenure. u64::MAX } + + /// Check if the tenure needs to change + fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { + let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { + info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); + true + } else { + false + } + } } diff --git a/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs index 89769b3e3e..1b70beee07 100644 --- a/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs @@ -295,6 +295,13 @@ impl SignerDBListener { // Signal to anyone waiting on this block that we have enough signatures cvar.notify_all(); } + + // Update the idle timestamp for this signer + self.update_idle_timestamp( + 
signer_pubkey, + tenure_extend_timestamp, + signer_entry.weight, + ); } SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { let (lock, cvar) = &*self.blocks; @@ -354,27 +361,38 @@ impl SignerDBListener { cvar.notify_all(); } - continue; + // Update the idle timestamp for this signer + self.update_idle_timestamp( + signer_pubkey, + rejected_data.tenure_extend_timestamp, + signer_entry.weight, + ); } SignerMessageV0::BlockProposal(_) => { debug!("Received block proposal message. Ignoring."); - continue; } SignerMessageV0::BlockPushed(_) => { debug!("Received block pushed message. Ignoring."); - continue; } SignerMessageV0::MockSignature(_) | SignerMessageV0::MockProposal(_) | SignerMessageV0::MockBlock(_) => { debug!("Received mock message. Ignoring."); - continue; } }; } } } + fn update_idle_timestamp(&self, signer_pubkey: StacksPublicKey, timestamp: u64, weight: u32) { + let mut idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock idle timestamps"); + let timestamp_info = TimestampInfo { timestamp, weight }; + idle_timestamps.insert(signer_pubkey, timestamp_info); + } + /// Do we ignore signer signatures? 
#[cfg(test)] fn fault_injection_ignore_signatures() -> bool { From f92c8198967ca806be498e14d670db9c1cdff7a4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 22 Nov 2024 15:51:23 -0500 Subject: [PATCH 048/115] chore: `SignerDBListener` -> `StackerDBListener` --- testnet/stacks-node/src/nakamoto_node.rs | 2 +- ...erdb_listener.rs => stackerdb_listener.rs} | 44 ++++++++++--------- 2 files changed, 24 insertions(+), 22 deletions(-) rename testnet/stacks-node/src/nakamoto_node/{signerdb_listener.rs => stackerdb_listener.rs} (89%) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 090170837a..9944bc16b1 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -43,7 +43,7 @@ pub mod miner; pub mod peer; pub mod relayer; pub mod signer_coordinator; -pub mod signerdb_listener; +pub mod stackerdb_listener; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; diff --git a/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs similarity index 89% rename from testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs rename to testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 1b70beee07..91f9539de1 100644 --- a/testnet/stacks-node/src/nakamoto_node/signerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -59,10 +59,10 @@ pub(crate) struct TimestampInfo { pub weight: u32, } -/// The listener for the signer database, which listens for messages from the +/// The listener for the StackerDB, which listens for messages from the /// signers and tracks the state of block signatures and idle timestamps. 
#[derive(Debug)] -pub struct SignerDBListener { +pub struct StackerDBListener { /// Channel to receive StackerDB events receiver: Receiver, /// Flag to shut the listener down @@ -86,7 +86,7 @@ pub struct SignerDBListener { pub(crate) signer_idle_timestamps: Arc>>, } -impl SignerDBListener { +impl StackerDBListener { pub fn new( stackerdb_channel: Arc>, keep_running: Arc, @@ -147,9 +147,9 @@ impl SignerDBListener { }) } - /// Run the signer database listener. + /// Run the StackerDB listener. pub fn run(&mut self) -> Result<(), NakamotoNodeError> { - info!("SignerDBListener: Starting up"); + info!("StackerDBListener: Starting up"); loop { let event = match self.receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, @@ -157,7 +157,7 @@ impl SignerDBListener { continue; } Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - warn!("SignerDBListener: StackerDB event receiver disconnected"); + warn!("StackerDBListener: StackerDB event receiver disconnected"); return Err(NakamotoNodeError::SigningCoordinatorFailure( "StackerDB event receiver disconnected".into(), )); @@ -166,7 +166,7 @@ impl SignerDBListener { // was the miner asked to stop? if !self.keep_running.load(Ordering::SeqCst) { - info!("SignerDBListener: received miner exit request. Aborting"); + info!("StackerDBListener: received miner exit request. Aborting"); return Err(NakamotoNodeError::ChannelClosed); } @@ -175,23 +175,25 @@ impl SignerDBListener { event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); if !is_signer_event { - debug!("SignerDBListener: Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + debug!("StackerDBListener: Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); continue; } let modified_slots = &event.modified_slots.clone(); let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { - warn!("SignerDBListener: Failure parsing StackerDB event into signer event. 
Ignoring message."; "err" => ?e); + warn!("StackerDBListener: Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); }) else { continue; }; let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("SignerDBListener: Received signer event other than a signer message. Ignoring."); + debug!("StackerDBListener: Received signer event other than a signer message. Ignoring."); continue; }; if signer_set != self.signer_set { - debug!("SignerDBListener: Received signer event for other reward cycle. Ignoring."); + debug!( + "StackerDBListener: Received signer event for other reward cycle. Ignoring." + ); continue; }; let slot_ids = modified_slots @@ -199,7 +201,7 @@ impl SignerDBListener { .map(|chunk| chunk.slot_id) .collect::>(); - debug!("SignerDBListener: Received messages from signers"; + debug!("StackerDBListener: Received messages from signers"; "count" => messages.len(), "slot_ids" => ?slot_ids, ); @@ -232,7 +234,7 @@ impl SignerDBListener { Some(block) => block, None => { info!( - "SignerDBListener: Received signature for block that we did not request. Ignoring."; + "StackerDBListener: Received signature for block that we did not request. Ignoring."; "signature" => %signature, "block_signer_sighash" => %block_sighash, "slot_id" => slot_id, @@ -245,13 +247,13 @@ impl SignerDBListener { let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) else { warn!( - "SignerDBListener: Got invalid signature from a signer. Ignoring." + "StackerDBListener: Got invalid signature from a signer. Ignoring." ); continue; }; if !valid_sig { warn!( - "SignerDBListener: Processed signature but didn't validate over the expected block. Ignoring"; + "StackerDBListener: Processed signature but didn't validate over the expected block. 
Ignoring"; "signature" => %signature, "block_signer_signature_hash" => %block_sighash, "slot_id" => slot_id, @@ -260,7 +262,7 @@ impl SignerDBListener { } if Self::fault_injection_ignore_signatures() { - warn!("SignerDBListener: fault injection: ignoring well-formed signature for block"; + warn!("StackerDBListener: fault injection: ignoring well-formed signature for block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, @@ -278,7 +280,7 @@ impl SignerDBListener { .expect("FATAL: total weight signed exceeds u32::MAX"); } - info!("SignerDBListener: Signature Added to block"; + info!("StackerDBListener: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, @@ -311,7 +313,7 @@ impl SignerDBListener { Some(block) => block, None => { info!( - "SignerDBListener: Received rejection for block that we did not request. Ignoring."; + "StackerDBListener: Received rejection for block that we did not request. Ignoring."; "block_signer_sighash" => %rejected_data.signer_signature_hash, "slot_id" => slot_id, "signer_set" => self.signer_set, @@ -323,13 +325,13 @@ impl SignerDBListener { let rejected_pubkey = match rejected_data.recover_public_key() { Ok(rejected_pubkey) => { if rejected_pubkey != signer_pubkey { - warn!("SignerDBListener: Recovered public key from rejected data does not match signer's public key. Ignoring."); + warn!("StackerDBListener: Recovered public key from rejected data does not match signer's public key. Ignoring."); continue; } rejected_pubkey } Err(e) => { - warn!("SignerDBListener: Failed to recover public key from rejected data: {e:?}. Ignoring."); + warn!("StackerDBListener: Failed to recover public key from rejected data: {e:?}. 
Ignoring."); continue; } }; @@ -339,7 +341,7 @@ impl SignerDBListener { .checked_add(signer_entry.weight) .expect("FATAL: total weight rejected exceeds u32::MAX"); - info!("SignerDBListener: Signer rejected block"; + info!("StackerDBListener: Signer rejected block"; "block_signer_sighash" => %rejected_data.signer_signature_hash, "signer_pubkey" => rejected_pubkey.to_hex(), "signer_slot_id" => slot_id, From cf540a5752df7a6ded37981160ec23be741d1523 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 22 Nov 2024 15:53:25 -0500 Subject: [PATCH 049/115] ifix: resolve merge errors --- .../stacks-node/src/nakamoto_node/stackerdb_listener.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 91f9539de1..6b02dd73d6 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -225,8 +225,10 @@ impl StackerDBListener { signer_signature_hash: block_sighash, signature, metadata, - tenure_extend_timestamp, // TOOD: utilize this info + response_data, } = accepted; + let tenure_extend_timestamp = response_data.tenure_extend_timestamp; + let (lock, cvar) = &*self.blocks; let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); @@ -350,7 +352,7 @@ impl StackerDBListener { "total_weight_signed" => block.total_weight_signed, "reason" => rejected_data.reason, "reason_code" => %rejected_data.reason_code, - "tenure_extend_timestamp" => rejected_data.tenure_extend_timestamp, + "tenure_extend_timestamp" => rejected_data.response_data.tenure_extend_timestamp, "server_version" => rejected_data.metadata.server_version, ); @@ -366,7 +368,7 @@ impl StackerDBListener { // Update the idle timestamp for this signer self.update_idle_timestamp( signer_pubkey, - rejected_data.tenure_extend_timestamp, + 
rejected_data.response_data.tenure_extend_timestamp, signer_entry.weight, ); } From aea205baf062d25db7df387831ecab7d22598766 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 22 Nov 2024 15:55:54 -0500 Subject: [PATCH 050/115] chore: finish rename --- .../src/nakamoto_node/signer_coordinator.rs | 15 ++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 5588bfaf3e..018edc2740 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -36,10 +36,11 @@ use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; -use super::signerdb_listener::{SignerDBListener, TimestampInfo, EVENT_RECEIVER_POLL}; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -use crate::nakamoto_node::signerdb_listener::BlockStatus; +use crate::nakamoto_node::stackerdb_listener::{ + BlockStatus, StackerDBListener, TimestampInfo, EVENT_RECEIVER_POLL, +}; use crate::neon::Counters; use crate::Config; @@ -96,8 +97,8 @@ impl SignerCoordinator { message_key: StacksPrivateKey, config: &Config, ) -> Result { - // Create the signer DB listener - let mut listener = SignerDBListener::new( + // Create the stacker DB listener + let mut listener = StackerDBListener::new( stackerdb_channel, keep_running.clone(), reward_set, @@ -125,14 +126,14 @@ impl SignerCoordinator { // Spawn the signer DB listener thread let listener_thread = std::thread::Builder::new() - .name("signerdb_listener".to_string()) + .name("stackerdb_listener".to_string()) .spawn(move || { if let Err(e) = listener.run() { - error!("SignerDBListener: failed to run: {e:?}"); + error!("StackerDBListener: failed to run: {e:?}"); } }) .map_err(|e| { - 
error!("Failed to spawn signerdb_listener thread: {e:?}"); + error!("Failed to spawn stackerdb_listener thread: {e:?}"); ChainstateError::MinerAborted })?; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fc7e31bea6..3a1c7f0179 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -68,7 +68,7 @@ use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_NO_TENURE_EXTEND, }; -use crate::nakamoto_node::signerdb_listener::TEST_IGNORE_SIGNERS; +use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ From d173d98507c68cc2467292daa8ae1a38644117cf Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 22 Nov 2024 17:19:04 -0500 Subject: [PATCH 051/115] fix: replace the channel receiver when the listener stops --- .../src/nakamoto_node/stackerdb_listener.rs | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 6b02dd73d6..cfadbc6fee 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -61,10 +61,11 @@ pub(crate) struct TimestampInfo { /// The listener for the StackerDB, which listens for messages from the /// signers and tracks the state of block signatures and idle timestamps. 
-#[derive(Debug)] pub struct StackerDBListener { - /// Channel to receive StackerDB events - receiver: Receiver, + /// Channel to communicate with StackerDB + stackerdb_channel: Arc>, + /// Receiver end of the StackerDB events channel + receiver: Option>, /// Flag to shut the listener down keep_running: Arc, /// The signer set for this tenure (0 or 1) @@ -136,7 +137,8 @@ impl StackerDBListener { .collect::, ChainstateError>>()?; Ok(Self { - receiver, + stackerdb_channel, + receiver: Some(receiver), keep_running, signer_set, total_weight, @@ -150,8 +152,15 @@ impl StackerDBListener { /// Run the StackerDB listener. pub fn run(&mut self) -> Result<(), NakamotoNodeError> { info!("StackerDBListener: Starting up"); + + let Some(receiver) = &self.receiver else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDBListener: Failed to obtain the StackerDB event receiver".into(), + )); + }; + loop { - let event = match self.receiver.recv_timeout(EVENT_RECEIVER_POLL) { + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { continue; @@ -411,3 +420,15 @@ impl StackerDBListener { false } } + +impl Drop for StackerDBListener { + fn drop(&mut self) { + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock stackerdb channel"); + stackerdb_channel.replace_receiver(self.receiver.take().expect( + "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", + )); + } +} From a21fa4d8d8cbd07a3e6eccc5707139b188004901 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 25 Nov 2024 15:10:54 -0500 Subject: [PATCH 052/115] feat: shutdown stacker db listener with miner --- .../stacks-node/src/nakamoto_node/miner.rs | 342 +++++++++--------- .../src/nakamoto_node/signer_coordinator.rs | 20 +- .../src/nakamoto_node/stackerdb_listener.rs | 22 +- 3 files changed, 215 insertions(+), 169 deletions(-) diff --git 
a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d51cccb4ba..ea54f508b6 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -323,187 +323,205 @@ impl BlockMinerThread { // now, actually run this tenure loop { - #[cfg(test)] - if *TEST_MINE_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Mining is stalled due to testing directive"); - while *TEST_MINE_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - warn!("Mining is no longer stalled due to testing directive. Continuing..."); + if let Err(e) = self.miner_main_loop( + &mut coordinator, + &sortdb, + &mut stackerdbs, + &mut last_block_rejected, + ) { + // Before stopping this miner, shutdown the coordinator thread. + coordinator.shutdown(); + return Err(e); } - let new_block = loop { - // If we're mock mining, we may not have processed the block that the - // actual tenure winner committed to yet. So, before attempting to - // mock mine, check if the parent is processed. - if self.config.get_node_config(false).mock_mining { - let burn_db_path = self.config.get_burn_db_file_path(); - let mut burn_db = SortitionDB::open( - &burn_db_path, - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - let burn_tip_changed = self.check_burn_tip_changed(&burn_db); - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - match burn_tip_changed - .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) - { - Ok(..) 
=> {} - Err(NakamotoNodeError::ParentNotFound) => { - info!("Mock miner has not processed parent block yet, sleeping and trying again"); - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - continue; - } - Err(e) => { - warn!("Mock miner failed to load parent info: {e:?}"); - return Err(e); - } - } - } + } + } - match self.mine_block() { - Ok(x) => { - if !self.validate_timestamp(&x)? { - info!("Block mined too quickly. Will try again."; - "block_timestamp" => x.header.timestamp, - ); - continue; - } - break Some(x); - } - Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { - info!("Miner interrupted while mining, will try again"); - // sleep, and try again. if the miner was interrupted because the burnchain - // view changed, the next `mine_block()` invocation will error + /// The main loop for the miner thread. This is where the miner will mine + /// blocks and then attempt to sign and broadcast them. + fn miner_main_loop( + &mut self, + coordinator: &mut SignerCoordinator, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + last_block_rejected: &mut bool, + ) -> Result<(), NakamotoNodeError> { + #[cfg(test)] + if *TEST_MINE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Mining is stalled due to testing directive"); + while *TEST_MINE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Mining is no longer stalled due to testing directive. Continuing..."); + } + let new_block = loop { + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. 
+ if self.config.get_node_config(false).mock_mining { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + let burn_tip_changed = self.check_burn_tip_changed(&burn_db); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + match burn_tip_changed + .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) + { + Ok(..) => {} + Err(NakamotoNodeError::ParentNotFound) => { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); continue; } - Err(NakamotoNodeError::MiningFailure( - ChainstateError::NoTransactionsToMine, - )) => { - debug!("Miner did not find any transactions to mine"); - break None; - } Err(e) => { - warn!("Failed to mine block: {e:?}"); - - // try again, in case a new sortition is pending - self.globals - .raise_initiative(format!("MiningFailure: {e:?}")); - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::MinerAborted, - )); + warn!("Mock miner failed to load parent info: {e:?}"); + return Err(e); } } - }; - - if let Some(mut new_block) = new_block { - Self::fault_injection_block_broadcast_stall(&new_block); - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! 
{e:?}" - )) - })?; - let signer_signature = match coordinator.propose_block( - &mut new_block, - &self.burn_block, - &self.burnchain, - &sortdb, - &mut chain_state, - &mut stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - ) { - Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - _ => { - // Sleep for a bit to allow signers to catch up - let pause_ms = if last_block_rejected { - self.config.miner.subsequent_rejection_pause_ms - } else { - self.config.miner.first_rejection_pause_ms - }; - - error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - thread::sleep(Duration::from_millis(pause_ms)); - last_block_rejected = true; - continue; - } - }, - }; - last_block_rejected = false; - - let reward_set = self.load_signer_set()?; + } - new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { - warn!("Error accepting own block: {e:?}. Will try mining again."); + match self.mine_block() { + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. 
Will try again."; + "block_timestamp" => x.header.timestamp, + ); + continue; + } + break Some(x); + } + Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { + info!("Miner interrupted while mining, will try again"); + // sleep, and try again. if the miner was interrupted because the burnchain + // view changed, the next `mine_block()` invocation will error + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); continue; - } else { - info!( - "Miner: Block signed by signer set and broadcasted"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "stacks_block_hash" => %new_block.header.block_hash(), - "stacks_block_id" => %new_block.header.block_id(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); } - - // update mined-block counters and mined-tenure counters - self.globals.counters.bump_naka_mined_blocks(); - if self.last_block_mined.is_some() { - // this is the first block of the tenure, bump tenure counter - self.globals.counters.bump_naka_mined_tenures(); + Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { + debug!("Miner did not find any transactions to mine"); + break None; + } + Err(e) => { + warn!("Failed to mine block: {e:?}"); + + // try again, in case a new sortition is pending + self.globals + .raise_initiative(format!("MiningFailure: {e:?}")); + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); } + } + }; + + if let Some(mut new_block) = new_block { + Self::fault_injection_block_broadcast_stall(&new_block); + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! 
{e:?}" + )) + })?; + let signer_signature = match coordinator.propose_block( + &mut new_block, + &self.burn_block, + &self.burnchain, + &sortdb, + &mut chain_state, + stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + ) { + Ok(x) => x, + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + // Sleep for a bit to allow signers to catch up + let pause_ms = if *last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + thread::sleep(Duration::from_millis(pause_ms)); + *last_block_rejected = true; + return Ok(()); + } + }, + }; + *last_block_rejected = false; + + let reward_set = self.load_signer_set()?; - // wake up chains coordinator - Self::fault_injection_block_announce_stall(&new_block); - self.globals.coord().announce_new_stacks_block(); + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { + warn!("Error accepting own block: {e:?}. 
Will try mining again."); + return Ok(()); + } else { + info!( + "Miner: Block signed by signer set and broadcasted"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "stacks_block_hash" => %new_block.header.block_hash(), + "stacks_block_id" => %new_block.header.block_id(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + } - self.last_block_mined = Some(new_block); + // update mined-block counters and mined-tenure counters + self.globals.counters.bump_naka_mined_blocks(); + if self.last_block_mined.is_some() { + // this is the first block of the tenure, bump tenure counter + self.globals.counters.bump_naka_mined_tenures(); } - let Ok(sort_db) = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) else { - error!("Failed to open sortition DB. Will try mining again."); - continue; - }; + // wake up chains coordinator + Self::fault_injection_block_announce_stall(&new_block); + self.globals.coord().announce_new_stacks_block(); - let wait_start = Instant::now(); - while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - if self.check_burn_tip_changed(&sort_db).is_err() { - return Err(NakamotoNodeError::BurnchainTipChanged); - } + self.last_block_mined = Some(new_block); + } + + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + return Ok(()); + }; + + let wait_start = Instant::now(); + while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + if self.check_burn_tip_changed(&sort_db).is_err() { + return Err(NakamotoNodeError::BurnchainTipChanged); } } + + Ok(()) } /// Load the signer set active for this miner's blocks. 
This is the diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 018edc2740..575f43adf7 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -81,6 +81,8 @@ pub struct SignerCoordinator { /// - key: StacksPublicKey /// - value: TimestampInfo signer_idle_timestamps: Arc>>, + /// Keep running flag for the signer DB listener thread + keep_running: Arc, /// Handle for the signer DB listener thread listener_thread: Option>, } @@ -90,16 +92,19 @@ impl SignerCoordinator { /// This will spawn a new thread to listen for messages from the signer DB. pub fn new( stackerdb_channel: Arc>, - keep_running: Arc, + node_keep_running: Arc, reward_set: &RewardSet, burn_tip: &BlockSnapshot, burnchain: &Burnchain, message_key: StacksPrivateKey, config: &Config, ) -> Result { + let keep_running = Arc::new(AtomicBool::new(true)); + // Create the stacker DB listener let mut listener = StackerDBListener::new( stackerdb_channel, + node_keep_running.clone(), keep_running.clone(), reward_set, burn_tip, @@ -121,6 +126,7 @@ impl SignerCoordinator { weight_threshold: listener.weight_threshold, blocks: listener.blocks.clone(), signer_idle_timestamps: listener.signer_idle_timestamps.clone(), + keep_running, listener_thread: None, }; @@ -409,4 +415,16 @@ impl SignerCoordinator { false } } + + pub fn shutdown(&mut self) { + if let Some(listener_thread) = self.listener_thread.take() { + info!("SignerCoordinator: shutting down stacker db listener thread"); + self.keep_running + .store(false, std::sync::atomic::Ordering::Relaxed); + if let Err(e) = listener_thread.join() { + error!("Failed to join signer listener thread: {e:?}"); + } + debug!("SignerCoordinator: stacker db listener thread has shut down"); + } + } } diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs 
b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index cfadbc6fee..eddad6e6d8 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -66,6 +66,8 @@ pub struct StackerDBListener { stackerdb_channel: Arc>, /// Receiver end of the StackerDB events channel receiver: Option>, + /// Flag to shut the node down + node_keep_running: Arc, /// Flag to shut the listener down keep_running: Arc, /// The signer set for this tenure (0 or 1) @@ -90,6 +92,7 @@ pub struct StackerDBListener { impl StackerDBListener { pub fn new( stackerdb_channel: Arc>, + node_keep_running: Arc, keep_running: Arc, reward_set: &RewardSet, burn_tip: &BlockSnapshot, @@ -139,6 +142,7 @@ impl StackerDBListener { Ok(Self { stackerdb_channel, receiver: Some(receiver), + node_keep_running, keep_running, signer_set, total_weight, @@ -160,6 +164,18 @@ impl StackerDBListener { }; loop { + // was the node asked to stop? + if !self.node_keep_running.load(Ordering::SeqCst) { + info!("StackerDBListener: received node exit request. Aborting"); + return Err(NakamotoNodeError::ChannelClosed); + } + + // was the listener asked to stop? + if !self.keep_running.load(Ordering::SeqCst) { + info!("StackerDBListener: received listener exit request. Aborting"); + return Err(NakamotoNodeError::ChannelClosed); + } + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { @@ -173,12 +189,6 @@ impl StackerDBListener { } }; - // was the miner asked to stop? - if !self.keep_running.load(Ordering::SeqCst) { - info!("StackerDBListener: received miner exit request. 
Aborting"); - return Err(NakamotoNodeError::ChannelClosed); - } - // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); From ab0807e5bcc999025f7847cae6e7d0fcd4e3b9d3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 25 Nov 2024 16:59:21 -0500 Subject: [PATCH 053/115] chore: return okay when exit is requested --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 3 ++- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 575f43adf7..2fcb7b1ce3 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -99,6 +99,7 @@ impl SignerCoordinator { message_key: StacksPrivateKey, config: &Config, ) -> Result { + info!("SignerCoordinator: starting up"); let keep_running = Arc::new(AtomicBool::new(true)); // Create the stacker DB listener @@ -135,7 +136,7 @@ impl SignerCoordinator { .name("stackerdb_listener".to_string()) .spawn(move || { if let Err(e) = listener.run() { - error!("StackerDBListener: failed to run: {e:?}"); + error!("StackerDBListener: exited with error: {e:?}"); } }) .map_err(|e| { diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index eddad6e6d8..4f213abf6c 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -167,13 +167,13 @@ impl StackerDBListener { // was the node asked to stop? if !self.node_keep_running.load(Ordering::SeqCst) { info!("StackerDBListener: received node exit request. 
Aborting"); - return Err(NakamotoNodeError::ChannelClosed); + return Ok(()); } // was the listener asked to stop? if !self.keep_running.load(Ordering::SeqCst) { info!("StackerDBListener: received listener exit request. Aborting"); - return Err(NakamotoNodeError::ChannelClosed); + return Ok(()); } let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { From 5d516a801b4c848ab9e76a74ee90717851b56299 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 25 Nov 2024 21:28:36 -0500 Subject: [PATCH 054/115] fix: drop lock in `propose_block` --- .../src/nakamoto_node/signer_coordinator.rs | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 2fcb7b1ce3..b58f27aab9 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -225,16 +225,19 @@ impl SignerCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { - // Add this block to the block status map - let (lock, _cvar) = &*self.blocks; - let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - let block_status = BlockStatus { - responded_signers: HashSet::new(), - gathered_signatures: BTreeMap::new(), - total_weight_signed: 0, - total_reject_weight: 0, - }; - blocks.insert(block.header.signer_signature_hash(), block_status); + // Add this block to the block status map. + // Create a scope to drop the lock on the block status map. 
+ { + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + let block_status = BlockStatus { + responded_signers: HashSet::new(), + gathered_signatures: BTreeMap::new(), + total_weight_signed: 0, + total_reject_weight: 0, + }; + blocks.insert(block.header.signer_signature_hash(), block_status); + } let reward_cycle_id = burnchain .block_height_to_reward_cycle(burn_tip.block_height) @@ -410,7 +413,7 @@ impl SignerCoordinator { .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { - info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); + info!("SignerCoordinator: Cancel signature aggregation; burnchain tip has changed"); true } else { false From 1a67a1c8c1a0ca3022405cd4c4d79ce939134165 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 25 Nov 2024 22:11:22 -0500 Subject: [PATCH 055/115] feat: extend tenure based on time See #5476 --- .../stacks-node/src/nakamoto_node/miner.rs | 66 +++++++++++++++---- .../src/nakamoto_node/signer_coordinator.rs | 3 + 2 files changed, 57 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ea54f508b6..68dd3f4fd3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -140,6 +140,8 @@ pub struct BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, + /// Number of blocks mined in this tenure + mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -173,6 +175,7 @@ impl BlockMinerThread { keychain: rt.keychain.clone(), burnchain: rt.burnchain.clone(), last_block_mined: None, + mined_blocks: 0, registered_key, burn_election_block, burn_block, @@ -345,6 +348,7 @@ impl BlockMinerThread 
{ stackerdbs: &mut StackerDBs, last_block_rejected: &mut bool, ) -> Result<(), NakamotoNodeError> { + info!("Miner: Starting main loop"); #[cfg(test)] if *TEST_MINE_STALL.lock().unwrap() == Some(true) { // Do an extra check just so we don't log EVERY time. @@ -382,7 +386,8 @@ impl BlockMinerThread { } } - match self.mine_block() { + info!("Miner: Mining a new block"); + match self.mine_block(coordinator) { Ok(x) => { if !self.validate_timestamp(&x)? { info!("Block mined too quickly. Will try again."; @@ -502,6 +507,7 @@ impl BlockMinerThread { self.globals.coord().announce_new_stacks_block(); self.last_block_mined = Some(new_block); + self.mined_blocks += 1; } let Ok(sort_db) = SortitionDB::open( @@ -999,8 +1005,12 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. - fn mine_block(&mut self) -> Result { + fn mine_block( + &mut self, + coordinator: &mut SignerCoordinator, + ) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); + info!("Miner: Mining block"); let burn_db_path = self.config.get_burn_db_file_path(); let reward_set = self.load_signer_set()?; @@ -1043,8 +1053,12 @@ impl BlockMinerThread { &parent_block_info, vrf_proof, target_epoch_id, + coordinator, )?; + // TODO: If we are doing a time-based tenure extend, we need to reset + // the budget and the block_count here + parent_block_info.stacks_parent_header.microblock_tail = None; let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); @@ -1128,24 +1142,52 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Create the tenure start info for the block we're going to build fn make_tenure_start_info( - &self, + &mut self, chainstate: &StacksChainState, parent_block_info: &ParentStacksBlockInfo, vrf_proof: VRFProof, target_epoch_id: StacksEpochId, + coordinator: &mut 
SignerCoordinator, ) -> Result { + info!("Miner: Creating tenure start info"); let current_miner_nonce = parent_block_info.coinbase_nonce; - let Some(parent_tenure_info) = &parent_block_info.parent_tenure else { - return Ok(NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - }); + let parent_tenure_info = match &parent_block_info.parent_tenure { + Some(info) => info.clone(), + None => { + // We may be able to extend the current tenure + if self.last_block_mined.is_none() { + info!("Miner: No parent tenure and no last block mined"); + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + ParentTenureInfo { + parent_tenure_blocks: self.mined_blocks, + parent_tenure_consensus_hash: self.burn_election_block.consensus_hash, + } + } }; if self.last_block_mined.is_some() { - return Ok(NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - }); + info!("make_tenure_start_info: last block mined is some"); + // Check if we can extend the current tenure + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); + info!( + "make_tenure_start_info: tenure_extend_timestamp: {}, now: {}", + tenure_extend_timestamp, + get_epoch_time_secs() + ); + if get_epoch_time_secs() < tenure_extend_timestamp { + info!("Miner: Not extending tenure"); + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + info!("Miner: Extending tenure"); + self.reason = MinerReason::Extended { + burn_view_consensus_hash: self.burn_election_block.consensus_hash, + }; } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index b58f27aab9..c12d2f9bcb 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -387,16 +387,19 @@ impl SignerCoordinator { 
/// Get the timestamp at which at least 70% of the signing power should be /// willing to accept a time-based tenure extension. pub fn get_tenure_extend_timestamp(&self) -> u64 { + info!("SignerCoordinator: getting tenure extension timestamp"); let signer_idle_timestamps = self .signer_idle_timestamps .lock() .expect("FATAL: failed to lock signer idle timestamps"); + info!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); idle_timestamps.sort_by_key(|info| info.timestamp); let mut weight_sum = 0; for info in idle_timestamps { weight_sum += info.weight; if weight_sum >= self.weight_threshold { + info!("SignerCoordinator: 70% threshold reached"); return info.timestamp; } } From 48c8a1017c0a2101711488a5ef82a71b02314024 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 07:13:00 -0500 Subject: [PATCH 056/115] chore: cleanup --- .../stacks-node/src/nakamoto_node/miner.rs | 31 ++++++++----------- .../src/nakamoto_node/signer_coordinator.rs | 3 +- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 68dd3f4fd3..aa8316ad70 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -348,7 +348,6 @@ impl BlockMinerThread { stackerdbs: &mut StackerDBs, last_block_rejected: &mut bool, ) -> Result<(), NakamotoNodeError> { - info!("Miner: Starting main loop"); #[cfg(test)] if *TEST_MINE_STALL.lock().unwrap() == Some(true) { // Do an extra check just so we don't log EVERY time. @@ -386,7 +385,6 @@ impl BlockMinerThread { } } - info!("Miner: Mining a new block"); match self.mine_block(coordinator) { Ok(x) => { if !self.validate_timestamp(&x)? 
{ @@ -1056,9 +1054,6 @@ impl BlockMinerThread { coordinator, )?; - // TODO: If we are doing a time-based tenure extend, we need to reset - // the budget and the block_count here - parent_block_info.stacks_parent_header.microblock_tail = None; let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); @@ -1149,14 +1144,13 @@ impl BlockMinerThread { target_epoch_id: StacksEpochId, coordinator: &mut SignerCoordinator, ) -> Result { - info!("Miner: Creating tenure start info"); let current_miner_nonce = parent_block_info.coinbase_nonce; let parent_tenure_info = match &parent_block_info.parent_tenure { Some(info) => info.clone(), None => { // We may be able to extend the current tenure if self.last_block_mined.is_none() { - info!("Miner: No parent tenure and no last block mined"); + debug!("Miner: No parent tenure and no last block mined"); return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, @@ -1169,25 +1163,19 @@ impl BlockMinerThread { } }; if self.last_block_mined.is_some() { - info!("make_tenure_start_info: last block mined is some"); // Check if we can extend the current tenure let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); - info!( - "make_tenure_start_info: tenure_extend_timestamp: {}, now: {}", - tenure_extend_timestamp, - get_epoch_time_secs() - ); if get_epoch_time_secs() < tenure_extend_timestamp { - info!("Miner: Not extending tenure"); return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, }); } - info!("Miner: Extending tenure"); - self.reason = MinerReason::Extended { - burn_view_consensus_hash: self.burn_election_block.consensus_hash, - }; + debug!("Miner: Time-based tenure extend"; + "current_timestamp" => get_epoch_time_secs(), + "tenure_extend_timestamp" => tenure_extend_timestamp, + ); + self.tenure_extend_reset(); } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); @@ -1254,6 +1242,13 @@ impl BlockMinerThread { Ok(()) } } + + fn 
tenure_extend_reset(&mut self) { + self.reason = MinerReason::Extended { + burn_view_consensus_hash: self.burn_block.consensus_hash, + }; + self.mined_blocks = 0; + } } impl ParentStacksBlockInfo { diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index c12d2f9bcb..1e0e7694fd 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -387,12 +387,11 @@ impl SignerCoordinator { /// Get the timestamp at which at least 70% of the signing power should be /// willing to accept a time-based tenure extension. pub fn get_tenure_extend_timestamp(&self) -> u64 { - info!("SignerCoordinator: getting tenure extension timestamp"); let signer_idle_timestamps = self .signer_idle_timestamps .lock() .expect("FATAL: failed to lock signer idle timestamps"); - info!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); + debug!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); idle_timestamps.sort_by_key(|info| info.timestamp); let mut weight_sum = 0; From 8929537fc4c528c999155a46919032b1ede7ca77 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 11:24:44 -0500 Subject: [PATCH 057/115] fix: resolve errors after merge --- .../stacks-node/src/nakamoto_node/miner.rs | 65 +------------------ .../stacks-node/src/nakamoto_node/relayer.rs | 2 - 2 files changed, 1 insertion(+), 66 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index e42e45d056..6b8b3ff0b9 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -61,8 +61,6 @@ pub static TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mut pub static TEST_BLOCK_ANNOUNCE_STALL: std::sync::Mutex> = 
std::sync::Mutex::new(None); #[cfg(test)] pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); -#[cfg(test)] -pub static TEST_NO_TENURE_EXTEND: std::sync::Mutex> = std::sync::Mutex::new(None); /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? @@ -388,7 +386,7 @@ impl BlockMinerThread { Ok(x) => { if !self.validate_timestamp(&x)? { info!("Block mined too quickly. Will try again."; - "block_timestamp" => x.header.timestamp, + "block_timestamp" => x.header.timestamp, ); continue; } @@ -590,67 +588,6 @@ impl BlockMinerThread { Ok(reward_set) } - /// Gather a list of signatures from the signers for the block - fn gather_signatures( - &mut self, - new_block: &mut NakamotoBlock, - stackerdbs: &mut StackerDBs, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open sortition DB. Cannot mine! {e:?}" - )) - })?; - - let reward_set = self.load_signer_set()?; - - if self.config.get_node_config(false).mock_mining { - return Ok((reward_set, Vec::new())); - } - - let mut coordinator = SignCoordinator::new( - &reward_set, - miner_privkey, - &self.config, - self.globals.should_keep_running.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; - - let mut chain_state = - neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! 
{e:?}" - )) - })?; - - let signature = coordinator.run_sign_v0( - new_block, - &self.burn_block, - &self.burnchain, - &sort_db, - &mut chain_state, - stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - )?; - - Ok((reward_set, signature)) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fd86f476f3..b346cdc346 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -58,8 +58,6 @@ use super::{ BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -#[cfg(test)] -use crate::nakamoto_node::miner::TEST_NO_TENURE_EXTEND; use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; use crate::neon_node::{ fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, From 65c5b70c16cbdd70344565291e3b15846c16b21b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 11:29:25 -0500 Subject: [PATCH 058/115] chore: remove duplicates in CHANGELOGs due to merge --- CHANGELOG.md | 1 - stacks-signer/CHANGELOG.md | 1 - 2 files changed, 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 14401642f5..c89e9df484 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,6 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Remove the panic for reporting DB deadlocks (just error and continue waiting) - Add index to `metadata_table` in Clarity DB on `blockhash` - Add `block_commit_delay_ms` to the config file to control the time to wait after seeing a new burn block, before submitting a block commit, to allow time for the first Nakamoto block of the new tenure to be mined, allowing this miner to avoid the need to RBF the block commit. 
-- If the winning miner of a sortition is committed to the wrong parent tenure, the previous miner can immediately tenure extend and continue mining since the winning miner would never be able to propose a valid block. (#5361) - Add `tenure_cost_limit_per_block_percentage` to the miner config file to control the percentage remaining tenure cost limit to consume per nakamoto block. - Add `/v3/blocks/height/:block_height` rpc endpoint - If the winning miner of a sortition is committed to the wrong parent tenure, the previous miner can immediately tenure extend and continue mining since the winning miner would never be able to propose a valid block. (#5361) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 7f64a3298b..8514bc3b67 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,7 +11,6 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed -- Allow a miner to extend their tenure immediately if the winner of the next tenure has committed to the wrong parent tenure (#5361) - Add tenure extend timestamp to signer block responses - Added tenure_idle_timeout_secs configuration option for determining when a tenure extend will be accepted - Allow a miner to extend their tenure immediately if the winner of the next tenure has committed to the wrong parent tenure (#5361) From 280d536046d8181b3e1fcfae029d22e707e90b56 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 11:50:50 -0500 Subject: [PATCH 059/115] fix: merge artifact --- stacks-signer/src/signerdb.rs | 41 ----------------------------------- 1 file changed, 41 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 6c99aa5b64..ac76679f89 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -1512,45 +1512,4 @@ mod tests { .unwrap() .is_empty()); } - - #[test] - fn test_get_canonical_tip() { - let db_path = tmp_db_path(); - let mut db = 
SignerDb::new(db_path).expect("Failed to create signer db"); - - let (mut block_info_1, _block_proposal_1) = create_block_override(|b| { - b.block.header.miner_signature = MessageSignature([0x01; 65]); - b.block.header.chain_length = 1; - b.burn_height = 1; - }); - - let (mut block_info_2, _block_proposal_2) = create_block_override(|b| { - b.block.header.miner_signature = MessageSignature([0x02; 65]); - b.block.header.chain_length = 2; - b.burn_height = 2; - }); - - db.insert_block(&block_info_1) - .expect("Unable to insert block into db"); - db.insert_block(&block_info_2) - .expect("Unable to insert block into db"); - - assert!(db.get_canonical_tip().unwrap().is_none()); - - block_info_1 - .mark_globally_accepted() - .expect("Failed to mark block as globally accepted"); - db.insert_block(&block_info_1) - .expect("Unable to insert block into db"); - - assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_1); - - block_info_2 - .mark_globally_accepted() - .expect("Failed to mark block as globally accepted"); - db.insert_block(&block_info_2) - .expect("Unable to insert block into db"); - - assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_2); - } } From a4d378f623eec12b0b8a4b50912c2179407e99de Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 11:53:03 -0500 Subject: [PATCH 060/115] fix: merge artifacts --- testnet/stacks-node/src/tests/signer/v0.rs | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4574563efa..a0af4d58ac 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -67,7 +67,7 @@ use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{ - TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_NO_TENURE_EXTEND, + 
TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; @@ -960,10 +960,6 @@ fn forked_tenure_testing( sleep_ms(1000); info!("------------------------- Reached Epoch 3.0 -------------------------"); - // Disable tenure extend so that miners will not tenure extend when the - // test is checking for fork behavior. - TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); - let naka_conf = signer_test.running_nodes.conf.clone(); let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -1318,10 +1314,6 @@ fn bitcoind_forking_test() { let pre_epoch_3_nonce = get_account(&http_origin, &miner_address).nonce; let pre_fork_tenures = 10; - // Disable tenure extend so that miners will not tenure extend when the - // test is checking for fork behavior. - TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); - for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); @@ -1969,10 +1961,6 @@ fn miner_forking() { "RL1 did not win the sortition" ); - // Disable tenure extend so that miners will not tenure extend when the - // test is checking for fork behavior. - TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); - info!( "------------------------- RL2 Wins Sortition With Outdated View -------------------------" ); @@ -4331,10 +4319,6 @@ fn partial_tenure_fork() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - // Disable tenure extend so that miners will not tenure extend when the - // test is checking for fork behavior. 
- TEST_NO_TENURE_EXTEND.lock().unwrap().replace(true); - // due to the random nature of mining sortitions, the way this test is structured // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the From 379ce666e473b6ad90e6cd2030e4cb65193f5ae0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 14:26:02 -0500 Subject: [PATCH 061/115] chore: upgrade debug log to info --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6b8b3ff0b9..e048c9456a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1162,7 +1162,7 @@ impl BlockMinerThread { tenure_change_tx: None, }); } - debug!("Miner: Time-based tenure extend"; + info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, ); From 09ea89b1317f22722b1c47e5982b547b0c60e9fe Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 26 Nov 2024 11:49:08 -0800 Subject: [PATCH 062/115] CRC: delete old blocks table and migrate data over to new one with block info extracted Signed-off-by: Jacinta Ferrant --- stacks-signer/Cargo.toml | 2 +- stacks-signer/src/chainstate.rs | 16 +- stacks-signer/src/lib.rs | 2 - stacks-signer/src/runloop.rs | 8 - stacks-signer/src/signerdb.rs | 374 ++++++++---------- stacks-signer/src/tests/chainstate.rs | 28 +- stacks-signer/src/v0/signer.rs | 63 +-- .../src/tests/nakamoto_integrations.rs | 44 +-- 8 files changed, 203 insertions(+), 334 deletions(-) diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 139c34fba8..3beba641f2 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -45,7 +45,7 @@ tracing = "0.1.37" 
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } rand = { workspace = true } url = "2.1.0" -rusqlite = { workspace = true } +rusqlite = { workspace = true, features = ["functions"] } [dev-dependencies] clarity = { path = "../clarity", features = ["testing"] } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 40371ff239..f5a04ebbc9 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -193,7 +193,6 @@ impl SortitionsView { signer_db: &mut SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, - reward_cycle: u64, reset_view_if_wrong_consensus_hash: bool, ) -> Result { if self @@ -287,14 +286,7 @@ impl SortitionsView { "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); self.reset_view(client)?; - return self.check_proposal( - client, - signer_db, - block, - block_pk, - reward_cycle, - false, - ); + return self.check_proposal(client, signer_db, block, block_pk, false); } warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; @@ -352,7 +344,6 @@ impl SortitionsView { &proposed_by, tenure_change, block, - reward_cycle, signer_db, client, )? { @@ -547,7 +538,6 @@ impl SortitionsView { fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, - reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, tenure_last_block_proposal_timeout: Duration, @@ -592,7 +582,7 @@ impl SortitionsView { // If we have seen this block already, make sure its state is updated to globally accepted. // Otherwise, don't worry about it. 
if let Ok(Some(mut block_info)) = - signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) + signer_db.block_lookup(&nakamoto_tip.signer_signature_hash()) { if block_info.state != BlockState::GloballyAccepted { if let Err(e) = block_info.mark_globally_accepted() { @@ -627,7 +617,6 @@ impl SortitionsView { proposed_by: &ProposedBy, tenure_change: &TenureChangePayload, block: &NakamotoBlock, - reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, ) -> Result { @@ -635,7 +624,6 @@ impl SortitionsView { let confirms_expected_parent = Self::check_tenure_change_confirms_parent( tenure_change, block, - reward_cycle, signer_db, client, self.config.tenure_last_block_proposal_timeout, diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index d796f7582f..244675c65c 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -76,8 +76,6 @@ pub trait Signer: Debug + Display { ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; - /// Cleanup signer stale data - fn cleanup_stale_data(&mut self, current_reward_cycle: u64); } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 2850c1354c..11faadf871 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -377,14 +377,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo "is_in_next_prepare_phase" => is_in_next_prepare_phase, ); - if reward_cycle_before_refresh != current_reward_cycle { - for signer in self.stacks_signers.values_mut() { - if let ConfiguredSigner::RegisteredSigner(signer) = signer { - signer.cleanup_stale_data(current_reward_cycle); - } - } - } - // Check if we need to refresh the signers: // need to refresh the current signer if we are not configured for the current reward cycle // need to refresh the next signer if we're not configured for the next reward cycle, and we're in the 
prepare phase diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 09379af6b9..3676b92eea 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -26,6 +26,7 @@ use blockstack_lib::util_lib::db::{ }; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use libsigner::BlockProposal; +use rusqlite::functions::FunctionFlags; use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, }; @@ -160,20 +161,12 @@ pub struct BlockInfo { pub state: BlockState, /// Consumed processing time in milliseconds to validate this block pub validation_time_ms: Option, - /// Wether the block is a tenure change block - pub tenure_change: bool, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, } impl From for BlockInfo { fn from(value: BlockProposal) -> Self { - let tenure_change = value - .block - .txs - .first() - .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) - .unwrap_or(false); Self { block: value.block, burn_block_height: value.burn_height, @@ -187,11 +180,19 @@ impl From for BlockInfo { ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, validation_time_ms: None, - tenure_change, } } } impl BlockInfo { + /// Wether the block is a tenure change block or not + pub fn is_tenure_change(&self) -> bool { + self.block + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false) + } + /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. 
pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { @@ -343,8 +344,11 @@ CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks ((json_extract(block_in "#; static CREATE_INDEXES_5: &str = r#" -CREATE INDEX IF NOT EXISTS tenure_blocks_on_consensus_hash ON tenure_blocks(consensus_hash); -CREATE INDEX IF NOT EXISTS tenure_blocks_on_reward_cycle ON tenure_blocks(reward_cycle); +CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (consensus_hash, signed_over); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); +CREATE INDEX IF NOT EXISTS blocks_state ON blocks (consensus_hash, state); +CREATE INDEX IF NOT EXISTS blocks_state ON blocks (state); +CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks (signed_group); "#; static CREATE_SIGNER_STATE_TABLE: &str = " @@ -408,70 +412,68 @@ CREATE TABLE IF NOT EXISTS block_rejection_signer_addrs ( PRIMARY KEY (signer_addr) ) STRICT;"#; -// A lighter blocks table to aid in calculating tenure processing times -// Will only track the most recent tenure blocks -static CREATE_TENURE_BLOCKS_TABLE: &str = r#" -CREATE TABLE IF NOT EXISTS tenure_blocks ( +// Migration logic necessary to move blocks from the old blocks table to the new blocks table +static MIGRATE_BLOCKS_TABLE_2_BLOCKS_TABLE_3: &str = r#" +CREATE TABLE IF NOT EXISTS temp_blocks ( + -- The block sighash commits to all of the stacks and burnchain state as of its parent, + -- as well as the tenure itself so there's no need to include the reward cycle. Just + -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, + -- and stacks forks. 
signer_signature_hash TEXT NOT NULL PRIMARY KEY, reward_cycle INTEGER NOT NULL, + block_info TEXT NOT NULL, consensus_hash TEXT NOT NULL, - proposed_time INTEGER NOT NULL, - validation_time_ms INTEGER NOT NULL, + signed_over INTEGER NOT NULL, + broadcasted INTEGER, stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, + valid INTEGER, + state TEXT NOT NULL, + signed_group INTEGER, + signed_self INTEGER, + proposed_time INTEGER NOT NULL, + validation_time_ms INTEGER, tenure_change INTEGER NOT NULL -) STRICT;"#; +) STRICT; -// Migration logic necessary to move from blocks into tenure_blocks table -// It will only migrate globally accepted blocks that are less than 2 reward cycles old. -static MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS: &str = r#" -INSERT INTO tenure_blocks ( +INSERT INTO temp_blocks ( signer_signature_hash, reward_cycle, + block_info, consensus_hash, + signed_over, + broadcasted, + stacks_height, + burn_block_height, + valid, + state, + signed_group, + signed_self, proposed_time, validation_time_ms, - stacks_height, tenure_change ) SELECT signer_signature_hash, reward_cycle, + block_info, consensus_hash, - json_extract(block_info, '$.proposed_time') AS proposed_time, - COALESCE(json_extract(block_info, '$.validation_time_ms'), 0) AS validation_time_ms, + signed_over, + broadcasted, stacks_height, - json_extract(block_info, '$.tenure_change') AS tenure_change -FROM blocks -WHERE json_extract(block_info, '$.state') = 'GloballyAccepted' - AND reward_cycle + 2 > ( - SELECT MAX(reward_cycle) FROM blocks - );"#; - -static CREATE_TENURE_BLOCKS_ON_BLOCKS_TRIGGER: &str = r#" -CREATE TRIGGER insert_into_tenure_blocks -AFTER INSERT ON blocks -FOR EACH ROW -WHEN json_extract(NEW.block_info, '$.state') = 'GloballyAccepted' -BEGIN - INSERT OR REPLACE INTO tenure_blocks ( - signer_signature_hash, - reward_cycle, - consensus_hash, - proposed_time, - validation_time_ms, - stacks_height, - tenure_change - ) - VALUES ( - 
NEW.signer_signature_hash, - NEW.reward_cycle, - NEW.consensus_hash, - json_extract(NEW.block_info, '$.proposed_time'), - COALESCE(json_extract(NEW.block_info, '$.validation_time_ms'), 0), - NEW.stacks_height, - json_extract(NEW.block_info, '$.tenure_change') - ); -END;"#; + burn_block_height, + json_extract(block_info, '$.valid') AS valid, + json_extract(block_info, '$.state') AS state, + json_extract(block_info, '$.signed_group') AS signed_group, + json_extract(block_info, '$.signed_self') AS signed_self, + json_extract(block_info, '$.proposed_time') AS proposed_time, + json_extract(block_info, '$.validation_time_ms') AS validation_time_ms, + is_tenure_change(block_info) AS tenure_change +FROM blocks; + +DROP TABLE blocks; + +ALTER TABLE temp_blocks RENAME TO blocks;"#; static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, @@ -515,16 +517,15 @@ static SCHEMA_4: &[&str] = &[ ]; static SCHEMA_5: &[&str] = &[ - CREATE_TENURE_BLOCKS_TABLE, - CREATE_TENURE_BLOCKS_ON_BLOCKS_TRIGGER, - CREATE_INDEXES_4, - MIGRATE_GLOBALLY_ACCEPTED_BLOCKS_TO_TENURE_BLOCKS, - "INSERT INTO db_config (version) VALUES (4);", + MIGRATE_BLOCKS_TABLE_2_BLOCKS_TABLE_3, + CREATE_INDEXES_5, + "DELETE FROM db_config;", // Be extra careful. Make sure there is only ever one row in the table. + "INSERT INTO db_config (version) VALUES (5);", ]; impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 4; + pub const SCHEMA_VERSION: u32 = 5; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -612,7 +613,7 @@ impl SignerDb { /// Migrate from schema 4 to schema 5 fn schema_5_migration(tx: &Transaction) -> Result<(), DBError> { - if Self::get_schema_version(tx)? >= 4 { + if Self::get_schema_version(tx)? 
>= 5 { // no migration necessary return Ok(()); } @@ -628,6 +629,18 @@ impl SignerDb { /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). fn create_or_migrate(&mut self) -> Result<(), DBError> { + // Register helper function for determining if a block is a tenure change transaction + self.db.create_scalar_function( + "is_tenure_change", + 1, + FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC, + |ctx| { + let value = ctx.get::(0)?; + let block_info = serde_json::from_str::(&value) + .map_err(|e| SqliteError::UserFunctionError(e.into()))?; + Ok(block_info.is_tenure_change()) + }, + )?; let sql_tx = tx_begin_immediate(&mut self.db)?; loop { let version = Self::get_schema_version(&sql_tx)?; @@ -683,15 +696,11 @@ impl SignerDb { /// Fetch a block from the database using the block's /// `signer_signature_hash` - pub fn block_lookup( - &self, - reward_cycle: u64, - hash: &Sha512Trunc256Sum, - ) -> Result, DBError> { + pub fn block_lookup(&self, hash: &Sha512Trunc256Sum) -> Result, DBError> { let result: Option = query_row( &self.db, - "SELECT block_info FROM blocks WHERE reward_cycle = ? 
AND signer_signature_hash = ?", - params![u64_to_sql(reward_cycle)?, hash.to_string()], + "SELECT block_info FROM blocks WHERE signer_signature_hash = ?", + params![hash.to_string()], )?; try_deserialize(result) @@ -724,7 +733,7 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; let args = params![ tenure, &BlockState::GloballyAccepted.to_string(), @@ -740,7 +749,7 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC LIMIT 1"; let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; let result: Option = query_row(&self.db, query, args)?; @@ -749,7 +758,7 @@ impl SignerDb { /// Return the canonical tip -- the last globally accepted block. 
pub fn get_canonical_tip(&self) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE json_extract(block_info, '$.state') = ?1 ORDER BY stacks_height DESC, json_extract(block_info, '$.signed_group') DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE state = ?1 ORDER BY stacks_height DESC, signed_group DESC LIMIT 1"; let args = params![&BlockState::GloballyAccepted.to_string()]; let result: Option = query_row(&self.db, query, args)?; @@ -803,12 +812,12 @@ impl SignerDb { serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); - let signed_over = &block_info.signed_over; + let signed_over = block_info.signed_over; let vote = block_info .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); - let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; + let broadcasted = self.get_block_broadcasted(hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, @@ -818,19 +827,28 @@ impl SignerDb { "broadcasted" => ?broadcasted, "vote" => vote ); - self.db.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", params![ - u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, - signed_over, + self.db.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash, valid, state, signed_group, signed_self, proposed_time, validation_time_ms, tenure_change) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", params![ + u64_to_sql(block_info.reward_cycle)?, + 
u64_to_sql(block_info.burn_block_height)?, + hash.to_string(), + block_json, + &block_info.signed_over, &broadcasted, u64_to_sql(block_info.block.header.chain_length)?, block_info.block.header.consensus_hash.to_hex(), + &block_info.valid, &block_info.state.to_string(), + &block_info.signed_group, + &block_info.signed_self, + &block_info.proposed_time, + &block_info.validation_time_ms, + &block_info.is_tenure_change() ])?; Ok(()) } /// Determine if there are any unprocessed blocks pub fn has_unprocessed_blocks(&self, reward_cycle: u64) -> Result { - let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND json_extract(block_info, '$.state') = ?2 LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND state = ?2 LIMIT 1"; let result: Option = query_row( &self.db, query, @@ -908,15 +926,13 @@ impl SignerDb { /// Mark a block as having been broadcasted and therefore GloballyAccepted pub fn set_block_broadcasted( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; + let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = $2 WHERE signer_signature_hash = ?3"; let args = params![ u64_to_sql(ts)?, BlockState::GloballyAccepted.to_string(), - u64_to_sql(reward_cycle)?, block_sighash ]; @@ -928,12 +944,11 @@ impl SignerDb { /// Get the timestamp at which the block was broadcasted. 
pub fn get_block_broadcasted( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ) -> Result, DBError> { let qry = - "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; - let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE signer_signature_hash = ?"; + let args = params![block_sighash]; let Some(broadcasted): Option = query_row(&self.db, qry, args)? else { return Ok(None); @@ -947,11 +962,10 @@ impl SignerDb { /// Get the current state of a given block in the database pub fn get_block_state( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ) -> Result, DBError> { - let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1"; - let args = params![&u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "SELECT state FROM blocks WHERE signer_signature_hash = ?1 LIMIT 1"; + let args = params![block_sighash]; let state_opt: Option = query_row(&self.db, qry, args)?; let Some(state) = state_opt else { return Ok(None); @@ -961,24 +975,15 @@ impl SignerDb { )) } - /// Cleanup stale data by removing anything equal to or older than the provided reward cycle - pub fn cleanup_stale_data(&mut self, reward_cycle: u64) -> Result<(), DBError> { - self.db.execute( - "DELETE FROM tenure_blocks WHERE reward_cycle <= ?", - params![u64_to_sql(reward_cycle)?], - )?; - Ok(()) - } - /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (idenfitied by consensus_hash). 
fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> { - let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM tenure_blocks WHERE consensus_hash = ?1 ORDER BY stacks_height DESC"; + let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 ORDER BY stacks_height DESC"; let args = params![tenure]; let mut stmt = self.db.prepare(query)?; let rows = stmt.query_map(args, |row| { let tenure_change_block: u64 = row.get(0)?; let proposed_time: u64 = row.get(1)?; - let validation_time_ms: u64 = row.get(2)?; + let validation_time_ms: Option = row.get(2)?; Ok((tenure_change_block > 0, proposed_time, validation_time_ms)) })?; let mut tenure_processing_time_ms = 0_u64; @@ -986,7 +991,7 @@ impl SignerDb { for row in rows { let (tenure_change_block, proposed_time, validation_time_ms) = row?; tenure_processing_time_ms = - tenure_processing_time_ms.saturating_add(validation_time_ms); + tenure_processing_time_ms.saturating_add(validation_time_ms.unwrap_or(0)); tenure_start_time = Some(proposed_time); if tenure_change_block { break; @@ -1038,6 +1043,12 @@ mod tests { use std::path::PathBuf; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use blockstack_lib::chainstate::stacks::{ + StacksTransaction, TenureChangeCause, TenureChangePayload, TransactionAuth, + TransactionVersion, + }; + use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; + use clarity::util::hash::Hash160; use clarity::util::secp256k1::MessageSignature; use libsigner::BlockProposal; @@ -1080,39 +1091,42 @@ mod tests { fn test_basic_signer_db_with_path(db_path: impl AsRef) { let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let (block_info, block_proposal) = create_block(); - let reward_cycle = block_info.reward_cycle; - db.insert_block(&block_info) + let (block_info_1, block_proposal_1) = create_block_override(|b| { + 
b.block.header.consensus_hash = ConsensusHash([0x01; 20]); + }); + let (block_info_2, block_proposal_2) = create_block_override(|b| { + b.block.header.consensus_hash = ConsensusHash([0x02; 20]); + }); + db.insert_block(&block_info_1) .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal_1.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); + assert_eq!(BlockInfo::from(block_proposal_1.clone()), block_info); - // Test looking up a block from a different reward cycle + // Test looking up a block with an unknown hash let block_info = db - .block_lookup( - reward_cycle + 1, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal_2.block.header.signer_signature_hash()) .unwrap(); assert!(block_info.is_none()); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + let block_info = db + .block_lookup(&block_proposal_2.block.header.signer_signature_hash()) + .unwrap() + .expect("Unable to get block from db"); + + assert_eq!(BlockInfo::from(block_proposal_2.clone()), block_info); // test getting the block state let block_state = db - .get_block_state( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .get_block_state(&block_proposal_1.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block state from db"); - assert_eq!(block_state, BlockInfo::from(block_proposal.clone()).state); + assert_eq!(block_state, BlockInfo::from(block_proposal_1.clone()).state); } #[test] @@ -1132,15 +1146,11 @@ mod tests { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let (block_info, block_proposal) = create_block(); - let reward_cycle = block_info.reward_cycle; db.insert_block(&block_info) 
.expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); @@ -1166,10 +1176,7 @@ mod tests { .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); @@ -1341,48 +1348,32 @@ mod tests { .expect("Unable to insert block into db"); assert!(db - .get_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) + .get_block_broadcasted(&block_info_1.signer_signature_hash()) .unwrap() .is_none()); assert_eq!( - db.block_lookup( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .expect("Unable to get block from db") - .expect("Unable to get block from db") - .state, + db.block_lookup(&block_info_1.signer_signature_hash()) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, BlockState::Unprocessed ); - db.set_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash(), - 12345, - ) - .unwrap(); + db.set_block_broadcasted(&block_info_1.signer_signature_hash(), 12345) + .unwrap(); assert_eq!( - db.block_lookup( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .expect("Unable to get block from db") - .expect("Unable to get block from db") - .state, + db.block_lookup(&block_info_1.signer_signature_hash()) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, BlockState::GloballyAccepted ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); assert_eq!( - db.get_block_broadcasted( - block_info_1.reward_cycle, - 
&block_info_1.signer_signature_hash() - ) - .unwrap() - .unwrap(), + db.get_block_broadcasted(&block_info_1.signer_signature_hash()) + .unwrap() + .unwrap(), 12345 ); } @@ -1547,6 +1538,25 @@ mod tests { } fn generate_tenure_blocks() -> Vec { + let tenure_change_payload = TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header + prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), + burn_view_consensus_hash: ConsensusHash([0x04; 20]), + previous_tenure_end: StacksBlockId([0x03; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( + &StacksPrivateKey::new(), + )), + }; + let tenure_change_tx_payload = + TransactionPayload::TenureChange(tenure_change_payload.clone()); + let tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&StacksPrivateKey::new()).unwrap(), + tenure_change_tx_payload.clone(), + ); + let consensus_hash_1 = ConsensusHash([0x01; 20]); let consensus_hash_2 = ConsensusHash([0x02; 20]); let (mut block_info_1, _block_proposal) = create_block_override(|b| { @@ -1557,7 +1567,7 @@ mod tests { b.reward_cycle = 1; }); block_info_1.state = BlockState::GloballyAccepted; - block_info_1.tenure_change = true; + block_info_1.block.txs.push(tenure_change_tx.clone()); block_info_1.validation_time_ms = Some(1000); block_info_1.proposed_time = get_epoch_time_secs() + 500; @@ -1580,7 +1590,7 @@ mod tests { b.reward_cycle = 2; }); block_info_3.state = BlockState::GloballyAccepted; - block_info_3.tenure_change = true; + block_info_3.block.txs.push(tenure_change_tx); block_info_3.validation_time_ms = Some(5000); block_info_3.proposed_time = block_info_1.proposed_time + 10; @@ -1711,48 +1721,4 @@ mod tests { < block_infos[0].proposed_time ); } - - #[test] - fn cleanup() { - let db_path = tmp_db_path(); - let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); 
- let block_infos = generate_tenure_blocks(); - let consensus_hash_1 = block_infos[0].block.header.consensus_hash; - let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; - - for block_info in &block_infos { - db.insert_block(block_info).unwrap(); - } - - // Verify this does nothing. All data is still there. - db.cleanup_stale_data(block_infos[0].reward_cycle - 1) - .unwrap(); - - // Verify tenure consensus_hash_1 - let (start_time_1, processing_time_1) = db.get_tenure_times(&consensus_hash_1).unwrap(); - assert_eq!(start_time_1, block_infos[2].proposed_time); - assert_eq!(processing_time_1, 5000); - - // Verify tenure consensus_hash_2 - let (start_time_2, processing_time_2) = db.get_tenure_times(&consensus_hash_2).unwrap(); - assert_eq!(start_time_2, block_infos[4].proposed_time); - assert_eq!(processing_time_2, 20000); - - // Verify this deletes some data - db.cleanup_stale_data(block_infos[2].reward_cycle).unwrap(); - - // Verify tenure consensus_hash_1 AFTER deletion has updated correctly. - let (start_time_1_after, processing_time_1_after) = - db.get_tenure_times(&consensus_hash_1).unwrap(); - assert_ne!(start_time_1_after, start_time_1); - assert_ne!(processing_time_1_after, processing_time_1); - assert!(start_time_1_after < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); - assert_eq!(processing_time_1_after, 0); - - // Verify tenure consensus_hash_2 AFTER deletion has not updated. 
- let (start_time_2_after, processing_time_2_after) = - db.get_tenure_times(&consensus_hash_2).unwrap(); - assert_eq!(start_time_2_after, start_time_2); - assert_eq!(processing_time_2_after, processing_time_2); - } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 72fdf9c629..2037a25def 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -133,13 +133,13 @@ fn check_proposal_units() { setup_test_environment("check_proposal_units"); assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.last_sortition = None; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -155,7 +155,6 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1, false, ) .unwrap()); @@ -167,7 +166,6 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1, false, ) .unwrap()); @@ -264,7 +262,7 @@ fn reorg_timing_testing( config, } = MockServerClient::new(); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, false) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, false) }); header_clone.chain_length -= 1; let response = crate::client::tests::build_get_tenure_tip_response( @@ -301,16 +299,16 @@ fn check_proposal_invalid_status() { setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; 
assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -321,7 +319,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which may be a subset) assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -370,7 +368,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -380,7 +378,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -407,7 +405,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -418,7 +415,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -431,7 +427,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, 
&block_pk, - 1, false, ) .unwrap()); @@ -442,7 +437,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -534,7 +528,7 @@ fn check_proposal_refresh() { setup_test_environment("check_proposal_refresh"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); let MockServerClient { @@ -576,7 +570,7 @@ fn check_proposal_refresh() { view.cur_sortition.consensus_hash = ConsensusHash([128; 20]); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, true) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, true) }); crate::client::tests::write_response( server, diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 05174e258e..35d2b8205e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -258,14 +258,6 @@ impl SignerTrait for Signer { true }) } - - fn cleanup_stale_data(&mut self, current_reward_cycle: u64) { - // We currently delete only data older than 2 reward cycles ago. 
- let _ = self - .signer_db - .cleanup_stale_data(current_reward_cycle.saturating_sub(2)) - .inspect_err(|e| error!("self: Failed to cleanup stale signerdb data: {e:?}")); - } } impl From for Signer { @@ -355,7 +347,7 @@ impl Signer { let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); if let Some(block_info) = self .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) + .block_lookup(&signer_signature_hash) .expect("Failed to connect to signer DB") { let Some(block_response) = self.determine_response(&block_info) else { @@ -414,7 +406,6 @@ impl Signer { &mut self.signer_db, &block_proposal.block, miner_pubkey, - self.reward_cycle, true, ) { // Error validating block @@ -566,10 +557,7 @@ impl Signer { self.submitted_block_proposal = None; } // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { + let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { Ok(Some(block_info)) => { if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); @@ -639,10 +627,7 @@ impl Signer { { self.submitted_block_proposal = None; } - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { + let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { Ok(Some(block_info)) => { if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. 
Ignoring...", block_info.state); @@ -732,10 +717,7 @@ impl Signer { } let signature_sighash = block_proposal.block.header.signer_signature_hash(); // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signature_sighash) - { + let mut block_info = match self.signer_db.block_lookup(&signature_sighash) { Ok(Some(block_info)) => { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted @@ -822,7 +804,7 @@ impl Signer { let block_hash = &rejection.signer_signature_hash; let signature = &rejection.signature; - let mut block_info = match self.signer_db.block_lookup(self.reward_cycle, block_hash) { + let mut block_info = match self.signer_db.block_lookup(block_hash) { Ok(Some(block_info)) => { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted @@ -928,10 +910,7 @@ impl Signer { ); // Have we already processed this block? - match self - .signer_db - .get_block_state(self.reward_cycle, block_hash) - { + match self.signer_db.get_block_state(block_hash) { Ok(Some(state)) => { if state == BlockState::GloballyAccepted || state == BlockState::GloballyRejected { debug!("{self}: Received block signature for a block that is already marked as {}. Ignoring...", state); @@ -1013,14 +992,10 @@ impl Signer { } // have enough signatures to broadcast! 
- let Ok(Some(mut block_info)) = self - .signer_db - .block_lookup(self.reward_cycle, block_hash) - .map_err(|e| { - warn!("{self}: Failed to load block {block_hash}: {e:?})"); - e - }) - else { + let Ok(Some(mut block_info)) = self.signer_db.block_lookup(block_hash).map_err(|e| { + warn!("{self}: Failed to load block {block_hash}: {e:?})"); + e + }) else { warn!("{self}: No such block {block_hash}"); return; }; @@ -1093,11 +1068,10 @@ impl Signer { ); stacks_client.post_block_until_ok(self, &block); - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); } } @@ -1113,11 +1087,10 @@ impl Signer { "consensus_hash" => %block.header.consensus_hash ); - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); } return true; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 51623c053d..223a169577 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6365,13 +6365,6 @@ fn signer_chainstate() { ) .unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle( - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height, - ) - .unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), @@ -6393,7 +6386,6 @@ fn signer_chainstate() { &mut signer_db, prior_tenure_first, miner_pk, - 
reward_cycle, true, ) .unwrap(); @@ -6403,14 +6395,7 @@ fn signer_chainstate() { ); for block in prior_tenure_interims.iter() { let valid = sortitions_view - .check_proposal( - &signer_client, - &mut signer_db, - block, - miner_pk, - reward_cycle, - true, - ) + .check_proposal(&signer_client, &mut signer_db, block, miner_pk, true) .unwrap(); assert!( !valid, @@ -6445,7 +6430,6 @@ fn signer_chainstate() { &mut signer_db, &proposal.0, &proposal.1, - reward_cycle, true, ) .unwrap(); @@ -6468,12 +6452,6 @@ fn signer_chainstate() { ext: ExtraBlockInfo::None, state: BlockState::Unprocessed, validation_time_ms: None, - tenure_change: proposal - .0 - .txs - .first() - .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) - .unwrap_or(false), }) .unwrap(); @@ -6508,7 +6486,6 @@ fn signer_chainstate() { &mut signer_db, &proposal_interim.0, &proposal_interim.1, - reward_cycle, true, ) .unwrap(); @@ -6540,7 +6517,6 @@ fn signer_chainstate() { &mut signer_db, &proposal_interim.0, &proposal_interim.1, - reward_cycle, true, ) .unwrap(); @@ -6564,12 +6540,6 @@ fn signer_chainstate() { ext: ExtraBlockInfo::None, state: BlockState::GloballyAccepted, validation_time_ms: Some(1000), - tenure_change: proposal_interim - .0 - .txs - .first() - .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) - .unwrap_or(false), }) .unwrap(); @@ -6613,12 +6583,6 @@ fn signer_chainstate() { tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); - let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height; - let reward_cycle = burnchain - .block_height_to_reward_cycle(burn_block_height) - .unwrap(); assert!( !sortitions_view .check_proposal( @@ -6626,7 +6590,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6684,7 +6647,6 @@ fn signer_chainstate() { &mut signer_db, 
&sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6748,7 +6710,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6814,7 +6775,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -9667,8 +9627,6 @@ fn test_shadow_recovery() { let coord_channel = signer_test.running_nodes.coord_channel.clone(); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let burnchain = naka_conf.get_burnchain(); - // make another tenure next_block_and_mine_commit( btc_regtest_controller, From 6ae5408e555897858ece75ea7b8a4a1ce7267bf5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 26 Nov 2024 12:49:56 -0800 Subject: [PATCH 063/115] CRC: fix typo and use the block validation Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 2 +- stacks-signer/src/v0/signer.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 3676b92eea..1bd6529e14 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -184,7 +184,7 @@ impl From for BlockInfo { } } impl BlockInfo { - /// Wether the block is a tenure change block or not + /// Whether the block is a tenure change block or not pub fn is_tenure_change(&self) -> bool { self.block .txs diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 35d2b8205e..4d7134c5e0 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -583,10 +583,10 @@ impl Signer { block_info.signed_self.get_or_insert(get_epoch_time_secs()); } // Record the block validation time but do not consider stx transfers or boot contract calls - if block_validate_ok.cost.is_zero() { - 0 + block_info.validation_time_ms = if block_validate_ok.cost.is_zero() { + Some(0) } else { - block_validate_ok.validation_time_ms + 
Some(block_validate_ok.validation_time_ms) }; let signature = self From 47460a7615ac92977a2c667d53a0bb90030be82a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 26 Nov 2024 13:53:55 -0800 Subject: [PATCH 064/115] CRC: update test to account for state properly and fix broken query get_tenure_times Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1bd6529e14..6a80aa4116 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -977,8 +977,8 @@ impl SignerDb { /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (idenfitied by consensus_hash). fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> { - let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 ORDER BY stacks_height DESC"; - let args = params![tenure]; + let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC"; + let args = params![tenure, BlockState::GloballyAccepted.to_string()]; let mut stmt = self.db.prepare(query)?; let rows = stmt.query_map(args, |row| { let tenure_change_block: u64 = row.get(0)?; @@ -1564,7 +1564,6 @@ mod tests { b.block.header.miner_signature = MessageSignature([0x01; 65]); b.block.header.chain_length = 1; b.burn_height = 1; - b.reward_cycle = 1; }); block_info_1.state = BlockState::GloballyAccepted; block_info_1.block.txs.push(tenure_change_tx.clone()); @@ -1576,7 +1575,6 @@ mod tests { b.block.header.miner_signature = MessageSignature([0x02; 65]); b.block.header.chain_length = 2; b.burn_height = 2; - b.reward_cycle = 1; }); block_info_2.state = BlockState::GloballyAccepted; block_info_2.validation_time_ms = Some(2000); @@ -1587,19 +1585,18 @@ mod tests { 
b.block.header.miner_signature = MessageSignature([0x03; 65]); b.block.header.chain_length = 3; b.burn_height = 2; - b.reward_cycle = 2; }); block_info_3.state = BlockState::GloballyAccepted; block_info_3.block.txs.push(tenure_change_tx); block_info_3.validation_time_ms = Some(5000); block_info_3.proposed_time = block_info_1.proposed_time + 10; + // This should have no effect on the time calculations as it's not a globally accepted block let (mut block_info_4, _block_proposal) = create_block_override(|b| { b.block.header.consensus_hash = consensus_hash_1; b.block.header.miner_signature = MessageSignature([0x04; 65]); b.block.header.chain_length = 3; b.burn_height = 2; - b.reward_cycle = 2; }); block_info_4.state = BlockState::LocallyAccepted; block_info_4.validation_time_ms = Some(9000); @@ -1610,18 +1607,29 @@ b.block.header.miner_signature = MessageSignature([0x05; 65]); b.block.header.chain_length = 4; b.burn_height = 3; - b.reward_cycle = 3; }); block_info_5.state = BlockState::GloballyAccepted; block_info_5.validation_time_ms = Some(20000); block_info_5.proposed_time = block_info_1.proposed_time + 20; + // This should have no effect on the time calculations as it's not a globally accepted block + let (mut block_info_6, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x06; 65]); + b.block.header.chain_length = 5; + b.burn_height = 3; + }); + block_info_6.state = BlockState::LocallyAccepted; + block_info_6.validation_time_ms = Some(40000); + block_info_6.proposed_time = block_info_1.proposed_time + 25; + vec![ block_info_1, block_info_2, block_info_3, block_info_4, block_info_5, + block_info_6, ] } @@ -1650,6 +1658,7 @@ mod tests { assert_eq!(processing_time, 5000); db.insert_block(&block_infos[4]).unwrap(); + db.insert_block(&block_infos[5]).unwrap(); // Verify tenure consensus_hash_2 let (start_time, processing_time) = 
db.get_tenure_times(&consensus_hash_2).unwrap(); @@ -1701,6 +1710,7 @@ mod tests { ); db.insert_block(&block_infos[4]).unwrap(); + db.insert_block(&block_infos[5]).unwrap(); // Verify tenure consensus_hash_2 let timestamp_hash_2 = From 1011e218fc2fac33e3be86cff48bf029b350f4fa Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 26 Nov 2024 14:19:31 -0800 Subject: [PATCH 065/115] CRC: remove data migration specific scalar functions Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 6a80aa4116..48fc02ab2e 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -625,11 +625,10 @@ impl SignerDb { Ok(()) } - /// Either instantiate a new database, or migrate an existing one - /// If the detected version of the existing database is 0 (i.e., a pre-migration - /// logic DB, the DB will be dropped). - fn create_or_migrate(&mut self) -> Result<(), DBError> { + /// Register custom scalar functions used by the database + fn register_scalar_functions(&self) -> Result<(), DBError> { // Register helper function for determining if a block is a tenure change transaction + // Required only for data migration from Schema 4 to Schema 5 self.db.create_scalar_function( "is_tenure_change", 1, @@ -641,6 +640,20 @@ impl SignerDb { Ok(block_info.is_tenure_change()) }, )?; + Ok(()) + } + + /// Drop registered scalar functions used only for data migrations + fn remove_scalar_functions(&self) -> Result<(), DBError> { + self.db.remove_function("is_tenure_change", 1)?; + Ok(()) + } + + /// Either instantiate a new database, or migrate an existing one + /// If the detected version of the existing database is 0 (i.e., a pre-migration + /// logic DB, the DB will be dropped). 
+ fn create_or_migrate(&mut self) -> Result<(), DBError> { + self.register_scalar_functions()?; let sql_tx = tx_begin_immediate(&mut self.db)?; loop { let version = Self::get_schema_version(&sql_tx)?; @@ -658,6 +671,7 @@ impl SignerDb { } } sql_tx.commit()?; + self.remove_scalar_functions()?; Ok(()) } From cef0dd4d443b702dd19a354cdd7afe91b4f6a7eb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 27 Nov 2024 08:41:51 -0500 Subject: [PATCH 066/115] refactor: move synchronization details into `StackerDBListenerComms` --- .../src/nakamoto_node/signer_coordinator.rs | 206 +++++++----------- .../src/nakamoto_node/stackerdb_listener.rs | 103 ++++++++- 2 files changed, 175 insertions(+), 134 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1e0e7694fd..70c9aab190 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,12 +13,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::BTreeMap; use std::sync::atomic::AtomicBool; -use std::sync::{Arc, Condvar, Mutex}; +use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; -use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; @@ -31,34 +29,18 @@ use stacks::chainstate::stacks::Error as ChainstateError; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; -use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; +use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; +use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -use crate::nakamoto_node::stackerdb_listener::{ - BlockStatus, StackerDBListener, TimestampInfo, EVENT_RECEIVER_POLL, -}; +use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; use crate::neon::Counters; use crate::Config; -/// Helper function to determine if signer threshold has been reached for a block -fn is_threshold_reached( - status: Option<&BlockStatus>, - weight_threshold: u32, - total_weight: u32, -) -> bool { - match status { - Some(status) => { - status.total_weight_signed >= weight_threshold - || status.total_reject_weight.saturating_add(weight_threshold) > total_weight - } - None => true, - } -} - /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. 
pub struct SignerCoordinator { @@ -72,15 +54,8 @@ pub struct SignerCoordinator { total_weight: u32, /// The weight threshold for block approval weight_threshold: u32, - /// Tracks signatures for blocks - /// - key: Sha512Trunc256Sum (signer signature hash) - /// - value: BlockStatus - blocks: Arc<(Mutex>, Condvar)>, - /// Tracks the timestamps from signers to decide when they should be - /// willing to accept time-based tenure extensions - /// - key: StacksPublicKey - /// - value: TimestampInfo - signer_idle_timestamps: Arc>>, + /// Interface to the StackerDB listener thread's data + stackerdb_comms: StackerDBListenerComms, /// Keep running flag for the signer DB listener thread keep_running: Arc, /// Handle for the signer DB listener thread @@ -125,8 +100,7 @@ impl SignerCoordinator { miners_session, total_weight: listener.total_weight, weight_threshold: listener.weight_threshold, - blocks: listener.blocks.clone(), - signer_idle_timestamps: listener.signer_idle_timestamps.clone(), + stackerdb_comms: listener.get_comms(), keep_running, listener_thread: None, }; @@ -226,18 +200,7 @@ impl SignerCoordinator { election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { // Add this block to the block status map. - // Create a scope to drop the lock on the block status map. 
- { - let (lock, _cvar) = &*self.blocks; - let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - let block_status = BlockStatus { - responded_signers: HashSet::new(), - gathered_signatures: BTreeMap::new(), - total_weight_signed: 0, - total_reject_weight: 0, - }; - blocks.insert(block.header.signer_signature_hash(), block_status); - } + self.stackerdb_comms.insert_block(&block.header); let reward_cycle_id = burnchain .block_height_to_reward_cycle(burn_tip.block_height) @@ -307,79 +270,74 @@ impl SignerCoordinator { burn_tip: &BlockSnapshot, counters: &Counters, ) -> Result, NakamotoNodeError> { - let (lock, cvar) = &*self.blocks; - let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - loop { - let (guard, timeout_result) = cvar - .wait_timeout_while(blocks, EVENT_RECEIVER_POLL, |map| { - !is_threshold_reached( - map.get(block_signer_sighash), - self.weight_threshold, - self.total_weight, - ) - }) - .expect("FATAL: failed to wait on block status cond var"); - blocks = guard; + let block_status = match self.stackerdb_comms.wait_for_block_status( + block_signer_sighash, + EVENT_RECEIVER_POLL, + |status| { + status.total_weight_signed < self.weight_threshold + && status + .total_reject_weight + .saturating_add(self.weight_threshold) + <= self.total_weight + }, + )? { + Some(status) => status, + None => { + // If we just received a timeout, we should check if the burnchain + // tip has changed or if we received this signed block already in + // the staging db. + debug!("SignerCoordinator: Timeout waiting for block signatures"); - // If we just received a timeout, we should check if the burnchain - // tip has changed or if we received this signed block already in - // the staging db. - if timeout_result.timed_out() { - // Look in the nakamoto staging db -- a block can only get stored there - // if it has enough signing weight to clear the threshold. 
- if let Ok(Some((stored_block, _sz))) = chain_state - .nakamoto_blocks_db() - .get_nakamoto_block(block_id) - .map_err(|e| { - warn!( - "Failed to query chainstate for block: {e:?}"; - "block_id" => %block_id, - "block_signer_sighash" => %block_signer_sighash, - ); - e - }) - { - debug!("SignCoordinator: Found signatures in relayed block"); - counters.bump_naka_signer_pushed_blocks(); - return Ok(stored_block.header.signer_signature); - } + // Look in the nakamoto staging db -- a block can only get stored there + // if it has enough signing weight to clear the threshold. + if let Ok(Some((stored_block, _sz))) = chain_state + .nakamoto_blocks_db() + .get_nakamoto_block(block_id) + .map_err(|e| { + warn!( + "Failed to query chainstate for block: {e:?}"; + "block_id" => %block_id, + "block_signer_sighash" => %block_signer_sighash, + ); + e + }) + { + debug!("SignCoordinator: Found signatures in relayed block"); + counters.bump_naka_signer_pushed_blocks(); + return Ok(stored_block.header.signer_signature); + } - if Self::check_burn_tip_changed(sortdb, burn_tip) { - debug!("SignCoordinator: Exiting due to new burnchain tip"); - return Err(NakamotoNodeError::BurnchainTipChanged); - } - } - // Else, we have received enough signatures to proceed - else { - let block_status = blocks.get(block_signer_sighash).ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Block unexpectedly missing from map".into(), - ) - })?; + if Self::check_burn_tip_changed(sortdb, burn_tip) { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); + } - if block_status - .total_reject_weight - .saturating_add(self.weight_threshold) - > self.total_weight - { - info!( - "{}/{} signers vote to reject block", - block_status.total_reject_weight, self.total_weight; - "block_signer_sighash" => %block_signer_sighash, - ); - counters.bump_naka_rejected_blocks(); - return Err(NakamotoNodeError::SignersRejected); - } else if 
block_status.total_weight_signed >= self.weight_threshold { - info!("Received enough signatures, block accepted"; - "block_signer_sighash" => %block_signer_sighash, - ); - return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unblocked without reaching the threshold".into(), - )); + continue; } + }; + + if block_status + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + info!( + "{}/{} signers vote to reject block", + block_status.total_reject_weight, self.total_weight; + "block_signer_sighash" => %block_signer_sighash, + ); + counters.bump_naka_rejected_blocks(); + return Err(NakamotoNodeError::SignersRejected); + } else if block_status.total_weight_signed >= self.weight_threshold { + info!("Received enough signatures, block accepted"; + "block_signer_sighash" => %block_signer_sighash, + ); + return Ok(block_status.gathered_signatures.values().cloned().collect()); + } else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Unblocked without reaching the threshold".into(), + )); } } } @@ -387,26 +345,8 @@ impl SignerCoordinator { /// Get the timestamp at which at least 70% of the signing power should be /// willing to accept a time-based tenure extension. 
pub fn get_tenure_extend_timestamp(&self) -> u64 { - let signer_idle_timestamps = self - .signer_idle_timestamps - .lock() - .expect("FATAL: failed to lock signer idle timestamps"); - debug!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); - let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); - idle_timestamps.sort_by_key(|info| info.timestamp); - let mut weight_sum = 0; - for info in idle_timestamps { - weight_sum += info.weight; - if weight_sum >= self.weight_threshold { - info!("SignerCoordinator: 70% threshold reached"); - return info.timestamp; - } - } - - // We don't have enough information to reach a 70% threshold at any - // time, so return u64::MAX to indicate that we should not extend the - // tenure. - u64::MAX + self.stackerdb_comms + .get_tenure_extend_timestamp(self.weight_threshold) } /// Check if the tenure needs to change diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 4f213abf6c..a5036efa6e 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -46,7 +46,7 @@ pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mute pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); #[derive(Debug, Clone)] -pub(crate) struct BlockStatus { +pub struct BlockStatus { pub responded_signers: HashSet, pub gathered_signatures: BTreeMap, pub total_weight_signed: u32, @@ -89,6 +89,19 @@ pub struct StackerDBListener { pub(crate) signer_idle_timestamps: Arc>>, } +/// Interface for other threads to retrieve info from the StackerDBListener +pub struct StackerDBListenerComms { + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: BlockStatus + blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept 
time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + signer_idle_timestamps: Arc>>, +} + impl StackerDBListener { pub fn new( stackerdb_channel: Arc>, @@ -153,6 +166,13 @@ impl StackerDBListener { }) } + pub fn get_comms(&self) -> StackerDBListenerComms { + StackerDBListenerComms { + blocks: self.blocks.clone(), + signer_idle_timestamps: self.signer_idle_timestamps.clone(), + } + } + /// Run the StackerDB listener. pub fn run(&mut self) -> Result<(), NakamotoNodeError> { info!("StackerDBListener: Starting up"); @@ -442,3 +462,84 @@ impl Drop for StackerDBListener { )); } } + +impl StackerDBListenerComms { + /// Insert a block into the block status map with initial values. + pub fn insert_block(&self, block: &NakamotoBlockHeader) { + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + let block_status = BlockStatus { + responded_signers: HashSet::new(), + gathered_signatures: BTreeMap::new(), + total_weight_signed: 0, + total_reject_weight: 0, + }; + blocks.insert(block.signer_signature_hash(), block_status); + } + + /// Get the status for `block` from the Stacker DB listener. + /// If the block is not found in the map, return an error. + /// If the block is found, call `condition` to check if the block status + /// satisfies the condition. + /// If the condition is satisfied, return the block status as + /// `Ok(Some(status))`. + /// If the condition is not satisfied, wait for it to be satisfied. + /// If the timeout is reached, return `Ok(None)`. 
+ pub fn wait_for_block_status( + &self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + condition: F, + ) -> Result, NakamotoNodeError> + where + F: Fn(&BlockStatus) -> bool, + { + let (lock, cvar) = &*self.blocks; + let blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let (guard, timeout_result) = cvar + .wait_timeout_while(blocks, timeout, |map| { + let Some(status) = map.get(block_signer_sighash) else { + return true; + }; + condition(status) + }) + .expect("FATAL: failed to wait on block status cond var"); + + // If we timed out, return None + if timeout_result.timed_out() { + return Ok(None); + } + match guard.get(block_signer_sighash) { + Some(status) => Ok(Some(status.clone())), + None => Err(NakamotoNodeError::SigningCoordinatorFailure( + "Block not found in status map".into(), + )), + } + } + + /// Get the timestamp at which at least 70% of the signing power should be + /// willing to accept a time-based tenure extension. + pub fn get_tenure_extend_timestamp(&self, weight_threshold: u32) -> u64 { + let signer_idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock signer idle timestamps"); + debug!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); + let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); + idle_timestamps.sort_by_key(|info| info.timestamp); + let mut weight_sum = 0; + for info in idle_timestamps { + weight_sum += info.weight; + if weight_sum >= weight_threshold { + info!("SignerCoordinator: 70% threshold reached"); + return info.timestamp; + } + } + + // We don't have enough information to reach a 70% threshold at any + // time, so return u64::MAX to indicate that we should not extend the + // tenure. 
+ u64::MAX + } +} From efe2ab4ea3328e1371567af2c8e4a51ed56b93bd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 27 Nov 2024 09:25:00 -0800 Subject: [PATCH 067/115] Fix signer db set_block_broadcasted typo and update test to catch Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 48fc02ab2e..49ef6aabb2 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -943,7 +943,7 @@ impl SignerDb { block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = $2 WHERE signer_signature_hash = ?3"; + let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = ?2 WHERE signer_signature_hash = ?3"; let args = params![ u64_to_sql(ts)?, BlockState::GloballyAccepted.to_string(), @@ -995,10 +995,10 @@ impl SignerDb { let args = params![tenure, BlockState::GloballyAccepted.to_string()]; let mut stmt = self.db.prepare(query)?; let rows = stmt.query_map(args, |row| { - let tenure_change_block: u64 = row.get(0)?; + let tenure_change_block: bool = row.get(0)?; let proposed_time: u64 = row.get(1)?; let validation_time_ms: Option = row.get(2)?; - Ok((tenure_change_block > 0, proposed_time, validation_time_ms)) + Ok((tenure_change_block, proposed_time, validation_time_ms)) })?; let mut tenure_processing_time_ms = 0_u64; let mut tenure_start_time = None; @@ -1372,6 +1372,10 @@ mod tests { .state, BlockState::Unprocessed ); + assert!(db + .get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) + .unwrap() + .is_none()); db.set_block_broadcasted(&block_info_1.signer_signature_hash(), 12345) .unwrap(); assert_eq!( @@ -1381,6 +1385,13 @@ mod tests { .state, BlockState::GloballyAccepted ); + assert_eq!( + 
db.get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) + .unwrap() + .unwrap() + .signer_signature_hash(), + block_info_1.block.header.signer_signature_hash() + ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); From 96f1a59a3bf7e2d6652b9928cbd2d47c9595474c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 27 Nov 2024 10:31:25 -0800 Subject: [PATCH 068/115] Increase idle timeout and make sure that the blocks are globally accepted before assuming same results Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 53 +++++++++++++++------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 52d8dd4b05..2a319cc1e0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2654,7 +2654,7 @@ fn stx_transfers_dont_effect_idle_timeout() { let send_fee = 180; let num_txs = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let idle_timeout = Duration::from_secs(30); + let idle_timeout = Duration::from_secs(60); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * num_txs)], @@ -2674,7 +2674,10 @@ fn stx_transfers_dont_effect_idle_timeout() { // Add a delay to the block validation process TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(5); - let info_before = get_chain_info(&signer_test.running_nodes.conf); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined @@ -2692,7 +2695,10 @@ fn stx_transfers_dont_effect_idle_timeout() { .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - let info = get_chain_info(&signer_test.running_nodes.conf); + let info = signer_test + .stacks_client 
+ .get_peer_info() + .expect("Failed to get peer info"); Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for first nakamoto block to be mined"); @@ -2742,20 +2748,21 @@ fn stx_transfers_dont_effect_idle_timeout() { accepted.clone() }; - let latest_acceptance = get_last_block_response(slot_id); - assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); + let initial_acceptance = get_last_block_response(slot_id); + assert_eq!(initial_acceptance.signer_signature_hash, last_block_hash); info!( "---- Last idle timeout: {} ----", - latest_acceptance.response_data.tenure_extend_timestamp + initial_acceptance.response_data.tenure_extend_timestamp ); // Now, mine a few nakamoto blocks with just transfers let mut sender_nonce = 0; - let mut last_acceptance = latest_acceptance; - + // Note that this response was BEFORE the block was globally accepted. it will report a guestimated idle time + let initial_acceptance = initial_acceptance; + let mut first_global_acceptance = None; for i in 0..num_txs { info!("---- Mining interim block {} ----", i + 1); let transfer_tx = make_stacks_transfer( @@ -2768,10 +2775,15 @@ fn stx_transfers_dont_effect_idle_timeout() { ); submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; - - let info_before = get_chain_info(&signer_test.running_nodes.conf); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); wait_for(30, || { - let info = get_chain_info(&signer_test.running_nodes.conf); + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); Ok(info.stacks_tip_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for nakamoto block to be mined"); @@ -2780,13 +2792,20 @@ fn stx_transfers_dont_effect_idle_timeout() { let last_block_hash = get_last_block_hash(); assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); - // Because the block 
only contains transfers, the idle timeout should not have changed - assert_eq!( - last_acceptance.response_data.tenure_extend_timestamp, - latest_acceptance.response_data.tenure_extend_timestamp - ); - last_acceptance = latest_acceptance; + if first_global_acceptance.is_none() { + assert!(latest_acceptance.response_data.tenure_extend_timestamp < initial_acceptance.response_data.tenure_extend_timestamp, "First global acceptance should be less than initial guesstimated acceptance as its based on block proposal time rather than epoch time at time of response."); + first_global_acceptance = Some(latest_acceptance); + } else { + // Because the block only contains transfers, the idle timeout should not have changed between blocks post the tenure change + assert_eq!( + latest_acceptance.response_data.tenure_extend_timestamp, + first_global_acceptance + .as_ref() + .map(|acceptance| acceptance.response_data.tenure_extend_timestamp) + .unwrap() + ); + }; } info!("---- Waiting for a tenure extend ----"); From 1b78589d0bfb225165f71801ba010617c6991f51 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 27 Nov 2024 20:30:32 -0800 Subject: [PATCH 069/115] Fix CREATE_INDEXES_5 Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 49ef6aabb2..1e5b4eca55 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -345,8 +345,7 @@ CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks ((json_extract(block_in static CREATE_INDEXES_5: &str = r#" CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (consensus_hash, signed_over); -CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); -CREATE INDEX IF NOT EXISTS blocks_state ON blocks (consensus_hash, state); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash_state ON blocks (consensus_hash, state); CREATE INDEX IF NOT EXISTS blocks_state 
ON blocks (state); CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks (signed_group); "#; From ae9383a47c5ea113cb6cfc14725f10719e2f5461 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 10:56:11 -0500 Subject: [PATCH 070/115] chore: downgrade log to debug --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index a5036efa6e..853abf99c1 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -532,7 +532,9 @@ impl StackerDBListenerComms { for info in idle_timestamps { weight_sum += info.weight; if weight_sum >= weight_threshold { - info!("SignerCoordinator: 70% threshold reached"); + debug!("SignerCoordinator: 70% threshold reached for tenure extension timestamp"; + "timestamp" => info.timestamp, + ); return info.timestamp; } } From b6076b995df1ae943187c9b9f11f8ed33c72e9d9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:00:49 -0500 Subject: [PATCH 071/115] fix: expose locally-overriden hint-replicas data for each stackerdb replica --- stackslib/src/net/connection.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 4eeec0daaf..0e58adb36e 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::sync::mpsc::{ @@ -24,7 +24,7 @@ use std::time::Duration; use std::{io, net}; use clarity::vm::costs::ExecutionCost; -use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; +use clarity::vm::types::{QualifiedContractIdentifier, BOUND_VALUE_SERIALIZATION_HEX}; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::to_hex; @@ -44,7 +44,8 @@ use crate::net::neighbors::{ WALK_SEED_PROBABILITY, WALK_STATE_TIMEOUT, }; use crate::net::{ - Error as net_error, MessageSequence, Preamble, ProtocolFamily, RelayData, StacksHttp, StacksP2P, + Error as net_error, MessageSequence, NeighborAddress, Preamble, ProtocolFamily, RelayData, + StacksHttp, StacksP2P, }; /// Receiver notification handle. @@ -433,6 +434,8 @@ pub struct ConnectionOptions { pub nakamoto_unconfirmed_downloader_interval_ms: u128, /// The authorization token to enable privileged RPC endpoints pub auth_token: Option, + /// StackerDB replicas to talk to for a particular smart contract + pub stackerdb_hint_replicas: HashMap>, // fault injection /// Disable neighbor walk and discovery @@ -565,6 +568,7 @@ impl std::default::Default for ConnectionOptions { nakamoto_inv_sync_burst_interval_ms: 1_000, // wait 1 second after a sortition before running inventory sync nakamoto_unconfirmed_downloader_interval_ms: 5_000, // run unconfirmed downloader once every 5 seconds auth_token: None, + stackerdb_hint_replicas: HashMap::new(), // no faults on by default disable_neighbor_walk: false, From d4a0c5c65750c51e4fb4171307d3f3ac05f42efa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:01:16 -0500 Subject: [PATCH 072/115] chore: API sync --- stackslib/src/net/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs 
index 89e56fe29c..4af4d2a397 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3141,7 +3141,7 @@ pub mod test { &mut stacks_node.chainstate, &sortdb, old_stackerdb_configs, - config.connection_opts.num_neighbors, + &config.connection_opts, ) .expect("Failed to refresh stackerdb configs"); From 28134bf535560bcebbaae4b14a4cf7061c315f7d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:01:29 -0500 Subject: [PATCH 073/115] chore: API sync; only count authenticated outbound nodes --- stackslib/src/net/p2p.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 71ca82f8bf..13f7ad7fac 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -841,6 +841,9 @@ impl PeerNetwork { ) -> usize { let mut count = 0; for (_, convo) in self.peers.iter() { + if !convo.is_authenticated() { + continue; + } if !convo.is_outbound() { continue; } @@ -4158,7 +4161,7 @@ impl PeerNetwork { chainstate, sortdb, stacker_db_configs, - self.connection_opts.num_neighbors, + &self.connection_opts, )?; Ok(()) } From 19e3cb58b4f957ee12385fbddbd4e4565490d226 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:01:53 -0500 Subject: [PATCH 074/115] chore: override stackerdb smart contract hint-replicas with local hint-replicas if given --- stackslib/src/net/stackerdb/config.rs | 181 ++++++++++++++------------ 1 file changed, 99 insertions(+), 82 deletions(-) diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 97f8214913..fbc1f28245 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -285,6 +285,94 @@ impl StackerDBConfig { Ok(ret) } + /// Evaluate contract-given hint-replicas + fn eval_hint_replicas( + contract_id: &QualifiedContractIdentifier, + hint_replicas_list: Vec, + ) -> Result, NetError> { + let mut hint_replicas = vec![]; + for hint_replica_value in 
hint_replicas_list.into_iter() { + let hint_replica_data = hint_replica_value.expect_tuple()?; + + let addr_byte_list = hint_replica_data + .get("addr") + .expect("FATAL: missing 'addr'") + .clone() + .expect_list()?; + let port = hint_replica_data + .get("port") + .expect("FATAL: missing 'port'") + .clone() + .expect_u128()?; + let pubkey_hash_bytes = hint_replica_data + .get("public-key-hash") + .expect("FATAL: missing 'public-key-hash") + .clone() + .expect_buff_padded(20, 0)?; + + let mut addr_bytes = vec![]; + for byte_val in addr_byte_list.into_iter() { + let byte = byte_val.expect_u128()?; + if byte > (u8::MAX as u128) { + let reason = format!( + "Contract {} stipulates an addr byte above u8::MAX", + contract_id + ); + warn!("{}", &reason); + return Err(NetError::InvalidStackerDBContract( + contract_id.clone(), + reason, + )); + } + addr_bytes.push(byte as u8); + } + if addr_bytes.len() != 16 { + let reason = format!( + "Contract {} did not stipulate a full 16-octet IP address", + contract_id + ); + warn!("{}", &reason); + return Err(NetError::InvalidStackerDBContract( + contract_id.clone(), + reason, + )); + } + + if port < 1024 || port > u128::from(u16::MAX - 1) { + let reason = format!( + "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", + contract_id + ); + warn!("{}", &reason); + return Err(NetError::InvalidStackerDBContract( + contract_id.clone(), + reason, + )); + } + // NOTE: port is now known to be in range [1024, 65535] + + let mut pubkey_hash_slice = [0u8; 20]; + pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); + + let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); + if peer_addr.is_in_private_range() { + debug!( + "Ignoring private IP address '{}' in hint-replicas", + &peer_addr.to_socketaddr(port as u16) + ); + continue; + } + + let naddr = NeighborAddress { + addrbytes: peer_addr, + port: port as u16, + public_key_hash: Hash160(pubkey_hash_slice), + }; + 
hint_replicas.push(naddr); + } + Ok(hint_replicas) + } + /// Evaluate the contract to get its config fn eval_config( chainstate: &mut StacksChainState, @@ -293,6 +381,7 @@ impl StackerDBConfig { tip: &StacksBlockId, signers: Vec<(StacksAddress, u32)>, local_max_neighbors: u64, + local_hint_replicas: Option>, ) -> Result { let value = chainstate.eval_read_only(burn_dbconn, tip, contract_id, "(stackerdb-get-config)")?; @@ -394,91 +483,17 @@ impl StackerDBConfig { max_neighbors = u128::from(local_max_neighbors); } - let hint_replicas_list = config_tuple - .get("hint-replicas") - .expect("FATAL: missing 'hint-replicas'") - .clone() - .expect_list()?; - let mut hint_replicas = vec![]; - for hint_replica_value in hint_replicas_list.into_iter() { - let hint_replica_data = hint_replica_value.expect_tuple()?; - - let addr_byte_list = hint_replica_data - .get("addr") - .expect("FATAL: missing 'addr'") + let hint_replicas = if let Some(replicas) = local_hint_replicas { + replicas.clone() + } else { + let hint_replicas_list = config_tuple + .get("hint-replicas") + .expect("FATAL: missing 'hint-replicas'") .clone() .expect_list()?; - let port = hint_replica_data - .get("port") - .expect("FATAL: missing 'port'") - .clone() - .expect_u128()?; - let pubkey_hash_bytes = hint_replica_data - .get("public-key-hash") - .expect("FATAL: missing 'public-key-hash") - .clone() - .expect_buff_padded(20, 0)?; - let mut addr_bytes = vec![]; - for byte_val in addr_byte_list.into_iter() { - let byte = byte_val.expect_u128()?; - if byte > (u8::MAX as u128) { - let reason = format!( - "Contract {} stipulates an addr byte above u8::MAX", - contract_id - ); - warn!("{}", &reason); - return Err(NetError::InvalidStackerDBContract( - contract_id.clone(), - reason, - )); - } - addr_bytes.push(byte as u8); - } - if addr_bytes.len() != 16 { - let reason = format!( - "Contract {} did not stipulate a full 16-octet IP address", - contract_id - ); - warn!("{}", &reason); - return 
Err(NetError::InvalidStackerDBContract( - contract_id.clone(), - reason, - )); - } - - if port < 1024 || port > u128::from(u16::MAX - 1) { - let reason = format!( - "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", - contract_id - ); - warn!("{}", &reason); - return Err(NetError::InvalidStackerDBContract( - contract_id.clone(), - reason, - )); - } - // NOTE: port is now known to be in range [1024, 65535] - - let mut pubkey_hash_slice = [0u8; 20]; - pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); - - let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); - if peer_addr.is_in_private_range() { - debug!( - "Ignoring private IP address '{}' in hint-replicas", - &peer_addr.to_socketaddr(port as u16) - ); - continue; - } - - let naddr = NeighborAddress { - addrbytes: peer_addr, - port: port as u16, - public_key_hash: Hash160(pubkey_hash_slice), - }; - hint_replicas.push(naddr); - } + Self::eval_hint_replicas(contract_id, hint_replicas_list)? + }; Ok(StackerDBConfig { chunk_size: chunk_size as u64, @@ -497,6 +512,7 @@ impl StackerDBConfig { sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, max_neighbors: u64, + local_hint_replicas: Option>, ) -> Result { let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? 
@@ -578,6 +594,7 @@ impl StackerDBConfig { &chain_tip_hash, signers, max_neighbors, + local_hint_replicas, )?; Ok(config) } From 6794642a7e55ea2027dcdd106e91932944446abe Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:02:17 -0500 Subject: [PATCH 075/115] chore: API sync --- stackslib/src/net/stackerdb/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index bbbec21290..9d1b25af51 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -133,6 +133,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::MINERS_NAME; use crate::chainstate::stacks::db::StacksChainState; +use crate::net::connection::ConnectionOptions; use crate::net::neighbors::NeighborComms; use crate::net::p2p::PeerNetwork; use crate::net::{ @@ -285,8 +286,9 @@ impl StackerDBs { chainstate: &mut StacksChainState, sortdb: &SortitionDB, stacker_db_configs: HashMap, - num_neighbors: u64, + connection_opts: &ConnectionOptions, ) -> Result, net_error> { + let num_neighbors = connection_opts.num_neighbors; let existing_contract_ids = self.get_stackerdb_contract_ids()?; let mut new_stackerdb_configs = HashMap::new(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; @@ -314,6 +316,10 @@ impl StackerDBs { &sortdb, &stackerdb_contract_id, num_neighbors, + connection_opts + .stackerdb_hint_replicas + .get(&stackerdb_contract_id) + .cloned(), ) .unwrap_or_else(|e| { if matches!(e, net_error::NoSuchStackerDB(_)) && stackerdb_contract_id.is_boot() From 8a44ece596b2dfbfe6d8382c4628b139d2580835 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:02:33 -0500 Subject: [PATCH 076/115] chore: test local hint-replicas override --- stackslib/src/net/stackerdb/tests/config.rs | 121 +++++++++++++++++++- 1 file changed, 120 insertions(+), 1 
deletion(-) diff --git a/stackslib/src/net/stackerdb/tests/config.rs b/stackslib/src/net/stackerdb/tests/config.rs index a075d7b974..cff4ca1059 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ -528,7 +528,7 @@ fn test_valid_and_invalid_stackerdb_configs() { ContractName::try_from(format!("test-{}", i)).unwrap(), ); peer.with_db_state(|sortdb, chainstate, _, _| { - match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32) { + match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32, None) { Ok(config) => { let expected = result .clone() @@ -551,3 +551,122 @@ fn test_valid_and_invalid_stackerdb_configs() { .unwrap(); } } + +#[test] +fn test_hint_replicas_override() { + let AUTO_UNLOCK_HEIGHT = 12; + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants.reward_cycle_length = 5; + burnchain.pox_constants.prepare_length = 2; + burnchain.pox_constants.anchor_threshold = 1; + burnchain.pox_constants.v1_unlock_height = AUTO_UNLOCK_HEIGHT + EMPTY_SORTITIONS; + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let epochs = StacksEpoch::all(0, 0, EMPTY_SORTITIONS as u64 + 10); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "test_valid_and_invalid_stackerdb_configs", + Some(epochs.clone()), + Some(&observer), + ); + + let contract_owner = keys.pop().unwrap(); + let contract_id = QualifiedContractIdentifier::new( + StacksAddress::from_public_keys( + 26, + &AddressHashMode::SerializeP2PKH, + 
1, + &vec![StacksPublicKey::from_private(&contract_owner)], + ) + .unwrap() + .into(), + ContractName::try_from("test-0").unwrap(), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let override_replica = NeighborAddress { + addrbytes: PeerAddress([2u8; 16]), + port: 123, + public_key_hash: Hash160([3u8; 20]), + }; + + let mut coinbase_nonce = 0; + let mut txs = vec![]; + + let config_contract = r#" + (define-public (stackerdb-get-signer-slots) + (ok (list { signer: 'ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B, num-slots: u3 }))) + + (define-public (stackerdb-get-config) + (ok { + chunk-size: u123, + write-freq: u4, + max-writes: u56, + max-neighbors: u7, + hint-replicas: (list + { + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), + port: u8901, + public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 + }) + })) + "#; + + let expected_config = StackerDBConfig { + chunk_size: 123, + signers: vec![( + StacksAddress { + version: 26, + bytes: Hash160::from_hex("b4fdae98b64b9cd6c9436f3b965558966afe890b").unwrap(), + }, + 3, + )], + write_freq: 4, + max_writes: 56, + hint_replicas: vec![override_replica.clone()], + max_neighbors: 7, + }; + + let tx = make_smart_contract("test-0", &config_contract, &contract_owner, 0, 10000); + txs.push(tx); + + peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + peer.with_db_state(|sortdb, chainstate, _, _| { + match StackerDBConfig::from_smart_contract( + chainstate, + sortdb, + &contract_id, + 32, + Some(vec![override_replica.clone()]), + ) { + Ok(config) => { + assert_eq!(config, expected_config); + } + Err(e) => { + panic!("Unexpected error: {:?}", &e); + } + } + Ok(()) + }) + .unwrap(); +} From 979fefe9d20d0765d34e70ef7101288194ef61b2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Dec 2024 17:02:46 -0500 Subject: [PATCH 077/115] chore: add config setting for stackerdb_hint_replicas; also make private_neighbors false by default --- 
testnet/stacks-node/src/config.rs | 27 +++++++++++++++++++++++++-- testnet/stacks-node/src/neon_node.rs | 2 +- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 785ce057e5..4e7ec7bc88 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -50,7 +50,7 @@ use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMe use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; -use stacks::net::{Neighbor, NeighborKey}; +use stacks::net::{Neighbor, NeighborAddress, NeighborKey}; use stacks::types::chainstate::BurnchainHeaderHash; use stacks::types::EpochList; use stacks::util_lib::boot::boot_code_id; @@ -2223,6 +2223,7 @@ pub struct ConnectionOptionsFile { pub auth_token: Option, pub antientropy_retry: Option, pub reject_blocks_pushed: Option, + pub stackerdb_hint_replicas: Option, } impl ConnectionOptionsFile { @@ -2352,12 +2353,34 @@ impl ConnectionOptionsFile { handshake_timeout: self.handshake_timeout.unwrap_or(5), max_sockets: self.max_sockets.unwrap_or(800) as usize, antientropy_public: self.antientropy_public.unwrap_or(true), - private_neighbors: self.private_neighbors.unwrap_or(true), + private_neighbors: self.private_neighbors.unwrap_or(false), auth_token: self.auth_token, antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), reject_blocks_pushed: self .reject_blocks_pushed .unwrap_or(default.reject_blocks_pushed), + stackerdb_hint_replicas: self + .stackerdb_hint_replicas + .map(|stackerdb_hint_replicas_json| { + let hint_replicas_res: Result< + Vec<(QualifiedContractIdentifier, Vec)>, + String, + > = serde_json::from_str(&stackerdb_hint_replicas_json) + .map_err(|e| format!("Failed to decode `stackerdb_hint_replicas`: {e:?}")); + hint_replicas_res + }) + .transpose()? 
+ .and_then(|stackerdb_replicas_list| { + // coalesce to a hashmap, but don't worry about duplicate entries + // (garbage in, garbage out) + let stackerdb_hint_replicas: HashMap< + QualifiedContractIdentifier, + Vec, + > = stackerdb_replicas_list.into_iter().collect(); + + Some(stackerdb_hint_replicas) + }) + .unwrap_or(default.stackerdb_hint_replicas), ..default }) } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b688db100d..9dba79a2c5 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4810,7 +4810,7 @@ impl StacksNode { &mut chainstate, &sortdb, stackerdb_configs, - config.connection_options.num_neighbors, + &config.connection_options, ) .unwrap(); From 1e520b90827b7c9a98331de5da2ad925ed5fa325 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 3 Dec 2024 10:34:57 -0800 Subject: [PATCH 078/115] Fix math in tenure extend timestamp calculation to ADD not SUB block processing time Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1e5b4eca55..5478ca5fa7 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -1026,7 +1026,7 @@ impl SignerDb { let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(tenure).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. 
Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); tenure_start_time .saturating_add(tenure_idle_timeout_secs) - .saturating_sub(tenure_process_time_ms / 1000) + .saturating_add(tenure_process_time_ms / 1000) } } @@ -1717,7 +1717,7 @@ mod tests { block_infos[0] .proposed_time .saturating_add(tenure_idle_timeout.as_secs()) - .saturating_sub(3) + .saturating_add(3) ); db.insert_block(&block_infos[2]).unwrap(); @@ -1730,7 +1730,7 @@ mod tests { block_infos[2] .proposed_time .saturating_add(tenure_idle_timeout.as_secs()) - .saturating_sub(5) + .saturating_add(5) ); db.insert_block(&block_infos[4]).unwrap(); @@ -1744,14 +1744,14 @@ mod tests { block_infos[4] .proposed_time .saturating_add(tenure_idle_timeout.as_secs()) - .saturating_sub(20) + .saturating_add(20) ); // Verify tenure consensus_hash_3 (unknown hash) let timestamp_hash_3 = db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_3); assert!( - timestamp_hash_3.saturating_sub(tenure_idle_timeout.as_secs()) + timestamp_hash_3.saturating_add(tenure_idle_timeout.as_secs()) < block_infos[0].proposed_time ); } From 61911eff8522514ad015b9decbe97bd264c8ad00 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Dec 2024 13:41:55 -0500 Subject: [PATCH 079/115] chore: add debug log for stackerdb listener timeout --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 853abf99c1..2da3d3da60 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -199,6 +199,7 @@ impl StackerDBListener { let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + debug!("StackerDBListener: No StackerDB event received. 
Checking flags and polling again."); continue; } Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { From 6e4bcc81626304a3e0a7de5e3e04f0c22d0fe389 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Dec 2024 15:19:35 -0500 Subject: [PATCH 080/115] refactor: pass `reward_set` instead of re-loading it --- stackslib/src/chainstate/nakamoto/mod.rs | 4 ++-- testnet/stacks-node/src/nakamoto_node/miner.rs | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ca37e30121..1e22076b4d 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2452,7 +2452,7 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - reward_set: RewardSet, + reward_set: &RewardSet, obtain_method: NakamotoBlockObtainMethod, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); @@ -2522,7 +2522,7 @@ impl NakamotoChainState { return Ok(false); }; - let signing_weight = match block.header.verify_signer_signatures(&reward_set) { + let signing_weight = match block.header.verify_signer_signatures(reward_set) { Ok(x) => x, Err(e) => { warn!("Received block, but the signer signatures are invalid"; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index e048c9456a..d0dbbabb47 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -328,6 +328,7 @@ impl BlockMinerThread { &sortdb, &mut stackerdbs, &mut last_block_rejected, + &reward_set, ) { // Before stopping this miner, shutdown the coordinator thread. 
coordinator.shutdown(); @@ -344,6 +345,7 @@ impl BlockMinerThread { sortdb: &SortitionDB, stackerdbs: &mut StackerDBs, last_block_rejected: &mut bool, + reward_set: &RewardSet, ) -> Result<(), NakamotoNodeError> { #[cfg(test)] if *TEST_MINE_STALL.lock().unwrap() == Some(true) { @@ -471,8 +473,6 @@ impl BlockMinerThread { }; *last_block_rejected = false; - let reward_set = self.load_signer_set()?; - new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { warn!("Error accepting own block: {e:?}. Will try mining again."); @@ -612,7 +612,7 @@ impl BlockMinerThread { sort_db: &SortitionDB, chain_state: &mut StacksChainState, block: &NakamotoBlock, - reward_set: RewardSet, + reward_set: &RewardSet, ) -> Result<(), ChainstateError> { if Self::fault_injection_skip_block_broadcast() { warn!( @@ -669,7 +669,7 @@ impl BlockMinerThread { fn broadcast( &mut self, block: NakamotoBlock, - reward_set: RewardSet, + reward_set: &RewardSet, stackerdbs: &StackerDBs, ) -> Result<(), NakamotoNodeError> { let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) From f2ce342269f0584bc4540f2a0d8ac13cce939038 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Dec 2024 15:23:08 -0500 Subject: [PATCH 081/115] chore: improve comment on `mined_blocks` --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d0dbbabb47..f649046096 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -138,7 +138,7 @@ pub struct BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, - /// Number of blocks mined in this tenure + /// Number of blocks mined since a tenure change/extend mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: 
RegisteredKey, From fdb05d1c7e0ba2ea11ec98b147e7e1e5c1d3b825 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Dec 2024 16:48:14 -0500 Subject: [PATCH 082/115] fix: missing change for previous refactor --- stackslib/src/net/relay.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index cb7d310321..b93171916c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1077,7 +1077,7 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - reward_set, + &reward_set, obtained_method, )?; staging_db_tx.commit()?; From de1881b5f48810a4db0b608c87036306185c18b4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Dec 2024 16:57:03 -0500 Subject: [PATCH 083/115] refactor: use `TestFlag` --- .../src/nakamoto_node/stackerdb_listener.rs | 17 ++++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 10 +++++----- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 2da3d3da60..f9ada97e57 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -20,6 +20,8 @@ use std::sync::{Arc, Condvar, Mutex}; use std::time::Duration; use hashbrown::{HashMap, HashSet}; +#[cfg(test)] +use lazy_static::lazy_static; use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage as SignerMessageV0}; use libsigner::SignerEvent; use stacks::burnchains::Burnchain; @@ -35,11 +37,15 @@ use stacks::util::secp256k1::MessageSignature; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; +#[cfg(test)] +use crate::neon::TestFlag; -/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
-/// Used to test that the signers will broadcast a block if it gets enough signatures #[cfg(test)] -pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); +lazy_static! { + /// Fault injection flag to prevent the miner from seeing enough signer signatures. + /// Used to test that the signers will broadcast a block if it gets enough signatures + pub static ref TEST_IGNORE_SIGNERS: TestFlag = TestFlag::default(); +} /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? @@ -440,10 +446,7 @@ impl StackerDBListener { /// Do we ignore signer signatures? #[cfg(test)] fn fault_injection_ignore_signatures() -> bool { - if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { - return true; - } - false + TEST_IGNORE_SIGNERS.get() } #[cfg(not(test))] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a0af4d58ac..0051902852 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2513,7 +2513,7 @@ fn signers_broadcast_signed_blocks() { }) .expect("Timed out waiting for first nakamoto block to be mined"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined @@ -2798,7 +2798,7 @@ fn empty_sortition_before_approval() { let stacks_height_before = info.stacks_tip_height; info!("Forcing miner to ignore signatures for next block"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Pausing block commits to trigger an empty sortition."); signer_test @@ -2851,7 +2851,7 @@ fn empty_sortition_before_approval() { .replace(false); info!("Stop ignoring signers and wait for the tip to advance"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); wait_for(60, || { let info = get_chain_info(&signer_test.running_nodes.conf); @@ -5608,7 
+5608,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // broadcasted to the miner so it can end its tenure before block confirmation obtained // Clear the stackerdb chunks info!("Forcing miner to ignore block responses for block N+1"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); test_observer::clear(); @@ -5735,7 +5735,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Timed out waiting for block proposal of N+1' block proposal"); info!("Allowing miner to accept block responses again. "); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); From f06321673295df889fdc34592bccc4c29548c3f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 3 Dec 2024 18:02:50 -0500 Subject: [PATCH 084/115] chore: PR feedback --- testnet/stacks-node/src/config.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4e7ec7bc88..ad780fa17c 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2370,16 +2370,7 @@ impl ConnectionOptionsFile { hint_replicas_res }) .transpose()? 
- .and_then(|stackerdb_replicas_list| { - // coalesce to a hashmap, but don't worry about duplicate entries - // (garbage in, garbage out) - let stackerdb_hint_replicas: HashMap< - QualifiedContractIdentifier, - Vec, - > = stackerdb_replicas_list.into_iter().collect(); - - Some(stackerdb_hint_replicas) - }) + .map(HashMap::from_iter) .unwrap_or(default.stackerdb_hint_replicas), ..default }) From e4551b9878b9f6b663d8c1b6d68419ac3663ace2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 3 Dec 2024 18:09:37 -0800 Subject: [PATCH 085/115] feat: test for idle extend with active mining --- libsigner/src/v0/messages.rs | 16 + testnet/stacks-node/src/run_loop/neon.rs | 12 + testnet/stacks-node/src/tests/signer/mod.rs | 98 ++++-- testnet/stacks-node/src/tests/signer/v0.rs | 340 +++++++++++++++----- 4 files changed, 361 insertions(+), 105 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index fd639a91f4..087c4ba7a3 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -670,6 +670,22 @@ impl BlockResponse { timestamp, )) } + + /// Get the tenure extend timestamp from the block response + pub fn get_tenure_extend_timestamp(&self) -> u64 { + match self { + BlockResponse::Accepted(accepted) => accepted.response_data.tenure_extend_timestamp, + BlockResponse::Rejected(rejection) => rejection.response_data.tenure_extend_timestamp, + } + } + + /// Get the signer signature hash from the block response + pub fn get_signer_signature_hash(&self) -> Sha512Trunc256Sum { + match self { + BlockResponse::Accepted(accepted) => accepted.signer_signature_hash, + BlockResponse::Rejected(rejection) => rejection.signer_signature_hash, + } + } } impl StacksMessageCodec for BlockResponse { diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..b2171b4e8b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -73,6 +73,18 @@ 
impl Default for RunLoopCounter { } } +impl RunLoopCounter { + #[cfg(test)] + pub fn get(&self) -> u64 { + self.0.load(Ordering::SeqCst) + } + + #[cfg(test)] + pub fn load(&self, ordering: Ordering) -> u64 { + self.0.load(ordering) + } +} + #[cfg(test)] impl std::ops::Deref for RunLoopCounter { type Target = Arc; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 946a566c13..07a44e428e 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,7 +36,9 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockResponse, SignerMessage}; +use libsigner::v0::messages::{ + BlockAccepted, BlockResponse, MessageSlotID, PeerInfo, SignerMessage, +}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -53,14 +55,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; -use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; +use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, TestFlag}; +use crate::neon::{Counters, RunLoopCounter, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use 
crate::tests::nakamoto_integrations::{ @@ -81,13 +83,13 @@ pub struct RunningNodes { pub btcd_controller: BitcoinCoreController, pub run_loop_thread: thread::JoinHandle<()>, pub run_loop_stopper: Arc, - pub vrfs_submitted: Arc, - pub commits_submitted: Arc, - pub blocks_processed: Arc, - pub nakamoto_blocks_proposed: Arc, - pub nakamoto_blocks_mined: Arc, - pub nakamoto_blocks_rejected: Arc, - pub nakamoto_blocks_signer_pushed: Arc, + pub vrfs_submitted: RunLoopCounter, + pub commits_submitted: RunLoopCounter, + pub blocks_processed: RunLoopCounter, + pub nakamoto_blocks_proposed: RunLoopCounter, + pub nakamoto_blocks_mined: RunLoopCounter, + pub nakamoto_blocks_rejected: RunLoopCounter, + pub nakamoto_blocks_signer_pushed: RunLoopCounter, pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, @@ -307,10 +309,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height) }) .unwrap(); @@ -355,6 +358,24 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()) { + let blocks_before = self.running_nodes.nakamoto_blocks_mined.get(); + let info_before = self.get_peer_info(); + + f(); + + // Verify that the block was mined + wait_for(timeout_secs, || { + let blocks_mined = self.running_nodes.nakamoto_blocks_mined.get(); + let info = self.get_peer_info(); + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for nakamoto block to be mined"); + } + /// Wait for a confirmed block and return a list of individual /// signer signatures fn wait_for_confirmed_block_v0( @@ -618,6 +639,45 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockResponse { + let mut stackerdb = StackerDB::new( + &self.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + 
self.get_current_reward_cycle(), + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + let latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[slot_id], + ) + .expect("Failed to get message from stackerdb"); + let latest_msg = latest_msgs.last().unwrap(); + let SignerMessage::BlockResponse(block_response) = latest_msg else { + panic!("Latest message from slot #{slot_id} isn't a block acceptance"); + }; + block_response.clone() + } + + /// Get the latest block acceptance from the given slot + pub fn get_latest_block_acceptance(&self, slot_id: u32) -> BlockAccepted { + let block_response = self.get_latest_block_response(slot_id); + match block_response { + BlockResponse::Accepted(accepted) => accepted, + _ => panic!("Latest block response from slot #{slot_id} isn't a block acceptance"), + } + } + + /// Get /v2/info from the node + pub fn get_peer_info(&self) -> PeerInfo { + self.stacks_client + .get_peer_info() + .expect("Failed to get peer info") + } } fn setup_stx_btc_node( @@ -747,13 +807,13 @@ fn setup_stx_btc_node( btc_regtest_controller, run_loop_thread, run_loop_stopper, - vrfs_submitted: vrfs_submitted.0, - commits_submitted: commits_submitted.0, - blocks_processed: blocks_processed.0, - nakamoto_blocks_proposed: naka_blocks_proposed.0, - nakamoto_blocks_mined: naka_blocks_mined.0, - nakamoto_blocks_rejected: naka_blocks_rejected.0, - nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks.0, + vrfs_submitted, + commits_submitted, + blocks_processed, + nakamoto_blocks_proposed: naka_blocks_proposed, + nakamoto_blocks_mined: naka_blocks_mined, + nakamoto_blocks_rejected: naka_blocks_rejected, + nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 
ef05f8e9cb..7686e9e2b8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -45,6 +45,7 @@ use stacks::net::api::postblock_proposal::{ use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; +use stacks::util::get_epoch_time_secs; use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; @@ -83,7 +84,9 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{self, gen_random_port, make_stacks_transfer}; +use crate::tests::{ + self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, +}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -2601,24 +2604,8 @@ fn tenure_extend_after_idle() { signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - // Verify that the block was mined - wait_for(30, || { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for first nakamoto block to be mined"); - info!("---- Waiting for a tenure extend ----"); // Now, wait for a block with a tenure extend @@ -2674,35 +2661,14 @@ fn 
stx_transfers_dont_effect_idle_timeout() { // Add a delay to the block validation process TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(5); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + let info_before = signer_test.get_peer_info(); + let blocks_before = signer_test.running_nodes.nakamoto_blocks_mined.get(); info!("---- Nakamoto booted, starting test ----"; "info_height" => info_before.stacks_tip_height, "blocks_before" => blocks_before, ); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - info!("---- Verifying that the block was mined ----"); - // Verify that the block was mined - wait_for(30, || { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - let info = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for first nakamoto block to be mined"); - info!("---- Getting current idle timeout ----"); let reward_cycle = signer_test.get_current_reward_cycle(); @@ -2726,29 +2692,7 @@ fn stx_transfers_dont_effect_idle_timeout() { let slot_id = 0_u32; - let get_last_block_response = |slot_id: u32| { - let mut stackerdb = StackerDB::new( - &naka_conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. 
- ); - let latest_msgs = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &[slot_id], - ) - .expect("Failed to get message from stackerdb"); - let latest_msg = latest_msgs.last().unwrap(); - let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = latest_msg else { - panic!("Latest message from slot #{slot_id} isn't a block acceptance"); - }; - accepted.clone() - }; - - let initial_acceptance = get_last_block_response(slot_id); + let initial_acceptance = signer_test.get_latest_block_acceptance(slot_id); assert_eq!(initial_acceptance.signer_signature_hash, last_block_hash); info!( @@ -2765,30 +2709,20 @@ fn stx_transfers_dont_effect_idle_timeout() { let mut first_global_acceptance = None; for i in 0..num_txs { info!("---- Mining interim block {} ----", i + 1); - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - naka_conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - wait_for(30, || { - let info = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for nakamoto block to be mined"); + signer_test.wait_for_nakamoto_block(30, || { + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + }); - let latest_acceptance = get_last_block_response(slot_id); + let latest_acceptance = signer_test.get_latest_block_acceptance(slot_id); let last_block_hash = get_last_block_hash(); assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); @@ -2813,6 +2747,240 @@ fn 
stx_transfers_dont_effect_idle_timeout() { signer_test.shutdown(); } +#[test] +#[ignore] +/// Verify that a tenure extend will occur after an idle timeout +/// while actively mining. +fn idle_tenure_extend_active_mining() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let deployer_sk = Secp256k1PrivateKey::new(); + let deployer_addr = tests::to_addr(&deployer_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 5; + let num_naka_blocks = 5; + let tenure_count = 2; + let tx_fee = 10000; + let deploy_fee = 190200; + let amount = + deploy_fee + tx_fee * num_txs * tenure_count * num_naka_blocks * 100 + 100 * tenure_count; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(60); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, amount), (deployer_addr, amount)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + let mut sender_nonces: HashMap = HashMap::new(); + + let get_and_increment_nonce = + |sender_sk: &Secp256k1PrivateKey, sender_nonces: &mut HashMap| { + let nonce = sender_nonces.get(&sender_sk.to_hex()).unwrap_or(&0); + let result = *nonce; + sender_nonces.insert(sender_sk.to_hex(), result + 1); + result + }; + + signer_test.boot_to_epoch_3(); + + // Add a delay to the block validation process + TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(3); + + 
signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + info!("---- Getting current idle timeout ----"); + + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let last_block_hash = get_last_block_hash(); + + let slot_id = 0_u32; + + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let log_idle_diff = |timestamp: u64| { + let now = get_epoch_time_secs(); + let diff = timestamp.saturating_sub(now); + info!("----- Idle diff: {diff} seconds -----"); + }; + + let initial_response = signer_test.get_latest_block_response(slot_id); + assert_eq!( + initial_response.get_signer_signature_hash(), + last_block_hash + ); + + info!( + "---- Last idle timeout: {} ----", + initial_response.get_tenure_extend_timestamp() + ); + + // Deploy a contract that will be called a lot + + let contract_src = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + (0..250) + .map(|_| format!("(var-get my-var)")) + .collect::>() + .join(" ") + ); + + // First, lets deploy the contract + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let contract_tx = make_contract_publish( + &deployer_sk, + deployer_nonce, + deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &contract_src, + ); + submit_tx(&http_origin, &contract_tx); + + info!("----- Submitted deploy txs, mining BTC block -----"); + + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + let mut last_response = signer_test.get_latest_block_response(slot_id); + + // Make 
multiple tenures that get extended through idle timeouts + for t in 1..=tenure_count { + info!("----- Mining tenure {t} -----"); + log_idle_diff(last_response.get_tenure_extend_timestamp()); + // Now, start a tenure with contract calls + for i in 1..=num_naka_blocks { + // Just in case these Nakamoto blocks pass the idle timeout (probably because CI is slow), exit early + if i != 1 && last_block_contains_tenure_change_tx(TenureChangeCause::Extended) { + info!("---- Tenure extended before mining {i} nakamoto blocks -----"); + break; + } + info!("----- Mining nakamoto block {i} in tenure {t} -----"); + + signer_test.wait_for_nakamoto_block(30, || { + // Throw in a STX transfer to test mixed blocks + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + for _ in 0..num_txs { + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + // Fill up the mempool with contract calls + let contract_tx = make_contract_call( + &deployer_sk, + deployer_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + match submit_tx_fallible(&http_origin, &contract_tx) { + Ok(_txid) => {} + Err(_e) => { + // If we fail to submit a tx, we need to make sure we don't + // increment the nonce for this sender, so we don't end up + // skipping a tx. 
+ sender_nonces.insert(deployer_sk.to_hex(), deployer_nonce); + } + } + } + }); + let latest_response = signer_test.get_latest_block_response(slot_id); + let naka_blocks = test_observer::get_mined_nakamoto_blocks(); + info!( + "----- Latest tenure extend timestamp: {} -----", + latest_response.get_tenure_extend_timestamp() + ); + log_idle_diff(latest_response.get_tenure_extend_timestamp()); + info!( + "----- Latest block transaction events: {} -----", + naka_blocks.last().unwrap().tx_events.len() + ); + assert_eq!( + latest_response.get_signer_signature_hash(), + get_last_block_hash(), + "Expected the latest block response to be for the latest block" + ); + assert_ne!( + last_response.get_tenure_extend_timestamp(), + latest_response.get_tenure_extend_timestamp(), + "Tenure extend timestamp should change with each block" + ); + last_response = latest_response; + } + + let current_time = get_epoch_time_secs(); + let extend_diff = last_response + .get_tenure_extend_timestamp() + .saturating_sub(current_time); + + info!( + "----- After mining {num_naka_blocks} nakamoto blocks in tenure {t}, waiting for TenureExtend -----"; + "tenure_extend_timestamp" => last_response.get_tenure_extend_timestamp(), + "extend_diff" => extend_diff, + "current_time" => current_time, + ); + + // Now, wait for the idle timeout to trigger + wait_for(extend_diff + 30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Expected a tenure extend after idle timeout"); + + last_response = signer_test.get_latest_block_response(slot_id); + + info!("----- Tenure {t} extended -----"); + log_idle_diff(last_response.get_tenure_extend_timestamp()); + } + + info!("------------------------- Test Shutdown -------------------------"); + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behaviour of signers when a sortition is empty. 
Specifically: From f4188b241cda91f8c883a0569935ce0bc2fce875 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 4 Dec 2024 08:22:38 -0800 Subject: [PATCH 086/115] fix: add new test to bitcoin-tests --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index fd2d5606a2..60886ad4a7 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -124,6 +124,7 @@ jobs: - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::tenure_extend_after_idle - tests::signer::v0::stx_transfers_dont_effect_idle_timeout + - tests::signer::v0::idle_tenure_extend_active_mining - tests::signer::v0::multiple_miners_with_custom_chain_id - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition From 6cb70e8bf69976f4a8c8876e0f8cabe5a993d262 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 4 Dec 2024 09:06:33 -0800 Subject: [PATCH 087/115] fix: pr comments --- testnet/stacks-node/src/tests/signer/mod.rs | 7 ++++++- testnet/stacks-node/src/tests/signer/v0.rs | 3 --- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 07a44e428e..ff128d0a03 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -313,6 +313,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height) + let blocks_mined = self.running_nodes.nakamoto_blocks_mined.get(); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height + && blocks_mined > mined_before) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); @@ -359,6 +362,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()) { let blocks_before = 
self.running_nodes.nakamoto_blocks_mined.get(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7686e9e2b8..4922941819 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2592,7 +2592,6 @@ fn tenure_extend_after_idle() { num_signers, vec![(sender_addr, send_amt + send_fee)], |config| { - // make the duration long enough that the miner will be marked as malicious config.tenure_idle_timeout = idle_timeout; }, |_| {}, @@ -2646,7 +2645,6 @@ fn stx_transfers_dont_effect_idle_timeout() { num_signers, vec![(sender_addr, (send_amt + send_fee) * num_txs)], |config| { - // make the duration long enough that the miner will be marked as malicious config.tenure_idle_timeout = idle_timeout; }, |_| {}, @@ -2782,7 +2780,6 @@ fn idle_tenure_extend_active_mining() { num_signers, vec![(sender_addr, amount), (deployer_addr, amount)], |config| { - // make the duration long enough that the miner will be marked as malicious config.tenure_idle_timeout = idle_timeout; }, |_| {}, From 92d8b29d97e3dbb5729935e6c812d39fb197034f Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 4 Dec 2024 09:16:06 -0800 Subject: [PATCH 088/115] fix: mine a few naka blocks after tenure extend --- testnet/stacks-node/src/tests/signer/v0.rs | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4922941819..bac82e93ef 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2974,6 +2974,30 @@ fn idle_tenure_extend_active_mining() { log_idle_diff(last_response.get_tenure_extend_timestamp()); } + // After the last extend, mine a few more naka blocks + for i in 1..=num_naka_blocks { + // Just in case these Nakamoto blocks pass the idle timeout (probably because CI is slow), exit early + if i != 1 && 
last_block_contains_tenure_change_tx(TenureChangeCause::Extended) { + info!("---- Tenure extended before mining {i} nakamoto blocks -----"); + break; + } + info!("----- Mining nakamoto block {i} after last tenure extend -----"); + + signer_test.wait_for_nakamoto_block(30, || { + // Throw in a STX transfer to test mixed blocks + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + }); + } + info!("------------------------- Test Shutdown -------------------------"); signer_test.shutdown(); } From 2e266a20622cb4722d0743be3cb180a8d5e2569e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Dec 2024 16:03:09 -0500 Subject: [PATCH 089/115] refactor: add `ExecutionCost::ZERO` --- clarity/src/vm/ast/mod.rs | 2 +- clarity/src/vm/costs/mod.rs | 32 ++++++++----------- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++-- .../src/chainstate/stacks/boot/pox_2_tests.rs | 4 +-- .../src/chainstate/stacks/boot/pox_3_tests.rs | 2 +- .../src/chainstate/stacks/db/accounts.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 6 ++-- stackslib/src/chainstate/stacks/db/mod.rs | 4 +-- .../src/chainstate/stacks/db/transactions.rs | 4 +-- .../src/chainstate/stacks/db/unconfirmed.rs | 10 +++--- stackslib/src/chainstate/stacks/miner.rs | 4 +-- .../stacks/tests/block_construction.rs | 2 +- stackslib/src/clarity_vm/clarity.rs | 4 +-- stackslib/src/core/tests/mod.rs | 30 ++++++++--------- stackslib/src/cost_estimates/tests/common.rs | 4 +-- .../cost_estimates/tests/cost_estimators.rs | 2 +- .../src/cost_estimates/tests/fee_scalar.rs | 4 +-- stackslib/src/net/api/getstxtransfercost.rs | 2 +- stackslib/src/net/api/tests/mod.rs | 2 +- .../src/net/api/tests/postblock_proposal.rs | 2 +- testnet/stacks-node/src/event_dispatcher.rs | 8 ++--- testnet/stacks-node/src/neon_node.rs | 7 ++-- 
testnet/stacks-node/src/run_loop/mod.rs | 4 +-- testnet/stacks-node/src/tests/epoch_205.rs | 8 ++--- .../src/tests/neon_integrations.rs | 2 +- 25 files changed, 75 insertions(+), 82 deletions(-) diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 1cff959695..5c615f46fa 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -353,7 +353,7 @@ mod test { ) -> std::result::Result { self.invoked_functions.push((cost_f, input.to_vec())); self.invocation_count += 1; - Ok(ExecutionCost::zero()) + Ok(ExecutionCost::ZERO) } fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { self.cost_addition_count += 1; diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 897927bc6d..8561dccb23 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -140,7 +140,7 @@ impl CostTracker for () { _cost_function: ClarityCostFunction, _input: &[u64], ) -> std::result::Result { - Ok(ExecutionCost::zero()) + Ok(ExecutionCost::ZERO) } fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { Ok(()) @@ -707,7 +707,7 @@ impl LimitedCostTracker { contract_call_circuits: HashMap::new(), limit, memory_limit: CLARITY_MEMORY_LIMIT, - total: ExecutionCost::zero(), + total: ExecutionCost::ZERO, memory: 0, epoch, mainnet, @@ -731,7 +731,7 @@ impl LimitedCostTracker { contract_call_circuits: HashMap::new(), limit, memory_limit: CLARITY_MEMORY_LIMIT, - total: ExecutionCost::zero(), + total: ExecutionCost::ZERO, memory: 0, epoch, mainnet, @@ -879,7 +879,7 @@ impl LimitedCostTracker { pub fn get_total(&self) -> ExecutionCost { match self { Self::Limited(TrackerData { total, .. }) => total.clone(), - Self::Free => ExecutionCost::zero(), + Self::Free => ExecutionCost::ZERO, } } #[allow(clippy::panic)] @@ -1049,7 +1049,7 @@ impl CostTracker for LimitedCostTracker { match self { Self::Free => { // tracker is free, return zero! 
- return Ok(ExecutionCost::zero()); + return Ok(ExecutionCost::ZERO); } Self::Limited(ref mut data) => { if cost_function == ClarityCostFunction::Unimplemented { @@ -1194,15 +1194,13 @@ impl CostOverflowingMath for u64 { } impl ExecutionCost { - pub fn zero() -> ExecutionCost { - Self { - runtime: 0, - write_length: 0, - read_count: 0, - write_count: 0, - read_length: 0, - } - } + pub const ZERO: Self = Self { + runtime: 0, + write_length: 0, + read_count: 0, + write_count: 0, + read_length: 0, + }; /// Returns the percentage of self consumed in `numerator`'s largest proportion dimension. pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 { @@ -1329,11 +1327,7 @@ impl ExecutionCost { } pub fn is_zero(&self) -> bool { - self.write_length == 0 - && self.write_count == 0 - && self.read_length == 0 - && self.read_count == 0 - && self.runtime == 0 + *self == Self::ZERO } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4cc2090a57..35f6e5d1e1 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3896,7 +3896,7 @@ impl NakamotoChainState { // Nakamoto must load block cost from parent if this block isn't a tenure change. // If this is a tenure-extend, then the execution cost is reset. let initial_cost = if new_tenure || tenure_extend { - ExecutionCost::zero() + ExecutionCost::ZERO } else { let parent_cost_total = Self::get_total_tenure_cost_at(chainstate_tx.as_tx(), &parent_index_hash)? 
@@ -4223,7 +4223,7 @@ impl NakamotoChainState { tx_receipts, matured_rewards, matured_rewards_info: matured_rewards_info_opt, - parent_microblocks_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, anchored_block_cost: block_execution_cost, parent_burn_block_hash, parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal @@ -4713,7 +4713,7 @@ impl NakamotoChainState { tx_receipts, matured_rewards, matured_rewards_info: matured_rewards_info_opt, - parent_microblocks_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, anchored_block_cost: block_execution_cost, parent_burn_block_hash, parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 7ae25d00f6..64782c67d6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1039,7 +1039,7 @@ fn test_simple_pox_lockup_transition_pox_2() { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == key_to_stacks_addr(&charlie) { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" ); charlie_txs.insert(t.auth.get_origin_nonce(), r); @@ -1385,7 +1385,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == charlie_address { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" 
); charlie_txs.insert(t.auth.get_origin_nonce(), r); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index dc65db0324..8a173c6adc 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -459,7 +459,7 @@ fn simple_pox_lockup_transition_pox_2() { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == key_to_stacks_addr(&charlie) { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" ); charlie_txs.insert(t.auth.get_origin_nonce(), r); diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index b05365d5ac..bf84cc1362 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -1192,7 +1192,7 @@ mod test { new_tip.microblock_tail.clone(), &block_reward, None, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, 123, false, vec![], diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 04f772da02..0d87ca207e 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4327,7 +4327,7 @@ impl StacksChainState { post_condition_aborted: false, stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -5131,7 +5131,7 @@ impl StacksChainState { ); cost } else { - ExecutionCost::zero() + ExecutionCost::ZERO }; let mut clarity_tx = StacksChainState::chainstate_block_begin( @@ -5218,7 +5218,7 @@ impl StacksChainState { // if we get here, then we need to reset the block-cost back to 0 since this begins the // epoch defined by this miner. 
- clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); // is this stacks block the first of a new epoch? let (applied_epoch_transition, mut tx_receipts) = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6b6f523f88..a94a6165ee 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1623,7 +1623,7 @@ impl StacksChainState { allocations_tx, allocation_events, Value::okay_true(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ); receipts.push(allocations_receipt); @@ -1724,7 +1724,7 @@ impl StacksChainState { &mut tx, &parent_hash, &first_tip_info, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, 0, )?; tx.commit()?; diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e9de9139a2..38f4eb3c50 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -192,7 +192,7 @@ impl StacksTransactionReceipt { result: Value::okay_true(), stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -307,7 +307,7 @@ impl StacksTransactionReceipt { result: Value::okay_true(), stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 7da2ff1599..6f7a9fe9ea 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -254,7 +254,7 @@ impl UnconfirmedState { let mut total_burns = 0; let mut all_receipts = vec![]; let mut mined_txs = UnconfirmedTxMap::new(); - let mut new_cost = ExecutionCost::zero(); + 
let mut new_cost = ExecutionCost::ZERO; let mut new_bytes = 0; let mut num_new_mblocks = 0; let mut have_state = self.have_state; @@ -351,7 +351,7 @@ impl UnconfirmedState { // apply injected faults if self.disable_cost_check { warn!("Fault injection: disabling microblock miner's cost tracking"); - self.cost_so_far = ExecutionCost::zero(); + self.cost_so_far = ExecutionCost::ZERO; } if self.disable_bytes_check { warn!("Fault injection: disabling microblock miner's size tracking"); @@ -709,7 +709,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, @@ -946,7 +946,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, @@ -1205,7 +1205,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 7a72cc1652..082e9c374c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1190,7 +1190,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } if self.runtime.disable_cost_check { warn!("Fault injection: disabling miner limit on microblock runtime cost"); - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); } self.runtime.bytes_so_far = bytes_so_far; @@ -1418,7 +1418,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } if self.runtime.disable_cost_check { warn!("Fault injection: disabling miner limit on microblock runtime cost"); - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); } self.runtime.bytes_so_far = bytes_so_far; 
diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 7b7720b996..90fc7f1705 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5064,7 +5064,7 @@ fn paramaterized_mempool_walk_test( available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index c89679f414..a5497cea24 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -215,7 +215,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { pub fn cost_so_far(&self) -> ExecutionCost { match self.cost_track { Some(ref track) => track.get_total(), - None => ExecutionCost::zero(), + None => ExecutionCost::ZERO, } } @@ -1765,7 +1765,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { pub fn cost_so_far(&self) -> ExecutionCost { match self.cost_track { Some(ref track) => track.get_total(), - None => ExecutionCost::zero(), + None => ExecutionCost::ZERO, } } diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 03447e9bf4..a209ef0677 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -151,7 +151,7 @@ pub fn make_block( &mut chainstate_tx, &new_index_hash, &new_tip_info, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, block_height, ) .unwrap(); @@ -288,7 +288,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -326,7 +326,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -363,7 +363,7 @@ fn 
mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -405,7 +405,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -445,7 +445,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -683,7 +683,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -720,7 +720,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -757,7 +757,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -866,7 +866,7 @@ fn test_iterate_candidates_skipped_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -981,7 +981,7 @@ fn test_iterate_candidates_processing_error_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -1096,7 +1096,7 @@ fn test_iterate_candidates_problematic_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -1248,7 +1248,7 @@ fn 
test_iterate_candidates_concurrent_write_lock() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -2792,7 +2792,7 @@ fn test_filter_txs_by_type() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -2827,7 +2827,7 @@ fn test_filter_txs_by_type() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 01f6c32ec7..4ce9ea48cc 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -44,8 +44,8 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE tx_receipts, matured_rewards: vec![], matured_rewards_info: None, - parent_microblocks_cost: ExecutionCost::zero(), - anchored_block_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, + anchored_block_cost: ExecutionCost::ZERO, parent_burn_block_hash: BurnchainHeaderHash([0; 32]), parent_burn_block_height: 1, parent_burn_block_timestamp: 1, diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index eabbb4a148..1ed6b034e5 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -102,7 +102,7 @@ fn make_dummy_transfer_tx() -> StacksTransactionReceipt { tx, vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index 685fc6430a..3bfc4b966a 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ 
b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -94,7 +94,7 @@ fn make_dummy_transfer_tx(fee: u64) -> StacksTransactionReceipt { tx, vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } @@ -115,7 +115,7 @@ fn make_dummy_cc_tx(fee: u64) -> StacksTransactionReceipt { vec![], Value::okay(Value::Bool(true)).unwrap(), 0, - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs index b8801e7d7c..78e6e66851 100644 --- a/stackslib/src/net/api/getstxtransfercost.rs +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -108,7 +108,7 @@ impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { if let Some((_, fee_estimator, metric)) = rpc_args.get_estimators_ref() { // STX transfer transactions have zero runtime cost - let estimated_cost = ExecutionCost::zero(); + let estimated_cost = ExecutionCost::ZERO; let estimations = RPCPostFeeRateRequestHandler::estimate_tx_fee_from_cost_and_length( &preamble, diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index cd8a337acb..c6c62dd1fe 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -431,7 +431,7 @@ impl<'a> TestRPC<'a> { let tip = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let mut anchor_size = 0; // make a block diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 33625bb97b..481d0b2047 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -424,7 +424,7 @@ fn test_try_make_response() { signer_signature_hash, good_block.header.signer_signature_hash() ); - assert_eq!(cost, ExecutionCost::zero()); + assert_eq!(cost, 
ExecutionCost::ZERO); assert_eq!(size, 180); assert!(validation_time_ms > 0 && validation_time_ms < 60000); } diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 8144cd8ec5..4d6eec8922 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1730,8 +1730,8 @@ mod test { let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); let parent_burn_block_height = 0; let parent_burn_block_timestamp = 0; - let anchored_consumed = ExecutionCost::zero(); - let mblock_confirmed_consumed = ExecutionCost::zero(); + let anchored_consumed = ExecutionCost::ZERO; + let mblock_confirmed_consumed = ExecutionCost::ZERO; let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); @@ -1800,8 +1800,8 @@ mod test { let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); let parent_burn_block_height = 0; let parent_burn_block_timestamp = 0; - let anchored_consumed = ExecutionCost::zero(); - let mblock_confirmed_consumed = ExecutionCost::zero(); + let anchored_consumed = ExecutionCost::ZERO; + let mblock_confirmed_consumed = ExecutionCost::ZERO; let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 1639f93c43..d2143f4986 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -611,8 +611,7 @@ impl MicroblockMinerThread { match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { Ok(Some(_)) => { let parent_index_hash = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - let cost_so_far = if relayer_thread.microblock_stream_cost == ExecutionCost::zero() - { + let cost_so_far = if 
relayer_thread.microblock_stream_cost == ExecutionCost::ZERO { // unknown cost, or this is idempotent. StacksChainState::get_stacks_block_anchored_cost( chainstate.db(), @@ -2845,7 +2844,7 @@ impl RelayerThread { miner_tip: None, last_microblock_tenure_time: 0, microblock_deadline: 0, - microblock_stream_cost: ExecutionCost::zero(), + microblock_stream_cost: ExecutionCost::ZERO, relayer, @@ -3503,7 +3502,7 @@ impl RelayerThread { if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}"); - self.microblock_stream_cost = ExecutionCost::zero(); + self.microblock_stream_cost = ExecutionCost::ZERO; } self.miner_tip = best_tip; } diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 7990c04332..819ace144c 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -199,8 +199,8 @@ pub fn announce_boot_receipts( block_header_0.burn_header_hash, block_header_0.burn_header_height, block_header_0.burn_header_timestamp, - &ExecutionCost::zero(), - &ExecutionCost::zero(), + &ExecutionCost::ZERO, + &ExecutionCost::ZERO, pox_constants, &None, &None, diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index b305a7429a..1964612bd4 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -1123,8 +1123,8 @@ fn bigger_microblock_streams_in_2_05() { sleep_ms(120_000); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut epoch_20_stream_cost = ExecutionCost::zero(); - let mut epoch_205_stream_cost = ExecutionCost::zero(); + let mut epoch_20_stream_cost = ExecutionCost::ZERO; + let mut epoch_205_stream_cost = ExecutionCost::ZERO; // max == largest number of transactions per stream in a given epoch (2.0 or 2.05) // total == number of transactions across all streams in a given 
epoch (2.0 or 2.05) @@ -1155,7 +1155,7 @@ fn bigger_microblock_streams_in_2_05() { eprintln!("{}", transactions.len()); let mut num_big_microblock_txs = 0; - let mut total_execution_cost = ExecutionCost::zero(); + let mut total_execution_cost = ExecutionCost::ZERO; for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -1204,7 +1204,7 @@ fn bigger_microblock_streams_in_2_05() { epoch_20_stream_cost = total_execution_cost; break; } - if in_205 && total_execution_cost.exceeds(&ExecutionCost::zero()) { + if in_205 && total_execution_cost.exceeds(&ExecutionCost::ZERO) { have_confirmed_205_stream = true; epoch_205_stream_cost = total_execution_cost; break; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 7d9f1f0dc8..e3d592d23c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5766,7 +5766,7 @@ fn cost_voting_integration() { let transactions = block.get("transactions").unwrap().as_array().unwrap(); eprintln!("{}", transactions.len()); let mut tested = false; - let mut exec_cost = ExecutionCost::zero(); + let mut exec_cost = ExecutionCost::ZERO; for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { From 2548c56a023136877c567db6c8c55ea7d377e5dc Mon Sep 17 00:00:00 2001 From: Brice Date: Wed, 4 Dec 2024 16:12:48 -0500 Subject: [PATCH 090/115] chore: Rust improvements Co-authored-by: Jeff Bencin --- stacks-signer/src/v0/signer.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 4d7134c5e0..b706df4075 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -992,9 +992,8 @@ impl Signer { } // have enough signatures to broadcast! 
- let Ok(Some(mut block_info)) = self.signer_db.block_lookup(block_hash).map_err(|e| { + let Ok(Some(mut block_info)) = self.signer_db.block_lookup(block_hash).inspect_err(|e| { warn!("{self}: Failed to load block {block_hash}: {e:?})"); - e }) else { warn!("{self}: No such block {block_hash}"); return; From 6e06712f23a1b0bd695c929065385a5991409fc3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Dec 2024 16:18:40 -0500 Subject: [PATCH 091/115] refactor: use `Instant` for timing block validation --- stackslib/src/net/api/postblock_proposal.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index e2e3e9cd44..35e7d6116d 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -18,6 +18,7 @@ use std::io::{Read, Write}; use std::thread::{self, JoinHandle, Thread}; #[cfg(any(test, feature = "testing"))] use std::time::Duration; +use std::time::Instant; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -361,14 +362,7 @@ impl NakamotoBlockProposal { info!("Block validation is no longer stalled due to testing directive."); } } - let ts_start = get_epoch_time_ms(); - // Measure time from start of function - let time_elapsed = || { - get_epoch_time_ms() - .saturating_sub(ts_start) - .try_into() - .unwrap_or(u64::MAX) - }; + let start = Instant::now(); #[cfg(any(test, feature = "testing"))] { @@ -579,7 +573,7 @@ impl NakamotoBlockProposal { }); } - let validation_time_ms = time_elapsed(); + let validation_time_ms = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); info!( "Participant: validated anchored block"; From 674d517784ceb1001f5734d6ef7da0b695c24015 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 5 Dec 2024 09:27:42 -0500 Subject: [PATCH 092/115] chore: change `%-full` to `percent_full` in log It's better not to use special characters like `%` in 
these structured logging field names for maximum compatibility. For example, this shows as `__full` in Grafana. --- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 74ecd19bc1..68cdb2454a 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -649,7 +649,7 @@ impl NakamotoBlockBuilder { "parent_block_id" => %block.header.parent_block_id, "block_size" => size, "execution_consumed" => %consumed, - "%-full" => block_limit.proportion_largest_dimension(&consumed), + "percent_full" => block_limit.proportion_largest_dimension(&consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); From ff66b688954ad328415de4e25da1476ad654e921 Mon Sep 17 00:00:00 2001 From: Brice Date: Thu, 5 Dec 2024 14:22:00 -0500 Subject: [PATCH 093/115] chore: better rustiness Co-authored-by: Jeff Bencin --- .../src/nakamoto_node/stackerdb_listener.rs | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index f9ada97e57..972d65fba7 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -278,18 +278,15 @@ impl StackerDBListener { let (lock, cvar) = &*self.blocks; let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - let block = match blocks.get_mut(&block_sighash) { - Some(block) => block, - None => { - info!( - "StackerDBListener: Received signature for block that we did not request. 
Ignoring."; - "signature" => %signature, - "block_signer_sighash" => %block_sighash, - "slot_id" => slot_id, - "signer_set" => self.signer_set, - ); - continue; - } + let Some(block) = blocks.get_mut(&block_sighash) else { + info!( + "StackerDBListener: Received signature for block that we did not request. Ignoring."; + "signature" => %signature, + "block_signer_sighash" => %block_sighash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; }; let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) From b9d36a196e2498b98eebd6864ad8fe9e78af14cb Mon Sep 17 00:00:00 2001 From: Brice Date: Thu, 5 Dec 2024 14:23:11 -0500 Subject: [PATCH 094/115] refactor: better Rustiness Co-authored-by: Jeff Bencin --- .../src/nakamoto_node/stackerdb_listener.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 972d65fba7..8dde5501ff 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -354,17 +354,14 @@ impl StackerDBListener { let (lock, cvar) = &*self.blocks; let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - let block = match blocks.get_mut(&rejected_data.signer_signature_hash) { - Some(block) => block, - None => { - info!( - "StackerDBListener: Received rejection for block that we did not request. Ignoring."; - "block_signer_sighash" => %rejected_data.signer_signature_hash, - "slot_id" => slot_id, - "signer_set" => self.signer_set, - ); - continue; - } + let Some(block) = blocks.get_mut(&rejected_data.signer_signature_hash) else { + info!( + "StackerDBListener: Received rejection for block that we did not request. 
Ignoring."; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; }; let rejected_pubkey = match rejected_data.recover_public_key() { From c195c68e54ada6eb784671c963fc0275f46a3e53 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 5 Dec 2024 14:37:11 -0500 Subject: [PATCH 095/115] refactor: more suggestions from PR review --- .../src/nakamoto_node/stackerdb_listener.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 8dde5501ff..2770eb0f57 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -16,12 +16,12 @@ use std::collections::BTreeMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Receiver; +#[cfg(test)] +use std::sync::LazyLock; use std::sync::{Arc, Condvar, Mutex}; use std::time::Duration; use hashbrown::{HashMap, HashSet}; -#[cfg(test)] -use lazy_static::lazy_static; use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage as SignerMessageV0}; use libsigner::SignerEvent; use stacks::burnchains::Burnchain; @@ -41,11 +41,9 @@ use crate::event_dispatcher::StackerDBChannel; use crate::neon::TestFlag; #[cfg(test)] -lazy_static! { - /// Fault injection flag to prevent the miner from seeing enough signer signatures. - /// Used to test that the signers will broadcast a block if it gets enough signatures - pub static ref TEST_IGNORE_SIGNERS: TestFlag = TestFlag::default(); -} +/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
+/// Used to test that the signers will broadcast a block if it gets enough signatures +pub static TEST_IGNORE_SIGNERS: LazyLock = LazyLock::new(TestFlag::default); /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? @@ -243,7 +241,7 @@ impl StackerDBListener { continue; }; let slot_ids = modified_slots - .iter() + .into_iter() .map(|chunk| chunk.slot_id) .collect::>(); @@ -354,7 +352,8 @@ impl StackerDBListener { let (lock, cvar) = &*self.blocks; let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); - let Some(block) = blocks.get_mut(&rejected_data.signer_signature_hash) else { + let Some(block) = blocks.get_mut(&rejected_data.signer_signature_hash) + else { info!( "StackerDBListener: Received rejection for block that we did not request. Ignoring."; "block_signer_sighash" => %rejected_data.signer_signature_hash, From 484f4175c65b15b46fb22793c3c304d7cfe80951 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Dec 2024 13:16:47 -0500 Subject: [PATCH 096/115] docs: more info in tenure extend debug log --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 2770eb0f57..97b5620a7c 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -32,6 +32,7 @@ use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::Error as ChainstateError; use stacks::types::chainstate::StacksPublicKey; use stacks::types::PublicKey; +use stacks::util::get_epoch_time_secs; use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::MessageSignature; @@ -530,7 +531,8 @@ impl StackerDBListenerComms { weight_sum += info.weight; if weight_sum >= weight_threshold { 
debug!("SignerCoordinator: 70% threshold reached for tenure extension timestamp"; - "timestamp" => info.timestamp, + "tenure_extend_timestamp" => info.timestamp, + "tenure_extend_in" => (info.timestamp - get_epoch_time_secs()) ); return info.timestamp; } From 3bccb233c8b2cbe8d53b32bc7e0a77156d3fba37 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Dec 2024 14:11:21 -0500 Subject: [PATCH 097/115] docs: clarify changelog comment --- stacks-signer/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index d1d8854042..2fec112d1f 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -12,7 +12,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Added tenure extend timestamp to signer block responses -- Added tenure_idle_timeout_secs configuration option for determining when a tenure extend will be accepted +- Added tenure_idle_timeout_secs configuration option for determining when a time-based tenure extend will be accepted ## [3.0.0.0.4.0] From 92b1a3de187a81fd3679011baa1807915d4598d7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 6 Dec 2024 15:04:25 -0500 Subject: [PATCH 098/115] Add some debug logs to tenure extend timestamp calc and roundup the processing time Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index f4bc23ccfc..14b97ae218 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -1001,15 +1001,19 @@ impl SignerDb { })?; let mut tenure_processing_time_ms = 0_u64; let mut tenure_start_time = None; - for row in rows { + let mut nmb_rows = 0; + for (i, row) in rows.enumerate() { + nmb_rows += 1; let (tenure_change_block, proposed_time, validation_time_ms) = row?; tenure_processing_time_ms = 
tenure_processing_time_ms.saturating_add(validation_time_ms.unwrap_or(0)); tenure_start_time = Some(proposed_time); if tenure_change_block { + debug!("Found tenure change block {i} blocks ago in tenure {tenure}"); break; } } + debug!("Calculated tenure extend timestamp from {nmb_rows} blocks in tenure {tenure}"); Ok(( tenure_start_time.unwrap_or(get_epoch_time_secs()), tenure_processing_time_ms, @@ -1024,9 +1028,19 @@ impl SignerDb { ) -> u64 { let tenure_idle_timeout_secs = tenure_idle_timeout.as_secs(); let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(tenure).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); - tenure_start_time + // Plus (ms + 999)/1000 to round up to the nearest second + let tenure_extend_timestamp = tenure_start_time .saturating_add(tenure_idle_timeout_secs) - .saturating_add(tenure_process_time_ms / 1000) + .saturating_add(tenure_process_time_ms.saturating_add(999) / 1000); + debug!("Calculated tenure extend timestamp"; + "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_start_time" => tenure_start_time, + "tenure_process_time_ms" => tenure_process_time_ms, + "tenure_idle_timeout_secs" => tenure_idle_timeout_secs, + "tenure_extend_in" => tenure_extend_timestamp.saturating_sub(get_epoch_time_secs()), + "consensus_hash" => %tenure, + ); + tenure_extend_timestamp } } From 7a7549ce532e0d606785283c3e737e712cfa63c4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 6 Dec 2024 15:11:33 -0500 Subject: [PATCH 099/115] chore: set nakamoto block version to 2 --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 35f6e5d1e1..fe150a0d87 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -131,7 
+131,7 @@ pub use self::staging_blocks::{ NakamotoStagingBlocksConn, NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx, }; -pub const NAKAMOTO_BLOCK_VERSION: u8 = 0; +pub const NAKAMOTO_BLOCK_VERSION: u8 = 2; define_named_enum!(HeaderTypeNames { Nakamoto("nakamoto"), From 062a3b8057746fb8a80030ece23e4db52fa7ae2e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 6 Dec 2024 15:24:06 -0500 Subject: [PATCH 100/115] chore: pick testnet activation heights for epoch 3.1 and stacks halvenings --- stacks-common/src/types/mod.rs | 8 ++++---- testnet/stacks-node/conf/testnet-follower-conf.toml | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 4a1f34cbc7..49fdfa84fd 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -172,19 +172,19 @@ pub const COINBASE_INTERVALS_TESTNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell }, CoinbaseInterval { coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), - effective_start_height: 1000, + effective_start_height: 77_777, }, CoinbaseInterval { coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), - effective_start_height: 2000, + effective_start_height: 77_777 * 7, }, CoinbaseInterval { coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), - effective_start_height: 3000, + effective_start_height: 77_777 * 14, }, CoinbaseInterval { coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, - effective_start_height: 4000, + effective_start_height: 77_777 * 21, }, ]; assert!(CoinbaseInterval::check_order(&emissions_schedule)); diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 80226c5b89..c294a628b4 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -78,3 +78,7 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" start_height = 56_457 + +[[burnchain.epochs]] +epoch_name = 
"3.1" +start_height = 77_770 From c0da03ef3651137e4089e8ab302cc0b2e50b249d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 6 Dec 2024 15:32:18 -0500 Subject: [PATCH 101/115] Fix tenure timestamp check to be consistent with signers, only accepting strictly greater than values Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 9439a602f9..0ce27fe1d1 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1160,7 +1160,7 @@ impl BlockMinerThread { if self.last_block_mined.is_some() { // Check if we can extend the current tenure let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); - if get_epoch_time_secs() < tenure_extend_timestamp { + if get_epoch_time_secs() <= tenure_extend_timestamp { return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, From 19e4d426735cd2b7ddb713104b28c455b714b9d9 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 6 Dec 2024 12:39:09 -0800 Subject: [PATCH 102/115] chore: more logging when rejecting time-based extend --- stacks-signer/src/chainstate.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index f5a04ebbc9..ddbb671adc 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -365,16 +365,19 @@ impl SortitionsView { let sortition_consensus_hash = proposed_by.state().consensus_hash; let changed_burn_view = tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; - let enough_time_passed = get_epoch_time_secs() - > signer_db.calculate_tenure_extend_timestamp( - self.config.tenure_idle_timeout, - &sortition_consensus_hash, - ); + let extend_timestamp = signer_db.calculate_tenure_extend_timestamp( + 
self.config.tenure_idle_timeout, + &sortition_consensus_hash, + ); + let epoch_time = get_epoch_time_secs(); + let enough_time_passed = epoch_time > extend_timestamp; if !changed_burn_view && !enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "extend_timestamp" => extend_timestamp, + "epoch_time" => epoch_time, ); return Ok(false); } From d828159d70a34ac77ca85fcfece24b134f65fa53 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Dec 2024 15:50:38 -0500 Subject: [PATCH 103/115] chore: allow negative in `tenure_extend_in` log --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 97b5620a7c..6a6cb14a8c 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -532,7 +532,7 @@ impl StackerDBListenerComms { if weight_sum >= weight_threshold { debug!("SignerCoordinator: 70% threshold reached for tenure extension timestamp"; "tenure_extend_timestamp" => info.timestamp, - "tenure_extend_in" => (info.timestamp - get_epoch_time_secs()) + "tenure_extend_in" => (info.timestamp as i64 - get_epoch_time_secs() as i64) ); return info.timestamp; } From 7b265f7fafd7b3325a77321d4abfef3d97b3c761 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Dec 2024 16:42:59 -0500 Subject: [PATCH 104/115] feat: make tenure idle timestamps monotonically increase This is just in case the miner receives a signature for an old block, it will not adjust the timestamp down from the latest is already had seen. 
--- .../src/nakamoto_node/stackerdb_listener.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 6a6cb14a8c..00c21ec003 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -433,6 +433,16 @@ impl StackerDBListener { .signer_idle_timestamps .lock() .expect("FATAL: failed to lock idle timestamps"); + + // Check the current timestamp for the given signer_pubkey + if let Some(existing_info) = idle_timestamps.get(&signer_pubkey) { + // Only update if the new timestamp is greater + if timestamp <= existing_info.timestamp { + return; // Exit early if the new timestamp is not greater + } + } + + // Update the map with the new timestamp and weight let timestamp_info = TimestampInfo { timestamp, weight }; idle_timestamps.insert(signer_pubkey, timestamp_info); } From 62f50661e385962bbb15999c75fa0243062fe994 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 6 Dec 2024 15:16:49 -0800 Subject: [PATCH 105/115] fix: revert stack block version change --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d874186988..dbaf226015 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -131,7 +131,7 @@ pub use self::staging_blocks::{ NakamotoStagingBlocksConn, NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx, }; -pub const NAKAMOTO_BLOCK_VERSION: u8 = 1; +pub const NAKAMOTO_BLOCK_VERSION: u8 = 0; define_named_enum!(HeaderTypeNames { Nakamoto("nakamoto"), From 86751a0390be14b4ecd5431d3075b28c7a13ffa5 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Sat, 7 Dec 2024 10:11:29 -0800 Subject: [PATCH 106/115] Adding changelog for 
epoch 3.1 and revert block version change (#5538) --- CHANGELOG.md | 7 +++++++ stacks-signer/CHANGELOG.md | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f383cb596c..920621c045 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,13 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added +### Changed + +## [3.1.0.0.0] + +### Added + +- **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/will-corcoran/sips/blob/feat/sip-029-halving-alignment/sips/sip-029/sip-029-halving-alignment.md) for details) - New RPC endpoints - `/v2/clarity/marf/:marf_key_hash` - `/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key` diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index a332b344ce..01613ab23c 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [3.1.0.0.0.0] + +### Added + +- **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/will-corcoran/sips/blob/feat/sip-029-halving-alignment/sips/sip-029/sip-029-halving-alignment.md) for details) + +### Changed + ## [3.0.0.0.4.0] ### Added From 761ae4dcc4b2b8111e867d7c13fd06412f99fd26 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 7 Dec 2024 16:34:54 -0500 Subject: [PATCH 107/115] Rollover tenure extend timestamp for tenure change blocks that are accepted Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 3 ++- stacks-signer/src/signerdb.rs | 41 +++++++++++++++++++++++---------- stacks-signer/src/v0/signer.rs | 27 ++++++++++++++-------- 3 files changed, 49 insertions(+), 22 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index ddbb671adc..f2f042dffb 100644 --- a/stacks-signer/src/chainstate.rs +++ 
b/stacks-signer/src/chainstate.rs @@ -367,7 +367,8 @@ impl SortitionsView { tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; let extend_timestamp = signer_db.calculate_tenure_extend_timestamp( self.config.tenure_idle_timeout, - &sortition_consensus_hash, + &block, + false, ); let epoch_time = get_epoch_time_secs(); let enough_time_passed = epoch_time > extend_timestamp; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 14b97ae218..9c4c348f8e 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -1020,14 +1020,22 @@ impl SignerDb { )) } - /// Calculate the tenure extend timestamp + /// Calculate the tenure extend timestamp. If determine the timestamp for a block rejection, check_tenure_extend should be set to false to avoid recalculating + /// the tenure extend timestamp for a tenure extend block. pub fn calculate_tenure_extend_timestamp( &self, tenure_idle_timeout: Duration, - tenure: &ConsensusHash, + block: &NakamotoBlock, + check_tenure_extend: bool, ) -> u64 { + if check_tenure_extend && block.get_tenure_tx_payload().is_some() { + let tenure_extend_timestamp = + get_epoch_time_secs().wrapping_add(tenure_idle_timeout.as_secs()); + debug!("Calculated tenure extend timestamp for a tenure extend block. Rolling over timestamp: {tenure_extend_timestamp}"); + return tenure_extend_timestamp; + } let tenure_idle_timeout_secs = tenure_idle_timeout.as_secs(); - let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(tenure).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); + let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(&block.header.consensus_hash).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. 
Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); // Plus (ms + 999)/1000 to round up to the nearest second let tenure_extend_timestamp = tenure_start_time .saturating_add(tenure_idle_timeout_secs) @@ -1038,7 +1046,7 @@ impl SignerDb { "tenure_process_time_ms" => tenure_process_time_ms, "tenure_idle_timeout_secs" => tenure_idle_timeout_secs, "tenure_extend_in" => tenure_extend_timestamp.saturating_sub(get_epoch_time_secs()), - "consensus_hash" => %tenure, + "consensus_hash" => %block.header.consensus_hash, ); tenure_extend_timestamp } @@ -1705,9 +1713,8 @@ mod tests { let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let block_infos = generate_tenure_blocks(); - let consensus_hash_1 = block_infos[0].block.header.consensus_hash; - let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; - let consensus_hash_3 = ConsensusHash([0x03; 20]); + let mut unknown_block = block_infos[0].block.clone(); + unknown_block.header.consensus_hash = ConsensusHash([0x03; 20]); db.insert_block(&block_infos[0]).unwrap(); db.insert_block(&block_infos[1]).unwrap(); @@ -1715,7 +1722,7 @@ mod tests { let tenure_idle_timeout = Duration::from_secs(10); // Verify tenure consensus_hash_1 let timestamp_hash_1_before = - db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_1); + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, true); assert_eq!( timestamp_hash_1_before, block_infos[0] @@ -1728,7 +1735,8 @@ mod tests { db.insert_block(&block_infos[3]).unwrap(); let timestamp_hash_1_after = - db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_1); + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, true); + assert_eq!( timestamp_hash_1_after, block_infos[2] @@ -1741,8 +1749,11 @@ mod tests { db.insert_block(&block_infos[5]).unwrap(); // Verify tenure consensus_hash_2 - let timestamp_hash_2 = - 
db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_2); + let timestamp_hash_2 = db.calculate_tenure_extend_timestamp( + tenure_idle_timeout, + &block_infos.last().unwrap().block, + true, + ); assert_eq!( timestamp_hash_2, block_infos[4] @@ -1751,9 +1762,15 @@ mod tests { .saturating_add(20) ); + let now = get_epoch_time_secs().saturating_add(tenure_idle_timeout.as_secs()); + let timestamp_hash_2_no_tenure_extend = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, false); + assert_ne!(timestamp_hash_2, timestamp_hash_2_no_tenure_extend); + assert!(now < timestamp_hash_2_no_tenure_extend); + // Verify tenure consensus_hash_3 (unknown hash) let timestamp_hash_3 = - db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &consensus_hash_3); + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &unknown_block, true); assert!( timestamp_hash_3.saturating_add(tenure_idle_timeout.as_secs()) < block_infos[0].proposed_time diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b706df4075..22a02bc107 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -305,7 +305,8 @@ impl Signer { signature, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_info.block.header.consensus_hash, + &block_info.block, + true, ), ) } else { @@ -317,7 +318,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_info.block.header.consensus_hash, + &block_info.block, + false, ), ) }; @@ -422,7 +424,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_proposal.block.header.consensus_hash, + &block_proposal.block, + false, ), )) } @@ -440,7 +443,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - 
&block_proposal.block.header.consensus_hash, + &block_proposal.block, + false, ), )) } @@ -460,7 +464,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_proposal.block.header.consensus_hash, + &block_proposal.block, + false, ), )) }; @@ -602,7 +607,8 @@ impl Signer { signature, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_info.block.header.consensus_hash, + &block_info.block, + true, ), ); // have to save the signature _after_ the block info @@ -657,7 +663,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_info.block.header.consensus_hash, + &block_info.block, + false, ), ); self.signer_db @@ -755,7 +762,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_proposal.block.header.consensus_hash, + &block_proposal.block, + false, ), ); if let Err(e) = block_info.mark_locally_rejected() { @@ -1131,7 +1139,8 @@ impl Signer { self.mainnet, self.signer_db.calculate_tenure_extend_timestamp( self.proposal_config.tenure_idle_timeout, - &block_proposal.block.header.consensus_hash, + &block_proposal.block, + false, ), )) } else { From 8156b547d96c4856e153f19d2570546190c4b1db Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 7 Dec 2024 17:13:24 -0500 Subject: [PATCH 108/115] fix: revert change to block version This should not change until we are sure signers have been updated to accept later block versions. 
--- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index fe150a0d87..35f6e5d1e1 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -131,7 +131,7 @@ pub use self::staging_blocks::{ NakamotoStagingBlocksConn, NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx, }; -pub const NAKAMOTO_BLOCK_VERSION: u8 = 2; +pub const NAKAMOTO_BLOCK_VERSION: u8 = 0; define_named_enum!(HeaderTypeNames { Nakamoto("nakamoto"), From 565ad2a198480e1613afc86d35ca2056836906e4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 9 Dec 2024 08:06:49 -0500 Subject: [PATCH 109/115] fix: do not propose block if mock-mining --- .../stacks-node/src/nakamoto_node/miner.rs | 110 +++++++++--------- 1 file changed, 58 insertions(+), 52 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 0ce27fe1d1..2fe38145bd 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -422,58 +422,64 @@ impl BlockMinerThread { if let Some(mut new_block) = new_block { Self::fault_injection_block_broadcast_stall(&new_block); - let mut chain_state = - neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! 
{e:?}" - )) - })?; - let signer_signature = match coordinator.propose_block( - &mut new_block, - &self.burn_block, - &self.burnchain, - &sortdb, - &mut chain_state, - stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - ) { - Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - _ => { - // Sleep for a bit to allow signers to catch up - let pause_ms = if *last_block_rejected { - self.config.miner.subsequent_rejection_pause_ms - } else { - self.config.miner.first_rejection_pause_ms - }; - - error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - thread::sleep(Duration::from_millis(pause_ms)); - *last_block_rejected = true; - return Ok(()); - } - }, + + let signer_signature = if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually propose the block. + Vec::new() + } else { + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! 
{e:?}" + )) + })?; + match coordinator.propose_block( + &mut new_block, + &self.burn_block, + &self.burnchain, + &sortdb, + &mut chain_state, + stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + ) { + Ok(x) => x, + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + // Sleep for a bit to allow signers to catch up + let pause_ms = if *last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + thread::sleep(Duration::from_millis(pause_ms)); + *last_block_rejected = true; + return Ok(()); + } + }, + } }; *last_block_rejected = false; From 1f79b52ed1080e4c82e6bc3f1ad275227ac4c90e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 9 Dec 2024 08:07:55 -0500 Subject: [PATCH 110/115] fix: do not broadcast block when mock-mining --- testnet/stacks-node/src/nakamoto_node/miner.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 2fe38145bd..8f1984beac 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -691,6 +691,11 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); + if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually broadcast the block. 
+ return Ok(()); + } + if self.config.miner.mining_key.is_none() { return Err(NakamotoNodeError::MinerConfigurationFailed( "No mining key configured, cannot mine", From 5bedd6e43d69036f172f65302939c7e14294875b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 9 Dec 2024 08:46:19 -0500 Subject: [PATCH 111/115] refactor: move checks early in `broadcast` --- testnet/stacks-node/src/nakamoto_node/miner.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 8f1984beac..c37afd6836 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -682,15 +682,6 @@ impl BlockMinerThread { reward_set: &RewardSet, stackerdbs: &StackerDBs, ) -> Result<(), NakamotoNodeError> { - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - if self.config.get_node_config(false).mock_mining { // If we're mock mining, we don't actually broadcast the block. 
return Ok(()); @@ -702,6 +693,15 @@ impl BlockMinerThread { )); }; + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + // push block via p2p block push self.broadcast_p2p(&sort_db, &mut chain_state, &block, reward_set) .map_err(NakamotoNodeError::AcceptFailure)?; From b201dcda2f9d4251104026f8945216c378f30795 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 9 Dec 2024 09:06:25 -0500 Subject: [PATCH 112/115] refactor: clean up mock-mining changes for readability --- .../stacks-node/src/nakamoto_node/miner.rs | 131 ++++++++++-------- 1 file changed, 73 insertions(+), 58 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index c37afd6836..6a4ea39b60 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -41,6 +41,7 @@ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::get_epoch_time_secs; +use stacks::util::secp256k1::MessageSignature; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; @@ -422,64 +423,48 @@ impl BlockMinerThread { if let Some(mut new_block) = new_block { Self::fault_injection_block_broadcast_stall(&new_block); - - let signer_signature = if self.config.get_node_config(false).mock_mining { - // If we're mock mining, we don't actually propose the block. - Vec::new() - } else { - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. 
Cannot mine! {e:?}" - )) - })?; - match coordinator.propose_block( - &mut new_block, - &self.burn_block, - &self.burnchain, - &sortdb, - &mut chain_state, - stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - ) { - Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - _ => { - // Sleep for a bit to allow signers to catch up - let pause_ms = if *last_block_rejected { - self.config.miner.subsequent_rejection_pause_ms - } else { - self.config.miner.first_rejection_pause_ms - }; - - error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - thread::sleep(Duration::from_millis(pause_ms)); - *last_block_rejected = true; - return Ok(()); - } - }, - } + let signer_signature = match self.propose_block( + coordinator, + &mut new_block, + sortdb, + stackerdbs, + ) { + Ok(x) => x, + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + // Sleep for a bit to allow signers to catch up + let pause_ms = if *last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + thread::sleep(Duration::from_millis(pause_ms)); + *last_block_rejected = true; + return Ok(()); + } + }, }; *last_block_rejected = false; @@ -533,6 +518,36 @@ impl BlockMinerThread { Ok(()) } + fn propose_block( + &self, + coordinator: &mut SignerCoordinator, + new_block: &mut NakamotoBlock, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + ) -> Result, NakamotoNodeError> { + if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually propose the block. + return Ok(Vec::new()); + } + + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! {e:?}" + )) + })?; + coordinator.propose_block( + new_block, + &self.burn_block, + &self.burnchain, + sortdb, + &mut chain_state, + stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + ) + } + /// Load the signer set active for this miner's blocks. This is the /// active reward set during `self.burn_election_block`. 
The miner /// thread caches this information, and this method will consult From 5a69d108a6208892422409e528f17cc8b7106f16 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 9 Dec 2024 11:39:45 -0800 Subject: [PATCH 113/115] add epoch 3.1 config for testnet-miner-conf.toml --- testnet/stacks-node/conf/testnet-miner-conf.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 93455dcee5..65f8cace68 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -74,3 +74,7 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" start_height = 56_457 + +[[burnchain.epochs]] +epoch_name = "3.1" +start_height = 77_770 From 0b2336269d1c5b7a186174daffcd2a6278e97175 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 9 Dec 2024 15:03:35 -0500 Subject: [PATCH 114/115] docs: add time-based tenure extend to CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76bc175722..05df6a51a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added +- A miner will now generate a tenure-extend when at least 70% of the signers have confirmed that they are willing to allow one, via the new timestamp included in block responses. This allows the miner to refresh its budget in between Bitcoin blocks. 
([#5476](https://github.com/stacks-network/stacks-core/discussions/5476)) - New RPC endpoints - `/v2/clarity/marf/:marf_key_hash` - `/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key` From 00b5f651fb640703f74d7cf84e5cbac5ad14f482 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:12:43 -0800 Subject: [PATCH 115/115] Fix text formatting in changelog --- stacks-signer/CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 517f5b30ea..2f1187de51 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -27,7 +27,6 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/will-corcoran/sips/blob/feat/sip-029-halving-alignment/sips/sip-029/sip-029-halving-alignment.md) for details) ### Changed ->>>>>>> chore/3.1.0.0.0_changelog ## [3.0.0.0.4.0]