From 9d29b129f25745440ea9e1d3698cbc0c8163f075 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 13 Apr 2021 02:27:07 -0400 Subject: [PATCH 01/21] Add copyright statement; implement exponential timeout with jitter when re-trying an Atlas request (capped at a 10-minute delay interval) --- src/net/atlas/db.rs | 16 +++++++++ src/net/atlas/download.rs | 68 +++++++++++++++++++++++++++++++++++---- src/net/atlas/mod.rs | 17 ++++++++++ src/net/atlas/tests.rs | 16 +++++++++ 4 files changed, 110 insertions(+), 7 deletions(-) diff --git a/src/net/atlas/db.rs b/src/net/atlas/db.rs index a529fe5090..d3a84a3326 100644 --- a/src/net/atlas/db.rs +++ b/src/net/atlas/db.rs @@ -1,3 +1,19 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + use rusqlite::types::ToSql; use rusqlite::Row; use rusqlite::Transaction; diff --git a/src/net/atlas/download.rs b/src/net/atlas/download.rs index 56e8bd6566..7a73a976c9 100644 --- a/src/net/atlas/download.rs +++ b/src/net/atlas/download.rs @@ -1,4 +1,22 @@ -use super::{AtlasDB, Attachment, AttachmentInstance, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
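An aside on the retry schedule this patch introduces: the delay is a capped exponential backoff with jitter, roughly 2^retry_count seconds plus a random jitter drawn from [0, 2^(retry_count - 1)), never exceeding MAX_RETRY_DELAY (600 seconds). A minimal standalone sketch of the same computation; the function name retry_delay is illustrative and not part of the patch:

    use rand::{thread_rng, Rng};
    use std::cmp;

    const MAX_RETRY_DELAY: u64 = 600; // seconds, matching the cap in this patch

    // Capped exponential backoff with jitter, mirroring AttachmentsBatch::bump_retry_count().
    fn retry_delay(retry_count: u64) -> u64 {
        let base = 2u64.saturating_pow(retry_count as u32);
        // the jitter bound is always at least 1, so the modulo below cannot panic
        let jitter_bound = 2u64.saturating_pow(retry_count.saturating_sub(1) as u32);
        let jitter = thread_rng().gen::<u64>() % jitter_bound;
        cmp::min(MAX_RETRY_DELAY, base.saturating_add(jitter))
    }

Under this schedule the first retries wait roughly 2, 4, 8, ... seconds, and by the tenth retry (2^10 = 1024) the 600-second cap always applies.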
+ +use super::{ + AtlasDB, Attachment, AttachmentInstance, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, MAX_RETRY_DELAY, +}; use chainstate::burn::{BlockHeaderHash, ConsensusHash}; use chainstate::stacks::db::StacksChainState; use chainstate::stacks::{StacksBlockHeader, StacksBlockId}; @@ -23,6 +41,10 @@ use std::fmt; use std::hash::{Hash, Hasher}; use std::net::{IpAddr, SocketAddr}; +use rand::thread_rng; +use rand::Rng; +use std::cmp; + #[derive(Debug)] pub struct AttachmentsDownloader { priority_queue: BinaryHeap<AttachmentsBatch>, @@ -43,6 +65,29 @@ impl AttachmentsDownloader { } } + pub fn has_ready_batches(&self) -> bool { + for batch in self.priority_queue.iter() { + if batch.retry_deadline < get_epoch_time_secs() { + return true; + } + } + return false; + } + + pub fn pop_next_ready_batch(&mut self) -> Option<AttachmentsBatch> { + let next_is_ready = if let Some(ref next) = self.priority_queue.peek() { + next.retry_deadline < get_epoch_time_secs() + } else { + false + }; + + if next_is_ready { + self.priority_queue.pop() + } else { + None + } + } + pub fn run( &mut self, dns_client: &mut DNSClient, @@ -66,7 +111,7 @@ impl AttachmentsDownloader { let ongoing_fsm = match self.ongoing_batch.take() { Some(batch) => batch, None => { - if self.priority_queue.is_empty() { + if self.priority_queue.is_empty() || !self.has_ready_batches() { // Nothing to do! return Ok((vec![], vec![])); } @@ -88,8 +133,7 @@ impl AttachmentsDownloader { } let attachments_batch = self - .priority_queue - .pop() + .pop_next_ready_batch() .expect("Unable to pop attachments batch from queue"); let ctx = AttachmentsBatchStateContext::new( attachments_batch, @@ -1003,6 +1047,7 @@ pub struct AttachmentsBatch { pub index_block_hash: StacksBlockId, pub attachments_instances: HashMap<QualifiedContractIdentifier, HashMap<u32, Hash160>>, pub retry_count: u64, + pub retry_deadline: u64, } impl AttachmentsBatch { @@ -1012,6 +1057,7 @@ impl AttachmentsBatch { index_block_hash: StacksBlockId([0u8; 32]), attachments_instances: HashMap::new(), retry_count: 0, + retry_deadline: 0, } } @@ -1023,7 +1069,7 @@ impl AttachmentsBatch { if self.block_height != attachment.block_height || self.index_block_hash != attachment.index_block_hash { - warn!("Atlas: attempt to add unrelated AttachmentInstance ({}, {}) to AttachmentBatch", attachment.attachment_index, attachment.index_block_hash); + warn!("Atlas: attempt to add unrelated AttachmentInstance ({}, {}) to AttachmentsBatch", attachment.attachment_index, attachment.index_block_hash); return; } } @@ -1048,6 +1094,14 @@ impl AttachmentsBatch { pub fn bump_retry_count(&mut self) { self.retry_count += 1; + let delay = cmp::min( + MAX_RETRY_DELAY, + 2u64.saturating_pow(self.retry_count as u32) + + (thread_rng().gen::<u64>() % 2u64.saturating_pow((self.retry_count - 1) as u32)), + ); + + debug!("Atlas: Re-attempt download in {} seconds", delay); + self.retry_deadline = get_epoch_time_secs() + delay; } pub fn has_fully_succeed(&self) -> bool { @@ -1105,8 +1159,8 @@ impl AttachmentsBatch { impl Ord for AttachmentsBatch { fn cmp(&self, other: &AttachmentsBatch) -> Ordering { other - .retry_count - .cmp(&self.retry_count) + .retry_deadline + .cmp(&self.retry_deadline) .then_with(|| { self.attachments_instances_count() .cmp(&other.attachments_instances_count()) diff --git a/src/net/atlas/mod.rs b/src/net/atlas/mod.rs index 732048a0bc..4ee3a445ac 100644 --- a/src/net/atlas/mod.rs +++ b/src/net/atlas/mod.rs @@ -1,3 +1,19 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software:
you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + pub mod db; pub mod download; @@ -20,6 +36,7 @@ use std::convert::TryFrom; use std::hash::{Hash, Hasher}; pub const MAX_ATTACHMENT_INV_PAGES_PER_REQUEST: usize = 8; +pub const MAX_RETRY_DELAY: u64 = 600; lazy_static! { pub static ref BNS_CHARS_REGEX: Regex = Regex::new("^([a-z0-9]|[-_])*$").unwrap(); diff --git a/src/net/atlas/tests.rs b/src/net/atlas/tests.rs index 22adcadc00..9a40f0056c 100644 --- a/src/net/atlas/tests.rs +++ b/src/net/atlas/tests.rs @@ -1,3 +1,19 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ use super::download::{ AttachmentRequest, AttachmentsBatch, AttachmentsBatchStateContext, AttachmentsInventoryRequest, BatchedRequestsResult, ReliabilityReport, From ee468769a17c78f52a305917cb0c948b4a910078 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 13 Apr 2021 16:34:17 -0400 Subject: [PATCH 02/21] Fix new_bytes calculation (thanks @pavitthrap) --- src/chainstate/stacks/db/unconfirmed.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chainstate/stacks/db/unconfirmed.rs b/src/chainstate/stacks/db/unconfirmed.rs index 4b6889ad7f..dff3e7fde9 100644 --- a/src/chainstate/stacks/db/unconfirmed.rs +++ b/src/chainstate/stacks/db/unconfirmed.rs @@ -207,7 +207,7 @@ impl UnconfirmedState { last_mblock = Some(mblock_header); last_mblock_seq = seq; - new_bytes = { + new_bytes += { let mut total = 0; for tx in mblock.txs.iter() { let mut bytes = vec![]; From 80128c8bb72e24500f933237b94f7ec50caf6aff Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 13 Apr 2021 18:38:22 -0400 Subject: [PATCH 03/21] only sync the last few reward cycles on mainnet by default --- src/net/inv.rs | 2 +- testnet/stacks-node/src/config.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/net/inv.rs b/src/net/inv.rs index 0a0ad6c0b4..781fe1ea14 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -92,7 +92,7 @@ pub const FULL_INV_SYNC_INTERVAL: u64 = 12 * 3600; pub const FULL_INV_SYNC_INTERVAL: u64 = 120; #[cfg(not(test))] -pub const INV_REWARD_CYCLES: u64 = 6; +pub const INV_REWARD_CYCLES: u64 = 3; #[cfg(test)] pub const INV_REWARD_CYCLES: u64 = 1; diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index c7d4e5b9f7..77a64f0236 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -398,6 +398,7 @@ lazy_static! 
{ max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) walk_interval: 60, // how often, in seconds, we do a neighbor walk inv_sync_interval: 45, // how often, in seconds, we refresh block inventories + inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) dns_timeout: 15_000, max_inflight_blocks: 6, From 7c80879f1657802a07d7e131b3a7751572be7e9d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:04:30 -0400 Subject: [PATCH 04/21] add fault injection to disable the cost/size overflow checks --- src/chainstate/stacks/db/unconfirmed.rs | 28 +++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/chainstate/stacks/db/unconfirmed.rs b/src/chainstate/stacks/db/unconfirmed.rs index dff3e7fde9..7d89c8c1aa 100644 --- a/src/chainstate/stacks/db/unconfirmed.rs +++ b/src/chainstate/stacks/db/unconfirmed.rs @@ -57,6 +57,10 @@ pub struct UnconfirmedState { readonly: bool, dirty: bool, num_mblocks_added: u64, + + // fault injection for testing + pub disable_cost_check: bool, + pub disable_bytes_check: bool, } impl UnconfirmedState { @@ -85,6 +89,9 @@ impl UnconfirmedState { readonly: false, dirty: false, num_mblocks_added: 0, + + disable_cost_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_COST_CHECK), + disable_bytes_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK), }) } @@ -115,6 +122,9 @@ impl UnconfirmedState { readonly: true, dirty: false, num_mblocks_added: 0, + + disable_cost_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_COST_CHECK), + disable_bytes_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK), }) } @@ -237,6 +247,16 @@ impl UnconfirmedState { self.bytes_so_far += new_bytes; self.num_mblocks_added += num_new_mblocks; + // apply injected faults + if self.disable_cost_check { + warn!("Fault injection: disabling microblock miner's cost tracking"); + self.cost_so_far = ExecutionCost::zero(); + } + if self.disable_bytes_check { + warn!("Fault injection: disabling microblock miner's size tracking"); + self.bytes_so_far = 0; + } + Ok((total_fees, total_burns, all_receipts)) } @@ -311,6 +331,14 @@ impl UnconfirmedState { ) -> Option<(StacksTransaction, BlockHeaderHash, u16)> { self.mined_txs.get(txid).map(|x| x.clone()) } + + pub fn num_microblocks(&self) -> u64 { + if self.last_mblock.is_some() { + (self.last_mblock_seq as u64) + 1 + } else { + 0 + } + } } impl StacksChainState { From 32535c2292b03b0282219565c088ef02a2005253 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:05:08 -0400 Subject: [PATCH 05/21] add fault injection to the microblock miner, in order to force it to mine a microblock stream that is too big or costly to confirm --- src/chainstate/stacks/miner.rs | 36 ++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs index d11f2fdcb0..3dbeed4a56 100644 --- a/src/chainstate/stacks/miner.rs +++ b/src/chainstate/stacks/miner.rs @@ -65,6 +65,10 @@ struct MicroblockMinerRuntime { considered: Option<HashSet<Txid>>, num_mined: u64, tip: StacksBlockId, + + // fault injection, inherited from unconfirmed + disable_bytes_check: bool, + disable_cost_check: bool, } #[derive(PartialEq)] @@ -87,6 +91,9 @@ impl From<&UnconfirmedState> for MicroblockMinerRuntime
{ considered: Some(considered), num_mined: 0, tip: unconfirmed.confirmed_chain_tip.clone(), + + disable_bytes_check: unconfirmed.disable_bytes_check, + disable_cost_check: unconfirmed.disable_cost_check, } } } @@ -386,6 +393,16 @@ impl<'a> StacksMicroblockBuilder<'a> { } } + // do fault injection + if self.runtime.disable_bytes_check { + warn!("Fault injection: disabling miner limit on microblock stream size"); + bytes_so_far = 0; + } + if self.runtime.disable_cost_check { + warn!("Fault injection: disabling miner limit on microblock runtime cost"); + clarity_tx.reset_cost(ExecutionCost::zero()); + } + self.runtime.bytes_so_far = bytes_so_far; self.clarity_tx.replace(clarity_tx); self.runtime.considered.replace(considered); @@ -465,6 +482,16 @@ impl<'a> StacksMicroblockBuilder<'a> { }, ); + // do fault injection + if self.runtime.disable_bytes_check { + warn!("Fault injection: disabling miner limit on microblock stream size"); + bytes_so_far = 0; + } + if self.runtime.disable_cost_check { + warn!("Fault injection: disabling miner limit on microblock runtime cost"); + clarity_tx.reset_cost(ExecutionCost::zero()); + } + self.runtime.bytes_so_far = bytes_so_far; self.clarity_tx.replace(clarity_tx); self.runtime.considered.replace(considered); @@ -498,8 +525,9 @@ impl<'a> Drop for StacksMicroblockBuilder<'a> { debug!( "Drop StacksMicroblockBuilder"; "chain tip" => %&self.runtime.tip, - "txs considered" => &self.runtime.considered.as_ref().map(|x| x.len()).unwrap_or(0), - "txs mined" => self.runtime.num_mined, + "txs mined off tip" => &self.runtime.considered.as_ref().map(|x| x.len()).unwrap_or(0), + "txs added" => self.runtime.num_mined, + "bytes so far" => self.runtime.bytes_so_far, "cost so far" => &format!("{:?}", &self.get_cost_so_far()) ); self.clarity_tx @@ -1363,8 +1391,8 @@ impl StacksBlockBuilder { ); debug!( - "Build anchored block off of {}/{} height {}", - &tip_consensus_hash, &tip_block_hash, tip_height + "Build anchored block off of {}/{} height {} budget {:?}", + &tip_consensus_hash, &tip_block_hash, tip_height, execution_budget ); let (mut header_reader_chainstate, _) = chainstate_handle.reopen()?; // used for reading block headers during an epoch From 6b4f828d2db0a34e195f920271530fbf00582bf1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:05:33 -0400 Subject: [PATCH 06/21] add fault-injection check and constants --- src/core/mod.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/core/mod.rs b/src/core/mod.rs index ff1d0e38da..45f67f4714 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -119,6 +119,20 @@ pub const BLOCK_LIMIT_MAINNET: ExecutionCost = ExecutionCost { runtime: 5_000_000_000, }; +pub const FAULT_DISABLE_MICROBLOCKS_COST_CHECK: &str = "MICROBLOCKS_DISABLE_COST_CHECK"; +pub const FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK: &str = "MICROBLOCKS_DISABLE_BYTES_CHECK"; + +pub fn check_fault_injection(fault_name: &str) -> bool { + use std::env; + + // only activates if we're testing + if env::var("BITCOIND_TEST") != Ok("1".to_string()) { + return false; + } + + env::var(fault_name) == Ok("1".to_string()) +} + /// Synchronize burn transactions from the Bitcoin blockchain pub fn sync_burnchain_bitcoin( working_dir: &String, From c6f6d785e7deee8b7bb539934f37fc65d8b1f20e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:05:48 -0400 Subject: [PATCH 07/21] initialize the attachment downloader on p2p network initialization --- src/net/p2p.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git 
a/src/net/p2p.rs b/src/net/p2p.rs index bcdcb9f98a..4593412dfc 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -352,7 +352,7 @@ impl PeerNetwork { debug!("{:?}: disable inbound neighbor walks", &local_peer); } - PeerNetwork { + let mut network = PeerNetwork { local_peer: local_peer, peer_version: peer_version, chain_view: chain_view, @@ -428,7 +428,10 @@ impl PeerNetwork { pending_messages: HashMap::new(), fault_last_disconnect: 0, - } + }; + + network.init_attachments_downloader(vec![]); + network } /// start serving. @@ -4434,7 +4437,7 @@ impl PeerNetwork { }) { Ok(_) => {} Err(e) => { - warn!("Atlas: updating attachment inventory failed {}", e); + warn!("Atlas: updating attachment inventory failed: {}", e); } } From 7eb8da9186c37cc13679f505d26219bbc8868f35 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:06:17 -0400 Subject: [PATCH 08/21] make it possible to set the maximum RBF ratio beyond 1.5x, so the miner can attempt to mine many anchored blocks --- testnet/stacks-node/src/config.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 77a64f0236..e578dd4b9e 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -21,6 +21,7 @@ use stacks::vm::costs::ExecutionCost; use stacks::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; const DEFAULT_SATS_PER_VB: u64 = 50; +const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const LEADER_KEY_TX_ESTIM_SIZE: u64 = 290; const BLOCK_COMMIT_TX_ESTIM_SIZE: u64 = 350; @@ -571,6 +572,9 @@ impl Config { satoshis_per_byte: burnchain .satoshis_per_byte .unwrap_or(default_burnchain_config.satoshis_per_byte), + max_rbf: burnchain + .max_rbf + .unwrap_or(default_burnchain_config.max_rbf), leader_key_tx_estimated_size: burnchain .leader_key_tx_estimated_size .unwrap_or(default_burnchain_config.leader_key_tx_estimated_size), @@ -984,6 +988,7 @@ pub struct BurnchainConfig { pub process_exit_at_block_height: Option<u64>, pub poll_time_secs: u64, pub satoshis_per_byte: u64, + pub max_rbf: u64, pub leader_key_tx_estimated_size: u64, pub block_commit_tx_estimated_size: u64, pub rbf_fee_increment: u64, @@ -1010,6 +1015,7 @@ impl BurnchainConfig { process_exit_at_block_height: None, poll_time_secs: 10, // TODO: this is a testnet specific value.
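// A worked example of the max_rbf knob (using only the defaults defined above, not
// new configuration): with satoshis_per_byte = DEFAULT_SATS_PER_VB = 50 and
// max_rbf = DEFAULT_MAX_RBF_RATE = 150, the check added in the next patch stops
// RBF-ing once the ongoing block-commit's fee rate exceeds 50 * 150 / 100 = 75
// sat/vB, i.e. 1.5x the base rate, matching the previously hard-coded limit;
// raising max_rbf in the config lifts that ceiling proportionally.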
satoshis_per_byte: DEFAULT_SATS_PER_VB, + max_rbf: DEFAULT_MAX_RBF_RATE, leader_key_tx_estimated_size: LEADER_KEY_TX_ESTIM_SIZE, block_commit_tx_estimated_size: BLOCK_COMMIT_TX_ESTIM_SIZE, rbf_fee_increment: DEFAULT_RBF_FEE_RATE_INCREMENT, @@ -1065,6 +1071,7 @@ pub struct BurnchainConfigFile { pub leader_key_tx_estimated_size: Option<u64>, pub block_commit_tx_estimated_size: Option<u64>, pub rbf_fee_increment: Option<u64>, + pub max_rbf: Option<u64>, } #[derive(Clone, Debug, Default)] From 170ddcd6dcd58da49da936a3ea181e46eea01741 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:07:05 -0400 Subject: [PATCH 09/21] plumb through new config option max_rbf --- .../src/burnchains/bitcoin_regtest_controller.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 9aa382c30a..c1a70a5b47 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1032,9 +1032,14 @@ impl BitcoinRegtestController { } } - // Stop as soon as the fee_rate is 1.50 higher, stop RBF - if ongoing_op.fees.fee_rate > (self.config.burnchain.satoshis_per_byte * 150 / 100) { - warn!("RBF'd block commits reached 1.5x satoshi per byte fee rate, not resubmitting"); + // Stop RBF-ing once the fee rate exceeds max_rbf percent of the configured satoshis_per_byte + if ongoing_op.fees.fee_rate + > (self.config.burnchain.satoshis_per_byte * self.config.burnchain.max_rbf / 100) + { + warn!( + "RBF'd block commits reached {}% of the satoshi-per-byte fee rate, not resubmitting", + self.config.burnchain.max_rbf + ); self.ongoing_block_commit = Some(ongoing_op); return None; } From a8835b1c21326d9b2f62b73f2b5fb651363b58c4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:07:38 -0400 Subject: [PATCH 10/21] Fix a few bugs in the microblock tenure logic: * if we try to mine an anchored block whose microblock stream is too long, then try again, but using the microblock prefix that is short enough * make sure to reset the microblock miner state when we discover a new anchored block * coalesce repeated requests to run tenure for blocks and microblocks, in case they happen faster than we can act on them * implement a "number of unconfirmed microblocks" block counter, for use in testing --- testnet/stacks-node/src/neon_node.rs | 419 +++++++++++++++++---------- 1 file changed, 266 insertions(+), 153 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 648c4aa51e..ac8a07cc06 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -47,6 +47,7 @@ use stacks::util::get_epoch_time_ms; use stacks::util::get_epoch_time_secs; use stacks::util::hash::{to_hex, Hash160, Sha256Sum}; use stacks::util::secp256k1::Secp256k1PrivateKey; +use stacks::util::sleep_ms; use stacks::util::strings::{UrlString, VecDisplay}; use stacks::util::vrf::VRFPublicKey; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError}; @@ -87,9 +88,9 @@ struct MicroblockMinerState { enum RelayerDirective { HandleNetResult(NetworkResult), ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), - RunTenure(RegisteredKey, BlockSnapshot), + RunTenure(RegisteredKey, BlockSnapshot, u128), RegisterKey(BlockSnapshot), - RunMicroblockTenure, + RunMicroblockTenure(u128), Exit, } @@ -127,6 +128,14 @@ fn
bump_processed_counter(blocks_processed: &BlocksProcessedCounter) { #[cfg(not(test))] fn bump_processed_counter(_blocks_processed: &BlocksProcessedCounter) {} +#[cfg(test)] +fn set_processed_counter(blocks_processed: &BlocksProcessedCounter, value: u64) { + blocks_processed.store(value, std::sync::atomic::Ordering::SeqCst); +} + +#[cfg(not(test))] +fn set_processed_counter(_blocks_processed: &BlocksProcessedCounter) {} + /// Process artifacts from the tenure. /// At this point, we're modifying the chainstate, and merging the artifacts from the previous tenure. fn inner_process_tenure( @@ -302,13 +311,13 @@ fn mine_one_microblock( mempool: &MemPoolDB, ) -> Result<StacksMicroblock, ChainstateError> { debug!( - "Try to mine one microblock off of {}/{} (at seq {})", + "Try to mine one microblock off of {}/{} (total: {})", &microblock_state.parent_consensus_hash, &microblock_state.parent_block_hash, chainstate .unconfirmed_state .as_ref() - .map(|us| us.last_mblock_seq) + .map(|us| us.num_microblocks()) .unwrap_or(0) ); @@ -375,91 +384,82 @@ fn try_mine_microblock( chainstate: &mut StacksChainState, sortdb: &SortitionDB, mem_pool: &MemPoolDB, - winning_tip_opt: Option<&(ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey)>, + winning_tip: (ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey), ) -> Result<Option<StacksMicroblock>, NetError> { + let ch = winning_tip.0; + let bhh = winning_tip.1; + let microblock_privkey = winning_tip.2; + let mut next_microblock = None; if microblock_miner_state.is_none() { - // are we the current sortition winner? Do we need to instantiate? - if let Some((ch, bhh, microblock_privkey)) = winning_tip_opt.as_ref() { - debug!( - "Instantiate microblock mining state off of {}/{}", - &ch, &bhh - ); - // we won a block! proceed to build a microblock tail if we've stored it - match StacksChainState::get_anchored_block_header_info(chainstate.db(), ch, bhh) { - Ok(Some(_)) => { - let parent_index_hash = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - let cost_so_far = StacksChainState::get_stacks_block_anchored_cost( - chainstate.db(), - &parent_index_hash, - )? - .ok_or(NetError::NotFoundError)?; - microblock_miner_state.replace(MicroblockMinerState { - parent_consensus_hash: ch.clone(), - parent_block_hash: bhh.clone(), - miner_key: microblock_privkey.clone(), - frequency: config.node.microblock_frequency, - last_mined: 0, - quantity: 0, - cost_so_far: cost_so_far, - }); - } - Ok(None) => { - warn!( - "No such anchored block: {}/{}. Cannot mine microblocks", - ch, bhh - ); - } - Err(e) => { - warn!( - "Failed to get anchored block cost for {}/{}: {:?}", - ch, bhh, &e - ); - } + debug!( + "Instantiate microblock mining state off of {}/{}", + &ch, &bhh + ); + // we won a block! proceed to build a microblock tail if we've stored it + match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { + Ok(Some(_)) => { + let parent_index_hash = StacksBlockHeader::make_index_block_hash(&ch, &bhh); + let cost_so_far = StacksChainState::get_stacks_block_anchored_cost( + chainstate.db(), + &parent_index_hash, + )? + .ok_or(NetError::NotFoundError)?; + microblock_miner_state.replace(MicroblockMinerState { + parent_consensus_hash: ch.clone(), + parent_block_hash: bhh.clone(), + miner_key: microblock_privkey.clone(), + frequency: config.node.microblock_frequency, + last_mined: 0, + quantity: 0, + cost_so_far: cost_so_far, + }); + } + Ok(None) => { + warn!( + "No such anchored block: {}/{}.
Cannot mine microblocks", + ch, bhh + ); + } + Err(e) => { + warn!( + "Failed to get anchored block cost for {}/{}: {:?}", + ch, bhh, &e + ); } } } - if let Some((ch, bhh, ..)) = winning_tip_opt.as_ref() { - if let Some(mut microblock_miner) = microblock_miner_state.take() { - if microblock_miner.parent_consensus_hash == *ch - && microblock_miner.parent_block_hash == *bhh + if let Some(mut microblock_miner) = microblock_miner_state.take() { + if microblock_miner.parent_consensus_hash == ch && microblock_miner.parent_block_hash == bhh + { + if microblock_miner.last_mined + (microblock_miner.frequency as u128) + < get_epoch_time_ms() { - if microblock_miner.last_mined + (microblock_miner.frequency as u128) - < get_epoch_time_ms() - { - // opportunistically try and mine, but only if there's no attachable blocks - let num_attachable = - StacksChainState::count_attachable_staging_blocks(chainstate.db(), 1, 0)?; - if num_attachable == 0 { - match mine_one_microblock( - &mut microblock_miner, - sortdb, - chainstate, - &mem_pool, - ) { - Ok(microblock) => { - // will need to relay this - next_microblock = Some(microblock); - } - Err(ChainstateError::NoTransactionsToMine) => { - info!("Will keep polling mempool for transactions to include in a microblock"); - } - Err(e) => { - warn!("Failed to mine one microblock: {:?}", &e); - } + // opportunistically try and mine, but only if there's no attachable blocks + let num_attachable = + StacksChainState::count_attachable_staging_blocks(chainstate.db(), 1, 0)?; + if num_attachable == 0 { + match mine_one_microblock(&mut microblock_miner, sortdb, chainstate, &mem_pool) + { + Ok(microblock) => { + // will need to relay this + next_microblock = Some(microblock); + } + Err(ChainstateError::NoTransactionsToMine) => { + info!("Will keep polling mempool for transactions to include in a microblock"); + } + Err(e) => { + warn!("Failed to mine one microblock: {:?}", &e); } } } - microblock_miner.last_mined = get_epoch_time_ms(); - microblock_miner_state.replace(microblock_miner); } - // otherwise, we're not the sortition winner, and the microblock miner state can be - // discarded. + microblock_miner.last_mined = get_epoch_time_ms(); + microblock_miner_state.replace(microblock_miner); } - } else { - // no longer a winning tip - let _ = microblock_miner_state.take(); + // otherwise, we're not the sortition winner, and the microblock miner state can be + // discarded. } Ok(next_microblock) @@ -472,49 +472,61 @@ fn run_microblock_tenure( sortdb: &mut SortitionDB, mem_pool: &MemPoolDB, relayer: &mut Relayer, - miner_tip: Option<&(ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey)>, + miner_tip: (ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey), + microblocks_processed: BlocksProcessedCounter, ) { // TODO: this is sensitive to poll latency -- can we call this on a fixed // schedule, regardless of network activity? 
- if let Some((ref parent_consensus_hash, ref parent_block_hash, _)) = miner_tip.as_ref() { - debug!( - "Run microblock tenure for {}/{}", - parent_consensus_hash, parent_block_hash - ); + let parent_consensus_hash = &miner_tip.0; + let parent_block_hash = &miner_tip.1; - // Mine microblocks, if we're active - let next_microblock_opt = match try_mine_microblock( - &config, - microblock_miner_state, - chainstate, - sortdb, - mem_pool, - miner_tip.clone(), - ) { - Ok(x) => x, - Err(e) => { - warn!("Failed to mine next microblock: {:?}", &e); - None - } - }; + debug!( + "Run microblock tenure for {}/{}", + parent_consensus_hash, parent_block_hash + ); - // did we mine anything? - if let Some(next_microblock) = next_microblock_opt { - // apply it - Relayer::refresh_unconfirmed(chainstate, sortdb); + // Mine microblocks, if we're active + let next_microblock_opt = match try_mine_microblock( + &config, + microblock_miner_state, + chainstate, + sortdb, + mem_pool, + miner_tip.clone(), + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to mine next microblock: {:?}", &e); + None + } + }; - // send it off - let microblock_hash = next_microblock.block_hash(); - if let Err(e) = relayer.broadcast_microblock( - parent_consensus_hash, - parent_block_hash, - next_microblock, - ) { - error!( - "Failure trying to broadcast microblock {}: {}", - microblock_hash, e - ); - } + // did we mine anything? + if let Some(next_microblock) = next_microblock_opt { + // apply it + let microblock_hash = next_microblock.block_hash(); + + Relayer::refresh_unconfirmed(chainstate, sortdb); + let num_mblocks = chainstate + .unconfirmed_state + .as_ref() + .map(|ref unconfirmed| unconfirmed.num_microblocks()) + .unwrap_or(0); + + debug!( + "Relayer: mined one microblock: {} (total: {})", + &microblock_hash, num_mblocks + ); + set_processed_counter(&microblocks_processed, num_mblocks); + + // send it off + if let Err(e) = + relayer.broadcast_microblock(parent_consensus_hash, parent_block_hash, next_microblock) + { + error!( + "Failure trying to broadcast microblock {}: {}", + microblock_hash, e + ); } } } @@ -683,7 +695,10 @@ fn spawn_peer( // only do this on the Ok() path, even if we're mining, because an error in // network dispatching is likely due to resource exhaustion if mblock_deadline < get_epoch_time_ms() { + debug!("P2P: schedule microblock tenure"); - results_with_data.push_back(RelayerDirective::RunMicroblockTenure); + results_with_data.push_back(RelayerDirective::RunMicroblockTenure( + get_epoch_time_ms(), + )); mblock_deadline = get_epoch_time_ms() + (config.node.microblock_frequency as u128); } @@ -706,7 +721,7 @@ fn spawn_peer( ); match e { TrySendError::Full(directive) => { - if let RelayerDirective::RunMicroblockTenure = directive { + if let RelayerDirective::RunMicroblockTenure(_) = directive { // can drop this } else { // don't lose this data -- just try it again @@ -753,6 +768,7 @@ fn spawn_miner_relayer( relay_channel: Receiver<RelayerDirective>, event_dispatcher: EventDispatcher, blocks_processed: BlocksProcessedCounter, + microblocks_processed: BlocksProcessedCounter, burnchain: Burnchain, coord_comms: CoordinatorChannels, unconfirmed_txs: Arc<Mutex<UnconfirmedTxMap>>, @@ -785,9 +801,10 @@ fn spawn_miner_relayer( let mut failed_to_mine_in_block: Option<BurnchainHeaderHash> = None; let mut bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); - let mut microblock_miner_state = None; + let mut microblock_miner_state: Option<MicroblockMinerState> = None; let mut miner_tip = None; let mut last_microblock_tenure_time = 0; + let mut last_tenure_issue_time = 0; let relayer_handle =
thread::Builder::new().name("relayer".to_string()).spawn(move || { while let Ok(mut directive) = relay_channel.recv() { @@ -913,10 +930,13 @@ fn spawn_miner_relayer( // proceed to mine microblocks debug!( - "Microblock miner tip is now {}/{}", - &consensus_hash, &block_header_hash + "Microblock miner tip is now {}/{} ({})", + &consensus_hash, &block_header_hash, StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_header_hash) ); miner_tip = Some((ch, bh, microblock_privkey)); + + Relayer::refresh_unconfirmed(&mut chainstate, &mut sortdb); + send_unconfirmed_txs(&chainstate, unconfirmed_txs.clone()); } } else { debug!("Did not win sortition, my blocks [burn_hash= {}, block_hash= {}], their blocks [parent_consensus_hash= {}, burn_hash= {}, block_hash= {}]", } } } - RelayerDirective::RunTenure(registered_key, last_burn_block) => { + RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { + if last_tenure_issue_time > issue_timestamp_ms { + continue; + } + let burn_header_hash = last_burn_block.burn_header_hash.clone(); debug!( "Relayer: Run tenure"; @@ -974,6 +998,8 @@ fn spawn_miner_relayer( failed_to_mine_in_block = Some(burn_chain_tip); } last_mined_blocks.insert(burn_header_hash, last_mined_blocks_vec); + + last_tenure_issue_time = get_epoch_time_ms(); } RelayerDirective::RegisterKey(ref last_burn_block) => { rotate_vrf_and_register( &mut keychain, last_burn_block, &mut bitcoin_controller, ); bump_processed_counter(&blocks_processed); } - RelayerDirective::RunMicroblockTenure => { - if last_microblock_tenure_time + (config.node.microblock_frequency as u128) > get_epoch_time_ms() { - // only mine when necessary -- the deadline to begin hasn't passed yet + RelayerDirective::RunMicroblockTenure(tenure_issue_ms) => { + if last_microblock_tenure_time > tenure_issue_ms { + // stale request continue; } - last_microblock_tenure_time = get_epoch_time_ms(); debug!("Relayer: run microblock tenure"); - // unconfirmed state must be consistent with the chain tip - if miner_tip.is_some() { - Relayer::refresh_unconfirmed(&mut chainstate, &mut sortdb); - } + // unconfirmed state must be consistent with the chain tip, as must the + // microblock mining state.
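// Editorial sketch (not part of the patch): RunTenure and RunMicroblockTenure
// directives now carry their issuance time in milliseconds, and the relayer drops
// any directive issued before its last completed run, coalescing a backlog of
// identical requests into a single run. Reduced to a self-contained form; the
// function name run_once is illustrative only:
fn run_once(last_handled_ms: &mut u128, issue_timestamp_ms: u128, now_ms: u128) -> bool {
    if *last_handled_ms > issue_timestamp_ms {
        return false; // stale: issued before the previous run finished, so drop it
    }
    // ... the tenure work itself would happen here ...
    *last_handled_ms = now_ms;
    true
}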
+ if let Some((ch, bh, mblock_pkey)) = miner_tip.clone() { + if let Some(miner_state) = microblock_miner_state.take() { + if miner_state.parent_consensus_hash == ch && miner_state.parent_block_hash == bh { + // preserve -- chaintip is unchanged + microblock_miner_state = Some(miner_state); + } + else { + debug!("Relayer: reset microblock miner state"); + microblock_miner_state = None; + } + } - run_microblock_tenure( - &config, - &mut microblock_miner_state, - &mut chainstate, - &mut sortdb, - &mem_pool, - &mut relayer, - miner_tip.as_ref(), - ); + run_microblock_tenure( + &config, + &mut microblock_miner_state, + &mut chainstate, + &mut sortdb, + &mem_pool, + &mut relayer, + (ch, bh, mblock_pkey), + microblocks_processed.clone() + ); - // synchronize unconfirmed tx index to p2p thread - send_unconfirmed_txs(&chainstate, unconfirmed_txs.clone()); + // synchronize unconfirmed tx index to p2p thread + send_unconfirmed_txs(&chainstate, unconfirmed_txs.clone()); + last_microblock_tenure_time = get_epoch_time_ms(); + } + else { + debug!("Relayer: reset unconfirmed state to 0 microblocks"); + set_processed_counter(&microblocks_processed, 0); + microblock_miner_state = None; + } } RelayerDirective::Exit => break } @@ -1034,6 +1076,7 @@ impl InitializedNeonNode { last_burn_block: Option<BlockSnapshot>, miner: bool, blocks_processed: BlocksProcessedCounter, + microblocks_processed: BlocksProcessedCounter, coord_comms: CoordinatorChannels, sync_comms: PoxSyncWatchdogComms, burnchain: Burnchain, @@ -1207,6 +1250,7 @@ impl InitializedNeonNode { relay_recv, event_dispatcher.clone(), blocks_processed.clone(), + microblocks_processed.clone(), burnchain, coord_comms, shared_unconfirmed_txs.clone(), @@ -1227,7 +1271,7 @@ impl InitializedNeonNode { event_dispatcher, should_keep_running, ) - .expect("Failed to initialize mine/relay thread"); + .expect("Failed to initialize p2p thread"); info!("Start HTTP server on: {}", &config.node.rpc_bind); info!("Start P2P server on: {}", &config.node.p2p_bind); @@ -1251,7 +1295,8 @@ impl InitializedNeonNode { } } - /// Tell the relayer to fire off a tenure and a block commit op. + /// Tell the relayer to fire off a tenure and a block commit op, + /// if it is time to do so. pub fn relayer_issue_tenure(&mut self) -> bool { if !self.is_miner { // node is a follower, don't try to issue a tenure @@ -1261,16 +1306,29 @@ impl InitializedNeonNode { if let Some(burnchain_tip) = self.last_burn_block.clone() { match self.leader_key_registration_state { LeaderKeyRegistrationState::Active(ref key) => { - debug!("Using key {:?}", &key.vrf_public_key); - // sleep a little before building the anchor block, to give any broadcasted - // microblocks time to propagate. - thread::sleep(std::time::Duration::from_millis(self.sleep_before_tenure)); + debug!( + "Tenure: will wait for {}s before running tenure off of {}", + self.sleep_before_tenure / 1000, + &burnchain_tip.burn_header_hash + ); + sleep_ms(self.sleep_before_tenure); + debug!( + "Tenure: Using key {:?} off of {}", + &key.vrf_public_key, &burnchain_tip.burn_header_hash + ); + self.relay_channel - .send(RelayerDirective::RunTenure(key.clone(), burnchain_tip)) + .send(RelayerDirective::RunTenure( + key.clone(), + burnchain_tip, + get_epoch_time_ms(), + )) .is_ok() } LeaderKeyRegistrationState::Inactive => { - warn!("Skipped tenure because no active VRF key. Trying to register one."); + warn!( + "Tenure: skipped tenure because no active VRF key. Trying to register one."
+ ); self.leader_key_registration_state = LeaderKeyRegistrationState::Pending; self.relay_channel .send(RelayerDirective::RegisterKey(burnchain_tip)) @@ -1279,7 +1337,7 @@ impl InitializedNeonNode { LeaderKeyRegistrationState::Pending => true, } } else { - warn!("Do not know the last burn block. As a miner, this is bad."); + warn!("Tenure: Do not know the last burn block. As a miner, this is bad."); true } } @@ -1295,7 +1353,7 @@ impl InitializedNeonNode { if let Some(ref snapshot) = &self.last_burn_block { debug!( - "Notify sortition! Last snapshot is {}/{} ({})", + "Tenure: Notify sortition! Last snapshot is {}/{} ({})", &snapshot.consensus_hash, &snapshot.burn_header_hash, &snapshot.winning_stacks_block_hash @@ -1311,7 +1369,7 @@ impl InitializedNeonNode { .is_ok(); } } else { - debug!("Notify sortition! No last burn block"); + debug!("Tenure: Notify sortition! No last burn block"); } true } @@ -1627,7 +1685,7 @@ impl InitializedNeonNode { } }; - if let Some((microblocks, poison_opt)) = microblock_info_opt { + if let Some((ref microblocks, ref poison_opt)) = &microblock_info_opt { if let Some(ref tail) = microblocks.last() { debug!( "Confirm microblock stream tailed at {} (seq {})", @@ -1636,6 +1694,8 @@ impl InitializedNeonNode { ); } + // try and confirm as many microblocks as we can (but note that the stream itself may + // be too long; we'll try again if that happens). stacks_parent_header.microblock_tail = microblocks.last().clone().map(|blk| blk.header.clone()); @@ -1643,7 +1703,7 @@ impl InitializedNeonNode { let poison_microblock_tx = inner_generate_poison_microblock_tx( keychain, coinbase_nonce + 1, - poison_payload, + poison_payload.clone(), config.is_mainnet(), config.burnchain.chain_id, ); @@ -1678,6 +1738,52 @@ impl InitializedNeonNode { Some(event_observer), ) { Ok(block) => block, + Err(ChainstateError::InvalidStacksMicroblock(msg, mblock_header_hash)) => { + // part of the parent microblock stream is invalid, so try again + info!("Parent microblock stream is invalid; trying again without the offender {} (msg: {})", &mblock_header_hash, &msg); + + // truncate the stream + stacks_parent_header.microblock_tail = match microblock_info_opt { + Some((microblocks, _)) => { + let mut tail = None; + for mblock in microblocks.into_iter() { + if mblock.block_hash() == mblock_header_hash { + break; + } + tail = Some(mblock); + } + if let Some(ref t) = &tail { + debug!( + "New parent microblock stream tail is {} (seq {})", + t.block_hash(), + t.header.sequence + ); + } + tail.map(|t| t.header) + } + None => None, + }; + + // try again + match StacksBlockBuilder::build_anchored_block( + chain_state, + &burn_db.index_conn(), + mem_pool, + &stacks_parent_header, + parent_block_total_burn, + vrf_proof.clone(), + mblock_pubkey_hash, + &coinbase_tx, + config.block_limit.clone(), + Some(event_observer), + ) { + Ok(block) => block, + Err(e) => { + error!("Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); + return None; + } + } + } Err(e) => { error!("Failure mining anchored block: {}", e); return None; } }; @@ -1740,11 +1846,13 @@ impl InitializedNeonNode { ); let mut op_signer = keychain.generate_op_signer(); debug!( - "Submit block-commit for block {} off of {}/{} with microblock parent {}", + "Submit block-commit for block {} height {} off of {}/{} with microblock parent {} (seq {})", &anchored_block.block_hash(), + anchored_block.header.total_work.work, &parent_consensus_hash, &anchored_block.header.parent_block, - &anchored_block.header.parent_microblock
&anchored_block.header.parent_microblock + &anchored_block.header.parent_microblock, + &anchored_block.header.parent_microblock_sequence ); let res = bitcoin_controller.submit_operation(op, &mut op_signer, attempt); @@ -1845,6 +1953,7 @@ impl InitializedNeonNode { } // no-op on UserBurnSupport ops are not supported / produced at this point. + self.last_burn_block = Some(block_snapshot); last_sortitioned_block.map(|x| x.0) @@ -1899,6 +2008,7 @@ impl NeonGenesisNode { self, burnchain_tip: BurnchainTip, blocks_processed: BlocksProcessedCounter, + microblocks_processed: BlocksProcessedCounter, coord_comms: CoordinatorChannels, sync_comms: PoxSyncWatchdogComms, attachments_rx: Receiver>, @@ -1916,6 +2026,7 @@ impl NeonGenesisNode { Some(burnchain_tip), true, blocks_processed, + microblocks_processed, coord_comms, sync_comms, self.burnchain, @@ -1929,6 +2040,7 @@ impl NeonGenesisNode { self, burnchain_tip: BurnchainTip, blocks_processed: BlocksProcessedCounter, + microblocks_processed: BlocksProcessedCounter, coord_comms: CoordinatorChannels, sync_comms: PoxSyncWatchdogComms, attachments_rx: Receiver>, @@ -1946,6 +2058,7 @@ impl NeonGenesisNode { Some(burnchain_tip), false, blocks_processed, + microblocks_processed, coord_comms, sync_comms, self.burnchain, From 9a567e9639862e9b8c89edbf03cf99147d1a0ad0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:12:59 -0400 Subject: [PATCH 11/21] plumb through microblock chain counter --- testnet/stacks-node/src/run_loop/neon.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a075354493..887099eb82 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -36,6 +36,7 @@ pub struct RunLoop { config: Config, pub callbacks: RunLoopCallbacks, blocks_processed: std::sync::Arc, + microblocks_processed: std::sync::Arc, coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, } @@ -66,6 +67,7 @@ impl RunLoop { coordinator_channels: Some(channels), callbacks: RunLoopCallbacks::new(), blocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)), + microblocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)), } } @@ -81,6 +83,14 @@ impl RunLoop { #[cfg(not(test))] fn get_blocks_processed_arc(&self) {} + #[cfg(test)] + pub fn get_microblocks_processed_arc(&self) -> std::sync::Arc { + self.microblocks_processed.clone() + } + + #[cfg(not(test))] + fn get_microblocks_processed_arc(&self) {} + #[cfg(test)] fn bump_blocks_processed(&self) { self.blocks_processed @@ -303,6 +313,7 @@ impl RunLoop { node.into_initialized_leader_node( burnchain_tip.clone(), self.get_blocks_processed_arc(), + self.get_microblocks_processed_arc(), coordinator_senders.clone(), pox_watchdog.make_comms_handle(), attachments_rx, @@ -313,6 +324,7 @@ impl RunLoop { node.into_initialized_node( burnchain_tip.clone(), self.get_blocks_processed_arc(), + self.get_microblocks_processed_arc(), coordinator_senders.clone(), pox_watchdog.make_comms_handle(), attachments_rx, @@ -416,6 +428,11 @@ impl RunLoop { ); if next_height > block_height { + debug!( + "New burnchain block height {} > {}", + next_height, block_height + ); + let mut sort_count = 0; // first, let's process all blocks in (block_height, next_height] From a2ac514c4e9e51fa3993798142b069a58bcd3414 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:13:21 -0400 Subject: [PATCH 12/21] add a couple of 
microblock mining tests: * verify that the miner won't produce too-long of a microblock stream even if given the chance to mine _many_ microblocks * use the fault-injection logic to force the microblock miner to mine a too-long stream, and verify that the anchored block miner truncates it to the longest-possible stream to mine --- .../src/tests/neon_integrations.rs | 397 ++++++++++++++++++ 1 file changed, 397 insertions(+) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index f8e13a2c65..54d0a9c3d8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -280,6 +280,35 @@ fn wait_for_runloop(blocks_processed: &Arc<AtomicU64>) { } } +fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64) -> bool { + let mut current = microblocks_processed.load(Ordering::SeqCst); + let start = Instant::now(); + + loop { + let now = microblocks_processed.load(Ordering::SeqCst); + if now == 0 && current != 0 { + // wrapped around -- a new epoch started + debug!( + "New microblock epoch started while waiting (originally {})", + current + ); + current = 0; + } + + if now > current { + break; + } + + if start.elapsed() > Duration::from_secs(timeout) { + warn!("Timed out waiting for microblocks to process"); + return false; + } + + thread::sleep(Duration::from_millis(100)); + } + return true; +} + /// returns Txid string fn submit_tx(http_origin: &str, tx: &Vec<u8>) -> String { let client = reqwest::blocking::Client::new(); @@ -1609,6 +1638,8 @@ fn size_check_integration_test() { channel.stop_chains_coordinator(); } +// if a microblock consumes the majority of the block budget, then _only_ a microblock will be +// mined for an epoch. #[test] #[ignore] fn size_overflow_unconfirmed_microblocks_integration_test() { @@ -1813,6 +1844,372 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { channel.stop_chains_coordinator(); } +// mine a stream of microblocks, and verify that the miner won't let us overflow the size +#[test] +#[ignore] +fn size_overflow_unconfirmed_stream_microblocks_integration_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut small_contract = "(define-public (f) (ok 1))".to_string(); + for _i in 0..((1024 * 1024 + 500) / 3) { + small_contract.push_str(" "); + } + + let spender_sks: Vec<_> = (0..25) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + + let txs: Vec<Vec<Vec<u8>>> = spender_sks + .iter() + .map(|spender_sk| { + let mut ret = vec![]; + for i in 0..1 { + let tx = make_contract_publish_microblock_only( + spender_sk, + i as u64, + 600000, + &format!("small-{}", i), + &small_contract, + ); + ret.push(tx); + } + ret + }) + .collect(); + + let flat_txs: Vec<_> = txs.iter().fold(vec![], |mut acc, a| { + acc.append(&mut a.clone()); + acc + }); + + let (mut conf, miner_account) = neon_integration_test_conf(); + + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 10492300000, + }); + } + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 1000; + conf.node.microblock_frequency = 1000; + conf.node.max_microblocks = 65536; + conf.burnchain.max_rbf = 1000000; + + test_observer::spawn(); + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys:
vec![EventKeyType::AnyEvent], }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let microblocks_processed = run_loop.get_microblocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(None, 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // let's query the miner's account nonce: + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.nonce, 1); + assert_eq!(account.balance, 0); + + for spender_addr in spender_addrs.iter() { + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + assert_eq!(account.balance, 10492300000); + } + + let mut ctr = 0; + while ctr < flat_txs.len() { + submit_tx(&http_origin, &flat_txs[ctr]); + if !wait_for_microblocks(&microblocks_processed, 240) { + break; + } + ctr += 1; + } + + // should be able to fit 5 transactions in, in 5 microblocks + assert_eq!(ctr, 5); + sleep_ms(5_000); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + eprintln!("First confirmed microblock stream!"); + + microblocks_processed.store(0, Ordering::SeqCst); + + while ctr < flat_txs.len() { + submit_tx(&http_origin, &flat_txs[ctr]); + if !wait_for_microblocks(&microblocks_processed, 240) { + break; + } + ctr += 1; + } + + // should be able to fit 5 more transactions in, in 5 microblocks + assert_eq!(ctr, 10); + sleep_ms(5_000); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + eprintln!("Second confirmed microblock stream!"); + + // confirm it + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let blocks = test_observer::get_blocks(); + assert_eq!(blocks.len(), 4); + + let mut max_big_txs_per_microblock = 0; + let mut total_big_txs_per_microblock = 0; + + // NOTE: this only counts the number of txs per stream, not in each microblock + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + + let mut num_big_microblock_txs = 0; + + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::SmartContract(tsc) = parsed.payload { + if tsc.name.to_string().find("small-").is_some() { + num_big_microblock_txs += 1; + total_big_txs_per_microblock += 1; + } + } + } + if num_big_microblock_txs > max_big_txs_per_microblock { + max_big_txs_per_microblock = num_big_microblock_txs; + } + } + + eprintln!(
"max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", + max_big_txs_per_microblock, total_big_txs_per_microblock + ); + + assert_eq!(max_big_txs_per_microblock, 5); + assert!(total_big_txs_per_microblock >= 10); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + +// Mine a too-long microblock stream, and verify that the anchored block miner truncates it down to +// the longest prefix of the stream that can be mined. +#[test] +#[ignore] +fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + // create microblock streams that are too big + env::set_var(core::FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK, "1"); + env::set_var(core::FAULT_DISABLE_MICROBLOCKS_COST_CHECK, "1"); + + let mut small_contract = "(define-public (f) (ok 1))".to_string(); + for _i in 0..((1024 * 1024 + 500) / 8) { + small_contract.push_str(" "); + } + + let spender_sks: Vec<_> = (0..25) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + + let txs: Vec> = spender_sks + .iter() + .map(|spender_sk| { + let mut ret = vec![]; + for i in 0..1 { + let tx = make_contract_publish_microblock_only( + spender_sk, + i as u64, + 1149230, + &format!("small-{}", i), + &small_contract, + ); + ret.push(tx); + } + ret + }) + .collect(); + + let flat_txs: Vec<_> = txs.iter().fold(vec![], |mut acc, a| { + acc.append(&mut a.clone()); + acc + }); + + let (mut conf, miner_account) = neon_integration_test_conf(); + + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 10492300000, + }); + } + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 15000; + conf.node.microblock_frequency = 1000; + conf.node.max_microblocks = 65536; + conf.burnchain.max_rbf = 1000000; + conf.block_limit = BLOCK_LIMIT_MAINNET.clone(); + + test_observer::spawn(); + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let microblocks_processed = run_loop.get_microblocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(None, 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // let's query the miner's account nonce: + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.nonce, 1); + assert_eq!(account.balance, 0); + + for spender_addr in spender_addrs.iter() { + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + assert_eq!(account.balance, 10492300000); + } + + let mut ctr = 0; + for _i in 0..6 { + submit_tx(&http_origin, &flat_txs[ctr]); + if !wait_for_microblocks(&microblocks_processed, 240) { + break; + } + ctr += 1; + } + + // confirm that we were able to use the fault-injection to *mine* 6 microblocks + assert_eq!(ctr, 6); + sleep_ms(5_000); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + eprintln!("First confirmed microblock stream!"); + + // confirm it + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let blocks = test_observer::get_blocks(); + assert_eq!(blocks.len(), 3); + + let mut max_big_txs_per_microblock = 0; + let mut total_big_txs_per_microblock = 0; + + // NOTE: this only counts the number of txs per stream, not in each microblock + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + + let mut num_big_microblock_txs = 0; + + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::SmartContract(tsc) = parsed.payload { + if tsc.name.to_string().find("small-").is_some() { + num_big_microblock_txs += 1; + total_big_txs_per_microblock += 1; + } + } + } + if num_big_microblock_txs > max_big_txs_per_microblock { + max_big_txs_per_microblock = num_big_microblock_txs; + } + } + + eprintln!( + "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", + max_big_txs_per_microblock, total_big_txs_per_microblock + ); + + assert_eq!(max_big_txs_per_microblock, 3); + assert!(total_big_txs_per_microblock <= 6); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + #[test] #[ignore] fn runtime_overflow_unconfirmed_microblocks_integration_test() { From a1062f332cb277a66299d11a50d528ea0318bce6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 16 Apr 2021 03:56:43 -0400 Subject: [PATCH 13/21] fix compile bug --- testnet/stacks-node/src/neon_node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 19285dff2d..46f5561bc0 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -134,7 +134,7 @@ fn set_processed_counter(blocks_processed: &BlocksProcessedCounter, value: u64) } #[cfg(not(test))] -fn set_processed_counter(_blocks_processed: &BlocksProcessedCounter) {} +fn set_processed_counter(_blocks_processed: &BlocksProcessedCounter, _value: u64) {} /// Process artifacts from the tenure.
 /// At this point, we're modifying the chainstate, and merging the artifacts from the previous tenure.

From 5e9ecad65722034c78889cf2986f28c93264fa71 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 17:12:55 -0400
Subject: [PATCH 14/21] log block budgets when processing streams

---
 src/chainstate/stacks/db/blocks.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs
index e41fbcf8ee..a9a9418e7f 100644
--- a/src/chainstate/stacks/db/blocks.rs
+++ b/src/chainstate/stacks/db/blocks.rs
@@ -4540,7 +4540,9 @@ impl StacksChainState {
             return Err(Error::InvalidStacksBlock(msg));
         }
 
-        debug!("Reached state root {}", root_hash);
+        debug!("Reached state root {}", root_hash;
+               "microblock cost" => %microblock_cost,
+               "block cost" => %block_cost);
 
         // good to go!
         clarity_tx.commit_to_block(chain_tip_consensus_hash, &block.block_hash());

From 59a9c22213d0398a94a94fffb45642bab5bb8cd0 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 17:13:25 -0400
Subject: [PATCH 15/21] clarify that mine_next_microblock_from_txs() is *only* for testing

---
 src/chainstate/stacks/miner.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs
index 3dbeed4a56..04b0f5c214 100644
--- a/src/chainstate/stacks/miner.rs
+++ b/src/chainstate/stacks/miner.rs
@@ -348,6 +348,7 @@ impl<'a> StacksMicroblockBuilder<'a> {
         return Ok(false);
     }
 
+    /// NOTE: this is only used in integration tests.
     pub fn mine_next_microblock_from_txs(
         &mut self,
         txs_and_lens: Vec<(StacksTransaction, u64)>,

From b564c6cb9367dd977cfe8e827e043dd790e5e233 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 17:13:41 -0400
Subject: [PATCH 16/21] address feedback (comments and structure)

---
 src/net/atlas/download.rs | 23 ++++++++++++++++++-----
 src/net/atlas/mod.rs      |  2 +-
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/src/net/atlas/download.rs b/src/net/atlas/download.rs
index 7a73a976c9..ed568be99c 100644
--- a/src/net/atlas/download.rs
+++ b/src/net/atlas/download.rs
@@ -65,6 +65,9 @@ impl AttachmentsDownloader {
         }
     }
 
+    /// Identify whether or not any AttachmentsBatches in the priority queue are ready for
+    /// (re-)consideration by the downloader, based on whether or not their retry deadlines
+    /// have passed.
     pub fn has_ready_batches(&self) -> bool {
         for batch in self.priority_queue.iter() {
             if batch.retry_deadline < get_epoch_time_secs() {
@@ -74,6 +77,10 @@ impl AttachmentsDownloader {
         return false;
     }
 
+    /// Returns the next attachments batch that is ready for processing -- i.e. one whose retry
+    /// deadline has passed.
+    /// Because AttachmentsBatches are ordered first by their retry deadlines, it follows that if
+    /// there are any ready AttachmentsBatches, they'll be at the head of the queue.
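+    /// For example (illustrative timestamps): if the head of the queue has retry_deadline
+    /// t=100, the next batch has t=200, and get_epoch_time_secs() returns 150, this pops
+    /// the t=100 batch; a second call at t=150 returns None, since the t=200 batch is not
+    /// yet ready.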
     pub fn pop_next_ready_batch(&mut self) -> Option<AttachmentsBatch> {
         let next_is_ready = if let Some(ref next) = self.priority_queue.peek() {
             next.retry_deadline < get_epoch_time_secs()
@@ -132,9 +139,14 @@ impl AttachmentsDownloader {
                     return Ok((vec![], vec![]));
                 }
 
-                let attachments_batch = self
-                    .pop_next_ready_batch()
-                    .expect("Unable to pop attachments bactch from queue");
+                let attachments_batch = match self.pop_next_ready_batch() {
+                    Some(ready_batch) => ready_batch,
+                    None => {
+                        // unreachable
+                        return Ok((vec![], vec![]));
+                    }
+                };
+
                 let ctx = AttachmentsBatchStateContext::new(
                     attachments_batch,
                     peers,
@@ -1096,8 +1108,9 @@ impl AttachmentsBatch {
         self.retry_count += 1;
         let delay = cmp::min(
             MAX_RETRY_DELAY,
-            2u64.saturating_pow(self.retry_count as u32)
-                + (thread_rng().gen::<u64>() % 2u64.saturating_pow((self.retry_count - 1) as u32)),
+            2u64.saturating_pow(self.retry_count as u32).saturating_add(
+                thread_rng().gen::<u64>() % 2u64.saturating_pow((self.retry_count - 1) as u32),
+            ),
         );
 
         debug!("Atlas: Re-attempt download in {} seconds", delay);
diff --git a/src/net/atlas/mod.rs b/src/net/atlas/mod.rs
index 4ee3a445ac..75264ef359 100644
--- a/src/net/atlas/mod.rs
+++ b/src/net/atlas/mod.rs
@@ -36,7 +36,7 @@ use std::convert::TryFrom;
 use std::hash::{Hash, Hasher};
 
 pub const MAX_ATTACHMENT_INV_PAGES_PER_REQUEST: usize = 8;
-pub const MAX_RETRY_DELAY: u64 = 600;
+pub const MAX_RETRY_DELAY: u64 = 600; // seconds
 
 lazy_static! {
     pub static ref BNS_CHARS_REGEX: Regex = Regex::new("^([a-z0-9]|[-_])*$").unwrap();

From 9ded3ba3f647332428bb3c9568c3d274e6dc195d Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 17:13:56 -0400
Subject: [PATCH 17/21] address feedback

---
 testnet/stacks-node/src/neon_node.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs
index 46f5561bc0..6155fe3ecd 100644
--- a/testnet/stacks-node/src/neon_node.rs
+++ b/testnet/stacks-node/src/neon_node.rs
@@ -88,9 +88,9 @@ struct MicroblockMinerState {
 enum RelayerDirective {
     HandleNetResult(NetworkResult),
     ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash),
-    RunTenure(RegisteredKey, BlockSnapshot, u128),
+    RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms)
     RegisterKey(BlockSnapshot),
-    RunMicroblockTenure(u128),
+    RunMicroblockTenure(u128), // time of issuance in ms
     Exit,
 }

From 13961352bc99a46e2629840902d14d72ccab2770 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 17:14:06 -0400
Subject: [PATCH 18/21] fix failing test

---
 testnet/stacks-node/src/tests/neon_integrations.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs
index 54d0a9c3d8..e8817b5f38 100644
--- a/testnet/stacks-node/src/tests/neon_integrations.rs
+++ b/testnet/stacks-node/src/tests/neon_integrations.rs
@@ -1655,7 +1655,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() {
 
     // small-sized contracts for microblocks
     let mut small_contract = "(define-public (f) (ok 1))".to_string();
-    for _i in 0..((1024 * 1024 + 500) / 2) {
+    for _i in 0..(1024 * 1024 + 500) {
         small_contract.push_str(" ");
     }
 
@@ -1684,7 +1684,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() {
             let tx = make_contract_publish_microblock_only(
                 spender_sk,
                 i as u64,
-                600000,
+                1100000,
                 &format!("small-{}", i),
                 &small_contract,
             );
@@ -1765,7 +1765,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() {
         }
     }
 
-    sleep_ms(75_000);
+    sleep_ms(150_000);
 
     // now let's mine a couple blocks, and then check the sender's nonce.
     // at the end of mining three blocks, there should be _two_ transactions from the microblock
@@ -1777,7 +1777,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() {
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
     // this one will contain the sortition from above anchor block,
     // which *should* have also confirmed the microblock.
-    sleep_ms(75_000);
+    sleep_ms(150_000);
 
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

From a3f8c93c8155c292f6e9f7c3d9cc5566c3185ebb Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 22:27:10 -0400
Subject: [PATCH 19/21] activate new integration tests

---
 .github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests b/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests
index 3887fae5a7..9fa7f61833 100644
--- a/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests
+++ b/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests
@@ -25,5 +25,7 @@ RUN cargo test -- --test-threads 1 --ignored tests::neon_integrations::pox_integ
 RUN cargo test -- --test-threads 1 --ignored tests::bitcoin_regtest::bitcoind_integration_test
 RUN cargo test -- --test-threads 1 --ignored tests::should_succeed_handling_malformed_and_valid_txs
 RUN cargo test -- --test-threads 1 --ignored tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test
+RUN cargo test -- --test-threads 1 --ignored tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test
+RUN cargo test -- --test-threads 1 --ignored tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test
 RUN cargo test -- --test-threads 1 --ignored tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test
 RUN cargo test -- --test-threads 1 --ignored tests::neon_integrations::antientropy_integration_test

From 8029c8f7a7d886cfebfe69b72fa4f4e7966b0110 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 16 Apr 2021 22:27:27 -0400
Subject: [PATCH 20/21] flat_map

---
 testnet/stacks-node/src/tests/neon_integrations.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs
index e8817b5f38..c82d1f4a8b 100644
--- a/testnet/stacks-node/src/tests/neon_integrations.rs
+++ b/testnet/stacks-node/src/tests/neon_integrations.rs
@@ -1881,10 +1881,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() {
         })
         .collect();
 
-    let flat_txs: Vec<_> = txs.iter().fold(vec![], |mut acc, a| {
-        acc.append(&mut a.clone());
-        acc
-    });
+    let flat_txs: Vec<_> = txs.iter().flat_map(|a| a.clone()).collect();
 
     let (mut conf, miner_account) = neon_integration_test_conf();

From 9bd50a34dd199a34b0f5c4ca8a8f13b7cceff4f2 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Mon, 19 Apr 2021 01:07:38 -0400
Subject: [PATCH 21/21] fix inv-sync bug that was causing the antientropy test
 to time out

---
 src/net/inv.rs | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/src/net/inv.rs b/src/net/inv.rs
index 781fe1ea14..28f98ea2b2 100644
--- a/src/net/inv.rs
+++ b/src/net/inv.rs
@@ -89,7 +89,7 @@ pub const INV_SYNC_INTERVAL: u64 = 0;
 #[cfg(not(test))]
 pub const FULL_INV_SYNC_INTERVAL: u64 = 12 * 3600;
 #[cfg(test)]
-pub const FULL_INV_SYNC_INTERVAL: u64 = 120;
+pub const FULL_INV_SYNC_INTERVAL: u64 = 60;
 
 #[cfg(not(test))]
 pub const INV_REWARD_CYCLES: u64 = 3;
@@ -508,6 +508,11 @@ impl PeerBlocksInv {
     pub fn get_block_height(&self) -> u64 {
         self.first_block_height + self.num_sortitions
     }
+
+    /// What's the number of PoX reward cycles we know about?
+    pub fn get_pox_height(&self) -> u64 {
+        self.num_reward_cycles
+    }
 }
 
 #[derive(Debug, Clone, PartialEq, Copy)]
@@ -1742,12 +1747,11 @@ impl PeerNetwork {
     }
 
     /// Determine at which reward cycle to begin scanning inventories
-    fn get_block_scan_start(&self, full_rescan: bool) -> u64 {
-        let highest_known_reward_cycle = self.pox_id.num_inventory_reward_cycles() as u64;
+    fn get_block_scan_start(&self, highest_remote_reward_cycle: u64, full_rescan: bool) -> u64 {
         if full_rescan {
             0
         } else {
-            highest_known_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles)
+            highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles)
         }
     }
 
@@ -1766,7 +1770,7 @@ impl PeerNetwork {
             Some(x) => x,
             None => {
                 // proceed to block scan
-                let scan_start = self.get_block_scan_start(full_rescan);
+                let scan_start = self.get_block_scan_start(stats.inv.get_pox_height(), full_rescan);
                 debug!("{:?}: cannot make any more GetPoxInv requests for {:?}; proceeding to block inventory scan at reward cycle {}", &self.local_peer, nk, scan_start);
                 stats.reset_block_scan(scan_start);
                 return Ok(());
@@ -1822,7 +1826,8 @@ impl PeerNetwork {
             // proceed with block scan.
             // If we're in IBD, then this is an always-allowed peer and we should
             // react to divergences by deepening our rescan.
-            let scan_start = self.get_block_scan_start(ibd || full_rescan);
+            let scan_start =
+                self.get_block_scan_start(stats.inv.get_pox_height(), ibd || full_rescan);
             debug!(
                 "{:?}: proceeding to block inventory scan for {:?} (diverged) at reward cycle {} (ibd={}, full={})",
                 &self.local_peer, nk, scan_start, ibd, full_rescan
@@ -1914,7 +1919,7 @@ impl PeerNetwork {
         }
 
         // proceed to block scan.
-        let scan_start = self.get_block_scan_start(full_rescan);
+        let scan_start = self.get_block_scan_start(stats.inv.get_pox_height(), full_rescan);
         debug!(
             "{:?}: proceeding to block inventory scan for {:?} at reward cycle {}",
             &self.local_peer, nk, scan_start
@@ -2260,9 +2265,10 @@ impl PeerNetwork {
             // hint to downloader as to where to begin scanning
             inv_state.block_sortition_start = network
                 .burnchain
-                .reward_cycle_to_block_height(
-                    network.get_block_scan_start(inv_state.hint_do_full_rescan),
-                )
+                .reward_cycle_to_block_height(network.get_block_scan_start(
+                    network.pox_id.num_inventory_reward_cycles() as u64,
+                    inv_state.hint_do_full_rescan,
+                ))
                 .saturating_sub(sortdb.first_block_height);
 
             let was_full = inv_state.hint_do_full_rescan;
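+            // Worked example (illustrative numbers): with connection_opts.inv_reward_cycles
+            // at the default INV_REWARD_CYCLES = 3, a remote inventory spanning 10 reward
+            // cycles gives get_block_scan_start(10, false) == 7, so only the most recent
+            // cycles are re-scanned, while get_block_scan_start(10, true) == 0, a full rescan.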