diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 2d493753e5..6dec7efb11 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -122,7 +122,9 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - - tests::signer::v0::tenure_extend_after_idle + - tests::signer::v0::tenure_extend_after_idle_signers + - tests::signer::v0::tenure_extend_after_idle_miner + - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt - tests::signer::v0::stx_transfers_dont_effect_idle_timeout - tests::signer::v0::idle_tenure_extend_active_mining - tests::signer::v0::multiple_miners_with_custom_chain_id @@ -130,6 +132,8 @@ jobs: - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout - tests::signer::v0::tenure_extend_after_bad_commit + - tests::signer::v0::block_proposal_max_age_rejections + - tests::signer::v0::global_acceptance_depends_on_block_announcement - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state @@ -140,6 +144,7 @@ jobs: - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles + - tests::nakamoto_integrations::nakamoto_lockup_events - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml new file mode 100644 index 0000000000..83cf240815 --- /dev/null +++ b/.github/workflows/clippy.yml @@ -0,0 +1,40 @@ +## Perform Clippy checks - currently set to defaults +## https://github.com/rust-lang/rust-clippy#usage +## 
https://rust-lang.github.io/rust-clippy/master/index.html +## +name: Clippy Checks + +# Only run when: +# - PRs are (re)opened against develop branch +on: + pull_request: + branches: + - develop + types: + - opened + - reopened + - synchronize + +jobs: + clippy_check: + name: Clippy Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: clippy + - name: Clippy + id: clippy + uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: -p libstackerdb -p stacks-signer -p pox-locking --no-deps --tests --all-features -- -D warnings \ No newline at end of file diff --git a/.github/workflows/p2p-tests.yml b/.github/workflows/p2p-tests.yml index 1c33eca0fb..81790bdc12 100644 --- a/.github/workflows/p2p-tests.yml +++ b/.github/workflows/p2p-tests.yml @@ -43,10 +43,10 @@ jobs: - net::tests::convergence::test_walk_star_15_org_biased - net::tests::convergence::test_walk_inbound_line_15 - net::api::tests::postblock_proposal::test_try_make_response - - net::server::tests::test_http_10_threads_getinfo - - net::server::tests::test_http_10_threads_getblock - - net::server::tests::test_http_too_many_clients - - net::server::tests::test_http_slow_client + - net::server::test::test_http_10_threads_getinfo + - net::server::test::test_http_10_threads_getblock + - net::server::test::test_http_too_many_clients + - net::server::test::test_http_slow_client steps: ## Setup test environment - name: Setup Test Environment diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 98eb5cf92c..457a2aaefd 100644 --- a/.github/workflows/stacks-core-tests.yml +++ 
b/.github/workflows/stacks-core-tests.yml @@ -18,55 +18,6 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: - # Full genesis test with code coverage - full-genesis: - name: Full Genesis Test - runs-on: ubuntu-latest - strategy: - ## Continue with the test matrix even if we've had a failure - fail-fast: false - ## Run a maximum of 2 concurrent tests from the test matrix - max-parallel: 2 - matrix: - test-name: - - neon_integrations::bitcoind_integration_test - steps: - ## Setup test environment - - name: Setup Test Environment - id: setup_tests - uses: stacks-network/actions/stacks-core/testenv@main - with: - genesis: true - btc-version: "25.0" - - ## Run test matrix using restored cache of archive file - ## - Test will timeout after env.TEST_TIMEOUT minutes - - name: Run Tests - id: run_tests - timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@main - with: - test-name: ${{ matrix.test-name }} - threads: 1 - archive-file: ~/genesis_archive.tar.zst - - ## Upload code coverage file - - name: Code Coverage - id: codecov - uses: stacks-network/actions/codecov@main - with: - test-name: large_genesis - filename: ./lcov.info - - - name: Status Output - run: | - echo "run_tests: ${{ steps.run_tests.outputs.status }}" - echo "codecov: ${{ steps.codecov.outputs.status }}" - - - name: Check Failures - if: steps.run_tests.outputs.status == 'failure' || steps.codecov.outputs.status == 'failure' - run: exit 1 - # Unit tests with code coverage unit-tests: name: Unit Tests @@ -186,7 +137,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - full-genesis - open-api-validation - core-contracts-clarinet-test steps: diff --git a/CHANGELOG.md b/CHANGELOG.md index d19470075d..53e7d29fbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] ### Added +- Add `tenure_timeout_secs` to the miner for 
determining when a time-based tenure extend should be attempted. ### Changed diff --git a/Cargo.lock b/Cargo.lock index 8a3769b6a8..47621472bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -637,7 +637,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff 1.1.0", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "mutants", @@ -1411,13 +1411,19 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + [[package]] name = "hashlink" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -1683,12 +1689,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.2", ] [[package]] @@ -1841,7 +1847,7 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "libstackerdb", @@ -2621,7 +2627,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -3270,7 +3276,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "nix", @@ -3305,7 +3311,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown", + "hashbrown 0.14.3", "http-types", "lazy_static", "libc", @@ -3332,7 +3338,6 @@ 
dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml", "tracing", "tracing-subscriber", "url", @@ -3346,7 +3351,7 @@ dependencies = [ "backoff", "clap 4.5.0", "clarity", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libsigner", "libstackerdb", @@ -3385,7 +3390,7 @@ dependencies = [ "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "libc", @@ -3421,6 +3426,7 @@ dependencies = [ "stx-genesis", "tikv-jemallocator", "time 0.2.27", + "toml", "url", "winapi 0.3.9", ] diff --git a/Cargo.toml b/Cargo.toml index c00c223c47..194e946ef4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,8 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } -thiserror = { version = "1.0.65" } +thiserror = "1.0.65" +toml = "0.5.6" # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index a559ad59fd..eedc2857fa 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -499,7 +499,7 @@ impl EventBatch { impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { #[cfg(any(test, feature = "testing"))] - pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, '_> { + pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new( false, @@ -513,7 +513,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { } #[cfg(any(test, feature = "testing"))] - pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, '_> { + pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, 'a> { database.begin(); let epoch = database.get_clarity_epoch_version().unwrap(); let version = ClarityVersion::default_for_epoch(epoch); @@ -540,7 +540,7 @@ 
impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { mut database: ClarityDatabase<'a>, epoch: StacksEpochId, use_mainnet: bool, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { use crate::vm::tests::test_only_mainnet_to_chain_id; let cost_track = LimitedCostTracker::new_max_limit(&mut database, epoch, use_mainnet) .expect("FAIL: problem instantiating cost tracking"); @@ -557,7 +557,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { chain_id: u32, database: ClarityDatabase<'a>, epoch_id: StacksEpochId, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new( mainnet, @@ -576,7 +576,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { database: ClarityDatabase<'a>, cost_tracker: LimitedCostTracker, epoch_id: StacksEpochId, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new(mainnet, chain_id, database, cost_tracker, epoch_id), call_stack: CallStack::new(), @@ -1546,7 +1546,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { database: ClarityDatabase<'a>, cost_track: LimitedCostTracker, epoch_id: StacksEpochId, - ) -> GlobalContext { + ) -> GlobalContext<'a, 'hooks> { GlobalContext { database, cost_track, diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index c444aa553e..c0b75be83f 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -205,7 +205,7 @@ where } impl<'a> RollbackWrapper<'a> { - pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper { + pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper<'a> { RollbackWrapper { store, lookup_map: HashMap::new(), @@ -218,7 +218,7 @@ impl<'a> RollbackWrapper<'a> { pub fn from_persisted_log( store: &'a mut dyn ClarityBackingStore, log: RollbackWrapperPersistedLog, - ) -> RollbackWrapper { + ) -> RollbackWrapper<'a> { RollbackWrapper { store, 
lookup_map: log.lookup_map, diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 1de0e34f09..52a77e2bb8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,6 +114,13 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, + /// A new processed Stacks block was received from the node with the given block hash + NewBlock { + /// The block header hash for the newly processed stacks block + block_hash: Sha512Trunc256Sum, + /// The block height for the newly processed stacks block + block_height: u64, + }, } /// Trait to implement a stop-signaler for the event receiver thread. @@ -298,29 +305,25 @@ impl EventReceiver for SignerEventReceiver { &request.method(), ))); } + debug!("Processing {} event", request.url()); if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_event::(request) } else if request.url() == "/proposal_response" { - process_proposal_response(request) + process_event::(request) } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) + process_event::(request) } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); - return Err(EventError::Terminated); + Err(EventError::Terminated) + } else if request.url() == "/new_block" { + process_event::(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. 
- if &url != "/new_block" { - debug!( - "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", - event_receiver.local_addr, - url - ); - } + debug!( + "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", + event_receiver.local_addr, + url + ); ack_dispatcher(request); Err(EventError::UnrecognizedEvent(url)) } @@ -385,12 +388,13 @@ fn ack_dispatcher(request: HttpRequest) { // TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] -/// Process a stackerdb event from the node -fn process_stackerdb_event( - local_addr: Option, - mut request: HttpRequest, -) -> Result, EventError> { +fn process_event(mut request: HttpRequest) -> Result, EventError> +where + T: SignerEventTrait, + E: serde::de::DeserializeOwned + TryInto, Error = EventError>, +{ let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); ack_dispatcher(request); @@ -399,27 +403,12 @@ fn process_stackerdb_event( &e ))); } - - debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); - let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) + // Regardless of whether we successfully deserialize, we should ack the dispatcher so they don't keep resending it + ack_dispatcher(request); + let json_event: E = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e); - } - Ok(x) => x, - }; - - ack_dispatcher(request); + let signer_event: SignerEvent = 
json_event.try_into()?; Ok(signer_event) } @@ -466,78 +455,69 @@ impl TryFrom for SignerEvent { } } -/// Process a proposal response from the node -fn process_proposal_response( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got proposal_response event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); + fn try_from(block_validate_response: BlockValidateResponse) -> Result { + Ok(SignerEvent::BlockValidationResponse( + block_validate_response, + )) } +} - let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; +#[derive(Debug, Deserialize)] +struct BurnBlockEvent { + burn_block_hash: String, + burn_block_height: u64, + reward_recipients: Vec, + reward_slot_holders: Vec, + burn_amount: u64, +} - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; + + fn try_from(burn_block_event: BurnBlockEvent) -> Result { + let burn_header_hash = burn_block_event + .burn_block_hash + .get(2..) 
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + + Ok(SignerEvent::NewBurnBlock { + burn_height: burn_block_event.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }) } +} - Ok(SignerEvent::BlockValidationResponse(event)) +#[derive(Debug, Deserialize)] +struct BlockEvent { + block_hash: String, + block_height: u64, } -/// Process a new burn block event from the node -fn process_new_burn_block_event( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, - } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let burn_header_hash = temp - .burn_block_hash - .get(2..) 
- .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBurnBlock { - burn_height: temp.burn_block_height, - received_time: SystemTime::now(), - burn_header_hash, - }; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); + fn try_from(block_event: BlockEvent) -> Result { + let block_hash: Sha512Trunc256Sum = block_event + .block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + Ok(SignerEvent::NewBlock { + block_hash, + block_height: block_event.block_height, + }) } - Ok(event) } pub fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index a9dfc47806..5f733eddad 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -35,6 +35,9 @@ use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; +#[cfg(any(test, feature = "testing"))] +pub mod tests; + pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); let since_the_epoch = start diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs new file mode 100644 index 0000000000..b87e913718 --- /dev/null +++ b/stacks-common/src/util/tests.rs @@ -0,0 +1,99 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::{Arc, Mutex}; +/// `TestFlag` is a thread-safe utility designed for managing shared state in testing scenarios. It wraps +/// a value of type `T` inside an `Arc>>`, allowing you to set and retrieve a value +/// across different parts of your codebase while ensuring thread safety. +/// +/// This structure is particularly useful when: +/// - You need a global or static variable in tests. +/// - You want to control the execution of custom test code paths by setting and checking a shared value. +/// +/// # Type Parameter +/// - `T`: The type of the value managed by the `TestFlag`. It must implement the `Default` and `Clone` traits. +/// +/// # Examples +/// +/// ```rust +/// use stacks_common::util::tests::TestFlag; +/// use std::sync::{Arc, Mutex}; +/// +/// // Create a TestFlag instance +/// let test_flag = TestFlag::default(); +/// +/// // Set a value in the test flag +/// test_flag.set("test_value".to_string()); +/// +/// // Retrieve the value +/// assert_eq!(test_flag.get(), "test_value".to_string()); +/// +/// // Reset the value to default +/// test_flag.set("".to_string()); +/// assert_eq!(test_flag.get(), "".to_string()); +/// ``` +#[derive(Clone)] +pub struct TestFlag(pub Arc>>); + +impl Default for TestFlag { + fn default() -> Self { + Self(Arc::new(Mutex::new(None))) + } +} + +impl TestFlag { + /// Sets the value of the test flag. + /// + /// This method updates the value stored inside the `TestFlag`, replacing any existing value. + /// + /// # Arguments + /// - `value`: The new value to set for the `TestFlag`. 
+ /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// test_flag.set(42); + /// assert_eq!(test_flag.get(), 42); + /// ``` + pub fn set(&self, value: T) { + *self.0.lock().unwrap() = Some(value); + } + + /// Retrieves the current value of the test flag. + /// + /// If no value has been set, this method returns the default value for the type `T`. + /// + /// # Returns + /// - The current value of the test flag, or the default value of `T` if none has been set. + /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// + /// // Get the default value + /// assert_eq!(test_flag.get(), 0); // For T = i32, default is 0 + /// + /// // Set a value + /// test_flag.set(123); + /// + /// // Get the updated value + /// assert_eq!(test_flag.get(), 123); + /// ``` + pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index fd38fe9775..3c7ec2d0c1 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -9,7 +9,11 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## Added +- Introduced the `block_proposal_max_age_secs` configuration option for signers, enabling them to automatically ignore block proposals that exceed the specified age in seconds. 
+ ## Changed +- Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database +- Signers now listen to new block events from the stacks node to determine whether a block has been successfully appended to the chain tip ## [3.1.0.0.2.0] diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 3beba641f2..22204daf97 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -36,11 +36,11 @@ serde_stacker = "0.1" slog = { version = "2.5.2", features = [ "max_level_trace" ] } slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" -stacks-common = { path = "../stacks-common" } +stacks-common = { path = "../stacks-common", features = ["testing"] } stackslib = { path = "../stackslib" } thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } -toml = "0.5.6" +toml = { workspace = true } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } rand = { workspace = true } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index f2f042dffb..462f3dc2d2 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -176,7 +176,7 @@ enum ProposedBy<'a> { CurrentSortition(&'a SortitionState), } -impl<'a> ProposedBy<'a> { +impl ProposedBy<'_> { pub fn state(&self) -> &SortitionState { match self { ProposedBy::LastSortition(x) => x, @@ -367,7 +367,7 @@ impl SortitionsView { tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; let extend_timestamp = signer_db.calculate_tenure_extend_timestamp( self.config.tenure_idle_timeout, - &block, + block, false, ); let epoch_time = get_epoch_time_secs(); diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 4e9067498d..7b666d3762 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -41,11 +41,9 @@ use stacks_common::types::chainstate::StacksPrivateKey; extern crate alloc; 
-#[derive(Parser, Debug)] -#[command(author, version, about)] -#[command(long_version = VERSION_STRING.as_str())] - /// The CLI arguments for the stacks signer +#[derive(Parser, Debug)] +#[command(author, version, about, long_version = VERSION_STRING.as_str())] pub struct Cli { /// Subcommand action to take #[command(subcommand)] diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index ba55bd9810..bdaa368567 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -414,6 +414,7 @@ pub(crate) mod tests { tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: config.block_proposal_validation_timeout, tenure_idle_timeout: config.tenure_idle_timeout, + block_proposal_max_age_secs: config.block_proposal_max_age_secs, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 18412bc5f2..d2d526a589 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -39,6 +39,7 @@ const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; const TENURE_IDLE_TIMEOUT_SECS: u64 = 300; +const DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS: u64 = 600; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -138,6 +139,8 @@ pub struct SignerConfig { pub block_proposal_validation_timeout: Duration, /// How much idle time must pass before allowing a tenure extend pub tenure_idle_timeout: Duration, + /// The maximum age of a block proposal in seconds that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// The parsed configuration for the signer @@ -176,6 +179,8 @@ pub struct GlobalConfig { pub block_proposal_validation_timeout: Duration, /// How much idle time must pass before allowing a tenure extend pub tenure_idle_timeout: Duration, + /// The maximum age 
of a block proposal that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// Internal struct for loading up the config file @@ -213,6 +218,8 @@ struct RawConfigFile { pub block_proposal_validation_timeout_ms: Option, /// How much idle time (in seconds) must pass before a tenure extend is allowed pub tenure_idle_timeout_secs: Option, + /// The maximum age of a block proposal (in secs) that will be processed by the signer. + pub block_proposal_max_age_secs: Option, } impl RawConfigFile { @@ -310,6 +317,10 @@ impl TryFrom for GlobalConfig { .unwrap_or(TENURE_IDLE_TIMEOUT_SECS), ); + let block_proposal_max_age_secs = raw_data + .block_proposal_max_age_secs + .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -326,6 +337,7 @@ impl TryFrom for GlobalConfig { tenure_last_block_proposal_timeout, block_proposal_validation_timeout, tenure_idle_timeout, + block_proposal_max_age_secs, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 11faadf871..018d35a4d2 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -286,6 +286,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, tenure_idle_timeout: self.config.tenure_idle_timeout, + block_proposal_max_age_secs: self.config.block_proposal_max_age_secs, })) } @@ -423,23 +424,15 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { let reward_cycle = signer.reward_cycle(); - let next_reward_cycle = reward_cycle.wrapping_add(1); - let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { - std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale - std::cmp::Ordering::Equal => { - // We are the next 
reward cycle, so check if we were registered and have any pending blocks to process - match signer { - ConfiguredSigner::RegisteredSigner(signer) => { - !signer.has_unprocessed_blocks() - } - _ => true, - } + if reward_cycle >= current_reward_cycle { + // We are either the current or a future reward cycle, so we are not stale. + continue; + } + if let ConfiguredSigner::RegisteredSigner(signer) = signer { + if !signer.has_unprocessed_blocks() { + debug!("{signer}: Signer's tenure has completed."); + to_delete.push(*idx); } - std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale - }; - if stale { - debug!("{signer}: Signer's tenure has completed."); - to_delete.push(*idx); } } for idx in to_delete { diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9c4c348f8e..4cdc61471a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -244,18 +244,10 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyAccepted - ) - } - BlockState::LocallyRejected => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyRejected - ) - } + BlockState::LocallyAccepted | BlockState::LocallyRejected => !matches!( + prev_state, + BlockState::GloballyRejected | BlockState::GloballyAccepted + ), BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), } @@ -942,12 +934,8 @@ impl SignerDb { block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = ?2 WHERE signer_signature_hash = ?3"; - let args = params![ - u64_to_sql(ts)?, - BlockState::GloballyAccepted.to_string(), - block_sighash - ]; + let qry = "UPDATE blocks SET broadcasted = 
?1 WHERE signer_signature_hash = ?2"; + let args = params![u64_to_sql(ts)?, block_sighash]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -1394,14 +1382,7 @@ mod tests { .expect("Unable to get block from db") .expect("Unable to get block from db") .state, - BlockState::GloballyAccepted - ); - assert_eq!( - db.get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) - .unwrap() - .unwrap() - .signer_signature_hash(), - block_info_1.block.header.signer_signature_hash() + BlockState::Unprocessed ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); @@ -1428,7 +1409,14 @@ mod tests { assert_eq!(block.state, BlockState::LocallyAccepted); assert!(!block.check_state(BlockState::Unprocessed)); assert!(block.check_state(BlockState::LocallyAccepted)); - assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); assert!(block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); @@ -1440,15 +1428,8 @@ mod tests { assert!(block.check_state(BlockState::GloballyAccepted)); assert!(!block.check_state(BlockState::GloballyRejected)); - // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted - block.state = BlockState::LocallyRejected; - assert!(!block.check_state(BlockState::Unprocessed)); - assert!(!block.check_state(BlockState::LocallyAccepted)); - assert!(block.check_state(BlockState::LocallyRejected)); - assert!(block.check_state(BlockState::GloballyAccepted)); - 
assert!(block.check_state(BlockState::GloballyRejected)); - - block.move_to(BlockState::GloballyRejected).unwrap(); + // Must manually override as will not be able to move from GloballyAccepted to GloballyRejected + block.state = BlockState::GloballyRejected; assert!(!block.check_state(BlockState::Unprocessed)); assert!(!block.check_state(BlockState::LocallyAccepted)); assert!(!block.check_state(BlockState::LocallyRejected)); diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 520fb36ca1..34b363311e 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -17,6 +17,10 @@ /// The signer module for processing events pub mod signer; +#[cfg(any(test, feature = "testing"))] +/// Test specific functions for the signer module +pub mod tests; + use libsigner::v0::messages::SignerMessage; use crate::v0::signer::Signer; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 22a02bc107..5a5128cce4 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -44,29 +44,13 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static 
TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { + /// The private key of the signer + #[cfg(any(test, feature = "testing"))] + pub private_key: StacksPrivateKey, + #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client @@ -92,6 +76,8 @@ pub struct Signer { pub block_proposal_validation_timeout: Duration, /// The current submitted block proposal and its submission time pub submitted_block_proposal: Option<(BlockProposal, Instant)>, + /// Maximum age of a block proposal in seconds before it is dropped without processing + pub block_proposal_max_age_secs: u64, } impl std::fmt::Display for Signer { @@ -126,6 +112,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::NewBlock { .. 
}) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -168,21 +155,8 @@ impl SignerTrait for Signer { match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - if let Some(public_keys) = - &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() - { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Ignoring block proposal due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - continue; - } + if self.test_ignore_all_block_proposals(block_proposal) { + continue; } self.handle_block_proposal( stacks_client, @@ -246,6 +220,33 @@ impl SignerTrait for Signer { }); *sortition_state = None; } + SignerEvent::NewBlock { + block_hash, + block_height, + } => { + debug!( + "{self}: Received a new block event."; + "block_hash" => %block_hash, + "block_height" => block_height + ); + if let Ok(Some(mut block_info)) = self + .signer_db + .block_lookup(block_hash) + .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) + { + if block_info.state == BlockState::GloballyAccepted { + // We have already globally accepted this block. Do nothing. 
+ return; + } + if let Err(e) = block_info.mark_globally_accepted() { + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + return; + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state to globally accepted: {e:?}"); + } + } + } } } @@ -284,6 +285,7 @@ impl From for Signer { proposal_config, submitted_block_proposal: None, block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, + block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, } } } @@ -344,6 +346,24 @@ impl Signer { return; } + if block_proposal + .block + .header + .timestamp + .saturating_add(self.block_proposal_max_age_secs) + < get_epoch_time_secs() + { + // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. + warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + "timestamp" => block_proposal.block.header.timestamp, + ); + return; + } + // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. 
// the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); @@ -385,7 +405,10 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); + #[cfg(any(test, feature = "testing"))] let mut block_info = BlockInfo::from(block_proposal.clone()); + #[cfg(not(any(test, feature = "testing")))] + let block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -475,10 +498,7 @@ impl Signer { self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { - // We know proposal is invalid. Send rejection message, do not do further validation - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; + // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb @@ -885,7 +905,7 @@ impl Signer { // Not enough rejection signatures to make a decision return; } - debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}"); if let Err(e) = block_info.mark_globally_rejected() { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } @@ -1007,7 +1027,7 @@ impl Signer { return; }; // move block to LOCALLY accepted state. - // We only mark this GLOBALLY accepted if we manage to broadcast it... + // It is only considered globally accepted IFF we receive a new block event confirming it OR see the chain tip of the node advance to it. 
if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold warn!("{self}: Failed to mark block as locally accepted: {e:?}"); @@ -1020,22 +1040,8 @@ impl Signer { panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] - { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } + self.test_pause_block_broadcast(&block_info); + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); if self .submitted_block_proposal @@ -1083,71 +1089,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - let block_hash = block.header.signer_signature_hash(); - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self - .signer_db - .set_block_broadcasted(&block_hash, get_epoch_time_secs()) - { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return true; - } - false - } - - #[cfg(any(test, feature = "testing"))] - fn test_reject_block_proposal( - &mut self, - block_proposal: &BlockProposal, - block_info: &mut BlockInfo, - block_response: Option, - ) -> Option { - 
let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { - return block_response; - }; - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; - // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject - // as invalid since we rejected in a prior round if this crops up again) - // in case this is the first time we saw this block. Safe to do since this is testing case only. - self.signer_db - .insert_block(block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } else { - None - } - } - /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -1162,7 +1103,7 @@ impl Signer { } /// Helper for logging insert_block error - fn handle_insert_block_error(&self, e: DBError) { + pub fn handle_insert_block_error(&self, e: DBError) { error!("{self}: Failed to insert block into signer-db: {e:?}"); panic!("{self} Failed to write block to signerdb: {e}"); } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs new file mode 100644 index 0000000000..0b9cdcc569 --- /dev/null +++ 
b/stacks-signer/src/v0/tests.rs @@ -0,0 +1,141 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::LazyLock; + +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use libsigner::v0::messages::{BlockResponse, RejectCode}; +use libsigner::BlockProposal; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::tests::TestFlag; +use stacks_common::{info, warn}; + +use super::signer::Signer; +use crate::signerdb::BlockInfo; + +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list +pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to pause broadcasting the block to the network +pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +/// A global variable that can be used to skip broadcasting the block to the network +pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +impl Signer { + /// 
Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set + pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if TEST_SKIP_BLOCK_BROADCAST.get() { + let block_hash = block.header.signer_signature_hash(); + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + + /// Reject block proposals if the TEST_REJECT_ALL_BLOCK_PROPOSAL flag is set for the signer's public key + pub fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. 
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), + )) + } else { + block_response + } + } + + /// Pause the block broadcast if the TEST_PAUSE_BLOCK_BROADCAST flag is set + pub fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while TEST_PAUSE_BLOCK_BROADCAST.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + + /// Ignore block proposals if the TEST_IGNORE_ALL_BLOCK_PROPOSALS flag is set for the signer's public key + pub fn test_ignore_all_block_proposals(&self, block_proposal: &BlockProposal) -> bool { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + return true; + } + false + } +} diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index edd58c6161..cbee2bfc98 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -58,6 +58,7 @@ libstackerdb = { 
path = "../libstackerdb" } siphasher = "0.3.7" hashbrown = { workspace = true } rusqlite = { workspace = true } +toml = { workspace = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/stackslib/conf/mainnet-follower-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-follower-conf.toml rename to stackslib/conf/mainnet-follower-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/stackslib/conf/mainnet-miner-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-miner-conf.toml rename to stackslib/conf/mainnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/stackslib/conf/mainnet-mockminer-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-mockminer-conf.toml rename to stackslib/conf/mainnet-mockminer-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/stackslib/conf/mainnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-signer.toml rename to stackslib/conf/mainnet-signer.toml diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/stackslib/conf/testnet-follower-conf.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-follower-conf.toml rename to stackslib/conf/testnet-follower-conf.toml diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/stackslib/conf/testnet-miner-conf.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-miner-conf.toml rename to stackslib/conf/testnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/stackslib/conf/testnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-signer.toml rename to stackslib/conf/testnet-signer.toml diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs 
b/stackslib/src/burnchains/bitcoin/indexer.rs index 83c8903d35..3361301675 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -924,7 +924,7 @@ impl BitcoinIndexer { return Ok(()); } warn!( - "Header at height {} is not wihtin 2 hours of now (is at {})", + "Header at height {} is not within 2 hours of now (is at {})", highest_header_height, highest_header.block_header.header.time ); self.drop_headers(highest_header_height.saturating_sub(1))?; diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 139a666098..209c6b8ef0 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -198,6 +198,9 @@ pub trait BlockEventDispatcher { } pub struct ChainsCoordinatorConfig { + /// true: assume all anchor blocks are present, and block chain sync until they arrive + /// false: process sortitions in reward cycles without anchor blocks + pub assume_present_anchor_blocks: bool, /// true: use affirmation maps before 2.1 /// false: only use affirmation maps in 2.1 or later pub always_use_affirmation_maps: bool, @@ -209,8 +212,17 @@ pub struct ChainsCoordinatorConfig { impl ChainsCoordinatorConfig { pub fn new() -> ChainsCoordinatorConfig { ChainsCoordinatorConfig { - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, + } + } + + pub fn test_new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + require_affirmed_anchor_blocks: false, + assume_present_anchor_blocks: false, } } } @@ -700,7 +712,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader notifier: (), atlas_config, atlas_db: Some(atlas_db), - config: ChainsCoordinatorConfig::new(), + config: ChainsCoordinatorConfig::test_new(), burnchain_indexer, refresh_stacker_db: 
Arc::new(AtomicBool::new(false)), in_nakamoto_epoch: false, @@ -2336,6 +2348,20 @@ impl< panic!("BUG: no epoch defined at height {}", header.block_height) }); + if self.config.assume_present_anchor_blocks { + // anchor blocks are always assumed to be present in the chain history, + // so report its absence if we don't have it. + if let PoxAnchorBlockStatus::SelectedAndUnknown(missing_anchor_block, _) = + &rc_info.anchor_status + { + info!( + "Currently missing PoX anchor block {}, which is assumed to be present", + &missing_anchor_block + ); + return Ok(Some(missing_anchor_block.clone())); + } + } + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 || self.config.always_use_affirmation_maps { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index adf9dddc0e..ac91d0a20b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -73,7 +73,8 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, TransactionPayload, + TenureChangeError, TenureChangePayload, TokenTransferMemo, TransactionPayload, + TransactionVersion, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -108,8 +109,7 @@ use crate::core::{ }; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; -use crate::util_lib::boot; -use crate::util_lib::boot::boot_code_id; +use crate::util_lib::boot::{self, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, 
sqlite_open, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, @@ -2093,7 +2093,8 @@ impl NakamotoChainState { return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (mut receipt, clarity_commit, reward_set_data, phantom_unlock_events) = + ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), @@ -2147,6 +2148,20 @@ impl NakamotoChainState { &receipt.header.anchored_header.block_hash() ); + let tx_receipts = &mut receipt.tx_receipts; + if let Some(unlock_receipt) = + // For the event dispatcher, attach any STXMintEvents that + // could not be included in the block (e.g. because the + // block didn't have a Coinbase transaction). + Self::generate_phantom_unlock_tx( + phantom_unlock_events, + &stacks_chain_state.config(), + next_ready_block.header.chain_length, + ) + { + tx_receipts.push(unlock_receipt); + } + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2157,7 +2172,7 @@ impl NakamotoChainState { dispatcher.announce_block( &block_event, &receipt.header.clone(), - &receipt.tx_receipts, + &tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, &receipt.matured_rewards, @@ -4197,11 +4212,13 @@ impl NakamotoChainState { applied_epoch_transition: bool, signers_updated: bool, coinbase_height: u64, + phantom_lockup_events: Vec, ) -> Result< ( StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4238,7 +4255,7 @@ impl NakamotoChainState { coinbase_height, }; - return Ok((epoch_receipt, clarity_commit, None)); + return Ok((epoch_receipt, clarity_commit, None, phantom_lockup_events)); } /// Append a Nakamoto Stacks block to the Stacks chain state. 
@@ -4264,6 +4281,7 @@ impl NakamotoChainState { StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4531,18 +4549,20 @@ impl NakamotoChainState { Ok(lockup_events) => lockup_events, }; - // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + // If any, append lockups events to the coinbase receipt + if let Some(receipt) = tx_receipts.get_mut(0) { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction - if let Some(receipt) = tx_receipts.get_mut(0) { - if receipt.is_coinbase_tx() { - receipt.events.append(&mut lockup_events); - } - } else { - warn!("Unable to attach lockups events, block's first transaction is not a coinbase transaction") + if receipt.is_coinbase_tx() { + receipt.events.append(&mut lockup_events); } } + + // If lockup_events still contains items, it means they weren't attached + if !lockup_events.is_empty() { + info!("Unable to attach lockup events, block's first transaction is not a coinbase transaction. Will attach as a phantom tx."); + } + // if any, append auto unlock events to the coinbase receipt if auto_unlock_events.len() > 0 { // Receipts are appended in order, so the first receipt should be @@ -4615,6 +4635,7 @@ impl NakamotoChainState { applied_epoch_transition, signer_set_calc.is_some(), coinbase_height, + lockup_events, ); } @@ -4728,7 +4749,12 @@ impl NakamotoChainState { coinbase_height, }; - Ok((epoch_receipt, clarity_commit, reward_set_data)) + Ok(( + epoch_receipt, + clarity_commit, + reward_set_data, + lockup_events, + )) } /// Create a StackerDB config for the .miners contract. @@ -4889,6 +4915,53 @@ impl NakamotoChainState { clarity.save_analysis(&contract_id, &analysis).unwrap(); }) } + + /// Generate a "phantom" transaction to include STXMintEvents for + /// lockups that could not be attached to a Coinbase transaction + /// (because the block doesn't have a Coinbase transaction). 
+ fn generate_phantom_unlock_tx( + events: Vec, + config: &ChainstateConfig, + stacks_block_height: u64, + ) -> Option { + if events.is_empty() { + return None; + } + info!("Generating phantom unlock tx"); + let version = if config.mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + // Make the txid unique -- the phantom tx payload should include something block-specific otherwise + // they will always have the same txid. In this case we use the block height in the memo. This also + // happens to give some indication of the purpose of this phantom tx, for anyone looking. + let memo = TokenTransferMemo({ + let str = format!("Block {} token unlocks", stacks_block_height); + let mut buf = [0u8; 34]; + buf[..str.len().min(34)].copy_from_slice(&str.as_bytes()[..]); + buf + }); + let boot_code_address = boot_code_addr(config.mainnet); + let boot_code_auth = boot_code_tx_auth(boot_code_address.clone()); + let unlock_tx = StacksTransaction::new( + version, + boot_code_auth, + TransactionPayload::TokenTransfer( + PrincipalData::Standard(boot_code_address.into()), + 0, + memo, + ), + ); + let unlock_receipt = StacksTransactionReceipt::from_stx_transfer( + unlock_tx, + events, + Value::okay_true(), + ExecutionCost::ZERO, + ); + Some(unlock_receipt) + } } impl StacksMessageCodec for NakamotoBlock { diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 42f72d5165..ffdea5a7dd 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2903,7 +2903,7 @@ pub mod test { // Just update the expected value assert_eq!( genesis_root_hash.to_string(), - "c771616ff6acb710051238c9f4a3c48020a6d70cda637d34b89f2311a7e27886" + "0eb3076f0635ccdfcdc048afb8dea9048c5180a2e2b2952874af1d18f06321e8" ); } diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index f703f8a367..1f43a34d40 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -16,8 +16,9 @@ 
//! Subcommands used by `stacks-inspect` binary +use std::any::type_name; use std::cell::LazyCell; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -28,9 +29,12 @@ use regex::Regex; use rusqlite::{Connection, OpenFlags}; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; use crate::burnchains::db::BurnchainDB; -use crate::burnchains::PoxConstants; +use crate::burnchains::{Burnchain, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleContext, }; @@ -42,82 +46,83 @@ use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, St use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; +use crate::config::{Config, ConfigFile, DEFAULT_MAINNET_CONFIG}; use crate::core::*; +use crate::cost_estimates::metrics::UnitMetric; +use crate::cost_estimates::UnitEstimator; use crate::util_lib::db::IndexDBTx; -/// Can be used with CLI commands to support non-mainnet chainstate -/// Allows integration testing of these functions -#[derive(Deserialize)] -pub struct StacksChainConfig { - pub chain_id: u32, - pub first_block_height: u64, - pub first_burn_header_hash: BurnchainHeaderHash, - pub first_burn_header_timestamp: u64, - pub pox_constants: PoxConstants, - pub epochs: EpochList, +/// Options common to many `stacks-inspect` subcommands +/// Returned by `process_common_opts()` +#[derive(Debug, Default)] +pub struct CommonOpts { + pub config: Option, } -impl StacksChainConfig { - pub fn default_mainnet() -> Self { - Self { - chain_id: CHAIN_ID_MAINNET, - first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - 
first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: PoxConstants::mainnet_default(), - epochs: (*STACKS_EPOCHS_MAINNET).clone(), +/// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` +/// +/// Args: +/// - `argv`: Full CLI args `Vec` +/// - `start_at`: Position in args vec where to look for common options. +/// For example, if `start_at` is `1`, then look for these options **before** the subcommand: +/// ```console +/// stacks-inspect --config testnet.toml replay-block path/to/chainstate +/// ``` +pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts { + let mut i = start_at; + let mut opts = CommonOpts::default(); + while let Some(arg) = argv.get(i) { + let (prefix, opt) = arg.split_at(2); + if prefix != "--" { + // No args left to take + break; } - } - - pub fn default_testnet() -> Self { - let mut pox_constants = PoxConstants::regtest_default(); - pox_constants.prepare_length = 100; - pox_constants.reward_cycle_length = 900; - pox_constants.v1_unlock_height = 3; - pox_constants.v2_unlock_height = 5; - pox_constants.pox_3_activation_height = 5; - pox_constants.pox_4_activation_height = 6; - pox_constants.v3_unlock_height = 7; - let mut epochs = EpochList::new(&*STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch10].start_height = 0; - epochs[StacksEpochId::Epoch10].end_height = 0; - epochs[StacksEpochId::Epoch20].start_height = 0; - epochs[StacksEpochId::Epoch20].end_height = 1; - epochs[StacksEpochId::Epoch2_05].start_height = 1; - epochs[StacksEpochId::Epoch2_05].end_height = 2; - epochs[StacksEpochId::Epoch21].start_height = 2; - epochs[StacksEpochId::Epoch21].end_height = 3; - epochs[StacksEpochId::Epoch22].start_height = 3; - epochs[StacksEpochId::Epoch22].end_height = 4; - epochs[StacksEpochId::Epoch23].start_height = 4; - 
epochs[StacksEpochId::Epoch23].end_height = 5; - epochs[StacksEpochId::Epoch24].start_height = 5; - epochs[StacksEpochId::Epoch24].end_height = 6; - epochs[StacksEpochId::Epoch25].start_height = 6; - epochs[StacksEpochId::Epoch25].end_height = 56_457; - epochs[StacksEpochId::Epoch30].start_height = 56_457; - Self { - chain_id: CHAIN_ID_TESTNET, - first_block_height: 0, - first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants, - epochs, + // "Take" arg + i += 1; + match opt { + "config" => { + let path = &argv[i]; + i += 1; + let config_file = ConfigFile::from_path(&path).unwrap_or_else(|e| { + panic!("Failed to read '{path}' as stacks-node config: {e}") + }); + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); + opts.config.replace(config); + } + "network" => { + let network = &argv[i]; + i += 1; + let config_file = match network.to_lowercase().as_str() { + "helium" => ConfigFile::helium(), + "mainnet" => ConfigFile::mainnet(), + "mocknet" => ConfigFile::mocknet(), + "xenon" => ConfigFile::xenon(), + other => { + eprintln!("Unknown network choice `{other}`"); + process::exit(1); + } + }; + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); + opts.config.replace(config); + } + _ => panic!("Unrecognized option: {opt}"), } } + // Remove options processed + argv.drain(start_at..i); + opts } -const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell = - LazyCell::new(StacksChainConfig::default_mainnet); - /// Replay blocks from chainstate database /// Terminates on error using `process::exit()` /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { +pub 
fn command_replay_block(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -195,7 +200,7 @@ pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -212,12 +217,15 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC let chain_state_path = format!("{db_path}/chainstate/"); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); let conn = chainstate.nakamoto_blocks_db(); @@ -281,7 +289,7 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` /// - `conf`: Optional config for running on non-mainnet chainstate -pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_mock_mining(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! 
{ let n = &argv[0]; eprintln!("Usage:"); @@ -369,32 +377,184 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConf } } +/// Replay mock mined blocks from JSON files +/// Terminates on error using `process::exit()` +/// +/// Arguments: +/// - `argv`: Args in CLI format: ` [args...]` +/// - `conf`: Optional config for running on non-mainnet chainstate +pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { + let print_help_and_exit = || { + let n = &argv[0]; + eprintln!("Usage: {n} [min-fee [max-time]]"); + eprintln!(""); + eprintln!("Given a , try to ''mine'' an anchored block. This invokes the miner block"); + eprintln!("assembly, but does not attempt to broadcast a block commit. This is useful for determining"); + eprintln!("what transactions a given chain state would include in an anchor block,"); + eprintln!("or otherwise simulating a miner."); + process::exit(1); + }; + + // Parse subcommand-specific args + let db_path = argv.get(1).unwrap_or_else(print_help_and_exit); + let min_fee = argv + .get(2) + .map(|arg| arg.parse().expect("Could not parse min_fee")) + .unwrap_or(u64::MAX); + let max_time = argv + .get(3) + .map(|arg| arg.parse().expect("Could not parse max_time")) + .unwrap_or(u64::MAX); + + let start = get_epoch_time_ms(); + + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); + + let burnchain_path = format!("{db_path}/burnchain"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let chain_state_path = format!("{db_path}/chainstate/"); + + let burnchain = conf.get_burnchain(); + let sort_db = SortitionDB::open(&sort_db_path, false, burnchain.pox_constants.clone()) + .unwrap_or_else(|e| panic!("Failed to open {sort_db_path}: {e}")); + let (chain_state, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap_or_else(|e| panic!("Failed to open stacks chain state: {e}")); + let chain_tip = 
SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + .unwrap_or_else(|e| panic!("Failed to get sortition chain tip: {e}")); + + let estimator = Box::new(UnitEstimator); + let metric = Box::new(UnitMetric); + + let mut mempool_db = MemPoolDB::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + estimator, + metric, + ) + .unwrap_or_else(|e| panic!("Failed to open mempool db: {e}")); + + let tip_header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap_or_else(|e| panic!("Error looking up chain tip: {e}")) + .expect("No chain tip found"); + + // Fail if Nakamoto chainstate detected. `try-mine` cannot mine Nakamoto blocks yet + // TODO: Add Nakamoto block support + if matches!( + &tip_header.anchored_header, + StacksBlockHeaderTypes::Nakamoto(..) + ) { + panic!("Attempting to mine Nakamoto block. Nakamoto blocks not supported yet!"); + }; + + let sk = StacksPrivateKey::new(); + let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); + tx_auth.set_origin_nonce(0); + + let mut coinbase_tx = StacksTransaction::new( + TransactionVersion::Mainnet, + tx_auth, + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), + ); + + coinbase_tx.chain_id = conf.burnchain.chain_id; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); + tx_signer.sign_origin(&sk).unwrap(); + let coinbase_tx = tx_signer.get_tx().unwrap(); + + let mut settings = BlockBuilderSettings::limited(); + settings.max_miner_time_ms = max_time; + + let result = StacksBlockBuilder::build_anchored_block( + &chain_state, + &sort_db.index_handle(&chain_tip.sortition_id), + &mut mempool_db, + &tip_header, + chain_tip.total_burn, + VRFProof::empty(), + Hash160([0; 20]), + &coinbase_tx, + settings, + None, + &Burnchain::new( + &burnchain_path, + &burnchain.chain_name, + &burnchain.network_name, + ) + .unwrap(), + ); + + let stop = get_epoch_time_ms(); + + 
println!( + "{} mined block @ height = {} off of {} ({}/{}) in {}ms. Min-fee: {}, Max-time: {}", + if result.is_ok() { + "Successfully" + } else { + "Failed to" + }, + tip_header.stacks_block_height + 1, + StacksBlockHeader::make_index_block_hash( + &tip_header.consensus_hash, + &tip_header.anchored_header.block_hash() + ), + &tip_header.consensus_hash, + &tip_header.anchored_header.block_hash(), + stop.saturating_sub(start), + min_fee, + max_time + ); + + if let Ok((block, execution_cost, size)) = result { + let mut total_fees = 0; + for tx in block.txs.iter() { + total_fees += tx.get_tx_fee(); + } + println!( + "Block {}: {} uSTX, {} bytes, cost {:?}", + block.block_hash(), + total_fees, + size, + &execution_cost + ); + } + + process::exit(0); +} + /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block( - db_path: &str, - index_block_hash_hex: &str, - conf: Option<&StacksChainConfig>, -) { +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<&Config>) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, 
- conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -448,30 +608,31 @@ fn replay_staging_block( } /// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block( - db_path: &str, - block: AssembledAnchorBlock, - conf: Option<&StacksChainConfig>, -) { +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock, conf: Option<&Config>) { let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -650,22 +811,28 @@ fn replay_block( } /// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate -fn 
replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &StacksChainConfig) { +fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &Config) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -696,7 +863,7 @@ fn replay_block_nakamoto( ) }); - debug!("Process staging Nakamoto block"; + info!("Process staging Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id(), @@ -940,3 +1107,36 @@ fn replay_block_nakamoto( Ok(()) } + +#[cfg(test)] +pub mod test { + use super::*; + + fn parse_cli_command(s: &str) -> Vec { + s.split(' ').map(String::from).collect() + } + + #[test] + pub fn test_drain_common_opts() { + // Should find/remove no options + let mut argv = parse_cli_command( + "stacks-inspect try-mine --config my_config.toml /tmp/chainstate/mainnet", + ); + let argv_init = argv.clone(); + let opts = drain_common_opts(&mut argv, 0); + let opts = drain_common_opts(&mut argv, 1); + + assert_eq!(argv, argv_init); + 
assert!(opts.config.is_none()); + + // Should find config opts and remove from vec + let mut argv = parse_cli_command( + "stacks-inspect --network mocknet --network mainnet try-mine /tmp/chainstate/mainnet", + ); + let opts = drain_common_opts(&mut argv, 1); + let argv_expected = parse_cli_command("stacks-inspect try-mine /tmp/chainstate/mainnet"); + + assert_eq!(argv, argv_expected); + assert!(opts.config.is_some()); + } +} diff --git a/testnet/stacks-node/src/chain_data.rs b/stackslib/src/config/chain_data.rs similarity index 97% rename from testnet/stacks-node/src/chain_data.rs rename to stackslib/src/config/chain_data.rs index cc60f964a3..e4c3899511 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/stackslib/src/config/chain_data.rs @@ -17,21 +17,22 @@ use std::collections::HashMap; use std::process::{Command, Stdio}; -use stacks::burnchains::bitcoin::address::BitcoinAddress; -use stacks::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; -use stacks::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; -use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use stacks::chainstate::burn::distribution::BurnSamplePoint; -use stacks::chainstate::burn::operations::leader_block_commit::{ - MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, -}; -use stacks::chainstate::burn::operations::LeaderBlockCommitOp; -use stacks::chainstate::stacks::address::PoxAddress; -use stacks::core::MINING_COMMITMENT_WINDOW; -use stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; +use crate::burnchains::bitcoin::address::BitcoinAddress; +use crate::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; +use crate::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; +use 
crate::chainstate::burn::distribution::BurnSamplePoint; +use crate::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, +}; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::stacks::address::PoxAddress; +use crate::core::MINING_COMMITMENT_WINDOW; +use crate::util_lib::db::Error as DBError; + pub struct MinerStats { pub unconfirmed_commits_helper: String, } @@ -526,11 +527,6 @@ pub mod tests { use std::fs; use std::io::Write; - use stacks::burnchains::{BurnchainSigner, Txid}; - use stacks::chainstate::burn::distribution::BurnSamplePoint; - use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; - use stacks::chainstate::burn::operations::LeaderBlockCommitOp; - use stacks::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPublicKey, VRFSeed, }; @@ -538,6 +534,11 @@ pub mod tests { use stacks_common::util::uint::{BitArray, Uint256}; use super::MinerStats; + use crate::burnchains::{BurnchainSigner, Txid}; + use crate::chainstate::burn::distribution::BurnSamplePoint; + use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; + use crate::chainstate::burn::operations::LeaderBlockCommitOp; + use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; #[test] fn test_burn_dist_to_prob_dist() { diff --git a/testnet/stacks-node/src/config.rs b/stackslib/src/config/mod.rs similarity index 96% rename from testnet/stacks-node/src/config.rs rename to stackslib/src/config/mod.rs index 4ad793a4c3..42663372f6 100644 --- a/testnet/stacks-node/src/config.rs +++ b/stackslib/src/config/mod.rs @@ -14,48 +14,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+pub mod chain_data; + use std::collections::{HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use std::time::Duration; use std::{cmp, fs, thread}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; -use lazy_static::lazy_static; use rand::RngCore; use serde::Deserialize; -use stacks::burnchains::affirmation::AffirmationMap; -use stacks::burnchains::bitcoin::BitcoinNetworkType; -use stacks::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::MINERS_NAME; -use stacks::chainstate::stacks::index::marf::MARFOpenOpts; -use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; -use stacks::chainstate::stacks::MAX_BLOCK_LEN; -use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; -use stacks::core::{ - MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, - BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, - BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, -}; -use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; -use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; -use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; -use stacks::net::atlas::AtlasConfig; -use stacks::net::connection::ConnectionOptions; -use stacks::net::{Neighbor, NeighborAddress, NeighborKey}; -use 
stacks::types::chainstate::BurnchainHeaderHash; -use stacks::types::EpochList; -use stacks::util::hash::to_hex; -use stacks::util_lib::boot::boot_code_id; -use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -64,7 +36,36 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::chain_data::MinerStats; +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::bitcoin::BitcoinNetworkType; +use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; +use crate::chainstate::nakamoto::signer_set::NakamotoSigners; +use crate::chainstate::stacks::boot::MINERS_NAME; +use crate::chainstate::stacks::index::marf::MARFOpenOpts; +use crate::chainstate::stacks::index::storage::TrieHashCalculationMode; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; +use crate::chainstate::stacks::MAX_BLOCK_LEN; +use crate::config::chain_data::MinerStats; +use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; +use crate::core::{ + MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, + BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, + BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, + PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, +}; +use crate::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; +use crate::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; +use crate::cost_estimates::fee_scalar::ScalarFeeRateEstimator; +use crate::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; +use crate::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; +use crate::net::atlas::AtlasConfig; 
+use crate::net::connection::ConnectionOptions; +use crate::net::{Neighbor, NeighborAddress, NeighborKey}; +use crate::types::chainstate::BurnchainHeaderHash; +use crate::types::EpochList; +use crate::util::hash::to_hex; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; pub const DEFAULT_SATS_PER_VB: u64 = 50; pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380; @@ -93,6 +94,45 @@ const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; + +static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = + LazyLock::new(|| ConnectionOptions { + inbox_maxlen: 100, + outbox_maxlen: 100, + timeout: 15, + idle_timeout: 15, // how long a HTTP connection can be idle before it's closed + heartbeat: 3600, + // can't use u64::max, because sqlite stores as i64. 
+ private_key_lifetime: 9223372036854775807, + num_neighbors: 32, // number of neighbors whose inventories we track + num_clients: 750, // number of inbound p2p connections + soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track + soft_num_clients: 750, // soft limit on the number of inbound p2p connections + max_neighbors_per_host: 1, // maximum number of neighbors per host we permit + max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit + soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit + soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) + soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit + max_http_clients: 1000, // maximum number of HTTP connections + max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) + walk_interval: 60, // how often, in seconds, we do a neighbor walk + walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node + log_neighbors_freq: 60_000, // every minute, log all peer connections + inv_sync_interval: 45, // how often, in seconds, we refresh block inventories + inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet + download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) + dns_timeout: 15_000, + max_inflight_blocks: 6, + max_inflight_attachments: 6, + ..std::default::Default::default() + }); + +pub static DEFAULT_MAINNET_CONFIG: LazyLock = LazyLock::new(|| { + Config::from_config_file(ConfigFile::mainnet(), false) + .expect("Failed to create default mainnet config") +}); #[derive(Clone, Deserialize, Default, Debug)] 
#[serde(deny_unknown_fields)] @@ -141,7 +181,7 @@ impl ConfigFile { mode: Some("xenon".to_string()), rpc_port: Some(18332), peer_port: Some(18333), - peer_host: Some("bitcoind.testnet.stacks.co".to_string()), + peer_host: Some("0.0.0.0".to_string()), magic_bytes: Some("T2".into()), ..BurnchainConfigFile::default() }; @@ -187,9 +227,9 @@ impl ConfigFile { mode: Some("mainnet".to_string()), rpc_port: Some(8332), peer_port: Some(8333), - peer_host: Some("bitcoin.blockstack.com".to_string()), - username: Some("blockstack".to_string()), - password: Some("blockstacksystem".to_string()), + peer_host: Some("0.0.0.0".to_string()), + username: Some("bitcoin".to_string()), + password: Some("bitcoin".to_string()), magic_bytes: Some("X2".to_string()), ..BurnchainConfigFile::default() }; @@ -311,39 +351,6 @@ pub struct Config { pub atlas: AtlasConfig, } -lazy_static! { - static ref HELIUM_DEFAULT_CONNECTION_OPTIONS: ConnectionOptions = ConnectionOptions { - inbox_maxlen: 100, - outbox_maxlen: 100, - timeout: 15, - idle_timeout: 15, // how long a HTTP connection can be idle before it's closed - heartbeat: 3600, - // can't use u64::max, because sqlite stores as i64. 
- private_key_lifetime: 9223372036854775807, - num_neighbors: 32, // number of neighbors whose inventories we track - num_clients: 750, // number of inbound p2p connections - soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track - soft_num_clients: 750, // soft limit on the number of inbound p2p connections - max_neighbors_per_host: 1, // maximum number of neighbors per host we permit - max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit - soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit - soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) - soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit - max_http_clients: 1000, // maximum number of HTTP connections - max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) - walk_interval: 60, // how often, in seconds, we do a neighbor walk - walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node - log_neighbors_freq: 60_000, // every minute, log all peer connections - inv_sync_interval: 45, // how often, in seconds, we refresh block inventories - inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet - download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) - dns_timeout: 15_000, - max_inflight_blocks: 6, - max_inflight_attachments: 6, - .. std::default::Default::default() - }; -} - impl Config { /// get the up-to-date burnchain options from the config. 
/// If the config file can't be loaded, then return the existing config @@ -516,10 +523,7 @@ impl Config { } fn check_nakamoto_config(&self, burnchain: &Burnchain) { - let epochs = StacksEpoch::get_epochs( - self.burnchain.get_bitcoin_network().1, - self.burnchain.epochs.as_ref(), - ); + let epochs = self.burnchain.get_epoch_list(); let Some(epoch_30) = epochs.get(StacksEpochId::Epoch30) else { // no Epoch 3.0, so just return return; @@ -636,8 +640,8 @@ impl Config { BitcoinNetworkType::Mainnet => { Err("Cannot configure epochs in mainnet mode".to_string()) } - BitcoinNetworkType::Testnet => Ok(stacks::core::STACKS_EPOCHS_TESTNET.to_vec()), - BitcoinNetworkType::Regtest => Ok(stacks::core::STACKS_EPOCHS_REGTEST.to_vec()), + BitcoinNetworkType::Testnet => Ok(STACKS_EPOCHS_TESTNET.to_vec()), + BitcoinNetworkType::Regtest => Ok(STACKS_EPOCHS_REGTEST.to_vec()), }?; let mut matched_epochs = vec![]; for configured_epoch in conf_epochs.iter() { @@ -1283,6 +1287,10 @@ impl BurnchainConfig { other => panic!("Invalid stacks-node mode: {other}"), } } + + pub fn get_epoch_list(&self) -> EpochList { + StacksEpoch::get_epochs(self.get_bitcoin_network().1, self.epochs.as_ref()) + } } #[derive(Clone, Deserialize, Default, Debug)] @@ -1648,6 +1656,7 @@ pub struct NodeConfig { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: bool, pub require_affirmed_anchor_blocks: bool, + pub assume_present_anchor_blocks: bool, /// Fault injection for failing to push blocks pub fault_injection_block_push_fail_probability: Option, // fault injection for hiding blocks. 
@@ -1931,6 +1940,7 @@ impl Default for NodeConfig { use_test_genesis_chainstate: None, always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, chain_liveness_poll_time_secs: 300, @@ -2145,6 +2155,8 @@ pub struct MinerConfig { pub block_commit_delay: Duration, /// The percentage of the remaining tenure cost limit to consume each block. pub tenure_cost_limit_per_block_percentage: Option, + /// Duration to wait before attempting to issue a tenure extend + pub tenure_timeout: Duration, } impl Default for MinerConfig { @@ -2181,6 +2193,7 @@ impl Default for MinerConfig { tenure_cost_limit_per_block_percentage: Some( DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE, ), + tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), } } } @@ -2417,6 +2430,7 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + pub assume_present_anchor_blocks: Option, /// At most, how often should the chain-liveness thread /// wake up the chains-coordinator. Defaults to 300s (5 min). pub chain_liveness_poll_time_secs: Option, @@ -2498,6 +2512,10 @@ impl NodeConfigFile { // miners should always try to mine, even if they don't have the anchored // blocks in the canonical affirmation map. Followers, however, can stall. require_affirmed_anchor_blocks: self.require_affirmed_anchor_blocks.unwrap_or(!miner), + // as of epoch 3.0, all prepare phases have anchor blocks. + // at the start of epoch 3.0, the chain stalls without anchor blocks. + // only set this to false if you're doing some very extreme testing. + assume_present_anchor_blocks: true, // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. 
fault_injection_hide_blocks: false, @@ -2566,6 +2584,7 @@ pub struct MinerConfigFile { pub subsequent_rejection_pause_ms: Option, pub block_commit_delay_ms: Option, pub tenure_cost_limit_per_block_percentage: Option, + pub tenure_timeout_secs: Option, } impl MinerConfigFile { @@ -2706,6 +2725,7 @@ impl MinerConfigFile { subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), tenure_cost_limit_per_block_percentage, + tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), }) } } diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 31f97628a6..190ef8a1f0 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -63,6 +63,7 @@ pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; pub mod cli; +pub mod config; pub mod core; pub mod cost_estimates; pub mod deps; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 7f324c52c8..76a9d63c21 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -309,6 +309,8 @@ fn main() { process::exit(1); } + let common_opts = cli::drain_common_opts(&mut argv, 1); + if argv[1] == "--version" { println!( "{}", @@ -796,128 +798,7 @@ check if the associated microblocks can be downloaded } if argv[1] == "try-mine" { - if argv.len() < 3 { - eprintln!( - "Usage: {} try-mine [min-fee [max-time]] - -Given a , try to ''mine'' an anchored block. This invokes the miner block -assembly, but does not attempt to broadcast a block commit. This is useful for determining -what transactions a given chain state would include in an anchor block, or otherwise -simulating a miner. 
-", - argv[0] - ); - process::exit(1); - } - - let start = get_epoch_time_ms(); - let burnchain_path = format!("{}/mainnet/burnchain", &argv[2]); - let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); - let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); - - let mut min_fee = u64::MAX; - let mut max_time = u64::MAX; - - if argv.len() >= 4 { - min_fee = argv[3].parse().expect("Could not parse min_fee"); - } - if argv.len() >= 5 { - max_time = argv[4].parse().expect("Could not parse max_time"); - } - - let sort_db = SortitionDB::open(&sort_db_path, false, PoxConstants::mainnet_default()) - .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); - let chain_id = CHAIN_ID_MAINNET; - let (chain_state, _) = StacksChainState::open(true, chain_id, &chain_state_path, None) - .expect("Failed to open stacks chain state"); - let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) - .expect("Failed to get sortition chain tip"); - - let estimator = Box::new(UnitEstimator); - let metric = Box::new(UnitMetric); - - let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); - - let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) - .unwrap() - .unwrap(); - let parent_header = StacksChainState::get_anchored_block_header_info( - chain_state.db(), - &header_tip.consensus_hash, - &header_tip.anchored_header.block_hash(), - ) - .expect("Failed to load chain tip header info") - .expect("Failed to load chain tip header info"); - - let sk = StacksPrivateKey::new(); - let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); - tx_auth.set_origin_nonce(0); - - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Mainnet, - tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), - ); - - coinbase_tx.chain_id = chain_id; - coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; 
- let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); - tx_signer.sign_origin(&sk).unwrap(); - let coinbase_tx = tx_signer.get_tx().unwrap(); - - let mut settings = BlockBuilderSettings::limited(); - settings.max_miner_time_ms = max_time; - - let result = StacksBlockBuilder::build_anchored_block( - &chain_state, - &sort_db.index_handle(&chain_tip.sortition_id), - &mut mempool_db, - &parent_header, - chain_tip.total_burn, - VRFProof::empty(), - Hash160([0; 20]), - &coinbase_tx, - settings, - None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), - ); - - let stop = get_epoch_time_ms(); - - println!( - "{} mined block @ height = {} off of {} ({}/{}) in {}ms. Min-fee: {}, Max-time: {}", - if result.is_ok() { - "Successfully" - } else { - "Failed to" - }, - parent_header.stacks_block_height + 1, - StacksBlockHeader::make_index_block_hash( - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash() - ), - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash(), - stop.saturating_sub(start), - min_fee, - max_time - ); - - if let Ok((block, execution_cost, size)) = result { - let mut total_fees = 0; - for tx in block.txs.iter() { - total_fees += tx.get_tx_fee(); - } - println!( - "Block {}: {} uSTX, {} bytes, cost {:?}", - block.block_hash(), - total_fees, - size, - &execution_cost - ); - } - + cli::command_try_mine(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -1719,41 +1600,17 @@ simulating a miner. 
} if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..], None); + cli::command_replay_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-naka-block" { - let chain_config = - if let Some(network_flag_ix) = argv.iter().position(|arg| arg == "--network") { - let Some(network_choice) = argv.get(network_flag_ix + 1) else { - eprintln!("Must supply network choice after `--network` option"); - process::exit(1); - }; - - let network_config = match network_choice.to_lowercase().as_str() { - "testnet" => cli::StacksChainConfig::default_testnet(), - "mainnet" => cli::StacksChainConfig::default_mainnet(), - other => { - eprintln!("Unknown network choice `{other}`"); - process::exit(1); - } - }; - - argv.remove(network_flag_ix + 1); - argv.remove(network_flag_ix); - - Some(network_config) - } else { - None - }; - - cli::command_replay_block_nakamoto(&argv[1..], chain_config.as_ref()); + cli::command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..], None); + cli::command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); process::exit(0); } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 6729dbc4a8..53d6ec9fa1 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -355,7 +355,7 @@ impl NakamotoBootPlan { fn boot_nakamoto_peers<'a>( mut self, observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + ) -> (TestPeer<'a>, Vec>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); @@ -666,7 +666,7 @@ impl NakamotoBootPlan { self, boot_plan: Vec, observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + ) -> (TestPeer<'a>, Vec>) { let test_signers = self.test_signers.clone(); let pox_constants = 
self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); diff --git a/stx-genesis/chainstate-test.txt b/stx-genesis/chainstate-test.txt index 614cf3d9f4..6eedf241d1 100644 --- a/stx-genesis/chainstate-test.txt +++ b/stx-genesis/chainstate-test.txt @@ -69,4 +69,5 @@ SM1ZH700J7CEDSEHM5AJ4C4MKKWNESTS35DD3SZM5,13888889,2267 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,45467 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,6587 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,2267 +SP2CTPPV8BHBVSQR727A3MK00ZD85RNY903KAG9F3,12345678,35 -----END STX VESTING----- \ No newline at end of file diff --git a/stx-genesis/chainstate-test.txt.sha256 b/stx-genesis/chainstate-test.txt.sha256 index 56782ae494..69ac95c254 100644 --- a/stx-genesis/chainstate-test.txt.sha256 +++ b/stx-genesis/chainstate-test.txt.sha256 @@ -1 +1 @@ -014402b47d53b0716402c172fa746adf308b03a826ebea91944a5eb6a304a823 \ No newline at end of file +088c3caea982a8f6f74dda48ec5f06f51f7605def9760a971b1acd763ee6b7cf \ No newline at end of file diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 0c68d22ee7..e902140428 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -14,7 +14,6 @@ serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} -toml = "0.5.6" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" diff --git a/testnet/stacks-node/conf b/testnet/stacks-node/conf new file mode 120000 index 0000000000..94edd3b5d4 --- /dev/null +++ b/testnet/stacks-node/conf @@ -0,0 +1 @@ +../../stackslib/conf/ \ No newline at end of file diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 727483886e..f3aaa95ab5 100644 --- 
a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -48,6 +48,12 @@ use stacks::chainstate::burn::Opcodes; use stacks::chainstate::coordinator::comm::CoordinatorChannels; #[cfg(test)] use stacks::chainstate::stacks::address::PoxAddress; +use stacks::config::BurnchainConfig; +#[cfg(test)] +use stacks::config::{ + OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, +}; use stacks::core::{EpochList, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; @@ -74,12 +80,6 @@ use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use crate::config::BurnchainConfig; -#[cfg(test)] -use crate::config::{ - OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, - OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, -}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2806,13 +2806,13 @@ mod tests { use std::io::Write; use stacks::burnchains::BurnchainSigner; + use stacks::config::DEFAULT_SATS_PER_VB; use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::*; - use crate::config::DEFAULT_SATS_PER_VB; #[test] fn test_get_satoshis_per_byte() { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 4d6eec8922..2f71838adb 100644 --- 
a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,8 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; @@ -49,6 +51,7 @@ use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::mempool::{MemPoolDropReason, MemPoolEventDispatcher, ProposalCallbackReceiver}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ @@ -59,6 +62,8 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +#[cfg(any(test, feature = "testing"))] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -68,7 +73,11 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; -use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(any(test, feature = "testing"))] +lazy_static! { + /// Do not announce a signed/mined block to the network when set to true. 
+ pub static ref TEST_SKIP_BLOCK_ANNOUNCEMENT: TestFlag = TestFlag::default(); +} #[derive(Debug, Clone)] struct EventObserver { @@ -1299,6 +1308,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1695,6 +1709,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if TEST_SKIP_BLOCK_ANNOUNCEMENT.get() { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index c285c6a168..2a9a601723 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -10,10 +10,10 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::config::MinerConfig; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; -use crate::config::MinerConfig; use crate::neon::Counters; use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 4fa1c5e5a7..7916de9d00 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -16,8 +16,6 @@ use stacks_common::util::hash::hex_bytes; pub mod monitoring; pub mod burnchains; -pub mod chain_data; -pub mod config; pub mod event_dispatcher; 
pub mod genesis_data; pub mod globals; @@ -41,19 +39,19 @@ use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvi use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::DummyEventDispatcher; use stacks::chainstate::stacks::db::StacksChainState; +use stacks::config::chain_data::MinerStats; +pub use stacks::config::{Config, ConfigFile}; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; pub use self::burnchains::{ BitcoinRegtestController, BurnchainController, BurnchainTip, MocknetController, }; -pub use self::config::{Config, ConfigFile}; pub use self::event_dispatcher::EventDispatcher; pub use self::keychain::Keychain; pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; -use crate::chain_data::MinerStats; use crate::neon_node::{BlockMinerThread, TipCandidate}; use crate::run_loop::boot_nakamoto; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6a4ea39b60..d9edf97e90 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -139,7 +139,7 @@ pub struct BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, - /// Number of blocks mined since a tenure change/extend + /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: RegisteredKey, @@ -160,6 +160,8 @@ pub struct BlockMinerThread { /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, signer_set_cache: Option, + /// The time at which tenure change/extend was attempted + tenure_change_time: Instant, } impl BlockMinerThread { @@ -187,6 +189,7 @@ impl BlockMinerThread { reason, p2p_handle: rt.get_p2p_handle(), signer_set_cache: None, + tenure_change_time: 
Instant::now(), } } @@ -1186,7 +1189,9 @@ impl BlockMinerThread { if self.last_block_mined.is_some() { // Check if we can extend the current tenure let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); - if get_epoch_time_secs() <= tenure_extend_timestamp { + if get_epoch_time_secs() <= tenure_extend_timestamp + && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout + { return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, @@ -1195,6 +1200,8 @@ impl BlockMinerThread { info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_change_time_elapsed" => self.tenure_change_time.elapsed().as_secs(), + "tenure_timeout_secs" => self.config.miner.tenure_timeout.as_secs(), ); self.tenure_extend_reset(); } @@ -1265,6 +1272,7 @@ impl BlockMinerThread { } fn tenure_extend_reset(&mut self) { + self.tenure_change_time = Instant::now(); self.reason = MinerReason::Extended { burn_view_consensus_hash: self.burn_block.consensus_hash, }; diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 00c21ec003..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -35,16 +35,16 @@ use stacks::types::PublicKey; use stacks::util::get_epoch_time_secs; use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::MessageSignature; +#[cfg(test)] +use stacks_common::util::tests::TestFlag; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -#[cfg(test)] -use crate::neon::TestFlag; #[cfg(test)] /// Fault injection flag to prevent the miner from seeing enough signer signatures. 
/// Used to test that the signers will broadcast a block if it gets enough signatures -pub static TEST_IGNORE_SIGNERS: LazyLock = LazyLock::new(TestFlag::default); +pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFlag::default); /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c74ce3d878..2d4dc7fadd 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -187,6 +187,8 @@ use stacks::chainstate::stacks::{ StacksMicroblock, StacksPublicKey, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; +use stacks::config::chain_data::MinerStats; +use stacks::config::NodeConfig; use stacks::core::mempool::MemPoolDB; use stacks::core::{EpochList, FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; @@ -220,8 +222,6 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError}; -use crate::chain_data::MinerStats; -use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::run_loop::neon::RunLoop; diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 648c6d7470..171ebcb2cb 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -22,9 +22,8 @@ use std::{fs, thread}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::core::StacksEpochExtension; 
use stacks::net::p2p::PeerNetwork; -use stacks_common::types::{StacksEpoch, StacksEpochId}; +use stacks_common::types::StacksEpochId; use crate::event_dispatcher::EventDispatcher; use crate::globals::NeonGlobals; @@ -233,10 +232,7 @@ impl BootRunLoop { fn reached_epoch_30_transition(config: &Config) -> Result { let burn_height = Self::get_burn_height(config)?; - let epochs = StacksEpoch::get_epochs( - config.burnchain.get_bitcoin_network().1, - config.burnchain.epochs.as_ref(), - ); + let epochs = config.burnchain.get_epoch_list(); let epoch_3 = epochs .get(StacksEpochId::Epoch30) .ok_or("No Epoch-3.0 defined")?; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 16f5a12b2d..335fb325d8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -319,6 +319,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index b2171b4e8b..4ecc84b73b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,8 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +#[cfg(test)] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -94,30 +96,6 @@ impl std::ops::Deref for RunLoopCounter { } } -#[cfg(test)] 
-#[derive(Clone)] -pub struct TestFlag(pub Arc>>); - -#[cfg(test)] -impl Default for TestFlag { - fn default() -> Self { - Self(Arc::new(std::sync::Mutex::new(None))) - } -} - -#[cfg(test)] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: bool) { - *self.0.lock().unwrap() = Some(value); - } - - /// Get the test flag value. Defaults to false if the flag is not set. - pub fn get(&self) -> bool { - self.0.lock().unwrap().unwrap_or(false) - } -} - #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -135,7 +113,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] - pub naka_skip_commit_op: TestFlag, + pub naka_skip_commit_op: TestFlag, } impl Counters { @@ -637,6 +615,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node @@ -1168,19 +1147,8 @@ impl RunLoop { let mut sortition_db_height = rc_aligned_height; let mut burnchain_height = sortition_db_height; - let mut num_sortitions_in_last_cycle = 1; // prepare to fetch the first reward cycle! 
- let mut target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(burnchain_height) - .expect("BUG: block height is not in a reward cycle") - + 1, - ), - burnchain.get_headers_height() - 1, - ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1208,17 +1176,13 @@ impl RunLoop { let remote_chain_height = burnchain.get_headers_height() - 1; - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // wait until it's okay to process the next reward cycle's sortitions - let ibd = match self.get_pox_watchdog().pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions. + let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, remote_chain_height, - num_sortitions_in_last_cycle, ) { - Ok(ibd) => ibd, + Ok(x) => x, Err(e) => { debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; @@ -1232,9 +1196,6 @@ impl RunLoop { 0.0 }; - // will recalculate this in the following loop - num_sortitions_in_last_cycle = 0; - // Download each burnchain block and process their sortitions. This, in turn, will // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and // process them. 
This loop runs for one reward cycle, so that the next pass of the @@ -1282,8 +1243,6 @@ impl RunLoop { "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); - let mut sort_count = 0; - debug!("Runloop: block mining until we process all sortitions"); signal_mining_blocked(globals.get_miner_status()); @@ -1301,9 +1260,6 @@ impl RunLoop { "Failed to find block in fork processed by burnchain indexer", ) }; - if block.sortition { - sort_count += 1; - } let sortition_id = &block.sortition_id; @@ -1350,9 +1306,8 @@ impl RunLoop { debug!("Runloop: enable miner after processing sortitions"); signal_mining_ready(globals.get_miner_status()); - num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})" ); sortition_db_height = next_sortition_height; @@ -1371,22 +1326,6 @@ impl RunLoop { } } - // advance one reward cycle at a time. - // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
- // Otherwise, this is burnchain_tip + reward_cycle_len - let next_target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: burnchain height before system start") - + 1, - ), - remote_chain_height, - ); - - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); - target_burnchain_block_height = next_target_burnchain_block_height; - if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 395d829c8f..488234d21d 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -1,20 +1,28 @@ -use std::collections::VecDeque; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use stacks::burnchains::{Burnchain, Error as burnchain_error}; -use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::burnchains::BurnchainTip; use crate::Config; -// amount of time to wait for an inv or download sync to complete. -// These _really should_ complete before the PoX sync watchdog permits processing the next reward -// cycle, so this number is intentionally high (like, there's something really wrong with your -// network if your node is actualy waiting a day in-between reward cycles). -const SYNC_WAIT_SECS: u64 = 24 * 3600; - #[derive(Clone)] pub struct PoxSyncWatchdogComms { /// how many passes in the p2p state machine have taken place since startup? @@ -56,22 +64,6 @@ impl PoxSyncWatchdogComms { self.last_ibd.load(Ordering::SeqCst) } - /// Wait for at least one inv-sync state-machine passes - pub fn wait_for_inv_sync_pass(&self, timeout: u64) -> Result { - let current = self.get_inv_sync_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_inv_sync_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one inv-sync pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { let deadline = secs + get_epoch_time_secs(); while get_epoch_time_secs() < deadline { @@ -83,21 +75,6 @@ impl PoxSyncWatchdogComms { Ok(()) } - pub fn wait_for_download_pass(&self, timeout: u64) -> Result { - let current = self.get_download_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_download_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one download pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - 
Ok(true) - } - pub fn should_keep_running(&self) -> bool { self.should_keep_running.load(Ordering::SeqCst) } @@ -124,82 +101,25 @@ impl PoxSyncWatchdogComms { /// unless it's reasonably sure that it has processed all Stacks blocks for this reward cycle. /// This struct monitors the Stacks chainstate to make this determination. pub struct PoxSyncWatchdog { - /// number of attachable but unprocessed staging blocks over time - new_attachable_blocks: VecDeque<i64>, - /// number of newly-processed staging blocks over time - new_processed_blocks: VecDeque<i64>, - /// last time we asked for attachable blocks - last_attachable_query: u64, - /// last time we asked for processed blocks - last_processed_query: u64, - /// number of samples to take - max_samples: u64, - /// maximum number of blocks to count per query (affects performance!) - max_staging: u64, - /// when did we first start watching? - watch_start_ts: u64, - /// when did we first see a flatline in block-processing rate? - last_block_processed_ts: u64, - /// estimated time for a block to get downloaded. Used to infer how long to wait for the first - /// blocks to show up when waiting for this reward cycle. - estimated_block_download_time: f64, - /// estimated time for a block to get processed -- from when it shows up as attachable to when - /// it shows up as processed. Used to infer how long to wait for the last block to get - /// processed before unblocking burnchain sync for the next reward cycle. - estimated_block_process_time: f64, - /// time between burnchain syncs in stead state + /// time between burnchain syncs in steady state steady_state_burnchain_sync_interval: u64, - /// when to re-sync under steady state - steady_state_resync_ts: u64, - /// chainstate handle - chainstate: StacksChainState, /// handle to relayer thread that informs the watchdog when the P2P state-machine does stuff relayer_comms: PoxSyncWatchdogComms, /// should this sync watchdog always download? used in integration tests. 
unconditionally_download: bool, } -const PER_SAMPLE_WAIT_MS: u64 = 1000; - impl PoxSyncWatchdog { pub fn new( config: &Config, watchdog_comms: PoxSyncWatchdogComms, ) -> Result { - let mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let chainstate_path = config.get_chainstate_path_str(); let burnchain_poll_time = config.burnchain.poll_time_secs; - let download_timeout = config.connection_options.timeout; - let max_samples = config.node.pox_sync_sample_secs; let unconditionally_download = config.node.pox_sync_sample_secs == 0; - let marf_opts = config.node.get_marf_opts(); - - let (chainstate, _) = - match StacksChainState::open(mainnet, chain_id, &chainstate_path, Some(marf_opts)) { - Ok(cs) => cs, - Err(e) => { - return Err(format!( - "Failed to open chainstate at '{chainstate_path}': {e:?}" - )); - } - }; Ok(PoxSyncWatchdog { unconditionally_download, - new_attachable_blocks: VecDeque::new(), - new_processed_blocks: VecDeque::new(), - last_attachable_query: 0, - last_processed_query: 0, - max_samples, - max_staging: 10, - watch_start_ts: 0, - last_block_processed_ts: 0, - estimated_block_download_time: download_timeout as f64, - estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, - steady_state_resync_ts: 0, - chainstate, relayer_comms: watchdog_comms, }) } @@ -208,39 +128,9 @@ impl PoxSyncWatchdog { self.relayer_comms.clone() } - /// How many recently-added Stacks blocks are in an attachable state, up to $max_staging? 
- fn count_attachable_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_attachable_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_attachable_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_attachable_query = get_epoch_time_secs(); - Ok(cnt) - } - - /// How many recently-processed Stacks blocks are there, up to $max_staging? - /// ($max_staging is necessary to limit the runtime of this method, since the underlying SQL - /// uses COUNT(*), which in Sqlite is a _O(n)_ operation for _n_ rows) - fn count_processed_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_processed_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_processed_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_processed_query = get_epoch_time_secs(); - Ok(cnt) - } - /// Are we in the initial burnchain block download? i.e. is the burn tip snapshot far enough away /// from the burnchain height that we should be eagerly downloading snapshots? - pub fn infer_initial_burnchain_block_download( + fn infer_initial_burnchain_block_download( burnchain: &Burnchain, last_processed_height: u64, burnchain_height: u64, @@ -261,182 +151,23 @@ impl PoxSyncWatchdog { ibd } - /// Calculate the first derivative of a list of points - fn derivative(sample_list: &VecDeque) -> Vec { - let mut deltas = vec![]; - let mut prev = 0; - for (i, sample) in sample_list.iter().enumerate() { - if i == 0 { - prev = *sample; - continue; - } - let delta = *sample - prev; - prev = *sample; - deltas.push(delta); - } - deltas - } - - /// Is a derivative approximately flat, with a maximum absolute deviation from 0? 
- /// Return whether or not the sample is mostly flat, and how many points were over the given - /// error bar in either direction. - fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { - let mut total_deviates = 0; - let mut ret = true; - for d in deriv.iter() { - if d.abs() > error { - total_deviates += 1; - ret = false; - } - } - (ret, total_deviates) - } - - /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &[i64]) -> f64 { - // take average with low and high pass - let mut min = i64::MAX; - let mut max = i64::MIN; - for s in samples.iter() { - if *s < 0 { - // nonsensical result (e.g. due to clock drift?) - continue; - } - if *s < min { - min = *s; - } - if *s > max { - max = *s; - } - } - - let mut count = 0; - let mut sum = 0; - for s in samples.iter() { - if *s < 0 { - // nonsensical result - continue; - } - if *s == min { - continue; - } - if *s == max { - continue; - } - count += 1; - sum += *s; - } - - if count == 0 { - // no viable samples - 1.0 - } else { - (sum as f64) / (count as f64) - } - } - - /// estimate how long a block remains in an unprocessed state - fn estimate_block_process_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_wait_times = - StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) - .expect("BUG: failed to query chainstate block-processing times"); - - 
PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) - } - - /// estimate how long a block takes to download - fn estimate_block_download_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_download_times = StacksChainState::measure_block_download_time( - chainstate.db(), - start_height, - end_height, - ) - .expect("BUG: failed to query chainstate block-download times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_download_times) - } - - /// Reset internal state. Performed when it's okay to begin syncing the burnchain. - /// Updates estimate for block-processing time and block-downloading time. - fn reset(&mut self, burnchain: &Burnchain, tip_height: u64) { - // find the average (with low/high pass filter) time a block spends in the DB without being - // processed, during this reward cycle - self.estimated_block_process_time = - PoxSyncWatchdog::estimate_block_process_time(&self.chainstate, burnchain, tip_height); - - // find the average (with low/high pass filter) time a block spends downloading - self.estimated_block_download_time = - PoxSyncWatchdog::estimate_block_download_time(&self.chainstate, burnchain, tip_height); - - debug!( - "Estimated block download time: {}s. 
Estimated block processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time - ); - - self.new_attachable_blocks.clear(); - self.new_processed_blocks.clear(); - self.last_block_processed_ts = 0; - self.watch_start_ts = 0; - self.steady_state_resync_ts = 0; - } - - /// Wait until all of the Stacks blocks for the given reward cycle are seemingly downloaded and - /// processed. Do so by watching the _rate_ at which attachable Stacks blocks arrive and get - /// processed. - /// Returns whether or not we're still in the initial block download -- i.e. true if we're - /// still downloading burnchain blocks, or we haven't reached steady-state block-processing. + /// Wait until the next PoX anchor block arrives. + /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and + /// testnet. + /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, burnchain: &Burnchain, burnchain_tip: &BurnchainTip, // this is the highest burnchain snapshot we've sync'ed to burnchain_height: u64, // this is the absolute burnchain block height - num_sortitions_in_last_cycle: u64, - ) -> Result { - if self.watch_start_ts == 0 { - self.watch_start_ts = get_epoch_time_secs(); - } - if self.steady_state_resync_ts == 0 { - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - } + ) -> Result<(bool, u64), burnchain_error> { + let burnchain_rc = burnchain + .block_height_to_reward_cycle(burnchain_height) + .expect("FATAL: burnchain height is before system start"); + + let sortition_rc = burnchain + .block_height_to_reward_cycle(burnchain_tip.block_snapshot.block_height) + .expect("FATAL: sortition height is before system start"); let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( burnchain, @@ -444,220 +175,23 @@ impl PoxSyncWatchdog { burnchain_height, ); - // unconditionally download the first reward cycle - if 
burnchain_tip.block_snapshot.block_height - < burnchain.first_block_height + (burnchain.pox_constants.reward_cycle_length as u64) - { - debug!("PoX watchdog in first reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); + let max_sync_height = if sortition_rc < burnchain_rc { + burnchain + .reward_cycle_to_block_height(sortition_rc + 1) + .min(burnchain_height) + } else { + burnchain_tip + .block_snapshot + .block_height + .max(burnchain_height) + }; + self.relayer_comms.set_ibd(ibbd); + if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - - return Ok(ibbd); - } - - if self.unconditionally_download { - debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - - let mut waited = false; - if ibbd { - // we are far behind the burnchain tip (i.e. not in the last reward cycle), - // so make sure the downloader knows about blocks it doesn't have yet so we can go and - // fetch its blocks before proceeding. 
- if num_sortitions_in_last_cycle > 0 { - debug!("PoX watchdog: Wait for at least one inventory state-machine pass..."); - self.relayer_comms.wait_for_inv_sync_pass(SYNC_WAIT_SECS)?; - waited = true; - } else { - debug!("PoX watchdog: In initial block download, and no sortitions to consider in this reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - } else { - debug!("PoX watchdog: not in initial burn block download, so not waiting for an inventory state-machine pass"); } - if burnchain_tip.block_snapshot.block_height - + (burnchain.pox_constants.reward_cycle_length as u64) - >= burnchain_height - { - // unconditionally download if we're within the last reward cycle (after the poll timeout) - if !waited { - debug!( - "PoX watchdog in last reward cycle -- sync after {} seconds", - self.steady_state_burnchain_sync_interval - ); - self.relayer_comms.set_ibd(ibbd); - - self.relayer_comms - .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - } else { - debug!("PoX watchdog in last reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - } - return Ok(ibbd); - } - - // have we reached steady-state behavior? i.e. have we stopped processing both burnchain - // and Stacks blocks? 
- let mut steady_state = false; - debug!("PoX watchdog: Wait until chainstate reaches steady-state block-processing..."); - - let ibbd = loop { - if !self.relayer_comms.should_keep_running() { - break false; - } - let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( - burnchain, - burnchain_tip.block_snapshot.block_height, - burnchain_height, - ); - - let expected_first_block_deadline = - self.watch_start_ts + (self.estimated_block_download_time as u64); - let expected_last_block_deadline = self.last_block_processed_ts - + (self.estimated_block_download_time as u64) - + (self.estimated_block_process_time as u64); - - match ( - self.count_attachable_stacks_blocks(), - self.count_processed_stacks_blocks(), - ) { - (Ok(num_available), Ok(num_processed)) => { - self.new_attachable_blocks.push_back(num_available as i64); - self.new_processed_blocks.push_back(num_processed as i64); - - if (self.new_attachable_blocks.len() as u64) > self.max_samples { - self.new_attachable_blocks.pop_front(); - } - if (self.new_processed_blocks.len() as u64) > self.max_samples { - self.new_processed_blocks.pop_front(); - } - - if (self.new_attachable_blocks.len() as u64) < self.max_samples - || (self.new_processed_blocks.len() as u64) < self.max_samples - { - // still getting initial samples - if self.new_processed_blocks.len() % 10 == 0 { - debug!( - "PoX watchdog: Still warming up: {} out of {} samples...", - &self.new_attachable_blocks.len(), - &self.max_samples - ); - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && get_epoch_time_secs() < expected_first_block_deadline - { - // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && 
(self.new_attachable_blocks.len() as u64) < self.max_samples - && self.watch_start_ts - + self.max_samples - + self.steady_state_burnchain_sync_interval - * (burnchain.stable_confirmations as u64) - < get_epoch_time_secs() - { - debug!( - "PoX watchdog: could not calculate {} samples in {} seconds. Assuming suspend/resume, or assuming load is too high.", - self.max_samples, - self.max_samples + self.steady_state_burnchain_sync_interval * (burnchain.stable_confirmations as u64) - ); - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - - self.watch_start_ts = get_epoch_time_secs(); - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - continue; - } - - // take first derivative of samples -- see if the download and processing rate has gone to 0 - let attachable_delta = PoxSyncWatchdog::derivative(&self.new_attachable_blocks); - let processed_delta = PoxSyncWatchdog::derivative(&self.new_processed_blocks); - - let (flat_attachable, attachable_deviants) = - PoxSyncWatchdog::is_mostly_flat(&attachable_delta, 0); - let (flat_processed, processed_deviants) = - PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - - debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time); - - if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { - // we're flat-lining -- this may be the end of this cycle - self.last_block_processed_ts = get_epoch_time_secs(); - } - - if self.last_block_processed_ts > 0 - && get_epoch_time_secs() < expected_last_block_deadline - { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, self.estimated_block_process_time); - 
sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if ibbd { - // doing initial burnchain block download right now. - // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor - // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); - - if !flat_attachable || !flat_processed { - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - } else { - let now = get_epoch_time_secs(); - if now < self.steady_state_resync_ts { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } else { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state, but ready burnchain synchronization as of {}", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - } - } - } - (err_attach, err_processed) => { - // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); - panic!(); - } - }; - - if ibbd || !steady_state { - debug!("PoX watchdog: Wait for at least one downloader state-machine pass before resetting..."); - self.relayer_comms.wait_for_download_pass(SYNC_WAIT_SECS)?; - } else { - debug!("PoX watchdog: in steady-state, so not waiting for download pass"); - } - - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - break ibbd; - }; - - let ret = ibbd || !steady_state; - self.relayer_comms.set_ibd(ret); - Ok(ret) + Ok((ibbd, max_sync_height)) } } diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 
3e69ac18cc..ef193f56f7 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -7,12 +7,12 @@ use stacks::chainstate::burn::operations::BlockstackOperationType::{ LeaderBlockCommit, LeaderKeyRegister, }; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::InitialBalance; use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 1964612bd4..e555b6a8aa 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -13,6 +13,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -22,7 +23,6 @@ use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRF use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 55d3ee0b7b..d50cac0117 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::miner::{ }; use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute 
as execute; +use stacks::config::{Config, InitialBalance}; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ @@ -35,7 +36,6 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; use crate::burnchains::bitcoin_regtest_controller::UTXO; -use crate::config::{Config, InitialBalance}; use crate::neon::RunLoopCounter; use crate::operations::BurnchainOpSigner; use crate::stacks_common::address::AddressHashMode; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3bf521d7cb..493fb36fcd 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -8,6 +8,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -17,7 +18,6 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; use super::neon_integrations::get_account; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon_node::StacksNode; use crate::stacks_common::types::Address; use crate::stacks_common::util::hash::bytes_to_hex; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 92b6a97b8f..085e5a49cb 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -18,10 +18,10 @@ use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use 
stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::*; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 5e4ff9852a..8780d08012 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -26,6 +26,7 @@ use stacks::chainstate::stacks::boot::RawRewardSetEntry; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; @@ -35,7 +36,6 @@ use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index bedf8721cb..e840b0fcd3 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -17,11 +17,11 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::InitialBalance; use 
crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 79c3394352..7f893835d1 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::{ }; use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; +use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, @@ -40,7 +41,6 @@ use super::{ make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, SK_3, }; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 281feae99a..13923a847a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -57,6 +57,7 @@ use stacks::chainstate::stacks::{ TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, MAX_BLOCK_LEN, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, @@ -95,7 +96,6 @@ use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, 
TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -3404,7 +3404,7 @@ fn vote_for_aggregate_key_burn_op() { /// This test boots a follower node using the block downloader #[test] #[ignore] -fn follower_bootup() { +fn follower_bootup_simple() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -9422,6 +9422,178 @@ fn v3_blockbyheight_api_endpoint() { run_loop_thread.join().unwrap(); } +/// Verify that lockup events are attached to a phantom tx receipt +/// if the block does not have a coinbase tx +#[test] +#[ignore] +fn nakamoto_lockup_events() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::new(); + let signer_addr = tests::to_addr(&signer_sk); + let _signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * 100, + ); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // only subscribe to the block proposal events + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + 
btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + blind_signer(&conf, &signers, proposals_submitted); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + info!("------------------------- Setup finished, run test -------------------------"); + + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let get_stacks_height = || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + tip.stacks_block_height + }; + let initial_block_height = get_stacks_height(); + + // This matches the data in `stx-genesis/chainstate-test.txt` + // Recipient: ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D + let unlock_recipient = "ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D"; + let unlock_height = 35_u64; + let interims_to_mine = unlock_height - initial_block_height; 
+ + info!( + "----- Mining to unlock height -----"; + "unlock_height" => unlock_height, + "initial_height" => initial_block_height, + "interims_to_mine" => interims_to_mine, + ); + + // submit a tx so that the miner will mine an extra stacks block + let mut sender_nonce = 0; + + for _ in 0..interims_to_mine { + let height_before = get_stacks_height(); + info!("----- Mining interim block -----"; + "height" => %height_before, + "nonce" => %sender_nonce, + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + + wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap(); + } + + let blocks = test_observer::get_blocks(); + let block = blocks.last().unwrap(); + assert_eq!( + block.get("block_height").unwrap().as_u64().unwrap(), + unlock_height + ); + + let events = block.get("events").unwrap().as_array().unwrap(); + let mut found_event = false; + for event in events { + let mint_event = event.get("stx_mint_event"); + if mint_event.is_some() { + found_event = true; + let mint_event = mint_event.unwrap(); + let recipient = mint_event.get("recipient").unwrap().as_str().unwrap(); + assert_eq!(recipient, unlock_recipient); + let amount = mint_event.get("amount").unwrap().as_str().unwrap(); + assert_eq!(amount, "12345678"); + let txid = event.get("txid").unwrap().as_str().unwrap(); + assert_eq!( + txid, + "0x63dd5773338782755e4947a05a336539137dfe13b19a0eac5154306850aca8ef" + ); + } + } + assert!(found_event); + + info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. 
diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index e3d592d23c..fc363d3db8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5,7 +5,6 @@ use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; -use clarity::consts::BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -39,8 +38,9 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli::{self, StacksChainConfig}; +use stacks::cli; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -83,7 +83,6 @@ use super::{ SK_2, SK_3, }; use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; -use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; @@ -199,13 +198,13 @@ pub mod test_observer { use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::StacksTransaction; use stacks::codec::StacksMessageCodec; + use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util::hash::hex_bytes; use stacks_common::types::chainstate::StacksBlockId; use warp::Filter; use {tokio, warp}; - use crate::config::{EventKeyType, EventObserverConfig}; use 
crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent}; use crate::Config; @@ -12691,22 +12690,9 @@ fn mock_miner_replay() { let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); let db_path = format!("{}/neon", conf.node.working_dir); let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; - let SortitionDB { - first_block_height, - first_burn_header_hash, - .. - } = *btc_regtest_controller.sortdb_mut(); - let replay_config = StacksChainConfig { - chain_id: conf.burnchain.chain_id, - first_block_height, - first_burn_header_hash, - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: burnchain_config.pox_constants, - epochs: conf.burnchain.epochs.expect("Missing `epochs` in config"), - }; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args, Some(&replay_config)); + cli::command_replay_mock_mining(&args, Some(&conf)); // ---------- Test finished, clean up ---------- diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index ff128d0a03..432b990667 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -44,6 +44,7 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -55,14 +56,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::tests::TestFlag; use 
stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, RunLoopCounter, TestFlag}; +use crate::neon::{Counters, RunLoopCounter}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -90,7 +91,7 @@ pub struct RunningNodes { pub nakamoto_blocks_mined: RunLoopCounter, pub nakamoto_blocks_rejected: RunLoopCounter, pub nakamoto_blocks_signer_pushed: RunLoopCounter, - pub nakamoto_test_skip_commit_op: TestFlag, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -310,7 +311,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height - && blocks_mined > mined_before) + && (!use_nakamoto_blocks_mined || blocks_mined > mined_before)) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index df20fc0087..5200883667 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,8 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, + BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, + SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession, VERSION_STRING}; use 
stacks::address::AddressHashMode; @@ -36,6 +37,7 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; @@ -58,7 +60,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{ +use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; @@ -67,8 +69,7 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::config::{EventKeyType, EventObserverConfig}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; @@ -279,7 +280,7 @@ impl SignerTest { // could be other miners mining blocks. 
let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - self.mine_nakamoto_block(Duration::from_secs(30)); + self.mine_nakamoto_block(Duration::from_secs(30), false); wait_for(30, || { Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) @@ -288,12 +289,17 @@ impl SignerTest { } // Only call after already past the epoch 3.0 boundary - fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { + fn mine_and_verify_confirmed_naka_block( + &mut self, + timeout: Duration, + num_signers: usize, + use_nakamoto_blocks_mined: bool, + ) { info!("------------------------- Try mining one block -------------------------"); let reward_cycle = self.get_current_reward_cycle(); - self.mine_nakamoto_block(timeout); + self.mine_nakamoto_block(timeout, use_nakamoto_blocks_mined); // Verify that the signers accepted the proposed block, sending back a validate ok response let proposed_signer_signature_hash = self @@ -376,11 +382,11 @@ impl SignerTest { let total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); for _ in 0..total_nmb_blocks_to_mine { - self.mine_and_verify_confirmed_naka_block(timeout, num_signers); + self.mine_and_verify_confirmed_naka_block(timeout, num_signers, false); } } - /// Propose an invalid block to the signers + /// Propose a block to the signers fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = @@ -390,6 +396,7 @@ impl SignerTest { .btc_regtest_controller .get_headers_height(); let reward_cycle = self.get_current_reward_cycle(); + let signer_signature_hash = block.header.signer_signature_hash(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ 
-406,7 +413,7 @@ impl SignerTest { let mut version = 0; let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; let start = Instant::now(); - debug!("Proposing invalid block to signers"); + debug!("Proposing block to signers: {signer_signature_hash}"); while !accepted { let mut chunk = StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); @@ -488,6 +495,7 @@ fn block_proposal_rejection() { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE // the block is submitted to the node for validation. @@ -588,7 +596,7 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] @@ -820,14 +828,8 @@ fn reloads_signer_set_in() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr, send_amt + send_fee)], - |_config| {}, - |_| {}, - None, - None, - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); setup_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -951,7 +953,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); }, |config| { config.miner.tenure_cost_limit_per_block_percentage = None; @@ -1331,7 +1333,7 @@ 
fn bitcoind_forking_test() { for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1403,7 +1405,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1479,7 +1481,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let test_end_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2415,10 +2417,7 @@ fn retry_on_rejection() { .map(StacksPublicKey::from_private) .take(num_signers) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); let proposals_before = signer_test .running_nodes @@ -2465,10 +2464,7 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(vec![]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); loop { let blocks_mined = signer_test .running_nodes @@ -2512,7 +2508,7 @@ fn signers_broadcast_signed_blocks() { .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); wait_for(30, || { let blocks_mined = signer_test @@ -2579,8 +2575,8 @@ fn signers_broadcast_signed_blocks() { #[test] #[ignore] -/// This test 
verifies that a miner will produce a TenureExtend transaction after the idle timeout is reached. -fn tenure_extend_after_idle() { +/// This test verifies that a miner will produce a TenureExtend transaction after the signers' idle timeout is reached. +fn tenure_extend_after_idle_signers() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2613,7 +2609,7 @@ fn tenure_extend_after_idle() { signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Waiting for a tenure extend ----"); @@ -2628,6 +2624,173 @@ fn tenure_extend_after_idle() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction after the miner's idle timeout +/// even if they do not see the signers' tenure extend timestamp responses. +fn tenure_extend_after_idle_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = idle_timeout + Duration::from_secs(10); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + 
signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Start a new tenure but ignore block signatures so no timestamps are recorded ----"); + let tip_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + TEST_IGNORE_SIGNERS.set(true); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + let tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + Ok(tip_height > tip_height_before) + }, + ) + .expect("Failed to mine the tenure change block"); + + // Now, wait for a block with a tenure change due to the new block + wait_for(30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::BlockFound, + )) + }) + .expect("Timed out waiting for a block with a tenure change"); + + info!("---- Waiting for a tenure extend ----"); + + TEST_IGNORE_SIGNERS.set(false); + // Now, wait for a block with a tenure extend + wait_for(miner_idle_timeout.as_secs() + 20, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test verifies that a miner that attempts to produce a tenure extend too early will be rejected by the signers, +/// but will eventually succeed after the signers' idle timeout has passed. 
+fn tenure_extend_succeeds_after_rejected_attempt() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a rejected tenure extend ----"); + // Now, wait for a block with a tenure extend proposal from the miner, but ensure it is rejected. 
+ wait_for(30, || { + let block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.get_tenure_tx_payload().unwrap().cause + == TenureChangeCause::Extended + { + return Some(proposal.block); + } + } + None + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejected)) = message { + if block.header.signer_signature_hash() == rejected.signer_signature_hash { + return Some(rejected.signature); + } + } + None + }) + .collect::>(); + Ok(signatures.len() >= num_signers * 7 / 10) + }) + .expect("Test timed out while waiting for a rejected tenure extend"); + + info!("---- Waiting for an accepted tenure extend ----"); + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Test timed out while waiting for an accepted tenure extend"); + signer_test.shutdown(); +} + #[test] #[ignore] /// Verify that Nakamoto blocks that don't modify the tenure's execution cost @@ -2675,7 +2838,7 @@ fn stx_transfers_dont_effect_idle_timeout() { "info_height" => info_before.stacks_tip_height, "blocks_before" => blocks_before, ); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Getting current idle timeout ----"); @@ -2813,7 +2976,7 @@ fn idle_tenure_extend_active_mining() { // Add a delay to the block validation 
process TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(3); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Getting current idle timeout ----"); @@ -2881,7 +3044,7 @@ fn idle_tenure_extend_active_mining() { info!("----- Submitted deploy txs, mining BTC block -----"); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); let mut last_response = signer_test.get_latest_block_response(slot_id); // Make multiple tenures that get extended through idle timeouts @@ -3994,7 +4157,7 @@ fn signer_set_rollover() { send_amt, ); submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); + signer_test.mine_nakamoto_block(short_timeout, true); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let block_sighash = mined_block.signer_signature_hash; let signer_signatures = mined_block.signer_signature; @@ -4068,7 +4231,7 @@ fn signer_set_rollover() { }) .expect("Timed out waiting for stacking txs to be mined"); - signer_test.mine_nakamoto_block(short_timeout); + signer_test.mine_nakamoto_block(short_timeout, true); let next_reward_cycle = reward_cycle.saturating_add(1); @@ -4121,7 +4284,7 @@ fn signer_set_rollover() { send_amt, ); submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); + signer_test.mine_nakamoto_block(short_timeout, true); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); info!("---- Verifying that the new signers signed the block -----"); @@ -4308,7 +4471,7 @@ fn duplicate_signers() { info!("------------------------- Try mining one block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Read all 
`BlockResponse::Accepted` messages -------------------------"); @@ -5193,10 +5356,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .cloned() .take(num_signers / 2 + num_signers % 2) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // Make a new stacks transaction to create a different block signature, but make sure to propose it // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted @@ -5229,10 +5389,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); let transfer_tx = make_stacks_transfer( &sender_sk, @@ -5388,10 +5545,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .cloned() .take(num_signers * 3 / 10) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -5456,10 +5610,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer( @@ -5615,10 +5766,7 @@ fn 
reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5696,10 +5844,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new()); wait_for(short_timeout, || { let info_after = signer_test .stacks_client @@ -5842,10 +5987,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -6064,7 +6206,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Forcing miner to ignore block responses for block N+1"); TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_PAUSE_BLOCK_BROADCAST.set(true); test_observer::clear(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -6127,7 +6269,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(signatures.len() == num_signers) + Ok(signatures.len() >= num_signers * 7 / 10) }) .expect("Test timed out while waiting for signers signatures for first block proposal"); let block = block.unwrap(); @@ -6191,7 +6333,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. 
"); TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + TEST_PAUSE_BLOCK_BROADCAST.set(false); // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); @@ -6217,7 +6359,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(block_rejections.len() == num_signers) + Ok(block_rejections.len() >= num_signers * 7 / 10) }) .expect("FAIL: Timed out waiting for block proposal rejections"); @@ -6536,10 +6678,7 @@ fn continue_after_fast_block_no_sortition() { // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(ignoring_signers.clone()); info!("------------------------- Submit Miner 2 Block Commit -------------------------"); let rejections_before = signer_test @@ -6653,10 +6792,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); info!("------------------------- Wait for Miner B's Block N -------------------------"); // wait for the new block to be processed @@ -6820,7 +6956,7 @@ fn continue_after_tenure_extend() { signer_test.boot_to_epoch_3(); info!("------------------------- Mine Normal Tenure -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Extend Tenure -------------------------"); signer_test @@ -7315,13 +7451,12 @@ fn block_commit_delay() { 
info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], |config| { // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.block_proposal_timeout = Duration::from_secs(600); }, |config| { // Set the block commit delay to 10 minutes to ensure no block commit is sent @@ -7361,10 +7496,7 @@ fn block_commit_delay() { .iter() .map(StacksPublicKey::from_private) .collect::>(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(all_signers); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; @@ -7399,10 +7531,7 @@ fn block_commit_delay() { .load(Ordering::SeqCst); info!("------------------------- Resume Signing -------------------------"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // Wait for a block to be mined wait_for(60, || { @@ -7466,7 +7595,7 @@ fn block_validation_response_timeout() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Test Block Validation Stalled -------------------------"); TEST_VALIDATE_STALL.lock().unwrap().replace(true); let validation_stall_start = Instant::now(); @@ -7514,6 +7643,7 @@ fn block_validation_response_timeout() { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); 
let info_before = get_chain_info(&signer_test.running_nodes.conf); // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall @@ -7583,7 +7713,7 @@ fn block_validation_response_timeout() { ); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); let info_before = info_after; - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); wait_for(30, || { let info = get_chain_info(&signer_test.running_nodes.conf); @@ -9029,3 +9159,372 @@ fn tenure_extend_after_2_bad_commits() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test the block_proposal_max_age_secs signer configuration option. It should reject blocks that are +/// invalid but within the max age window, otherwise it should simply drop the block without further processing. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// +/// Test Execution: +/// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. +/// An invalid block proposal with a recent timestamp is forcibly written to the miner's slot to simulate the miner proposing a block. +/// The signers process the invalid block and broadcast a block response rejection to the respective .signers-XXX-YYY contract. +/// A second block proposal with an outdated timestamp is then submitted to the miner's slot to simulate the miner proposing a very old block. +/// The test confirms no further block rejection response is submitted to the .signers-XXX-YYY contract. +/// +/// Test Assertion: +/// - Each signer successfully rejects the recent invalid block proposal. +/// - No signer submits a block proposal response for the outdated block proposal. 
+/// - The stacks tip does not advance +fn block_proposal_max_age_rejections() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + config.block_proposal_max_age_secs = 30; + }, + |_| {}, + None, + None, + ); + signer_test.boot_to_epoch_3(); + let short_timeout = Duration::from_secs(30); + + info!("------------------------- Send Block Proposal To Signers -------------------------"); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + // First propose a stale block that is older than the block_proposal_max_age_secs + block.header.timestamp = get_epoch_time_secs().saturating_sub( + signer_test.signer_configs[0] + .block_proposal_max_age_secs + .saturating_add(1), + ); + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(block.clone(), short_timeout); + + // Next propose a recent invalid block + block.header.timestamp = get_epoch_time_secs(); + let block_signer_signature_hash_2 = block.header.signer_signature_hash(); + signer_test.propose_block(block, short_timeout); + + info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify the signers rejected only the SECOND block proposal. The first was not even processed. 
+ wait_for(30, || { + let rejections: Vec<_> = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .map(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return None; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) => { + assert_eq!( + signer_signature_hash, block_signer_signature_hash_2, + "We should only reject the second block" + ); + Some(signature) + } + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) => { + assert_ne!( + signer_signature_hash, block_signer_signature_hash_1, + "We should never have accepted block" + ); + None + } + _ => None, + } + }) + .collect(); + Ok(rejections.len() > num_signers * 7 / 10) + }) + .expect("Timed out waiting for block rejections"); + + info!("------------------------- Test Peer Info-------------------------"); + assert_eq!(info_before, get_chain_info(&signer_test.running_nodes.conf)); + + info!("------------------------- Test Shutdown-------------------------"); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers do not mark a block as globally accepted if it was not announced by the node. +/// This will simulate this case via testing flags, and ensure that a block can be reorged across tenure +/// boundaries now (as it is only marked locally accepted and no longer gets marked globally accepted +/// by simply seeing the threshold number of signatures). +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// 1. The node mines 1 stacks block N (all signers sign it). +/// 2. 
<30% of signers are configured to auto reject any block proposals, broadcast of new blocks are skipped, and miners are configured to ignore signers responses. +/// 3. The node mines 1 stacks block N+1 (all signers sign it, but one which rejects it) but eventually all mark the block as locally accepted. +/// 4. A new tenure starts and the miner attempts to mine a new sister block N+1' (as it does not see the threshold number of signatures or any block push from signers). +/// 5. The signers accept this sister block as a valid reorg and the node advances to block N+1'. +/// +/// Test Assertion: +/// - All signers accepted block N. +/// - Less than 30% of the signers rejected block N+1. +/// - All signers accept block N+1' as a valid reorg. +/// - The node advances to block N+1' +fn global_acceptance_depends_on_block_announcement() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 4; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + 
signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + test_observer::clear(); + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is accepted by the node, but not announced. 
+ let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(true); + TEST_IGNORE_SIGNERS.set(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+1"); + + let mut proposed_block = None; + let start_time = Instant::now(); + while proposed_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + proposed_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + + // Even though one of the signers rejected the block, it will eventually accept the block as it sees the 70% threshold of signatures + signer_test + .wait_for_block_acceptance( + short_timeout, + &proposed_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1 by all signers"); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + + 
TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_IGNORE_SIGNERS.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + test_observer::clear(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }, + ) + .unwrap(); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut sister_block = None; + let start_time = Instant::now(); + while sister_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + sister_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_after.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let sister_block = sister_block.expect("Failed to find proposed sister block within 30s"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &sister_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1' by all signers"); + + // Assert the block was mined and the tip has changed. 
+ assert_eq!( + info_after.stacks_tip_height, + sister_block.header.chain_length + ); + assert_eq!(info_after.stacks_tip, sister_block.header.block_hash()); + assert_eq!( + info_after.stacks_tip_consensus_hash, + sister_block.header.consensus_hash + ); + assert_eq!( + sister_block.header.chain_length, + proposed_block.header.chain_length + ); + assert_ne!(sister_block, proposed_block); +} diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index c68b477b47..6212dd6fcc 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -18,6 +18,7 @@ use std::{env, thread}; use clarity::vm::types::QualifiedContractIdentifier; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -25,7 +26,6 @@ use {reqwest, serde_json}; use super::bitcoin_regtest::BitcoinCoreController; use crate::burnchains::BurnchainController; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::neon_integrations::{ neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, };