diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 04e74f94e8..41e14b4458 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -142,6 +142,7 @@ jobs: - tests::nakamoto_integrations::v3_signer_api_endpoint - tests::nakamoto_integrations::test_shadow_recovery - tests::nakamoto_integrations::signer_chainstate + - tests::nakamoto_integrations::sip029_coinbase_change - tests::nakamoto_integrations::clarity_cost_spend_down - tests::nakamoto_integrations::v3_blockbyheight_api_endpoint # TODO: enable these once v1 signer is supported by a new nakamoto epoch diff --git a/CHANGELOG.md b/CHANGELOG.md index 046ca667a0..f383cb596c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,8 +9,19 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added +- New RPC endpoints + - `/v2/clarity/marf/:marf_key_hash` + - `/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key` + +### Changed + +## [3.0.0.0.4] + +### Added + ### Changed +- Use the same burn view loader in both block validation and block processing ## [3.0.0.0.3] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8d6c3aabba..b8c63abc2c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -579,7 +579,7 @@ _Do_ document things that are not clear, e.g.: Keep in mind that better variable names can reduce the need for comments, e.g.: - `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks +- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to explain that the inputs are microblocks - `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment # Licensing and contributor license agreement diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 6a8f64f1b2..d563dce6e8 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -148,7 +148,8 @@ pub fn run_analysis( | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db, build_type_map) } StacksEpochId::Epoch10 => { diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index 800347d0f0..b4f6557c2e 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -52,7 +52,8 @@ impl FunctionType { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.check_args_2_1(accounting, args, clarity_version), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => { return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) } @@ -75,7 +76,8 @@ impl FunctionType { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => { diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 
b3ee746fcf..430d707b02 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -775,7 +775,8 @@ impl LimitedCostTracker { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => COSTS_3_NAME.to_string(), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => COSTS_3_NAME.to_string(), }; Ok(result) } diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 50715fd98f..4f6f3f7781 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -23,7 +23,7 @@ use stacks_common::consts::{ }; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, - VRFSeed, + TrieHash, VRFSeed, }; use stacks_common::types::{Address, StacksEpoch as GenericStacksEpoch, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; @@ -76,6 +76,68 @@ pub enum StoreType { PoxUnlockHeight = 0x15, } +impl TryFrom<&str> for StoreType { + type Error = String; + + fn try_from(value: &str) -> core::result::Result<Self, Self::Error> { + use self::StoreType::*; + + let hex_value = u8::from_str_radix(value, 10).map_err(|e| e.to_string())?; + match hex_value { + 0x00 => Ok(DataMap), + 0x01 => Ok(Variable), + 0x02 => Ok(FungibleToken), + 0x03 => Ok(CirculatingSupply), + 0x04 => Ok(NonFungibleToken), + 0x05 => Ok(DataMapMeta), + 0x06 => Ok(VariableMeta), + 0x07 => Ok(FungibleTokenMeta), + 0x08 => Ok(NonFungibleTokenMeta), + 0x09 => Ok(Contract), + 0x10 => Ok(SimmedBlock), + 0x11 => Ok(SimmedBlockHeight), + 0x12 => Ok(Nonce), + 0x13 => Ok(STXBalance), + 0x14 => Ok(PoxSTXLockup), + 0x15 => Ok(PoxUnlockHeight), + _ => Err("Invalid StoreType".into()), + } + } +} + +pub enum ContractDataVarName { + Contract, + ContractSize, + ContractSrc, + ContractDataSize, +} + +impl ContractDataVarName { + pub fn as_str(&self) -> &str { + match self { + Self::Contract => "contract", + Self::ContractSize => "contract-size", + Self::ContractSrc => "contract-src", + Self::ContractDataSize => "contract-data-size", + } + } +} + +impl TryFrom<&str> for ContractDataVarName { + type Error = String; + + fn try_from(value: &str) -> core::result::Result<Self, Self::Error> { + use self::ContractDataVarName::*; + match value { + "contract" => Ok(Contract), + "contract-size" => Ok(ContractSize), + "contract-src" => Ok(ContractSrc), + "contract-data-size" => Ok(ContractDataSize), + _ => Err("Invalid ContractDataVarName".into()), + } + } +} + pub struct ClarityDatabase<'a> { pub store: RollbackWrapper<'a>, headers_db: &'a dyn HeadersDB, @@ -465,6 +527,13 @@ impl<'a> ClarityDatabase<'a> { self.store.get_data::<T>(key) } + pub fn get_data_by_hash<T>(&mut self, hash: &TrieHash) -> Result<Option<T>> + where + T: ClarityDeserializable<T>, + { + self.store.get_data_by_hash::<T>(hash) + } + pub fn put_value(&mut self, key: &str, value: Value, epoch: &StacksEpochId) -> Result<()> { self.put_value_with_size(key, value, epoch)?; Ok(()) } @@ -522,6 +591,16 @@ impl<'a> ClarityDatabase<'a> { self.store.get_data_with_proof(key) } + pub fn get_data_with_proof_by_hash<T>( + &mut self, + hash: &TrieHash, + ) -> Result<Option<(T, Vec<u8>)>> + where + T: ClarityDeserializable<T>, + { + self.store.get_data_with_proof_by_hash(hash) + } + pub fn make_key_for_trip( contract_identifier: &QualifiedContractIdentifier, data: StoreType, @@ -559,12 +638,18 @@ impl<'a> ClarityDatabase<'a> { self.store .prepare_for_contract_metadata(contract_identifier, hash)?; // insert contract-size - let key =
ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); self.insert_metadata(contract_identifier, &key, &(contract_content.len() as u64))?; // insert contract-src if STORE_CONTRACT_SRC_INTERFACE { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSrc.as_str(), + ); self.insert_metadata(contract_identifier, &key, &contract_content.to_string())?; } Ok(()) } @@ -574,7 +659,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Option<String> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSrc.as_str(), + ); self.fetch_metadata(contract_identifier, &key) .ok() .flatten() @@ -683,7 +771,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result<u64> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { "Failed to read non-consensus contract metadata, even though contract exists in MARF." .into()) })?; - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractDataSize.as_str(), + ); let data_size: u64 = self .fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { @@ -710,7 +804,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, data_size: u64, ) -> Result<()> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)?
.ok_or_else(|| { @@ -720,7 +817,10 @@ impl<'a> ClarityDatabase<'a> { })?; contract_size.cost_overflow_add(data_size)?; - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractDataSize.as_str(), + ); self.insert_metadata(contract_identifier, &key, &data_size)?; Ok(()) } @@ -730,13 +830,19 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, contract: Contract, ) -> Result<()> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); self.insert_metadata(contract_identifier, &key, &contract)?; Ok(()) } pub fn has_contract(&mut self, contract_identifier: &QualifiedContractIdentifier) -> bool { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); self.store.has_metadata_entry(contract_identifier, &key) } @@ -744,7 +850,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result<Contract> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); let mut data: Contract = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| InterpreterError::Expect( "Failed to read non-consensus contract metadata, even though contract exists in MARF." diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index b6a45ee764..07d48c9504 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -18,7 +18,7 @@ use std::path::PathBuf; #[cfg(feature = "canonical")] use rusqlite::Connection; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; @@ -64,9 +64,15 @@ pub trait ClarityBackingStore { fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()>; /// fetch K-V out of the committed datastore fn get_data(&mut self, key: &str) -> Result<Option<String>>; + /// fetch Hash(K)-V out of the committed datastore + fn get_data_from_path(&mut self, hash: &TrieHash) -> Result<Option<String>>; /// fetch K-V out of the committed datastore, along with the byte representation /// of the Merkle proof for that key-value pair fn get_data_with_proof(&mut self, key: &str) -> Result<Option<(String, Vec<u8>)>>; + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> Result<Option<(String, Vec<u8>)>>; fn has_entry(&mut self, key: &str) -> Result<bool> { Ok(self.get_data(key)?.is_some()) } @@ -209,10 +215,21 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } + fn get_data_from_path(&mut self, _hash: &TrieHash) -> Result<Option<String>> { + panic!("NullBackingStore can't retrieve data") + } + fn get_data_with_proof(&mut self, _key: &str) -> Result<Option<(String, Vec<u8>)>> { panic!("NullBackingStore can't retrieve data") } + fn get_data_with_proof_from_path( + &mut self, + _hash: &TrieHash, + ) -> Result<Option<(String, Vec<u8>)>> { + panic!("NullBackingStore can't retrieve data") + } + #[cfg(feature = "canonical")] fn get_side_store(&mut
self) -> &Connection { panic!("NullBackingStore has no side store") diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 3fd845f92f..c444aa553e 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -17,7 +17,7 @@ use std::hash::Hash; use hashbrown::HashMap; -use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -369,6 +369,21 @@ impl<'a> RollbackWrapper<'a> { .transpose() } + /// this function will only return commitment proofs for values _already_ materialized + /// in the underlying store. otherwise it returns None. + pub fn get_data_with_proof_by_hash<T>( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult<Option<(T, Vec<u8>)>> + where + T: ClarityDeserializable<T>, + { + self.store + .get_data_with_proof_from_path(hash)? + .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) + .transpose() + } + pub fn get_data<T>(&mut self, key: &str) -> InterpreterResult<Option<T>> where T: ClarityDeserializable<T>, { @@ -392,6 +407,23 @@ impl<'a> RollbackWrapper<'a> { .transpose() } + /// DO NOT USE IN CONSENSUS CODE. + /// + /// Load data directly from the underlying store, given its trie hash. The lookup map will not + /// be used. + /// + /// This should never be called from within the Clarity VM, or via block-processing. It's only + /// meant to be used by the RPC system. + pub fn get_data_by_hash<T>(&mut self, hash: &TrieHash) -> InterpreterResult<Option<T>> + where + T: ClarityDeserializable<T>, + { + self.store + .get_data_from_path(hash)? + .map(|x| T::deserialize(&x)) + .transpose() + } + pub fn deserialize_value( value_hex: &str, expected: &TypeSignature, diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 0e0f0e3f6e..7bc9a7130f 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -19,7 +19,7 @@ use rusqlite::{ params, Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, Savepoint, }; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::db::tx_busy_handler; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -330,10 +330,21 @@ impl ClarityBackingStore for MemoryBackingStore { SqliteConnection::get(self.get_side_store(), key) } + fn get_data_from_path(&mut self, hash: &TrieHash) -> Result<Option<String>> { + SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) + } + fn get_data_with_proof(&mut self, key: &str) -> Result<Option<(String, Vec<u8>)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> Result<Option<(String, Vec<u8>)>> { + self.get_data_with_proof(&hash.to_string()) + } + fn get_side_store(&mut self) -> &Connection { &self.side_store } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 9075c55e71..b23e356dea 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -529,7 +529,7 @@ const LOG2_API: SimpleFunctionAPI = SimpleFunctionAPI { snippet: "log2 ${1:expr-1}", signature: "(log2 n)", description: - "Returns the power to which the number 2 must be raised to to obtain the value `n`, rounded + "Returns the power to which the number 2 must be raised to obtain the value `n`,
rounded down to the nearest integer. Fails on a negative numbers. ", example: "(log2 u8) ;; Returns u3 diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 833ed4baf8..6482493a29 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -63,6 +63,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch25 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 3.0. StacksEpochId::Epoch30 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 3.1. + StacksEpochId::Epoch31 => $Epoch205Version(args, env, context), } } }; diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index d64b207522..ff991f5513 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -575,7 +575,7 @@ pub fn execute(program: &str) -> Result<Option<Value>> { ) } -/// Execute for test in in Clarity2, Epoch21, testnet. +/// Execute for test in Clarity2, Epoch21, testnet. #[cfg(any(test, feature = "testing"))] pub fn execute_v2(program: &str) -> Result<Option<Value>> { execute_with_parameters( diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index f2b6d4dd09..295909859f 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -52,7 +52,8 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => UnitTestBurnStateDB { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::PrecheckSize, }, diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 2c6f23ef42..5fa58b507b 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -122,6 +122,7 @@ epochs_template! { Epoch24, Epoch25, Epoch30, + Epoch31, } clarity_template!
{ (Epoch30, Clarity1), (Epoch30, Clarity2), (Epoch30, Clarity3), + (Epoch31, Clarity1), + (Epoch31, Clarity2), + (Epoch31, Clarity3), } #[cfg(test)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 280258e026..b3984c5251 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -587,7 +587,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.admits_type_v2_1(other), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => { return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) } @@ -800,7 +801,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.canonicalize_v2_1(), } } @@ -1158,7 +1160,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => Self::least_supertype_v2_1(a, b), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => { return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) } diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 4c437d52cc..7050d5dbd9 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -40,6 +40,7 @@ impl ClarityVersion { StacksEpochId::Epoch24 => ClarityVersion::Clarity2, StacksEpochId::Epoch25 => ClarityVersion::Clarity2, StacksEpochId::Epoch30 => ClarityVersion::Clarity3, + StacksEpochId::Epoch31 => ClarityVersion::Clarity3, } } } diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 94a5479613..e029a8b113 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -172,6 +172,35 @@ Where data is the hex serialization of the variable value. This endpoint also accepts a querystring parameter `?proof=` which when supplied `0`, will return the JSON object _without_ the `proof` field. +### GET /v2/clarity/marf/[Clarity MARF Key] +Attempt to fetch the value of a MARF key. The key is identified with [Clarity MARF Key]. + +Returns JSON data in the form: + +```json +{ + "data": "0x01ce...", + "proof": "0x01ab..." +} +``` + +Where data is the hex serialization of the value. + +### GET /v2/clarity/metadata/[Stacks Address]/[Contract Name]/[Clarity Metadata Key] +Attempt to fetch the metadata of a contract. + The contract is identified with [Stacks Address] and [Contract Name] in the URL path. + The metadata key is identified with [Clarity Metadata Key]. + +Returns JSON data in the form: + +```json +{ + "data": "'{\"contract_identifier\":{...}'" +} +``` + +Where data is the metadata formatted as a JSON string. + ### GET /v2/constant_val/[Stacks Address]/[Contract Name]/[Constant Name] Attempt to fetch a constant from a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The constant is identified with [Constant Name]. diff --git a/docs/rpc/api/core-node/get-clarity-marf-value.example.json b/docs/rpc/api/core-node/get-clarity-marf-value.example.json new file mode 100644 index 0000000000..d0e233416f --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-marf-value.example.json @@ -0,0 +1,4 @@ +{ + "data": "0x0a0c000000010a6d6f6e737465722d69640100000000000000000000000000000001", + "proof": "0x123..."
+} diff --git a/docs/rpc/api/core-node/get-clarity-marf-value.schema.json b/docs/rpc/api/core-node/get-clarity-marf-value.schema.json new file mode 100644 index 0000000000..ea7e7894fb --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-marf-value.schema.json @@ -0,0 +1,17 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Response of get Clarity MARF value request", + "title": "ClarityMARFValueResponse", + "type": "object", + "required": ["data"], + "properties": { + "data": { + "type": "string", + "description": "Hex-encoded string" + }, + "proof": { + "type": "string", + "description": "Hex-encoded string of the MARF proof for the data" + } + } +} diff --git a/docs/rpc/api/core-node/get-clarity-metadata.example.json b/docs/rpc/api/core-node/get-clarity-metadata.example.json new file mode 100644 index 0000000000..5bb4bd5c47 --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-metadata.example.json @@ -0,0 +1,3 @@ +{ + "data": "'{\"contract_identifier\":{...}, \"private_function_types\":{...}'" +} diff --git a/docs/rpc/api/core-node/get-clarity-metadata.schema.json b/docs/rpc/api/core-node/get-clarity-metadata.schema.json new file mode 100644 index 0000000000..3c0104fa41 --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-metadata.schema.json @@ -0,0 +1,13 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Response of get clarity metadata request", + "title": "ClarityMetadataResponse", + "type": "object", + "required": ["data"], + "properties": { + "data": { + "type": "string", + "description": "Metadata value formatted as a JSON string" + } + } +} diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index db36da8bac..d82494ca36 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -486,6 +486,93 @@ paths: If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). + /v2/clarity/marf/{clarity_marf_key}: + post: + summary: Get the MARF value for a given key + tags: + - Smart Contracts + operationId: get_clarity_marf_value + description: | + Attempt to fetch the value of a MARF key. + + In the response, `data` is the hex serialization of the value. + responses: + 200: + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-clarity-marf-value.schema.json + example: + $ref: ./api/core-node/get-clarity-marf-value.example.json + 400: + description: Failed to retrieve MARF key + parameters: + - name: clarity_marf_key + in: path + required: true + description: MARF key + schema: + type: string + - name: proof + in: query + description: Returns object without the proof field when set to 0 + schema: + type: integer + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). + + /v2/clarity/metadata/{contract_address}/{contract_name}/{clarity_metadata_key}: + post: + summary: Get the contract metadata for the metadata key + tags: + - Smart Contracts + operationId: get_clarity_metadata_key + description: | + Attempt to fetch the metadata of a contract. The contract is identified with [Contract Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. + + In the response, `data` is formatted as JSON. 
+ responses: + 200: + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-clarity-metadata.schema.json + example: + $ref: ./api/core-node/get-clarity-metadata.example.json + 400: + description: Failed to retrieve metadata from contract + parameters: + - name: contract_address + in: path + required: true + description: Stacks address + schema: + type: string + - name: contract_name + in: path + required: true + description: Contract name + schema: + type: string + - name: clarity_metadata_key + in: path + required: true + description: Metadata key + schema: + type: string + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). + /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: post: summary: Get the value of a constant inside a contract @@ -778,4 +865,4 @@ paths: text/plain: schema: type: integer - example: 7 \ No newline at end of file + example: 7 diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 1a13aa02ed..04c3acc1ea 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -81,10 +81,11 @@ pub mod consts { pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; + pub const PEER_VERSION_EPOCH_3_1: u8 = 0x0c; /// this should be updated to the latest network epoch version supported by /// this node. this will be checked by the `validate_epochs()` method. - pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; + pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_1 as u32; /// set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; @@ -93,6 +94,9 @@ pub mod consts { /// network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; pub const NETWORK_ID_TESTNET: u32 = 0xff000000; + + /// number of uSTX per STX + pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; } /// This test asserts that the constant above doesn't change. diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 47d6c3c499..59347ed36a 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -30,6 +30,68 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; +impl TrieHash { + pub fn from_key(k: &str) -> Self { + Self::from_data(k.as_bytes()) + } + + /// TrieHash of zero bytes + pub fn from_empty_data() -> TrieHash { + // sha2-512/256 hash of empty string. + // this is used so frequently it helps performance if we just have a constant for it.
+ TrieHash([ + 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, + 0x14, 0x06, 0x9b, 0xdd, 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, 0x1e, + 0xce, 0xf0, 0x96, 0x7a, + ]) + } + + /// TrieHash from bytes + pub fn from_data(data: &[u8]) -> TrieHash { + if data.len() == 0 { + return TrieHash::from_empty_data(); + } + + let mut tmp = [0u8; 32]; + + let mut hasher = Sha512_256::new(); + hasher.update(data); + tmp.copy_from_slice(hasher.finalize().as_slice()); + + TrieHash(tmp) + } + + pub fn from_data_array<B: AsRef<[u8]>>(data: &[B]) -> TrieHash { + if data.len() == 0 { + return TrieHash::from_empty_data(); + } + + let mut tmp = [0u8; 32]; + + let mut hasher = Sha512_256::new(); + + for item in data.iter() { + hasher.update(item); + } + tmp.copy_from_slice(hasher.finalize().as_slice()); + TrieHash(tmp) + } + + /// Convert to a String that can be used in e.g. sqlite + pub fn to_string(&self) -> String { + let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", + self.0[0], self.0[1], self.0[2], self.0[3], + self.0[4], self.0[5], self.0[6], self.0[7], + self.0[8], self.0[9], self.0[10], self.0[11], + self.0[12], self.0[13], self.0[14], self.0[15], + self.0[16], self.0[17], self.0[18], self.0[19], + self.0[20], self.0[21], self.0[22], self.0[23], + self.0[24], self.0[25], self.0[26], self.0[27], + self.0[28], self.0[29], self.0[30], self.0[31]); + s + } +} + #[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 8089c6c0a1..4a1f34cbc7 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -1,3 +1,20 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -11,6 +28,7 @@ use crate::address::{ C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use crate::consts::MICROSTACKS_PER_STACKS; use crate::deps_common::bitcoin::blockdata::transaction::TxOut; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; @@ -19,6 +37,9 @@ use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; pub mod chainstate; pub mod net; +#[cfg(test)] +pub mod tests; + /// A container for public keys (compressed secp256k1 public keys) pub struct StacksPublicKeyBuffer(pub [u8; 33]); impl_array_newtype!(StacksPublicKeyBuffer, u8, 33); @@ -81,6 +102,7 @@ pub enum StacksEpochId { Epoch24 = 0x02019, Epoch25 = 0x0201a, Epoch30 = 0x03000, + Epoch31 = 0x03001, } #[derive(Debug)] @@ -89,9 +111,153 @@ pub enum MempoolCollectionBehavior { ByReceiveTime, } +/// Struct describing an interval of time (measured in burnchain blocks) during which a coinbase is +/// allotted. Applies to SIP-029 code paths and later. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CoinbaseInterval { + /// amount of uSTX to award + pub coinbase: u128, + /// height of the chain after Stacks chain genesis at which this coinbase interval starts + pub effective_start_height: u64, +} + +/// From SIP-029: +/// +/// | Coinbase Interval | Bitcoin Height | Offset Height | Approx. Supply | STX Reward | Annual Inflation | +/// |--------------------|----------------|---------------------|------------------|------------|------------------| +/// | Current | - | - | 1,552,452,847 | 1000 | - | +/// | 1st | 945,000 | 278,950 | 1,627,352,847 | 500 (50%) | 3.23% | +/// | 2nd | 1,050,000 | 383,950 | 1,679,852,847 | 250 (50%) | 1.57% | +/// | 3rd | 1,260,000 | 593,950 | 1,732,352,847 | 125 (50%) | 0.76% | +/// | 4th | 1,470,000 | 803,950 | 1,758,602,847 | 62.5 (50%) | 0.37% | +/// | - | 2,197,560 | 1,531,510 | 1,804,075,347 | 62.5 (0%) | 0.18% | +/// +/// The above is for mainnet, which has a burnchain year of 52596 blocks and starts at burnchain height 666050. +/// The `Offset Height` column is simply the difference between `Bitcoin Height` and 666050. 
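For reference only — not part of the patch — the table above reads as a simple interval scan: convert the burnchain height to an offset past the mainnet chain-start height (666050), then take the last interval whose `effective_start_height` does not exceed that offset. A minimal standalone sketch of that lookup (struct and function names here are illustrative, not the patch's):

```rust
// Illustrative sketch of the SIP-029 lookup described above; `Interval`
// stands in for the `CoinbaseInterval` struct this patch introduces.
struct Interval {
    coinbase_ustx: u128,
    effective_start_height: u64,
}

// With intervals sorted ascending by start height, the last interval whose
// start is at or below the offset supplies the reward.
fn coinbase_at_offset(intervals: &[Interval], offset: u64) -> u128 {
    intervals
        .iter()
        .rev()
        .find(|i| i.effective_start_height <= offset)
        .map(|i| i.coinbase_ustx)
        .unwrap_or(0)
}

fn main() {
    const USTX: u128 = 1_000_000; // uSTX per STX
    // Offset heights are `Bitcoin Height - 666050`, per the table above.
    let schedule = [
        Interval { coinbase_ustx: 1_000 * USTX, effective_start_height: 0 },
        Interval { coinbase_ustx: 500 * USTX, effective_start_height: 278_950 },
        Interval { coinbase_ustx: 250 * USTX, effective_start_height: 383_950 },
        Interval { coinbase_ustx: 125 * USTX, effective_start_height: 593_950 },
        Interval { coinbase_ustx: 625 * USTX / 10, effective_start_height: 803_950 },
    ];
    // Burn height 944_999 still pays 1000 STX; 945_000 begins the 500 STX interval.
    assert_eq!(coinbase_at_offset(&schedule, 944_999 - 666_050), 1_000 * USTX);
    assert_eq!(coinbase_at_offset(&schedule, 945_000 - 666_050), 500 * USTX);
}
```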
+ +/// Mainnet coinbase intervals, as of SIP-029 +pub const COINBASE_INTERVALS_MAINNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { + let emissions_schedule = [ + CoinbaseInterval { + coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 278_950, + }, + CoinbaseInterval { + coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 383_950, + }, + CoinbaseInterval { + coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 593_950, + }, + CoinbaseInterval { + coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, + effective_start_height: 803_950, + }, + ]; + assert!(CoinbaseInterval::check_order(&emissions_schedule)); + emissions_schedule +}); + +/// Testnet coinbase intervals, as of SIP-029 +pub const COINBASE_INTERVALS_TESTNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { + let emissions_schedule = [ + CoinbaseInterval { + coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 1000, + }, + CoinbaseInterval { + coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 2000, + }, + CoinbaseInterval { + coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 3000, + }, + CoinbaseInterval { + coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, + effective_start_height: 4000, + }, + ]; + assert!(CoinbaseInterval::check_order(&emissions_schedule)); + emissions_schedule +}); + +/// Used for testing to substitute a coinbase schedule +#[cfg(any(test, feature = "testing"))] +pub static COINBASE_INTERVALS_TEST: std::sync::Mutex<Option<Vec<CoinbaseInterval>>> = + std::sync::Mutex::new(None); + +#[cfg(any(test, feature = "testing"))] +pub fn set_test_coinbase_schedule(coinbase_schedule: Option<Vec<CoinbaseInterval>>) { + match COINBASE_INTERVALS_TEST.lock() { + Ok(mut schedule_guard) => { + *schedule_guard = coinbase_schedule; + } + Err(_e) => { + panic!("COINBASE_INTERVALS_TEST mutex poisoned"); + } + } +} + +impl CoinbaseInterval { + /// Look up the value of a coinbase at an effective height.
+ /// Precondition: `intervals` must be sorted in ascending order by `effective_start_height` + pub fn get_coinbase_at_effective_height( + intervals: &[CoinbaseInterval], + effective_height: u64, + ) -> u128 { + if intervals.is_empty() { + return 0; + } + if intervals.len() == 1 { + if intervals[0].effective_start_height <= effective_height { + return intervals[0].coinbase; + } else { + return 0; + } + } + + for i in 0..(intervals.len() - 1) { + if intervals[i].effective_start_height <= effective_height + && effective_height < intervals[i + 1].effective_start_height + { + return intervals[i].coinbase; + } + } + + // in last interval, which per the above checks is guaranteed to exist + intervals.last().unwrap_or_else(|| unreachable!()).coinbase + } + + /// Verify that a list of intervals is sorted in ascending order by `effective_start_height` + pub fn check_order(intervals: &[CoinbaseInterval]) -> bool { + if intervals.len() < 2 { + return true; + } + + let mut ht = intervals[0].effective_start_height; + for i in 1..intervals.len() { + if intervals[i].effective_start_height < ht { + return false; + } + ht = intervals[i].effective_start_height; + } + true + } +} + impl StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch30 + StacksEpochId::Epoch31 } /// In this epoch, how should the mempool perform garbage collection? @@ -105,7 +271,9 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => MempoolCollectionBehavior::ByStacksHeight, - StacksEpochId::Epoch30 => MempoolCollectionBehavior::ByReceiveTime, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + MempoolCollectionBehavior::ByReceiveTime + } } } @@ -120,7 +288,7 @@ impl StacksEpochId { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => false, - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -134,7 +302,10 @@ impl StacksEpochId { | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => false, - StacksEpochId::Epoch24 | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => true, } } @@ -150,7 +321,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -166,7 +337,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -181,7 +352,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -212,7 +383,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => 0, - StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } @@ -248,7 +419,132 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + cur_reward_cycle > 
first_epoch30_reward_cycle + } + } + } + + /// What is the coinbase (in uSTX) to award for the given burnchain height? + /// Applies prior to SIP-029 + fn coinbase_reward_pre_sip029( + &self, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + /* + From https://forum.stacks.org/t/pox-consensus-and-stx-future-supply + + """ + + 1000 STX for years 0-4 + 500 STX for years 4-8 + 250 STX for years 8-12 + 125 STX in perpetuity + + + From the Token Whitepaper: + + We expect that once native mining goes live, approximately 4383 blocks will be pro- + cessed per month, or approximately 52,596 blocks will be processed per year. + + """ + */ + // this is saturating subtraction for the initial reward calculation + // where we are computing the coinbase reward for blocks that occur *before* + // the `first_burn_block_height` + let effective_ht = current_burnchain_height.saturating_sub(first_burnchain_height); + let blocks_per_year = 52596; + let stx_reward = if effective_ht < blocks_per_year * 4 { + 1000 + } else if effective_ht < blocks_per_year * 8 { + 500 + } else if effective_ht < blocks_per_year * 12 { + 250 + } else { + 125 + }; + + stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) + } + + /// Get the coinbase intervals to use. + /// Can be overridden by tests + #[cfg(any(test, feature = "testing"))] + pub(crate) fn get_coinbase_intervals(mainnet: bool) -> Vec<CoinbaseInterval> { + match COINBASE_INTERVALS_TEST.lock() { + Ok(schedule_opt) => { + if let Some(schedule) = (*schedule_opt).as_ref() { + info!("Use overridden coinbase schedule {:?}", &schedule); + return schedule.clone(); + } + } + Err(_e) => { + panic!("COINBASE_INTERVALS_TEST mutex poisoned"); + } + } + + if mainnet { + COINBASE_INTERVALS_MAINNET.to_vec() + } else { + COINBASE_INTERVALS_TESTNET.to_vec() + } + } + + #[cfg(not(any(test, feature = "testing")))] + pub(crate) fn get_coinbase_intervals(mainnet: bool) -> Vec<CoinbaseInterval> { + if mainnet { + COINBASE_INTERVALS_MAINNET.to_vec() + } else { + COINBASE_INTERVALS_TESTNET.to_vec() + } + } + + /// What is the coinbase (in uSTX) to award for the given burnchain height? + /// Applies at and after SIP-029. + /// Uses coinbase intervals defined by COINBASE_INTERVALS_MAINNET, unless overridden by a unit + /// or integration test. + fn coinbase_reward_sip029( + &self, + mainnet: bool, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + let effective_ht = current_burnchain_height.saturating_sub(first_burnchain_height); + let coinbase_intervals = Self::get_coinbase_intervals(mainnet); + CoinbaseInterval::get_coinbase_at_effective_height(&coinbase_intervals, effective_ht) + } + + /// What is the coinbase to award?
+ pub fn coinbase_reward( + &self, + mainnet: bool, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + match self { + StacksEpochId::Epoch10 => { + // Stacks is not active + 0 + } + StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => { + self.coinbase_reward_pre_sip029(first_burnchain_height, current_burnchain_height) + } + StacksEpochId::Epoch31 => { + let cb = self.coinbase_reward_sip029( + mainnet, + first_burnchain_height, + current_burnchain_height, + ); + cb + } } } } @@ -265,6 +561,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch24 => write!(f, "2.4"), StacksEpochId::Epoch25 => write!(f, "2.5"), StacksEpochId::Epoch30 => write!(f, "3.0"), + StacksEpochId::Epoch31 => write!(f, "3.1"), } } } @@ -283,6 +580,7 @@ impl TryFrom<u32> for StacksEpochId { x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), x if x == StacksEpochId::Epoch25 as u32 => Ok(StacksEpochId::Epoch25), x if x == StacksEpochId::Epoch30 as u32 => Ok(StacksEpochId::Epoch30), + x if x == StacksEpochId::Epoch31 as u32 => Ok(StacksEpochId::Epoch31), _ => Err("Invalid epoch"), } } diff --git a/stacks-common/src/types/tests.rs b/stacks-common/src/types/tests.rs new file mode 100644 index 0000000000..20676999e7 --- /dev/null +++ b/stacks-common/src/types/tests.rs @@ -0,0 +1,352 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +use super::{ + set_test_coinbase_schedule, CoinbaseInterval, StacksEpochId, COINBASE_INTERVALS_MAINNET, + COINBASE_INTERVALS_TESTNET, +}; + +#[test] +fn test_mainnet_coinbase_emissions() { + assert_eq!(COINBASE_INTERVALS_MAINNET.len(), 5); + assert_eq!(COINBASE_INTERVALS_MAINNET[0].coinbase, 1_000_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[1].coinbase, 500_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[2].coinbase, 250_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[3].coinbase, 125_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[4].coinbase, 62_500_000); + + // heights from SIP-029 + assert_eq!( + COINBASE_INTERVALS_MAINNET[0].effective_start_height, + 666_050 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[1].effective_start_height, + 945_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[2].effective_start_height, + 1_050_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[3].effective_start_height, + 1_260_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[4].effective_start_height, + 1_470_000 - 666_050 + ); +} + +#[test] +fn test_get_coinbase_at_effective_height() { + assert!(CoinbaseInterval::check_order(&*COINBASE_INTERVALS_MAINNET)); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 666050 - 666050 + ), + 1_000_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 666051 - 666050 + ), + 1_000_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 944_999 - 666050 + ), + 1_000_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 945_000 - 666050 + ), + 500_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 945_001 - 666050 + ), + 500_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_049_999 - 666050 + ), + 500_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_050_000 - 666050 + ), + 250_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_050_001 - 666050 + ), + 250_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_259_999 - 666050 + ), + 250_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_260_000 - 666050 + ), + 125_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_260_001 - 666050 + ), + 125_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_469_999 - 666050 + ), + 125_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_470_000 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_470_001 - 666050 + ), + 62_500_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_559 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_560 - 666050 + ), + 62_500_000 + ); + 
assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_561 - 666050 + ), + 62_500_000 + ); +} + +#[test] +fn test_epoch_coinbase_reward() { + // new coinbase schedule + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 666050), + 1_000_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 666051), + 1_000_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 944_999), + 1_000_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 945_000), + 500_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 945_001), + 500_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_049_999), + 500_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_050_000), + 250_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_050_001), + 250_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_259_999), + 250_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_260_000), + 125_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_260_001), + 125_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_469_999), + 125_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_470_000), + 62_500_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_470_001), + 62_500_000 + ); + + // old coinbase schedule + for epoch in [ + StacksEpochId::Epoch20, + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + ] + .iter() + { + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4 - 1), + 1_000_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4), + 500_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4 + 1), + 500_000_000 + ); + + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8 - 1), + 500_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8), + 250_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8 + 1), + 250_000_000 + ); + + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12 - 1), + 250_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12), + 125_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12 + 1), + 125_000_000 + ); + } +} + +/// Verifies that the test facility for setting a coinbase schedule in a unit or integration test +/// actually works. 
+#[test] +fn test_set_coinbase_intervals() { + let new_sched = vec![ + CoinbaseInterval { + coinbase: 1, + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 2, + effective_start_height: 1, + }, + CoinbaseInterval { + coinbase: 3, + effective_start_height: 2, + }, + CoinbaseInterval { + coinbase: 4, + effective_start_height: 3, + }, + CoinbaseInterval { + coinbase: 5, + effective_start_height: 4, + }, + ]; + + assert_eq!( + StacksEpochId::get_coinbase_intervals(true), + *COINBASE_INTERVALS_MAINNET + ); + assert_eq!( + StacksEpochId::get_coinbase_intervals(false), + *COINBASE_INTERVALS_TESTNET + ); + + set_test_coinbase_schedule(Some(new_sched.clone())); + + assert_eq!(StacksEpochId::get_coinbase_intervals(true), new_sched); + assert_eq!(StacksEpochId::get_coinbase_intervals(false), new_sched); + + set_test_coinbase_schedule(None); + + assert_eq!( + StacksEpochId::get_coinbase_intervals(true), + *COINBASE_INTERVALS_MAINNET + ); + assert_eq!( + StacksEpochId::get_coinbase_intervals(false), + *COINBASE_INTERVALS_TESTNET + ); +} diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 46e25b285f..a332b344ce 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [3.0.0.0.4.0] + +### Added + +### Changed + +- Use the same burn view loader in both block validation and block processing + ## [3.0.0.0.3.0] ### Added diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cae6a210b7..4676738629 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -173,6 +173,9 @@ impl StacksClient { &self, consensus_hash: &ConsensusHash, ) -> Result<StacksBlockHeaderTypes, ClientError> { + debug!("StacksClient: Getting tenure tip"; + "consensus_hash" => %consensus_hash, + ); let send_request = || { self.stacks_node_client .get(self.tenure_tip_path(consensus_hash)) @@ -192,6 +195,7 @@ impl StacksClient { /// Get the last set reward cycle stored within the stackerdb contract pub fn get_last_set_cycle(&self) -> Result<u128, ClientError> { + debug!("StacksClient: Getting last set cycle"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); let function_name_str = "get-last-set-cycle"; let function_name = ClarityName::from(function_name_str); @@ -210,6 +214,10 @@ impl StacksClient { stackerdb_contract: &QualifiedContractIdentifier, page: u32, ) -> Result<Vec<(StacksAddress, u128)>, ClientError> { + debug!("StacksClient: Getting signer slots"; + "stackerdb_contract" => %stackerdb_contract, + "page" => page, + ); let function_name_str = "stackerdb-get-signer-slots-page"; let function_name = ClarityName::from(function_name_str); let function_args = &[ClarityValue::UInt(page.into())]; @@ -250,6 +258,9 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result<Vec<SignerSlotID>, ClientError> { + debug!("StacksClient: Getting parsed signer slots"; + "reward_cycle" => reward_cycle, + ); let signer_set = u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); @@ -272,6 +283,7 @@ impl StacksClient { /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result<StacksEpochId, ClientError> { + debug!("StacksClient: Getting node epoch"); let pox_info = self.get_pox_data()?; let burn_block_height = self.get_burn_block_height()?; @@ -302,7 +314,7 @@ impl StacksClient { /// Submit the block proposal to the stacks node.
The block will be validated and returned via the HTTP endpoint for Block events. pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { - debug!("stacks_node_client: Submitting block for validation..."; + debug!("StacksClient: Submitting block for validation"; "signer_sighash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -337,6 +349,10 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result<Vec<TenureForkingInfo>, ClientError> { + debug!("StacksClient: Getting tenure forking info"; + "chosen_parent" => %chosen_parent, + "last_sortition" => %last_sortition, + ); let mut tenures: VecDeque<TenureForkingInfo> = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; if tenures.is_empty() { @@ -373,7 +389,7 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result<VecDeque<TenureForkingInfo>, ClientError> { - debug!("stacks_node_client: Getting tenure forking info..."; + debug!("StacksClient: Getting tenure forking info"; "chosen_parent" => %chosen_parent, "last_sortition" => %last_sortition, ); @@ -402,7 +418,7 @@ impl StacksClient { /// Get the current winning sortition and the last winning sortition pub fn get_current_and_last_sortition(&self) -> Result<CurrentAndLastSortition, ClientError> { - debug!("stacks_node_client: Getting current and prior sortition..."); + debug!("StacksClient: Getting current and prior sortition"); let path = format!("{}/latest_and_last", self.sortition_info_path()); let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { @@ -443,7 +459,7 @@ impl StacksClient { /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result<PeerInfo, ClientError> { - debug!("stacks_node_client: Getting peer info..."); + debug!("StacksClient: Getting peer info"); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { @@ -466,7 +482,9 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result<Option<Vec<NakamotoSignerEntry>>, ClientError> { - debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); + debug!("StacksClient: Getting reward set signers"; + "reward_cycle" => reward_cycle, + ); let timer = crate::monitoring::new_rpc_call_timer( &format!("{}/v3/stacker_set/:reward_cycle", self.http_origin), &self.http_origin, @@ -502,7 +520,7 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result<RPCPoxInfoData, ClientError> { - debug!("stacks_node_client: Getting pox data..."); + debug!("StacksClient: Getting pox data"); let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client @@ -521,11 +539,13 @@ impl StacksClient { /// Helper function to retrieve the burn tip height from the stacks node fn get_burn_block_height(&self) -> Result<u64, ClientError> { + debug!("StacksClient: Getting burn block height"); self.get_peer_info().map(|info| info.burn_block_height) } /// Get the current reward cycle info from the stacks node pub fn get_current_reward_cycle_info(&self) -> Result<RewardCycleInfo, ClientError> { + debug!("StacksClient: Getting current reward cycle info"); let pox_data = self.get_pox_data()?; let blocks_mined = pox_data .current_burnchain_block_height @@ -548,7 +568,9 @@ impl StacksClient { &self, address: &StacksAddress, ) -> Result<AccountEntryResponse, ClientError> { - debug!("stacks_node_client: Getting account info..."); + debug!("StacksClient: Getting account info"; + "address" => %address, + ); let timer_label =
format!("{}/v2/accounts/:principal", self.http_origin); let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { @@ -570,6 +592,11 @@ impl StacksClient { /// /// In tests, this panics if the retry takes longer than 30 seconds. pub fn post_block_until_ok(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { + debug!("StacksClient: Posting block to stacks node"; + "signer_sighash" => %block.header.signer_signature_hash(), + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, + ); let start_time = Instant::now(); loop { match self.post_block(block) { @@ -595,7 +622,8 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { - debug!("stacks_node_client: Posting block to the stacks node..."; + debug!("StacksClient: Posting block to the stacks node"; + "signer_sighash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -630,7 +658,9 @@ impl StacksClient { function_name: &ClarityName, function_args: &[ClarityValue], ) -> Result { - debug!("stacks_node_client: Calling read-only function {function_name} with args {function_args:?}..."); + debug!( + "StacksClient: Calling read-only function {function_name} with args {function_args:?}" + ); let args = function_args .iter() .filter_map(|arg| arg.serialize_to_hex().ok()) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9fcaa1fa1b..c0bd679a54 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -871,16 +871,6 @@ where .map_err(DBError::SerializationError) } -#[cfg(test)] -pub fn test_signer_db(db_path: &str) -> SignerDb { - use std::fs; - - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - SignerDb::new(db_path).expect("Failed to create signer db") -} - #[cfg(test)] mod tests { use std::fs; diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 84a45eb278..b688097d70 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -1083,7 +1083,9 @@ impl Burnchain { } /// Hand off the block to the ChainsCoordinator _and_ process the sortition - /// *only* to be used by legacy stacks node interfaces, like the Helium node + /// *only* to be used by legacy stacks node interfaces, like the Helium node. + /// + /// It does not work on mainnet. 
fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, @@ -1120,6 +1122,7 @@ impl Burnchain { // method is deprecated and only used in defunct helium nodes db.evaluate_sortition( + false, &header, blockstack_txs, burnchain, diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index b08d7a097e..8d72d4efa9 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -44,7 +44,6 @@ use crate::chainstate::burn::{ BlockSnapshot, ConsensusHash, ConsensusHashExtensions, OpsHash, SortitionHash, }; use crate::chainstate::stacks::address::StacksAddressExtensions; -use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::util_lib::db::Error as db_error; @@ -478,6 +477,7 @@ fn test_process_block_ops() { let (sn121, _) = tx .process_block_ops( + false, &burnchain, &initial_snapshot, &header, @@ -500,6 +500,7 @@ fn test_process_block_ops() { let (sn122, _) = tx .process_block_ops( + false, &burnchain, &block_121_snapshot, &header, @@ -521,6 +522,7 @@ fn test_process_block_ops() { let mut tx = SortitionHandleTx::begin(&mut db, &block_122_snapshot.sortition_id).unwrap(); let (sn123, _) = tx .process_block_ops( + false, &burnchain, &block_122_snapshot, &header, @@ -632,6 +634,7 @@ fn test_process_block_ops() { SortitionHandleTx::begin(&mut db, &block_123_snapshot.sortition_id).unwrap(); let (sn124, _) = tx .process_block_ops( + false, &burnchain, &block_123_snapshot, &header, @@ -873,6 +876,7 @@ fn test_burn_snapshot_sequence() { let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); let (sn, _) = tx .process_block_ops( + false, &burnchain, &prev_snapshot, &header, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 887b56861b..c8543b1142 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -627,6 +627,7 @@ impl TestBurnchainBlock { let new_snapshot = sortition_db_handle .process_block_txs( + false, &parent_snapshot, &header, burnchain, diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 82318bfe37..0aacd2816a 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -112,6 +112,7 @@ impl<'a> SortitionHandleTx<'a> { /// * return the snapshot (and sortition results) fn process_checked_block_ops( &mut self, + mainnet: bool, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, @@ -141,6 +142,7 @@ impl<'a> SortitionHandleTx<'a> { // do the cryptographic sortition and pick the next winning block. let mut snapshot = BlockSnapshot::make_snapshot( + mainnet, self, burnchain, &next_sortition_id, @@ -158,6 +160,11 @@ impl<'a> SortitionHandleTx<'a> { BurnchainError::DBError(e) })?; + let snapshot_epoch = SortitionDB::get_stacks_epoch(self, snapshot.block_height)? + .unwrap_or_else(|| { + panic!("FATAL: no epoch defined for snapshot"); + }); + // was this snapshot the first with mining? // compute the initial block rewards. 
let initialize_bonus = if snapshot.sortition && parent_snapshot.total_burn == 0 { @@ -166,6 +173,8 @@ impl<'a> SortitionHandleTx<'a> { let mut total_reward = 0; for burn_block_height in burnchain.initial_reward_start_block..snapshot.block_height { total_reward += StacksChainState::get_coinbase_reward( + snapshot_epoch.epoch_id, + mainnet, burn_block_height, self.context.first_block_height, ); @@ -227,6 +236,7 @@ impl<'a> SortitionHandleTx<'a> { /// Returns the BlockSnapshot created from this block. pub fn process_block_ops( &mut self, + mainnet: bool, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, @@ -279,6 +289,7 @@ impl<'a> SortitionHandleTx<'a> { // process them let res = self .process_checked_block_ops( + mainnet, burnchain, parent_snapshot, block_header, @@ -305,6 +316,7 @@ impl<'a> SortitionHandleTx<'a> { /// list of blockstack transactions. pub fn process_block_txs( &mut self, + mainnet: bool, parent_snapshot: &BlockSnapshot, this_block_header: &BurnchainBlockHeader, burnchain: &Burnchain, @@ -324,6 +336,7 @@ impl<'a> SortitionHandleTx<'a> { ); let new_snapshot = self.process_block_ops( + mainnet, burnchain, &parent_snapshot, &this_block_header, @@ -353,7 +366,6 @@ mod tests { use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegisterOp}; use crate::chainstate::burn::*; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::MICROSTACKS_PER_STACKS; @@ -432,6 +444,7 @@ mod tests { let processed = ic .process_block_ops( + false, &burnchain, &snapshot, &next_block_header, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index dd543ac7f7..e399121e07 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3163,6 +3163,7 @@ impl SortitionDB { StacksEpochId::Epoch24 => version_u32 >= 3, StacksEpochId::Epoch25 => version_u32 >= 3, StacksEpochId::Epoch30 => version_u32 >= 3, + StacksEpochId::Epoch31 => version_u32 >= 3, } } @@ -3986,7 +3987,7 @@ impl<'a> SortitionDBConn<'a> { tip, reward_cycle_id, )?; - info!("Fetching preprocessed reward set"; + debug!("Fetching preprocessed reward set"; "tip_sortition_id" => %tip, "reward_cycle_id" => reward_cycle_id, "prepare_phase_start_sortition_id" => %first_sortition, @@ -4288,6 +4289,7 @@ impl SortitionDB { /// commits its results. This is used to post the calculated reward set to an event observer. 
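// The initial-mining-bonus hunk above now computes each missed coinbase with
// the epoch- and network-aware `get_coinbase_reward`. A minimal sketch of the
// accumulation it performs, with a caller-supplied reward function standing in
// for `StacksChainState::get_coinbase_reward` (hypothetical helper, shown only
// to make the loop's bounds explicit):

fn accumulated_initial_bonus(
    initial_reward_start_block: u64,
    first_winning_block_height: u64,
    coinbase_at: impl Fn(u64) -> u128,
) -> u128 {
    // Sum the coinbase of every burn block that elapsed before the first
    // block-winning sortition; this accrues as the initial mining bonus.
    (initial_reward_start_block..first_winning_block_height)
        .map(coinbase_at)
        .sum()
}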
    pub fn evaluate_sortition<F: FnOnce(Option<RewardSetInfo>) -> ()>(
        &mut self,
+        mainnet: bool,
        burn_header: &BurnchainBlockHeader,
        ops: Vec<BlockstackOperationType>,
        burnchain: &Burnchain,
@@ -4365,6 +4367,7 @@ impl SortitionDB {
        };

        let new_snapshot = sortition_db_handle.process_block_txs(
+            mainnet,
            &parent_snapshot,
            burn_header,
            burnchain,
@@ -6601,7 +6604,6 @@ pub mod tests {
        BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp,
    };
    use crate::chainstate::burn::ConsensusHash;
-    use crate::chainstate::stacks::index::TrieHashExtension;
    use crate::chainstate::stacks::StacksPublicKey;
    use crate::core::{StacksEpochExtension, *};
    use crate::util_lib::db::Error as db_error;
diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs
index ed01ae014b..59c335cd58 100644
--- a/stackslib/src/chainstate/burn/distribution.rs
+++ b/stackslib/src/chainstate/burn/distribution.rs
@@ -450,7 +450,6 @@ mod tests {
    };
    use crate::chainstate::burn::ConsensusHash;
    use crate::chainstate::stacks::address::StacksAddressExtensions;
-    use crate::chainstate::stacks::index::TrieHashExtension;
    use crate::chainstate::stacks::StacksPublicKey;
    use crate::core::MINING_COMMITMENT_WINDOW;
diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs
index be92c3088f..4552210f44 100644
--- a/stackslib/src/chainstate/burn/mod.rs
+++ b/stackslib/src/chainstate/burn/mod.rs
@@ -432,7 +432,6 @@ mod tests {
    use crate::burnchains::bitcoin::address::BitcoinAddress;
    use crate::burnchains::bitcoin::keys::BitcoinPublicKey;
    use crate::chainstate::burn::db::sortdb::*;
-    use crate::chainstate::stacks::index::TrieHashExtension;
    use crate::util_lib::db::Error as db_error;

    #[test]
diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs
index c3a378ddf6..a752131668 100644
--- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs
+++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs
@@ -42,7 +42,7 @@ use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey};
use crate::core::{
    StacksEpoch, StacksEpochId, STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER,
    STACKS_EPOCH_2_2_MARKER, STACKS_EPOCH_2_3_MARKER, STACKS_EPOCH_2_4_MARKER,
-    STACKS_EPOCH_2_5_MARKER, STACKS_EPOCH_3_0_MARKER,
+    STACKS_EPOCH_2_5_MARKER, STACKS_EPOCH_3_0_MARKER, STACKS_EPOCH_3_1_MARKER,
};
use crate::net::Error as net_error;
@@ -881,6 +881,7 @@ impl LeaderBlockCommitOp {
            StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER),
            StacksEpochId::Epoch25 => self.check_epoch_commit_marker(STACKS_EPOCH_2_5_MARKER),
            StacksEpochId::Epoch30 => self.check_epoch_commit_marker(STACKS_EPOCH_3_0_MARKER),
+            StacksEpochId::Epoch31 => self.check_epoch_commit_marker(STACKS_EPOCH_3_1_MARKER),
        }
    }
@@ -900,7 +901,8 @@ impl LeaderBlockCommitOp {
            | StacksEpochId::Epoch23
            | StacksEpochId::Epoch24
            | StacksEpochId::Epoch25
-            | StacksEpochId::Epoch30 => {
+            | StacksEpochId::Epoch30
+            | StacksEpochId::Epoch31 => {
                // correct behavior -- uses *sortition height* to find the intended sortition ID
                let sortition_height = self
                    .block_height
@@ -1186,7 +1188,6 @@ mod tests {
    use crate::chainstate::burn::operations::*;
    use crate::chainstate::burn::{ConsensusHash, *};
    use crate::chainstate::stacks::address::StacksAddressExtensions;
-    use crate::chainstate::stacks::index::TrieHashExtension;
    use crate::chainstate::stacks::StacksPublicKey;
    use crate::core::{
        StacksEpoch, StacksEpochExtension, StacksEpochId, PEER_VERSION_EPOCH_1_0,
diff
--git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 44402adc0c..5608b6739d 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -253,7 +253,6 @@ pub mod tests { }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::core::StacksEpochId; pub struct OpFixture { diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index b0221f1439..ff71b0cf10 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::{ SortitionHash, }; use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId, TrieHashExtension}; +use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId}; use crate::core::*; use crate::util_lib::db::Error as db_error; @@ -498,6 +498,7 @@ impl BlockSnapshot { /// /// Call this *after* you store all of the block's transactions to the burn db. pub fn make_snapshot( + mainnet: bool, sort_tx: &mut SortitionHandleTx, burnchain: &Burnchain, my_sortition_id: &SortitionId, @@ -518,6 +519,7 @@ impl BlockSnapshot { .epoch_id; Self::make_snapshot_in_epoch( + mainnet, sort_tx, burnchain, my_sortition_id, @@ -531,6 +533,7 @@ impl BlockSnapshot { } pub fn make_snapshot_in_epoch( + mainnet: bool, sort_tx: &mut SortitionHandleTx, burnchain: &Burnchain, my_sortition_id: &SortitionId, @@ -561,6 +564,8 @@ impl BlockSnapshot { initial_mining_bonus_ustx } else { let missed_coinbase = StacksChainState::get_coinbase_reward( + epoch_id, + mainnet, parent_snapshot.block_height, first_block_height, ); @@ -788,6 +793,7 @@ mod test { burnchain_state_transition: &BurnchainStateTransition, ) -> Result { BlockSnapshot::make_snapshot( + false, sort_tx, burnchain, my_sortition_id, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 5b7c7e89b6..139a666098 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -419,8 +419,8 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { return Ok(RewardSet::empty()); } } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { - // Epoch 2.5 and 3.0 compute reward sets, but *only* if PoX-4 is active + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + // Epoch 2.5, 3.0, and 3.1 compute reward sets, but *only* if PoX-4 is active if burnchain .pox_constants .active_pox_contract(current_burn_height) @@ -2674,6 +2674,7 @@ impl< let (next_snapshot, _) = self .sortition_db .evaluate_sortition( + self.chain_state_db.mainnet, &header, ops, &self.burnchain, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index fc7c8ba504..1bb5e44192 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -1164,6 +1164,7 @@ impl< let (next_snapshot, _) = self .sortition_db .evaluate_sortition( + self.chain_state_db.mainnet, &header, ops, &self.burnchain, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs 
index ca37e30121..d874186988 100644
--- a/stackslib/src/chainstate/nakamoto/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/mod.rs
@@ -131,7 +131,7 @@ pub use self::staging_blocks::{
    NakamotoStagingBlocksConn, NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx,
};

-pub const NAKAMOTO_BLOCK_VERSION: u8 = 0;
+pub const NAKAMOTO_BLOCK_VERSION: u8 = 1;

define_named_enum!(HeaderTypeNames {
    Nakamoto("nakamoto"),
@@ -1788,6 +1788,73 @@ impl NakamotoChainState {
        }
    }

+    /// Get the current burnchain view
+    /// This is either:
+    /// (1) set by the tenure change tx if one exists
+    /// (2) the same as parent block id
+    pub fn get_block_burn_view(
+        sort_db: &SortitionDB,
+        next_ready_block: &NakamotoBlock,
+        parent_header_info: &StacksHeaderInfo,
+    ) -> Result<ConsensusHash, ChainstateError> {
+        let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() {
+            if let Some(ref parent_burn_view) = parent_header_info.burn_view {
+                // check that the tenure_change's burn view descends from the parent
+                let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus(
+                    sort_db.conn(),
+                    parent_burn_view,
+                )?
+                .ok_or_else(|| {
+                    warn!(
+                        "Cannot process Nakamoto block: could not find parent block's burnchain view";
+                        "consensus_hash" => %next_ready_block.header.consensus_hash,
+                        "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                        "stacks_block_id" => %next_ready_block.header.block_id(),
+                        "parent_block_id" => %next_ready_block.header.parent_block_id
+                    );
+                    ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into())
+                })?;
+                let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?;
+                let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)?
+                    .ok_or_else(|| {
+                        warn!(
+                            "Cannot process Nakamoto block: could not find parent block's burnchain view";
+                            "consensus_hash" => %next_ready_block.header.consensus_hash,
+                            "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                            "stacks_block_id" => %next_ready_block.header.block_id(),
+                            "parent_block_id" => %next_ready_block.header.parent_block_id
+                        );
+                        ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into())
+                    })?;
+                if connected_sort_id != parent_burn_view_sn.sortition_id {
+                    warn!(
+                        "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view";
+                        "consensus_hash" => %next_ready_block.header.consensus_hash,
+                        "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                        "stacks_block_id" => %next_ready_block.header.block_id(),
+                        "parent_block_id" => %next_ready_block.header.parent_block_id
+                    );
+                    return Err(ChainstateError::InvalidStacksBlock(
+                        "Does not connect to burn view of parent block ID".into(),
+                    ));
+                }
+            }
+            tenure_change.burn_view_consensus_hash
+        } else {
+            parent_header_info.burn_view.clone().ok_or_else(|| {
+                warn!(
+                    "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx";
+                    "consensus_hash" => %next_ready_block.header.consensus_hash,
+                    "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                    "stacks_block_id" => %next_ready_block.header.block_id(),
+                    "parent_block_id" => %next_ready_block.header.parent_block_id
+                );
+                ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into())
+            })?
+        };
+        Ok(burnchain_view)
+    }
+
    /// Process the next ready block.
    /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the
    /// receipt.
Otherwise, it returns Ok(None). @@ -1920,62 +1987,8 @@ impl NakamotoChainState { // this is either: // (1) set by the tenure change tx if one exists // (2) the same as parent block id - - let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { - if let Some(ref parent_burn_view) = parent_header_info.burn_view { - // check that the tenure_change's burn view descends from the parent - let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - parent_burn_view, - )? - .ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: could not find parent block's burnchain view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })?; - let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; - let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? - .ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: could not find parent block's burnchain view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })?; - if connected_sort_id != parent_burn_view_sn.sortition_id { - warn!( - "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - return Err(ChainstateError::InvalidStacksBlock( - "Does not connect to burn view of parent block ID".into(), - )); - } - } - tenure_change.burn_view_consensus_hash - } else { - parent_header_info.burn_view.clone().ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })? - }; + let burnchain_view = + Self::get_block_burn_view(sort_db, &next_ready_block, &parent_header_info)?; let Some(burnchain_view_sn) = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? 
else { diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 9852733311..b72bbdda14 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -971,6 +971,8 @@ impl NakamotoChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( + evaluated_epoch, + chainstate_tx.config.mainnet, chain_tip_burn_header_height, burn_dbconn.context.first_block_height, ); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 04f772da02..791f2064dc 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -3612,41 +3612,13 @@ impl StacksChainState { } /// Get the coinbase at this burn block height, in microSTX - pub fn get_coinbase_reward(burn_block_height: u64, first_burn_block_height: u64) -> u128 { - /* - From https://forum.stacks.org/t/pox-consensus-and-stx-future-supply - - """ - - 1000 STX for years 0-4 - 500 STX for years 4-8 - 250 STX for years 8-12 - 125 STX in perpetuity - - - From the Token Whitepaper: - - We expect that once native mining goes live, approximately 4383 blocks will be pro- - cessed per month, or approximately 52,596 blocks will be processed per year. - - """ - */ - // this is saturating subtraction for the initial reward calculation - // where we are computing the coinbase reward for blocks that occur *before* - // the `first_burn_block_height` - let effective_ht = burn_block_height.saturating_sub(first_burn_block_height); - let blocks_per_year = 52596; - let stx_reward = if effective_ht < blocks_per_year * 4 { - 1000 - } else if effective_ht < blocks_per_year * 8 { - 500 - } else if effective_ht < blocks_per_year * 12 { - 250 - } else { - 125 - }; - - stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) + pub fn get_coinbase_reward( + epoch: StacksEpochId, + mainnet: bool, + burn_block_height: u64, + first_burn_block_height: u64, + ) -> u128 { + epoch.coinbase_reward(mainnet, first_burn_block_height, burn_block_height) } /// Create the block reward. @@ -4132,7 +4104,12 @@ impl StacksChainState { current_epoch = StacksEpochId::Epoch30; } StacksEpochId::Epoch30 => { - panic!("No defined transition from Epoch30 forward") + // no special initialization is needed, since only the coinbase emission + // schedule is changing. 
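// For reference, the emission schedule that the deleted body of
// `get_coinbase_reward` hard-coded, now superseded by the epoch's
// `coinbase_reward(mainnet, first_burn_block_height, burn_block_height)`,
// which also distinguishes mainnet from testnet. A standalone sketch of the
// old calculation, assuming the usual 1_000_000 uSTX per STX:

const MICROSTACKS_PER_STACKS: u32 = 1_000_000;

fn legacy_coinbase_reward(burn_block_height: u64, first_burn_block_height: u64) -> u128 {
    // Saturating: blocks before `first_burn_block_height` count as height 0.
    let effective_ht = burn_block_height.saturating_sub(first_burn_block_height);
    let blocks_per_year = 52_596;
    let stx_reward = if effective_ht < blocks_per_year * 4 {
        1000
    } else if effective_ht < blocks_per_year * 8 {
        500
    } else if effective_ht < blocks_per_year * 12 {
        250
    } else {
        125
    };
    stx_reward * u128::from(MICROSTACKS_PER_STACKS)
}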
+ current_epoch = StacksEpochId::Epoch31; + } + StacksEpochId::Epoch31 => { + panic!("No defined transition from Epoch31 forward") } } } @@ -4942,8 +4919,7 @@ impl StacksChainState { )?; Ok((stack_ops, transfer_ops, delegate_ops, vec![])) } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { - // TODO: sbtc ops in epoch 3.0 + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, @@ -5033,7 +5009,7 @@ impl StacksChainState { pox_reward_cycle, pox_start_cycle_info, ), - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { Self::handle_pox_cycle_start_pox_4( clarity_tx, pox_reward_cycle, @@ -5758,6 +5734,8 @@ impl StacksChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( + evaluated_epoch, + mainnet, u64::from(chain_tip_burn_header_height), burn_dbconn.context.first_block_height, ); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6b6f523f88..7fed3e9a46 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -307,6 +307,7 @@ impl DBConfig { StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch31 => version_u32 >= 3 && version_u32 <= 8, } } } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e9de9139a2..aef9627d15 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -8725,6 +8725,7 @@ pub mod test { StacksEpochId::Epoch24 => self.get_stacks_epoch(5), StacksEpochId::Epoch25 => self.get_stacks_epoch(6), StacksEpochId::Epoch30 => self.get_stacks_epoch(7), + StacksEpochId::Epoch31 => self.get_stacks_epoch(8), } } fn get_pox_payout_addrs( diff --git a/stackslib/src/chainstate/stacks/index/bits.rs b/stackslib/src/chainstate/stacks/index/bits.rs index e212b03299..6397cee3a3 100644 --- a/stackslib/src/chainstate/stacks/index/bits.rs +++ b/stackslib/src/chainstate/stacks/index/bits.rs @@ -29,7 +29,7 @@ use stacks_common::util::macros::is_trace; use crate::chainstate::stacks::index::node::{ clear_backptr, ConsensusSerializable, TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, - TrieNodeID, TrieNodeType, TriePtr, TRIEPATH_MAX_LEN, TRIEPTR_SIZE, + TrieNodeID, TrieNodeType, TriePtr, TRIEPTR_SIZE, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::{BlockMap, Error, MarfTrieId, TrieLeaf}; @@ -55,15 +55,15 @@ pub fn path_from_bytes(r: &mut R) -> Result, Error> { } })?; - if lenbuf[0] as usize > TRIEPATH_MAX_LEN { + if lenbuf[0] as usize > TRIEHASH_ENCODED_SIZE { trace!( "Path length is {} (expected <= {})", lenbuf[0], - TRIEPATH_MAX_LEN + TRIEHASH_ENCODED_SIZE ); return Err(Error::CorruptionError(format!( "Node path is longer than {} bytes (got {})", - TRIEPATH_MAX_LEN, lenbuf[0] + TRIEHASH_ENCODED_SIZE, lenbuf[0] ))); } @@ -326,7 +326,7 @@ pub fn read_nodetype_at_head_nohash( /// node hash id ptrs & ptr data path /// /// X is fixed and determined by the TrieNodeType variant. -/// Y is variable, but no more than TriePath::len(). 
+/// Y is variable, but no more than TrieHash::len(). /// /// If `read_hash` is false, then the contents of the node hash are undefined. fn inner_read_nodetype_at_head( diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index 7f92efdd8b..7547fd6d80 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -40,7 +40,7 @@ use crate::chainstate::stacks::index::bits::{ }; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::{trie_sql, ClarityMarfTrieId, Error, MarfTrieId, TrieLeaf}; use crate::util_lib::db::{ @@ -420,7 +420,7 @@ pub mod test { } } else { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -443,7 +443,7 @@ pub mod test { for (i, block_data) in data.iter().enumerate() { test_debug!("Read block {}", i); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let read_time = SystemTime::now(); diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 4123b1310a..5a7da69e52 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -42,7 +42,7 @@ use crate::chainstate::stacks::index::bits::{ }; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{NodeHashReader, TrieStorageConnection}; use crate::chainstate::stacks::index::{trie_sql, ClarityMarfTrieId, Error, MarfTrieId, TrieLeaf}; diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index d5dd77c51f..a4082627fd 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -28,14 +28,14 @@ use stacks_common::util::log; use crate::chainstate::stacks::index::bits::{get_leaf_hash, get_node_hash, read_root_hash}; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, CursorError, TrieCursor, TrieNode, TrieNode16, - TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, TRIEPTR_SIZE, + TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, TRIEPTR_SIZE, }; use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, TrieStorageTransaction, }; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHashExtension, TrieLeaf, TrieMerkleProof, + ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof, }; use crate::util_lib::db::Error as db_error; @@ -122,11 +122,39 @@ pub trait MarfConnection { fn sqlite_conn(&self) -> &Connection; + /// Get and check a value against get_from_hash + /// (test only) + #[cfg(test)] + fn get_and_check_with_hash(&mut 
self, block_hash: &T, key: &str) {
+        let res = self.with_conn(|c| MARF::get_by_key(c, block_hash, key));
+        let res_with_hash =
+            self.with_conn(|c| MARF::get_by_hash(c, block_hash, &TrieHash::from_key(key)));
+        match (res, res_with_hash) {
+            (Ok(Some(x)), Ok(Some(y))) => {
+                assert_eq!(x, y);
+            }
+            (Ok(None), Ok(None)) => {}
+            (Err(_), Err(_)) => {}
+            (x, y) => {
+                panic!("Inconsistency: {x:?} != {y:?}");
+            }
+        }
+    }
+
+    #[cfg(not(test))]
+    fn get_and_check_with_hash(&mut self, _block_hash: &T, _key: &str) {}
+
    /// Resolve a key from the MARF to a MARFValue with respect to the given block height.
    fn get(&mut self, block_hash: &T, key: &str) -> Result<Option<MARFValue>, Error> {
+        self.get_and_check_with_hash(block_hash, key);
        self.with_conn(|c| MARF::get_by_key(c, block_hash, key))
    }

+    /// Resolve a TrieHash from the MARF to a MARFValue with respect to the given block height.
+    fn get_from_hash(&mut self, block_hash: &T, th: &TrieHash) -> Result<Option<MARFValue>, Error> {
+        self.with_conn(|c| MARF::get_by_hash(c, block_hash, th))
+    }
+
    fn get_with_proof(
        &mut self,
        block_hash: &T,
@@ -142,6 +170,21 @@
        })
    }

+    fn get_with_proof_from_hash(
+        &mut self,
+        block_hash: &T,
+        hash: &TrieHash,
+    ) -> Result<Option<(MARFValue, TrieMerkleProof<T>)>, Error> {
+        self.with_conn(|conn| {
+            let marf_value = match MARF::get_by_path(conn, block_hash, hash)? {
+                None => return Ok(None),
+                Some(x) => x,
+            };
+            let proof = TrieMerkleProof::from_path(conn, hash, &marf_value, block_hash)?;
+            Ok(Some((marf_value, proof)))
+        })
+    }
+
    fn get_block_at_height(&mut self, height: u32, tip: &T) -> Result<Option<T>, Error> {
        self.with_conn(|c| MARF::get_block_at_height(c, height, tip))
    }
@@ -781,7 +824,7 @@ impl<T: MarfTrieId> MARF<T> {
    fn walk_cow(
        storage: &mut TrieStorageTransaction<T>,
        block_hash: &T,
-        path: &TriePath,
+        path: &TrieHash,
    ) -> Result<TrieCursor<T>, Error> {
        let block_id = storage.get_block_identifier(block_hash);
        MARF::extend_trie(storage, block_hash)?;
@@ -886,7 +929,7 @@ impl<T: MarfTrieId> MARF<T> {
    fn walk(
        storage: &mut TrieStorageConnection<T>,
        block_hash: &T,
-        path: &TriePath,
+        path: &TrieHash,
    ) -> Result<(TrieCursor<T>, TrieNodeType), Error> {
        storage.open_block(block_hash)?;
@@ -994,7 +1037,7 @@ impl<T: MarfTrieId> MARF<T> {
    pub fn get_path(
        storage: &mut TrieStorageConnection<T>,
        block_hash: &T,
-        path: &TriePath,
+        path: &TrieHash,
    ) -> Result<Option<TrieLeaf>, Error> {
        trace!("MARF::get_path({:?}) {:?}", block_hash, path);
@@ -1045,7 +1088,7 @@ impl<T: MarfTrieId> MARF<T> {
    fn do_insert_leaf(
        storage: &mut TrieStorageTransaction<T>,
        block_hash: &T,
-        path: &TriePath,
+        path: &TrieHash,
        leaf_value: &TrieLeaf,
        update_skiplist: bool,
    ) -> Result<(), Error> {
@@ -1076,7 +1119,7 @@ impl<T: MarfTrieId> MARF<T> {
    pub fn insert_leaf(
        storage: &mut TrieStorageTransaction<T>,
        block_hash: &T,
-        path: &TriePath,
+        path: &TrieHash,
        value: &TrieLeaf,
    ) -> Result<(), Error> {
        if storage.readonly() {
@@ -1089,7 +1132,7 @@ impl<T: MarfTrieId> MARF<T> {
    pub fn insert_leaf_in_batch(
        storage: &mut TrieStorageTransaction<T>,
        block_hash: &T,
-        path: &TriePath,
+        path: &TrieHash,
        value: &TrieLeaf,
    ) -> Result<(), Error> {
        if storage.readonly() {
@@ -1123,6 +1166,35 @@ impl<T: MarfTrieId> MARF<T> {
        Ok(MARF::from_storage(file_storage))
    }

+    pub fn get_by_path(
+        storage: &mut TrieStorageConnection<T>,
+        block_hash: &T,
+        path: &TrieHash,
+    ) -> Result<Option<MARFValue>, Error> {
+        let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id();
+
+        let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e {
+            Error::NotFoundError => Ok(None),
+            _ => Err(e),
+        });
+
+        // restore
+        storage
+            .open_block_maybe_id(&cur_block_hash, cur_block_id)
+            .map_err(|e| {
+                warn!(
+                    "Failed to re-open {} {:?}: {:?}",
+                    &cur_block_hash, cur_block_id, &e
+ ); + warn!("Result of failed path lookup '{}': {:?}", path, &result); + e + })?; + + result.map(|option_result| option_result.map(|leaf| leaf.data)) + } + + /// Load up a MARF value by key, given a handle to the storage connection and a tip to work off + /// of. pub fn get_by_key( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1130,7 +1202,7 @@ impl MARF { ) -> Result, Error> { let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { Error::NotFoundError => Ok(None), @@ -1152,6 +1224,35 @@ impl MARF { result.map(|option_result| option_result.map(|leaf| leaf.data)) } + /// Load up a MARF value by TrieHash, given a handle to the storage connection and a tip to + /// work off of. + pub fn get_by_hash( + storage: &mut TrieStorageConnection, + block_hash: &T, + path: &TrieHash, + ) -> Result, Error> { + let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); + + let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }); + + // restore + storage + .open_block_maybe_id(&cur_block_hash, cur_block_id) + .map_err(|e| { + warn!( + "Failed to re-open {} {:?}: {:?}", + &cur_block_hash, cur_block_id, &e + ); + warn!("Result of failed hash lookup '{}': {:?}", path, &result); + e + })?; + + result.map(|option_result| option_result.map(|leaf| leaf.data)) + } + pub fn get_block_height_miner_tip( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1262,7 +1363,7 @@ impl MARF { .zip(values[0..last].iter()) .try_for_each(|((index, key), value)| { let marf_leaf = TrieLeaf::from_value(&[], value.clone()); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); if eta_enabled { let updated_progress = 100 * index / last; @@ -1280,7 +1381,7 @@ impl MARF { if result.is_ok() { // last insert updates the root with the skiplist hash let marf_leaf = TrieLeaf::from_value(&[], values[last].clone()); - let path = TriePath::from_key(&keys[last]); + let path = TrieHash::from_key(&keys[last]); result = MARF::insert_leaf(conn, block_hash, &path, &marf_leaf); } @@ -1320,6 +1421,20 @@ impl MARF { Ok(Some((marf_value, proof))) } + pub fn get_with_proof_from_hash( + &mut self, + block_hash: &T, + path: &TrieHash, + ) -> Result)>, Error> { + let mut conn = self.storage.connection(); + let marf_value = match MARF::get_by_path(&mut conn, block_hash, &path)? { + None => return Ok(None), + Some(x) => x, + }; + let proof = TrieMerkleProof::from_path(&mut conn, &path, &marf_value, block_hash)?; + Ok(Some((marf_value, proof))) + } + pub fn get_bhh_at_height(&mut self, block_hash: &T, height: u32) -> Result, Error> { MARF::get_block_at_height(&mut self.storage.connection(), height, block_hash) } @@ -1356,14 +1471,14 @@ impl MARF { return Err(Error::ReadOnlyError); } let marf_leaf = TrieLeaf::from_value(&[], value); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); self.insert_raw(path, marf_leaf) } /// Insert the given (key, value) pair into the MARF. Inserting the same key twice silently /// overwrites the existing key. Succeeds if there are no storage errors. 
/// Must be called after a call to .begin() (will fail otherwise) - pub fn insert_raw(&mut self, path: TriePath, marf_leaf: TrieLeaf) -> Result<(), Error> { + pub fn insert_raw(&mut self, path: TrieHash, marf_leaf: TrieLeaf) -> Result<(), Error> { if self.storage.readonly() { return Err(Error::ReadOnlyError); } diff --git a/stackslib/src/chainstate/stacks/index/mod.rs b/stackslib/src/chainstate/stacks/index/mod.rs index eb082747c5..9fee7ab2d6 100644 --- a/stackslib/src/chainstate/stacks/index/mod.rs +++ b/stackslib/src/chainstate/stacks/index/mod.rs @@ -151,71 +151,6 @@ impl MarfTrieId for BurnchainHeaderHash {} #[cfg(test)] impl MarfTrieId for BlockHeaderHash {} -pub trait TrieHashExtension { - fn from_empty_data() -> TrieHash; - fn from_data(data: &[u8]) -> TrieHash; - fn from_data_array>(data: &[B]) -> TrieHash; - fn to_string(&self) -> String; -} - -impl TrieHashExtension for TrieHash { - /// TrieHash of zero bytes - fn from_empty_data() -> TrieHash { - // sha2-512/256 hash of empty string. - // this is used so frequently it helps performance if we just have a constant for it. - TrieHash([ - 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, - 0x14, 0x06, 0x9b, 0xdd, 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, 0x1e, - 0xce, 0xf0, 0x96, 0x7a, - ]) - } - - /// TrieHash from bytes - fn from_data(data: &[u8]) -> TrieHash { - if data.len() == 0 { - return TrieHash::from_empty_data(); - } - - let mut tmp = [0u8; 32]; - - let mut hasher = TrieHasher::new(); - hasher.update(data); - tmp.copy_from_slice(hasher.finalize().as_slice()); - - TrieHash(tmp) - } - - fn from_data_array>(data: &[B]) -> TrieHash { - if data.len() == 0 { - return TrieHash::from_empty_data(); - } - - let mut tmp = [0u8; 32]; - - let mut hasher = TrieHasher::new(); - - for item in data.iter() { - hasher.update(item); - } - tmp.copy_from_slice(hasher.finalize().as_slice()); - TrieHash(tmp) - } - - /// Convert to a String that can be used in e.g. sqlite - fn to_string(&self) -> String { - let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", - self.0[0], self.0[1], self.0[2], self.0[3], - self.0[4], self.0[5], self.0[6], self.0[7], - self.0[8], self.0[9], self.0[10], self.0[11], - self.0[12], self.0[13], self.0[14], self.0[15], - self.0[16], self.0[17], self.0[18], self.0[19], - self.0[20], self.0[21], self.0[22], self.0[23], - self.0[24], self.0[25], self.0[26], self.0[27], - self.0[28], self.0[29], self.0[30], self.0[31]); - s - } -} - /// Structure that holds the actual data in a MARF leaf node. /// It only stores the hash of some value string, but we add 8 extra bytes for future extensions. /// If not used (the rule today), then they should all be 0. 
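// Throughout this change `TriePath` is collapsed into `TrieHash`: a MARF path
// *is* the SHA512/256 hash of its key, so the separate newtype bought nothing.
// A standalone sketch of what `TrieHash::from_key` computes, using the `sha2`
// crate as an assumed stand-in for stackslib's `TrieHasher`:

use sha2::{Digest, Sha512_256};

fn trie_path_from_key(key: &str) -> [u8; 32] {
    let mut path = [0u8; 32];
    path.copy_from_slice(&Sha512_256::digest(key.as_bytes()));
    path
}

// `MarfConnection::get` derives this path from the key internally, while the
// new `get_from_hash` / `get_with_proof_from_hash` entry points accept the
// 32-byte path directly, so callers that already hold the hash can skip the
// rehash (and, in test builds, `get_and_check_with_hash` asserts that the two
// lookups agree).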
diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 19e8aa327f..da9fc8bbd2 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -32,8 +32,8 @@ use crate::chainstate::stacks::index::bits::{ get_path_byte_len, get_ptrs_byte_len, path_from_bytes, ptrs_from_bytes, write_path_to_bytes, }; use crate::chainstate::stacks::index::{ - BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHashExtension, TrieHasher, - TrieLeaf, MARF_VALUE_ENCODED_SIZE, + BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHasher, TrieLeaf, + MARF_VALUE_ENCODED_SIZE, }; #[derive(Debug, Clone, PartialEq)] @@ -106,23 +106,6 @@ fn ptrs_consensus_hash( Ok(()) } -/// A path in the Trie is the SHA2-512/256 hash of its key. -pub struct TriePath([u8; 32]); -impl_array_newtype!(TriePath, u8, 32); -impl_array_hexstring_fmt!(TriePath); -impl_byte_array_newtype!(TriePath, u8, 32); - -pub const TRIEPATH_MAX_LEN: usize = 32; - -impl TriePath { - pub fn from_key(k: &str) -> TriePath { - let h = TrieHash::from_data(k.as_bytes()); - let mut hb = [0u8; TRIEPATH_MAX_LEN]; - hb.copy_from_slice(h.as_bytes()); - TriePath(hb) - } -} - /// All Trie nodes implement the following methods: pub trait TrieNode { /// Node ID for encoding/decoding @@ -339,7 +322,7 @@ impl TriePtr { /// nodes to visit when updating the root node hash. #[derive(Debug, Clone, PartialEq)] pub struct TrieCursor { - pub path: TriePath, // the path to walk + pub path: TrieHash, // the path to walk pub index: usize, // index into the path pub node_path_index: usize, // index into the currently-visited node's compressed path pub nodes: Vec, // list of nodes this cursor visits @@ -349,7 +332,7 @@ pub struct TrieCursor { } impl TrieCursor { - pub fn new(path: &TriePath, root_ptr: TriePtr) -> TrieCursor { + pub fn new(path: &TrieHash, root_ptr: TriePtr) -> TrieCursor { TrieCursor { path: path.clone(), index: 0, diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 815def9c91..85e91ebefb 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -35,14 +35,13 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::marf::MARF; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, ConsensusSerializable, CursorError, TrieCursor, - TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePath, - TriePtr, + TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, ProofTrieNode, ProofTriePtr, - TrieHashExtension, TrieLeaf, TrieMerkleProof, TrieMerkleProofType, + TrieLeaf, TrieMerkleProof, TrieMerkleProofType, }; impl ConsensusSerializable<()> for ProofTrieNode { @@ -1004,7 +1003,7 @@ impl TrieMerkleProof { /// * segment proof i+1 must be a prefix of segment proof i /// * segment proof 0 must end in a leaf /// * all segment proofs must end in a Node256 (a root) - fn is_proof_well_formed(proof: &Vec>, expected_path: &TriePath) -> bool { + fn is_proof_well_formed(proof: &Vec>, expected_path: &TrieHash) -> bool { if proof.len() == 0 { trace!("Proof is empty"); 
return false; @@ -1048,7 +1047,7 @@ impl TrieMerkleProof { } }; - // first path bytes must be the expected TriePath + // first path bytes must be the expected TrieHash if expected_path.as_bytes().to_vec() != path_bytes { trace!( "Invalid proof -- path bytes {:?} differs from the expected path {:?}", @@ -1121,7 +1120,7 @@ impl TrieMerkleProof { /// NOTE: Trie root hashes are globally unique by design, even if they represent the same contents, so the root_to_block map is bijective with high probability. pub fn verify_proof( proof: &Vec>, - path: &TriePath, + path: &TrieHash, value: &MARFValue, root_hash: &TrieHash, root_to_block: &HashMap, @@ -1351,7 +1350,7 @@ impl TrieMerkleProof { /// Verify this proof pub fn verify( &self, - path: &TriePath, + path: &TrieHash, marf_value: &MARFValue, root_hash: &TrieHash, root_to_block: &HashMap, @@ -1362,7 +1361,7 @@ impl TrieMerkleProof { /// Walk down the trie pointed to by s until we reach a backptr or a leaf fn walk_to_leaf_or_backptr( storage: &mut TrieStorageConnection, - path: &TriePath, + path: &TrieHash, ) -> Result<(TrieCursor, TrieNodeType, TriePtr), Error> { trace!( "Walk path {:?} from {:?} to the first backptr", @@ -1438,7 +1437,7 @@ impl TrieMerkleProof { /// If the path doesn't resolve, return an error (NotFoundError) pub fn from_path( storage: &mut TrieStorageConnection, - path: &TriePath, + path: &TrieHash, expected_value: &MARFValue, root_block_header: &T, ) -> Result, Error> { @@ -1562,7 +1561,7 @@ impl TrieMerkleProof { root_block_header: &T, ) -> Result, Error> { let marf_value = MARFValue::from_value(value); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); TrieMerkleProof::from_path(storage, &path, &marf_value, root_block_header) } @@ -1572,7 +1571,7 @@ impl TrieMerkleProof { value: &MARFValue, root_block_header: &T, ) -> Result, Error> { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); TrieMerkleProof::from_path(storage, &path, value, root_block_header) } } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 6994c7ad05..6e7ca815c9 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -46,13 +46,12 @@ use crate::chainstate::stacks::index::file::{TrieFile, TrieFileNodeHashReader}; use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::profile::TrieBenchmark; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHashExtension, TrieHasher, - TrieLeaf, + trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHasher, TrieLeaf, }; use crate::util_lib::db::{ sql_pragma, sqlite_open, tx_begin_immediate, tx_busy_handler, Error as db_error, diff --git a/stackslib/src/chainstate/stacks/index/test/cache.rs b/stackslib/src/chainstate/stacks/index/test/cache.rs index 5a0bc41d00..1abd0e741a 100644 --- a/stackslib/src/chainstate/stacks/index/test/cache.rs +++ b/stackslib/src/chainstate/stacks/index/test/cache.rs @@ -105,7 +105,7 @@ fn test_marf_with_cache( } } else { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = 
TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -128,7 +128,7 @@ fn test_marf_with_cache( for (i, block_data) in data.iter().enumerate() { test_debug!("Read block {}", i); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let read_time = SystemTime::now(); diff --git a/stackslib/src/chainstate/stacks/index/test/file.rs b/stackslib/src/chainstate/stacks/index/test/file.rs index 499198aca5..19ac5e60e4 100644 --- a/stackslib/src/chainstate/stacks/index/test/file.rs +++ b/stackslib/src/chainstate/stacks/index/test/file.rs @@ -106,7 +106,7 @@ fn test_migrate_existing_trie_blobs() { marf.begin(&last_block_header, &block_header).unwrap(); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -147,7 +147,7 @@ fn test_migrate_existing_trie_blobs() { // verify that we can read everything from the blobs for (i, block_data) in data.iter().enumerate() { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let leaf = MARF::get_path( diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index b66fc4dd8a..e7535e9553 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -32,9 +32,7 @@ use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::test::*; use crate::chainstate::stacks::index::trie::*; -use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, TrieHashExtension, TrieLeaf, -}; +use crate::chainstate::stacks::index::{ClarityMarfTrieId, Error, MARFValue, TrieLeaf}; #[test] fn marf_insert_different_leaf_same_block_100() { @@ -52,7 +50,7 @@ fn marf_insert_different_leaf_same_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); @@ -117,7 +115,7 @@ fn marf_insert_different_leaf_different_path_different_block_100() { marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &block_header) .unwrap(); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -140,7 +138,7 @@ fn marf_insert_different_leaf_different_path_different_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, i as u8, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); let leaf = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &path) @@ -189,7 +187,7 @@ fn marf_insert_same_leaf_different_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); @@ -197,7 +195,7 @@ fn marf_insert_same_leaf_different_block_100() { marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &next_block_header) .unwrap(); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -271,7 +269,7 @@ fn marf_insert_leaf_sequence_2() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let prior_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); marf.commit().unwrap(); @@ -294,7 +292,7 @@ fn marf_insert_leaf_sequence_2() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); let leaf = MARF::get_path( @@ -348,7 +346,7 @@ fn marf_insert_leaf_sequence_100() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); marf.commit().unwrap(); let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); @@ -372,7 +370,7 @@ fn marf_insert_leaf_sequence_100() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); eprintln!("Finding value inserted at {}", &next_block_header); @@ -567,7 +565,7 @@ where let next_path = path_gen(i, path.clone()); - let triepath = TriePath::from_bytes(&next_path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&next_path[..]).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); debug!("----------------"); @@ -582,7 +580,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, - &TriePath::from_bytes(&next_path[..]).unwrap(), + &TrieHash::from_bytes(&next_path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -603,7 +601,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, - &TriePath::from_bytes(&prev_path[..]).unwrap(), + &TrieHash::from_bytes(&prev_path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -675,7 +673,7 @@ where // add a leaf at the end of the path let next_path = path_gen(i, path.clone()); - let triepath = TriePath::from_bytes(&next_path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&next_path[..]).unwrap(); let value = MARFValue([i as u8; 40]); assert_eq!( @@ -847,7 +845,7 @@ fn marf_merkle_verify_backptrs() { marf.commit().unwrap(); marf.begin(&block_header_1, &block_header_2).unwrap(); marf.insert_raw( - 
TriePath::from_bytes(&path_2[..]).unwrap(), + TrieHash::from_bytes(&path_2[..]).unwrap(), TrieLeaf::new(&vec![], &[20 as u8; 40].to_vec()), ) .unwrap(); @@ -865,7 +863,7 @@ fn marf_merkle_verify_backptrs() { marf.commit().unwrap(); marf.begin(&block_header_2, &block_header_3).unwrap(); marf.insert_raw( - TriePath::from_bytes(&path_3[..]).unwrap(), + TrieHash::from_bytes(&path_3[..]).unwrap(), TrieLeaf::new(&vec![], &[21 as u8; 40].to_vec()), ) .unwrap(); @@ -922,7 +920,7 @@ where let (path, next_block_header) = path_gen(i); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -944,7 +942,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -998,7 +996,7 @@ where let i1 = i % 256; let (path, _next_block_header) = path_gen(i); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1011,7 +1009,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -1139,7 +1137,7 @@ fn marf_split_leaf_path() { .unwrap(); let path = [0u8; 32]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new(&vec![], &[0u8; 40].to_vec()); debug!("----------------"); @@ -1161,7 +1159,7 @@ fn marf_split_leaf_path() { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_2 = TrieLeaf::new(&vec![], &[1u8; 40].to_vec()); debug!("----------------"); @@ -1602,7 +1600,7 @@ fn marf_read_random_1048576_4096_file_storage() { let path = TrieHash::from_data(&seed[..]).as_bytes()[0..32].to_vec(); seed = path.clone(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1615,7 +1613,7 @@ fn marf_read_random_1048576_4096_file_storage() { let read_value = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -1896,7 +1894,7 @@ fn marf_insert_flush_to_different_block() { None }; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1919,7 +1917,7 @@ fn marf_insert_flush_to_different_block() { let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &target_block, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -2017,7 +2015,7 @@ fn marf_insert_flush_to_different_block() { 24, 25, 26, 27, 28, 29, i0 as u8, i1 as u8, ]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -2037,7 +2035,7 @@ fn marf_insert_flush_to_different_block() { let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &read_from_block, - &TriePath::from_bytes(&path[..]).unwrap(), + 
&TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -2074,7 +2072,7 @@ fn test_marf_read_only() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let leaf = TrieLeaf::new( &vec![], &[ @@ -2138,13 +2136,13 @@ fn test_marf_begin_from_sentinel_twice() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_1 = TriePath::from_bytes(&path_1[..]).unwrap(); + let triepath_1 = TrieHash::from_bytes(&path_1[..]).unwrap(); let path_2 = [ 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_1 = TrieLeaf::new(&vec![], &vec![1u8; 40]); let value_2 = TrieLeaf::new(&vec![], &vec![2u8; 40]); @@ -2210,14 +2208,14 @@ fn test_marf_unconfirmed() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_1 = TriePath::from_bytes(&path_1[..]).unwrap(); + let triepath_1 = TrieHash::from_bytes(&path_1[..]).unwrap(); let value_1 = TrieLeaf::new(&vec![], &vec![1u8; 40]); let path_2 = [ 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_2 = TrieLeaf::new(&vec![], &vec![2u8; 40]); let block_header = StacksBlockId([0x33u8; 32]); diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index 2c3b04698c..0ccdffa78b 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -31,9 +31,7 @@ use crate::chainstate::stacks::index::node::*; use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::trie::*; -use crate::chainstate::stacks::index::{ - MARFValue, MarfTrieId, TrieHashExtension, TrieLeaf, TrieMerkleProof, -}; +use crate::chainstate::stacks::index::{MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof}; use crate::chainstate::stacks::{BlockHeaderHash, TrieHash}; pub mod cache; @@ -108,7 +106,7 @@ pub fn merkle_test( value: &Vec<u8>, ) -> () { let (_, root_hash) = Trie::read_root(s).unwrap(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let block_header = BlockHeaderHash([0u8; 32]); s.open_block(&block_header).unwrap(); @@ -147,7 +145,7 @@ pub fn merkle_test_marf( s.open_block(header).unwrap(); let (_, root_hash) = Trie::read_root(s).unwrap(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let mut marf_value = [0u8; 40]; marf_value.copy_from_slice(&value[0..40]); @@ -199,7 +197,7 @@ pub fn merkle_test_marf_key_value( test_debug!("---------"); let root_to_block = root_to_block.unwrap_or_else(|| s.read_root_to_block_table().unwrap()); - let triepath = TriePath::from_key(key); + let triepath = TrieHash::from_key(key); let marf_value = MARFValue::from_value(value); assert!(proof.verify(&triepath, &marf_value, &root_hash,
&root_to_block)); diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs b/stackslib/src/chainstate/stacks/index/test/node.rs index a98491595d..227adda439 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -4215,7 +4215,7 @@ fn trie_cursor_walk_full() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4313,7 +4313,7 @@ fn trie_cursor_walk_1() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4406,7 +4406,7 @@ fn trie_cursor_walk_2() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4496,7 +4496,7 @@ fn trie_cursor_walk_3() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4585,7 +4585,7 @@ fn trie_cursor_walk_4() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4673,7 +4673,7 @@ fn trie_cursor_walk_5() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4760,7 +4760,7 @@ fn trie_cursor_walk_6() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4845,7 +4845,7 @@ fn trie_cursor_walk_10() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4937,7 +4937,7 @@ fn trie_cursor_walk_20() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -5028,7 +5028,7 @@ fn trie_cursor_walk_32() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let walk_point = nodes[0].clone(); diff --git a/stackslib/src/chainstate/stacks/index/test/proofs.rs b/stackslib/src/chainstate/stacks/index/test/proofs.rs index 9642bfcdc5..9bd24af548 100644 --- a/stackslib/src/chainstate/stacks/index/test/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/test/proofs.rs @@ -59,7 +59,7 @@ fn verifier_catches_stale_proof() { let new_value = m.get(&block_2, &k1).unwrap().unwrap(); test_debug!("NEW: {:?}", new_value); - let path = TriePath::from_key(&k1); + let path = TrieHash::from_key(&k1); merkle_test_marf_key_value(&mut m.borrow_storage_backend(), &block_2, &k1, &new_v, None); @@ -75,7 +75,7 @@ fn verifier_catches_stale_proof() { .unwrap(); // the verifier should not allow a proof from k1 to old_v from block_2 - let triepath_2 = TriePath::from_key(&k1); + let triepath_2 = 
TrieHash::from_key(&k1); let marf_value_2 = MARFValue::from_value(&old_v); assert!(!proof_2.verify(&triepath_2, &marf_value_2, &root_hash_2, &root_to_block)); @@ -86,7 +86,7 @@ fn verifier_catches_stale_proof() { .unwrap(); // the verifier should allow a proof from k1 to old_v from block_1 - let triepath_1 = TriePath::from_key(&k1); + let triepath_1 = TrieHash::from_key(&k1); let marf_value_1 = MARFValue::from_value(&old_v); assert!(proof_1.verify(&triepath_1, &marf_value_1, &root_hash_1, &root_to_block)); } @@ -169,7 +169,7 @@ fn ncc_verifier_catches_stale_proof() { TrieMerkleProof::from_entry(&mut m.borrow_storage_backend(), &k1, &another_v, &block_5) .unwrap(); - let triepath_4 = TriePath::from_key(&k1); + let triepath_4 = TrieHash::from_key(&k1); let marf_value_4 = MARFValue::from_value(&another_v); let root_to_block = { m.borrow_storage_backend() @@ -186,7 +186,7 @@ fn ncc_verifier_catches_stale_proof() { TrieMerkleProof::from_entry(&mut m.borrow_storage_backend(), &k1, &old_v, &block_2) .unwrap(); - let triepath_4 = TriePath::from_key(&k1); + let triepath_4 = TrieHash::from_key(&k1); let marf_value_4 = MARFValue::from_value(&old_v); let root_to_block = { m.borrow_storage_backend() diff --git a/stackslib/src/chainstate/stacks/index/test/storage.rs b/stackslib/src/chainstate/stacks/index/test/storage.rs index a996bc7186..fdd3e30191 100644 --- a/stackslib/src/chainstate/stacks/index/test/storage.rs +++ b/stackslib/src/chainstate/stacks/index/test/storage.rs @@ -164,7 +164,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { ]; path_bytes[24..32].copy_from_slice(&i.to_be_bytes()); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); confirmed_marf.insert_raw(path.clone(), value).unwrap(); } @@ -213,7 +213,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { ]; path_bytes[24..32].copy_from_slice(&i.to_be_bytes()); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); // NOTE: may have been overwritten; just check for presence assert!( @@ -235,7 +235,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { path_bytes[16..24].copy_from_slice(&j.to_be_bytes()); } - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[(i + 128) as u8; 40].to_vec()); new_inserted.push((path.clone(), value.clone())); diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index ca2c0ced65..9bac45508c 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -137,7 +137,7 @@ fn trie_cursor_try_attach_leaf() { path[i] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); // end of path -- cursor points to the insertion point. 
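The rename running through these test hunks is mechanical: `TriePath` is removed and `TrieHash` now serves as both the 32-byte path type and the hash type. A minimal sketch of the two constructors the updated tests lean on (illustrative values only, not part of the patch):

use stacks_common::types::chainstate::TrieHash;

// Exactly 32 raw bytes make a trie path; from_bytes yields None for any other length.
let path_bytes: [u8; 32] = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
    23, 24, 25, 26, 27, 28, 29, 30, 31,
];
let triepath = TrieHash::from_bytes(&path_bytes[..]).unwrap();

// A Clarity storage key is hashed down to the same 32-byte path type.
let keyed_path = TrieHash::from_key("vm::ST000000000000000000002AMW42H.pox::1::foo");
assert_eq!(triepath.as_bytes().len(), keyed_path.as_bytes().len());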
@@ -164,7 +164,7 @@ fn trie_cursor_try_attach_leaf() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -194,7 +194,7 @@ fn trie_cursor_try_attach_leaf() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -250,7 +250,7 @@ fn trie_cursor_promote_leaf_to_node4() { // add a single leaf let mut c = TrieCursor::new( - &TriePath::from_bytes(&[ + &TrieHash::from_bytes(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]) @@ -275,7 +275,7 @@ fn trie_cursor_promote_leaf_to_node4() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&[ + &TrieHash::from_bytes(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ]) @@ -317,7 +317,7 @@ fn trie_cursor_promote_leaf_to_node4() { path[i] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, node, node_hash) = walk_to_insertion_point(&mut f, &mut c); // end of path -- cursor points to the insertion point @@ -342,7 +342,7 @@ fn trie_cursor_promote_leaf_to_node4() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -372,7 +372,7 @@ fn trie_cursor_promote_leaf_to_node4() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -467,7 +467,7 @@ fn trie_cursor_promote_node4_to_node16() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); f.open_block(&block_header).unwrap(); @@ -486,7 +486,7 @@ fn trie_cursor_promote_node4_to_node16() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -515,7 +515,7 @@ fn trie_cursor_promote_node4_to_node16() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -536,7 +536,7 @@ fn trie_cursor_promote_node4_to_node16() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -627,7 +627,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -648,7 +648,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() 
.unwrap(), @@ -677,7 +677,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -698,7 +698,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -734,7 +734,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = j + 40; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -755,7 +755,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -784,7 +784,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = 129; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -806,7 +806,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -897,7 +897,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -918,7 +918,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -947,7 +947,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -968,7 +968,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1004,7 +1004,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 40; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1024,7 +1024,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1053,7 +1053,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 129; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let 
(nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1074,7 +1074,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1110,7 +1110,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 90; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1131,7 +1131,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1160,7 +1160,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 130; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1181,7 +1181,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1256,7 +1256,7 @@ fn trie_cursor_splice_leaf_4() { path[5 * k + 2] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); test_debug!("Start splice-insert at {:?}", &c); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1283,7 +1283,7 @@ fn trie_cursor_splice_leaf_4() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1349,7 +1349,7 @@ fn trie_cursor_splice_leaf_2() { path[3 * k + 1] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); test_debug!("Start splice-insert at {:?}", &c); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1372,7 +1372,7 @@ fn trie_cursor_splice_leaf_2() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1413,7 +1413,7 @@ where for i in 0..count { eprintln!("{}", i); let path = path_gen(i); - let triepath = TriePath::from_bytes(&path).unwrap(); + let triepath = TrieHash::from_bytes(&path).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1519,7 +1519,7 @@ where for i in 0..count { let path = path_gen(i); - let triepath = TriePath::from_bytes(&path).unwrap(); + let triepath = TrieHash::from_bytes(&path).unwrap(); let value = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &triepath) .unwrap() diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 6c7cc7a08a..65e41cf3ed 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -39,9 +39,7 @@ use crate::chainstate::stacks::index::node::{ use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, }; -use crate::chainstate::stacks::index::{ - Error, 
MarfTrieId, TrieHashExtension, TrieHasher, TrieLeaf, -}; +use crate::chainstate::stacks::index::{Error, MarfTrieId, TrieHasher, TrieLeaf}; /// We don't actually instantiate a Trie, but we still need to pass a type parameter for the /// storage implementation. diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index c9d3b40dce..8134db9d44 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -45,7 +45,7 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::file::TrieFile; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::{trie_sql, BlockMap, Error, MarfTrieId, TrieLeaf}; diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index fed0e70e95..3a8636b3b5 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -422,6 +422,31 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .transpose() } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult<Option<(String, Vec<u8>)>> { + self.marf + .get_with_proof_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? + .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } + fn get_data(&mut self, key: &str) -> InterpreterResult<Option<String>> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf @@ -452,6 +477,36 @@ .transpose() } + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult<Option<String>> { + trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); + self.marf + .get_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "MarfedKV get {:?} off of {:?}: not found", + hash, + &self.chain_tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))?
+ .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } + fn put_all_data(&mut self, _items: Vec<(String, String)>) -> InterpreterResult<()> { error!("Attempted to commit changes to read-only MARF"); panic!("BUG: attempted commit to read-only MARF"); @@ -631,6 +686,36 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .transpose() } + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult<Option<String>> { + trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); + self.marf + .get_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "MarfedKV get {:?} off of {:?}: not found", + hash, + &self.chain_tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? + .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } + fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult<Option<(String, Vec<u8>)>> { self.marf .get_with_proof(&self.chain_tip, key) @@ -653,6 +738,31 @@ .transpose() } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult<Option<(String, Vec<u8>)>> { + self.marf + .get_with_proof_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? + .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } + fn get_side_store(&mut self) -> &Connection { self.marf.sqlite_tx() } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 44eeaa2e07..0bce54dcfb 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1,5 +1,6 @@ use std::ops::{Deref, DerefMut}; +use clarity::types::chainstate::TrieHash; use clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; use clarity::vm::database::sqlite::{ @@ -1232,10 +1233,24 @@ impl ClarityBackingStore for MemoryBackingStore { SqliteConnection::get(self.get_side_store(), key) } + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult<Option<String>> { + SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) + } + fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult<Option<(String, Vec<u8>)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } + fn get_data_with_proof_from_path( + &mut self, + key: &TrieHash, + ) -> InterpreterResult<Option<(String, Vec<u8>)>> { + Ok( + SqliteConnection::get(self.get_side_store(), key.to_string().as_str())?
+ .map(|x| (x, vec![])), + ) + } + fn get_side_store(&mut self) -> &Connection { &self.side_store } diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 8db6b3043a..e7d8faff0c 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -168,7 +168,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { let (ast, _analysis) = tx .analyze_smart_contract( &boot_code_id("costs-3", false), diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index bb850a784c..ba4dbf14d2 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -18,6 +18,7 @@ use std::collections::HashSet; use clarity::vm::costs::ExecutionCost; use lazy_static::lazy_static; +pub use stacks_common::consts::MICROSTACKS_PER_STACKS; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; pub use stacks_common::types::StacksEpochId; use stacks_common::types::{EpochList as GenericEpochList, StacksEpoch as GenericStacksEpoch}; @@ -46,7 +47,7 @@ pub use stacks_common::consts::{ NETWORK_ID_TESTNET, PEER_NETWORK_EPOCH, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, - PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_3_1, PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, PEER_VERSION_TESTNET_MAJOR, STACKS_EPOCH_MAX, }; @@ -99,7 +100,11 @@ pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 840_360; /// This is Epoch-3.0, activation height proposed in SIP-021 pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 867_867; +/// This is Epoch-3.1, activation height proposed in SIP-029 +pub const BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT: u64 = 875_000; +/// Bitcoin mainline testnet3 activation heights. +/// TODO: No longer used since testnet3 is dead, so remove. pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = @@ -111,6 +116,7 @@ pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 2_583_893; pub const BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT: u64 = 30_000_000; +pub const BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT: u64 = 30_000_001; /// This constant sets the approximate testnet bitcoin height at which 2.5 Xenon /// was reorged back to 2.5 instantiation. This is only used to calculate the @@ -133,8 +139,6 @@ lazy_static! { pub const BOOT_BLOCK_HASH: BlockHeaderHash = BlockHeaderHash([0xff; 32]); pub const BURNCHAIN_BOOT_CONSENSUS_HASH: ConsensusHash = ConsensusHash([0xff; 20]); -pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; - pub const POX_SUNSET_START: u64 = 100_000; pub const POX_SUNSET_END: u64 = POX_SUNSET_START + 400_000; @@ -298,10 +302,17 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]); } @@ -366,10 +377,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]); } @@ -434,10 +452,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 7001, - end_height: STACKS_EPOCH_MAX, + end_height: 8001, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: 8001, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]); } @@ -469,6 +494,10 @@ pub static STACKS_EPOCH_2_5_MARKER: u8 = 0x0a; /// *or greater*. pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0b; +/// Stacks 3.1 epoch marker. All block-commits in 3.1 must have a memo bitfield with this value +/// *or greater*. +pub static STACKS_EPOCH_3_1_MARKER: u8 = 0x0c; + #[test] fn test_ord_for_stacks_epoch() { let epochs = &*STACKS_EPOCHS_MAINNET; @@ -648,6 +677,42 @@ fn test_ord_for_stacks_epoch() { epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch25]), Ordering::Greater ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch23]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch24]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch25]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch30]), + Ordering::Greater + ); } #[test] @@ -711,6 +776,8 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_3_0(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] + fn unit_test_3_1(epoch_2_0_block_height: u64) -> EpochList; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> EpochList; @@ -1350,6 +1417,135 @@ impl StacksEpochExtension for StacksEpoch { ]) } + #[cfg(test)] + fn unit_test_3_1(first_burnchain_height: u64) -> EpochList { + info!( + 
"StacksEpoch unit_test_3_1 first_burn_height = {}", + first_burnchain_height + ); + + EpochList::new(&[ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: first_burnchain_height + 24, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: first_burnchain_height + 28, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height + 28, + end_height: first_burnchain_height + 32, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 32, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + ]) + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> EpochList { info!( @@ -1488,6 +1684,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), StacksEpochId::Epoch25 => StacksEpoch::unit_test_2_5(first_burnchain_height), StacksEpochId::Epoch30 => 
StacksEpoch::unit_test_3_0(first_burnchain_height), + StacksEpochId::Epoch31 => StacksEpoch::unit_test_3_1(first_burnchain_height), } } @@ -1542,8 +1739,8 @@ impl StacksEpochExtension for StacksEpoch { .iter() .max() .expect("FATAL: expect at least one epoch"); - if max_epoch.epoch_id == StacksEpochId::Epoch30 { - assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_2_5)); + if max_epoch.epoch_id == StacksEpochId::Epoch31 { + assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_3_0)); } else { assert!( max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH, diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 01fcac9e89..03447e9bf4 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -48,7 +48,7 @@ use crate::chainstate::stacks::db::test::{ }; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; -use crate::chainstate::stacks::index::{MarfTrieId, TrieHashExtension}; +use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::miner::TransactionResult; use crate::chainstate::stacks::test::codec_all_transactions; use crate::chainstate::stacks::{ diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index bb1cf48f38..cdb3ceb7da 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -230,6 +230,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch25 => ":2.1", // reuse cost estimates in Epoch30 StacksEpochId::Epoch30 => ":2.1", + // reuse cost estimates in Epoch31 + StacksEpochId::Epoch31 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 16fbd7c2d2..7f324c52c8 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1597,6 +1597,7 @@ simulating a miner. SortitionDB::get_canonical_burn_chain_tip(new_sortition_db.conn()).unwrap(); new_sortition_db .evaluate_sortition( + false, &burn_block_header, blockstack_txs, &burnchain, @@ -2094,6 +2095,7 @@ fn analyze_sortition_mev(argv: Vec<String>) { debug!("Re-evaluate sortition at height {}", height); let (next_sn, state_transition) = sortdb .evaluate_sortition( + true, &burn_block.header, burn_block.ops.clone(), &burnchain, @@ -2109,6 +2111,7 @@ let mut sort_tx = sortdb.tx_begin_at_tip(); let tip_pox_id = sort_tx.get_pox_id().unwrap(); let next_sn_nakamoto = BlockSnapshot::make_snapshot_in_epoch( + true, &mut sort_tx, &burnchain, &ancestor_sn.sortition_id, diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs new file mode 100644 index 0000000000..678d4fa46b --- /dev/null +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -0,0 +1,219 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
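Before the handler below: this endpoint takes the hash of a fully-qualified MARF key rather than the key itself, so the client hashes the key. A hedged sketch of deriving that hash for a Clarity data var (the vm:: key shape matches the tests later in this patch; the URL line is illustrative only):

use stacks_common::types::chainstate::TrieHash;

// Clarity data-var keys take the form vm::<contract id>::<StoreType>::<var name>,
// where the numeric segment is the StoreType discriminant (Variable is 1).
let key = "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar";
let marf_key_hash = TrieHash::from_key(key);

// The node answers GET /v2/clarity/marf/<hex hash>, optionally with ?proof=1.
let url = format!("/v2/clarity/marf/{}?proof=1", &marf_key_hash);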
+ +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::CONTRACT_PRINCIPAL_REGEX_STRING; +use lazy_static::lazy_static; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClarityMarfResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option<String>, +} + +#[derive(Clone)] +pub struct RPCGetClarityMarfRequestHandler { + pub marf_key_hash: Option<TrieHash>, +} +impl RPCGetClarityMarfRequestHandler { + pub fn new() -> Self { + Self { + marf_key_hash: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetClarityMarfRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/clarity/marf/(?P<marf_key_hash>[0-9a-f]{64})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v2/clarity/marf/:marf_key_hash" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result<HttpRequestContents, Error> { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let marf_key = if let Some(key_str) = captures.name("marf_key_hash") { + TrieHash::from_hex(key_str.as_str()) + .map_err(|e| Error::Http(400, format!("Invalid hash string: {e:?}")))?
+ } else { + return Err(Error::Http(404, "Missing `marf_key_hash`".to_string())); + }; + + self.marf_key_hash = Some(marf_key); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.marf_key_hash = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let marf_key_hash = self + .marf_key_hash + .take() + .ok_or(NetError::SendError("`marf_key_hash` not set".to_string()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let with_proof = contents.get_with_proof(); + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof_by_hash(&marf_key_hash) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db + .get_data_by_hash(&marf_key_hash) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(ClarityMarfResponse { data, marf_proof }) + }) + }, + ) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Marf key hash not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetClarityMarfRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result<HttpResponsePayload, Error> { + let marf_value: ClarityMarfResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(marf_value)?)
+ } +} + +impl StacksHttpRequest { + pub fn new_getclaritymarf( + host: PeerHost, + marf_key_hash: TrieHash, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/clarity/marf/{}", &marf_key_hash), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_clarity_marf_response(self) -> Result<ClarityMarfResponse, NetError> { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ClarityMarfResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs new file mode 100644 index 0000000000..ee6ec96567 --- /dev/null +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -0,0 +1,272 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::database::clarity_db::ContractDataVarName; +use clarity::vm::database::StoreType; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, MAX_STRING_LEN, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::ContractName; +use lazy_static::lazy_static; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; + +lazy_static!
{ + static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = format!( + "([a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?){{1,{}}}", + MAX_STRING_LEN + ); + static ref METADATA_KEY_REGEX_STRING: String = format!( + r"vm-metadata::(?P<data_type>(\d{{1,2}}))::(?P<var_name>(contract|contract-size|contract-src|contract-data-size|{}))", + *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, + ); +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClarityMetadataResponse { + pub data: String, +} + +#[derive(Clone)] +pub struct RPCGetClarityMetadataRequestHandler { + pub clarity_metadata_key: Option<String>, + pub contract_identifier: Option<QualifiedContractIdentifier>, +} +impl RPCGetClarityMetadataRequestHandler { + pub fn new() -> Self { + Self { + clarity_metadata_key: None, + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetClarityMetadataRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r"^/v2/clarity/metadata/(?P<address>
{})/(?P<contract>{})/(?P<clarity_metadata_key>(analysis)|({}))$", + *STANDARD_PRINCIPAL_REGEX_STRING, + *CONTRACT_NAME_REGEX_STRING, + *METADATA_KEY_REGEX_STRING + )) + .unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key" + } + + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result<HttpRequestContents, Error> { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + + let metadata_key = match captures.name("clarity_metadata_key") { + Some(key_str) => key_str.as_str().to_string(), + None => { + return Err(Error::DecodeError( + "Missing `clarity_metadata_key`".to_string(), + )); + } + }; + + if metadata_key != "analysis" { + // Validate that the metadata key is well-formed. It must be of data type: + // DataMapMeta (5) | VariableMeta (6) | FungibleTokenMeta (7) | NonFungibleTokenMeta (8) + // or Contract (9) followed by a valid contract metadata name + match captures + .name("data_type") + .and_then(|data_type| StoreType::try_from(data_type.as_str()).ok()) + { + Some(data_type) => match data_type { + StoreType::DataMapMeta + | StoreType::VariableMeta + | StoreType::FungibleTokenMeta + | StoreType::NonFungibleTokenMeta => {} + StoreType::Contract => { + if captures + .name("var_name") + .and_then(|var_name| { + ContractDataVarName::try_from(var_name.as_str()).ok() + }) + .is_none() + { + return Err(Error::DecodeError( + "Invalid metadata var name".to_string(), + )); + } + } + _ => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + }, + None => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + } + } + + self.contract_identifier = Some(contract_identifier); + self.clarity_metadata_key = Some(metadata_key); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetClarityMetadataRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.clarity_metadata_key = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let clarity_metadata_key = self.clarity_metadata_key.take().ok_or(NetError::SendError( + "`clarity_metadata_key` not set".to_string(), + ))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let data = clarity_db + .store + .get_metadata(&contract_identifier, &clarity_metadata_key) + .ok() + .flatten()?; + + Some(ClarityMetadataResponse { data }) + }) + }, + ) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return
StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Metadata not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetClarityMetadataRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result<HttpResponsePayload, Error> { + let metadata: ClarityMetadataResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(metadata)?) + } +} + +impl StacksHttpRequest { + pub fn new_getclaritymetadata( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + clarity_metadata_key: String, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/clarity/metadata/{}/{}/{}", + &contract_addr, &contract_name, &clarity_metadata_key + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_clarity_metadata_response(self) -> Result<ClarityMetadataResponse, NetError> { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ClarityMetadataResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 8fc8ee33ba..8d32308d9d 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -43,6 +43,8 @@ pub mod getattachmentsinv; pub mod getblock; pub mod getblock_v3; pub mod getblockbyheight; +pub mod getclaritymarfvalue; +pub mod getclaritymetadata; pub mod getconstantval; pub mod getcontractabi; pub mod getcontractsrc; @@ -94,6 +96,8 @@ impl StacksHttp { self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); self.register_rpc_endpoint(getblock_v3::RPCNakamotoBlockRequestHandler::new()); self.register_rpc_endpoint(getblockbyheight::RPCNakamotoBlockByHeightRequestHandler::new()); + self.register_rpc_endpoint(getclaritymarfvalue::RPCGetClarityMarfRequestHandler::new()); + self.register_rpc_endpoint(getclaritymetadata::RPCGetClarityMetadataRequestHandler::new()); self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index b6f91c59b8..27a930d634 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -38,7 +38,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock,
NakamotoChainState, NAKAMOTO_BLOCK_VERSION}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; @@ -374,9 +374,39 @@ impl NakamotoBlockProposal { }); } - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; - let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); - let mut db_handle = sortdb.index_handle(&sort_tip); + // Check block version. If it's less than the compiled-in version, just emit a warning + // because there's a new version of the node / signer binary available that really ought to + // be used (hint, hint) + if self.block.header.version != NAKAMOTO_BLOCK_VERSION { + warn!("Proposed block has unexpected version. Upgrade your node and/or signer ASAP."; + "block.header.version" => %self.block.header.version, + "expected" => %NAKAMOTO_BLOCK_VERSION); + } + + // open sortition view to the current burn view. + // If the block has a TenureChange with an Extend cause, then the burn view is whatever is + // indicated in the TenureChange. + // Otherwise, it's the same as the block's parent's burn view. + let parent_stacks_header = NakamotoChainState::get_block_header( + chainstate.db(), + &self.block.header.parent_block_id, + )? + .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Invalid parent block".into(), + })?; + + let burn_view_consensus_hash = + NakamotoChainState::get_block_burn_view(sortdb, &self.block, &parent_stacks_header)?; + let sort_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &burn_view_consensus_hash)? + .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::NoSuchTenure, + reason: "Failed to find sortition for block tenure".to_string(), + })?; + + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip.sortition_id); + let mut db_handle = sortdb.index_handle(&sort_tip.sortition_id); // (For the signer) // Verify that the block's tenure is on the canonical sortition history @@ -413,14 +443,6 @@ impl NakamotoBlockProposal { )?; // Validate txs against chainstate - let parent_stacks_header = NakamotoChainState::get_block_header( - chainstate.db(), - &self.block.header.parent_block_id, - )? - .ok_or_else(|| BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, - reason: "Invalid parent block".into(), - })?; // Validate the block's timestamp. 
It must be: // - Greater than the parent block's timestamp diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 9bd174d322..aff20d962f 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -70,7 +70,7 @@ impl HttpRequest for RPCPostBlockRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(&format!("^{PATH}$")).unwrap() + Regex::new(&format!("^{}(/)?$", PATH.trim_end_matches('/'))).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs new file mode 100644 index 0000000000..7255d1ee99 --- /dev/null +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -0,0 +1,205 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::database::{ClarityDeserializable, STXBalance}; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{StacksAddress, TrieHash}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let vm_key_epoch = TrieHash::from_key("vm-epoch::epoch-version"); + let vm_key_trip = + TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"); + let vm_key_quad = + TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"); + let valid_keys = [vm_key_epoch, vm_key_trip, vm_key_quad]; + + for key in valid_keys { + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + key, + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymarfvalue::RPCGetClarityMarfRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + 
parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.marf_key_hash, Some(key.clone())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.marf_key_hash.is_none()); + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing marf value + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed::1::bar-unconfirmed"), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existent var + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key( + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist", + ), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query non-existent contract + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist::1::bar"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query vm-account balance + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm-account::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R::19"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // existing data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_clarity_marf_response().unwrap(); + assert_eq!(resp.data, "0x0000000000000000000000000000000000"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_clarity_marf_response().unwrap(); + assert_eq!(resp.data, "0x0100000000000000000000000000000001"); + assert!(resp.marf_proof.is_some()); + + // no such var + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // vm-account balance + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_clarity_marf_response().unwrap(); + let balance = STXBalance::deserialize(&resp.data[2..]).unwrap(); +
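// Editorial aside, not part of the diff: `resp.data` is a "0x"-prefixed hex
// string, which is why the call above slices with `&resp.data[2..]` before
// deserializing. A minimal sketch of the same decode that also tolerates a
// missing prefix (hypothetical helper, mirroring the `STXBalance::deserialize`
// call used in this test):
fn decode_stx_balance_sketch(data: &str) -> STXBalance {
    // Strip the "0x" prefix if present, then hand the bare hex body to the
    // ClarityDeserializable impl, exactly as the assertion path above does.
    let hex_body = data.strip_prefix("0x").unwrap_or(data);
    STXBalance::deserialize(hex_body).unwrap()
}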
assert_eq!(balance.amount_unlocked(), 1_000_000_000); + assert_eq!(balance.amount_locked(), 0); +} diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs new file mode 100644 index 0000000000..495bbb514f --- /dev/null +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -0,0 +1,373 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::database::{ClaritySerializable, DataMapMetadata, DataVariableMetadata}; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; +use clarity::vm::{ClarityName, ContractName}; +use serde_json::json; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::Error as HttpError; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{Error as NetError, ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.clarity_metadata_key, + Some("vm-metadata::9::contract-size".to_string()) + ); + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_metadata_key.is_none()); +} + +#[test] +fn test_try_parse_invalid_store_type() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 
1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::contract-size".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let parsed_request_err = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap_err(); + + assert_eq!( + parsed_request_err, + HttpError::DecodeError("Invalid metadata type".to_string()).into() + ); + handler.restart(); +} + +#[test] +fn test_try_parse_invalid_contract_metadata_var_name() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-invalid-key".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let parsed_request_err = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap_err(); + + assert_eq!( + parsed_request_err, + HttpError::DecodeError("Invalid metadata var name".to_string()).into() + ); + handler.restart(); +} + +#[test] +fn test_try_parse_request_for_analysis() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "analysis".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.clarity_metadata_key, Some("analysis".to_string())); + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world" + ) + .unwrap() + ) + ); + + 
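// Editorial note: "analysis" is a bare, special-cased metadata key. The other
// keys exercised in these tests follow the "vm-metadata::<store-type>::<name>"
// convention, where <store-type> appears to be the decimal StoreType
// discriminant (9 = Contract, paired with ContractDataVarName entries like
// "contract-size"; 6 = VariableMeta; 5 = DataMapMeta). That is why
// "vm-metadata::2::bar" (2 = FungibleToken, not a metadata store) is rejected
// with "Invalid metadata type" in the test above.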
assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_metadata_key.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing contract size metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data map metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::5::test-map".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query undeclared var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::non-existing-var".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing contract size metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // invalid metadata key (wrong store type) + let response = responses.remove(0); + let (preamble, body) =
response.destruct(); + assert_eq!(preamble.status_code, 400); + + // contract size metadata + let response = responses.remove(0); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_clarity_metadata_response().unwrap(); + assert_eq!(resp.data, "1432"); + + // data map metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataMapMetadata { + key_type: TypeSignature::UIntType, + value_type: TypeSignature::UIntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // undeclared data var (not found) + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // contract size metadata + let response = responses.remove(0); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_clarity_metadata_response().unwrap(); + assert_eq!(resp.data, "1432"); + + // invalid metadata key (wrong store type) + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 0a6ad69762..cd8a337acb 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -61,6 +61,8 @@ mod getattachmentsinv; mod getblock; mod getblock_v3; mod getblockbyheight; +mod getclaritymarfvalue; +mod getclaritymetadata; mod getconstantval; mod getcontractabi; mod getcontractsrc; @@ -120,7 +122,7 @@ const TEST_CONTRACT: &'static str = " (ok 1))) (begin (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 })) - + (define-read-only (ro-confirmed) u1) (define-public (do-test) (ok u0)) @@ -1044,7 +1046,7 @@ impl<'a> TestRPC<'a> { peer_2.sortdb = Some(peer_2_sortdb); peer_2.stacks_node = Some(peer_2_stacks_node); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + peer_2.mempool = Some(peer_2_mempool); convo_send_recv(&mut convo_2, &mut convo_1); @@ -1053,8 +1055,6 @@ impl<'a> TestRPC<'a> { // hack around the borrow-checker convo_send_recv(&mut convo_1, &mut convo_2); - peer_2.mempool = Some(peer_2_mempool); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); @@ -1076,27 +1076,45 @@ impl<'a> TestRPC<'a> { .unwrap(); } - { - let rpc_args = RPCHandlerArgs::default(); - let mut node_state = StacksNodeState::new( - &mut peer_1.network, - &peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - &mut peer_1_mempool, - &rpc_args, - false, - ); - convo_1.chat(&mut node_state).unwrap(); - } - - convo_1.try_flush().unwrap(); - peer_1.sortdb = Some(peer_1_sortdb);
peer_1.stacks_node = Some(peer_1_stacks_node); - peer_1.mempool = Some(peer_1_mempool); - // should have gotten a reply - let resp_opt = convo_1.try_get_response(); + let resp_opt = loop { + debug!("Peer 1 try get response"); + convo_send_recv(&mut convo_1, &mut convo_2); + { + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + false, + ); + + convo_1.chat(&mut node_state).unwrap(); + + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + } + + convo_1.try_flush().unwrap(); + + info!("Try get response from request {:?}", &request); + + // should have gotten a reply + let resp_opt = convo_1.try_get_response(); + if resp_opt.is_some() { + break resp_opt; + } + }; + assert!(resp_opt.is_some()); let resp = resp_opt.unwrap(); diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index 5cc652fc83..0b0a95f3a4 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -214,6 +214,66 @@ fn handle_req_accepted() { assert_eq!(resp.stacks_block_id, next_block_id); } +#[test] +fn handle_req_without_trailing_accepted() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let path_without_slash: &str = "/v3/blocks/upload"; + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + let (next_block, ..) 
= rpc_test.peer_1.single_block_tenure( + &rpc_test.privk1, + |_| {}, + |burn_ops| { + rpc_test.peer_2.next_burnchain_block(burn_ops.clone()); + }, + |_| true, + ); + let next_block_id = next_block.block_id(); + let mut requests = vec![]; + + // post the block + requests.push( + StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + path_without_slash.into(), + HttpRequestContents::new().payload_stacks(&next_block), + ) + .unwrap(), + ); + + // idempotent + requests.push( + StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + path_without_slash.into(), + HttpRequestContents::new().payload_stacks(&next_block), + ) + .unwrap(), + ); + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + info!( + "Response for the request that has the path without the last '/': {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, next_block_id); + + let response = responses.remove(0); + info!( + "Response for the request that has the path without the last '/': {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, next_block_id); +} + #[test] fn handle_req_unknown_burn_block() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 7d45b39769..1d8e5d10d2 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -6375,6 +6375,8 @@ mod test { } } + // TODO: test for has_acceptable_epoch() + #[test] fn convo_process_relayers() { let conn_opts = ConnectionOptions::default(); diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 15adebef95..9a2811d1b5 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -274,6 +274,7 @@ impl BurnchainController for MocknetController { .unwrap(); let new_chain_tip = burn_tx .process_block_ops( + false, &self.burnchain, &chain_tip.block_snapshot, &next_block_header, diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e58bd2b848..9022ec8f34 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -53,6 +53,7 @@ use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; use stacks::types::chainstate::BurnchainHeaderHash; use stacks::types::EpochList; +use stacks::util::hash::to_hex; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -659,6 +660,8 @@ impl Config { Ok(StacksEpochId::Epoch25) } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) + } else if epoch_name == EPOCH_CONFIG_3_1_0 { + Ok(StacksEpochId::Epoch31) } else { Err(format!("Unknown epoch name specified: {epoch_name}")) }?; @@ -685,6 +688,7 @@ impl Config { StacksEpochId::Epoch24, StacksEpochId::Epoch25, StacksEpochId::Epoch30, + StacksEpochId::Epoch31, ]; for (expected_epoch, configured_epoch) in expected_list .iter() @@ -833,7 +837,12 @@ impl Config { } let miner = match config_file.miner { - Some(miner) => miner.into_config_default(miner_default_config)?, + Some(mut miner) => { + 
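// Editorial note: the branch below backfills `mining_key` from the node seed
// when no key is configured; `MinerConfigFile::into_config_default` (later in
// this diff) now panics with "mining key not set" if the key is still absent.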
if miner.mining_key.is_none() && !node.seed.is_empty() { + miner.mining_key = Some(to_hex(&node.seed)); + } + miner.into_config_default(miner_default_config)? + } None => miner_default_config, }; @@ -1291,6 +1300,7 @@ pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; +pub const EPOCH_CONFIG_3_1_0: &str = "3.1"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { @@ -2540,6 +2550,13 @@ pub struct MinerConfigFile { impl MinerConfigFile { fn into_config_default(self, miner_default_config: MinerConfig) -> Result { + match &self.mining_key { + Some(_) => {} + None => { + panic!("mining key not set"); + } + } + let mining_key = self .mining_key .as_ref() diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index b15d0f4c7e..745ae03fc9 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -143,7 +143,11 @@ pub struct BlockMinerThread { registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner burn_election_block: BlockSnapshot, - /// Current burnchain tip + /// Current burnchain tip as of the last TenureChange + /// * if the last tenure-change was a BlockFound, then this is the same as the + /// `burn_election_block`. + /// * otherwise, if the last tenure-change is an Extend, then this is the sortition of the burn + /// view consensus hash in the TenureChange burn_block: BlockSnapshot, /// The start of the parent tenure for this tenure parent_tenure_id: StacksBlockId, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index b346cdc346..8cc1293acd 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -38,7 +38,7 @@ use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::core::STACKS_EPOCH_3_1_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; use stacks::net::p2p::NetworkHandle; @@ -738,7 +738,7 @@ impl RelayerThread { key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), - memo: vec![STACKS_EPOCH_3_0_MARKER], + memo: vec![STACKS_EPOCH_3_1_MARKER], new_seed: VRFSeed::from_proof(&tip_vrf_proof), parent_block_ptr: u32::try_from(commit_parent_block_burn_height) .expect("FATAL: burn block height exceeded u32"), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ef6199d331..25d22801c0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,6 +29,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; +use rusqlite::OptionalExtension; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -61,7 +62,7 @@ use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, 
HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, - PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_TESTNET, }; use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; @@ -71,7 +72,7 @@ use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; -use stacks::types::chainstate::StacksBlockId; +use stacks::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -85,7 +86,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, }; -use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::types::{set_test_coinbase_schedule, CoinbaseInterval, StacksPublicKeyBuffer}; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; @@ -117,7 +118,7 @@ pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { - pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ + pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 10] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -177,10 +178,17 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 231, - end_height: STACKS_EPOCH_MAX, + end_height: 241, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: 241, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]; } @@ -9812,9 +9820,226 @@ fn test_shadow_recovery() { #[test] #[ignore] +/// Integration test for SIP-029 +fn sip029_coinbase_change() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + let new_sched = vec![ + CoinbaseInterval { + coinbase: 1_000_000_000, + effective_start_height: 0, + }, + // NOTE: epoch 3.1 goes into effect at 241 + CoinbaseInterval { + coinbase: 500_000_000, + effective_start_height: 245, + }, + CoinbaseInterval { + coinbase: 125_000_000, + effective_start_height: 255, + }, + CoinbaseInterval { + coinbase: 62_500_000, + effective_start_height: 265, + }, + ]; + + set_test_coinbase_schedule(Some(new_sched.clone())); + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 180; + naka_conf.burnchain.max_rbf = 10_000_000; + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // mine until burnchain height 270 + loop { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + + let node_info = get_chain_info_opt(&naka_conf).unwrap(); + if node_info.burn_block_height >= 270 { + break; + } + } + + info!("Nakamoto miner has advanced to burn height 270"); + + // inspect `payments` table to see that coinbase was applied + let all_snapshots = sortdb.get_all_snapshots().unwrap(); + + // whether or not the last snapshot had a sortition + let mut prev_sortition = false; + + // whether or not we witnessed the requisite coinbases + let mut witnessed_1000 = false; + let mut witnessed_500 = false; + let mut witnessed_125 = false; + let mut witnessed_62_5 = false; + + // initial mining bonus + let initial_mining_bonus = 20400000; + + for sn in all_snapshots { + if !sn.sortition { + prev_sortition = false; + continue; + } + if sn.consensus_hash == ConsensusHash([0x00; 20]) { + continue; + } + let coinbase = { + let sql = "SELECT coinbase FROM payments WHERE consensus_hash = ?1"; + let args = rusqlite::params![&sn.consensus_hash]; + let Some(coinbase) = chainstate + .db() + .query_row(sql, args, |r| { + let coinbase_txt: String = r.get_unwrap(0); + let coinbase: u64 = coinbase_txt.parse().unwrap(); + Ok(coinbase) + }) + .optional() + .unwrap() + else { + info!("No coinbase for {} {}", sn.block_height, &sn.consensus_hash); + continue; + }; + + coinbase + }; + + info!( + "Coinbase at {} {}: {}", + sn.block_height, &sn.consensus_hash, coinbase + ); + // use >= for coinbases since a missed sortition can lead to coinbase accumulation + if sn.block_height < 245 { + if prev_sortition { + assert_eq!(coinbase, 1_000_000_000 + initial_mining_bonus); + witnessed_1000 = true; + } else { + assert!(coinbase >= 1_000_000_000 + initial_mining_bonus); + } + } else if sn.block_height < 255 { + if prev_sortition { + assert_eq!(coinbase, 500_000_000 + initial_mining_bonus); + witnessed_500 = true; + } else { + assert!(coinbase >= 500_000_000 + initial_mining_bonus); + } + } else if sn.block_height < 265 { + if prev_sortition { + assert_eq!(coinbase, 125_000_000 + initial_mining_bonus); + witnessed_125 = true; + } else { + assert!(coinbase >= 125_000_000 + initial_mining_bonus); + } + } else { + if prev_sortition { + assert_eq!(coinbase, 62_500_000 + initial_mining_bonus); + witnessed_62_5 = true; + } else { + assert!(coinbase >= 62_500_000 + initial_mining_bonus); + } + } + + 
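// Editorial note: record that this snapshot won its sortition, so the next
// iteration can demand an exact coinbase match; the `>=` branches above apply
// only after sortition-less blocks, where coinbases accumulate.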
prev_sortition = true; + } + + assert!(witnessed_1000); + assert!(witnessed_500); + assert!(witnessed_125); + assert!(witnessed_62_5); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + /// This test is testing that the clarity cost spend down works as expected, /// spreading clarity contract calls across the tenure instead of all in the first block. /// It also ensures that the clarity cost resets at the start of each tenure. +#[test] +#[ignore] fn clarity_cost_spend_down() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2486043ccc..9c9fee200f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -506,7 +506,7 @@ fn block_proposal_rejection() { signer_test.wait_for_validate_reject_response(short_timeout, block_signer_signature_hash_2); assert!(matches!( reject.reason_code, - ValidateRejectCode::UnknownParent + ValidateRejectCode::InvalidBlock )); let start_polling = Instant::now(); @@ -532,7 +532,10 @@ fn block_proposal_rejection() { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); } else if signer_signature_hash == block_signer_signature_hash_2 { found_signer_signature_hash_2 = true; - assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + assert!(matches!( + reason_code, + RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock) + )); } else { continue; } @@ -1267,8 +1270,18 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = - SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + epochs[StacksEpochId::Epoch30].end_height = 3_015; + epochs[StacksEpochId::Epoch31].start_height = 3_015; + }, + None, + None, + ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -3069,6 +3082,8 @@ fn mock_sign_epoch_25() { let epochs = node_config.burnchain.epochs.as_mut().unwrap(); epochs[StacksEpochId::Epoch25].end_height = 251; epochs[StacksEpochId::Epoch30].start_height = 251; + epochs[StacksEpochId::Epoch30].end_height = 265; + epochs[StacksEpochId::Epoch31].start_height = 265; }, None, None, @@ -3167,7 +3182,7 @@ fn mock_sign_epoch_25() { ); } assert!( - main_poll_time.elapsed() <= Duration::from_secs(45), + main_poll_time.elapsed() <= Duration::from_secs(145), "Timed out waiting to advance epoch 3.0 boundary" ); } @@ -3229,6 +3244,8 @@ fn multiple_miners_mock_sign_epoch_25() { let epochs = config.burnchain.epochs.as_mut().unwrap(); epochs[StacksEpochId::Epoch25].end_height = 251; epochs[StacksEpochId::Epoch30].start_height = 251; + epochs[StacksEpochId::Epoch30].end_height = 265; + epochs[StacksEpochId::Epoch31].start_height = 265; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { warn!(