From 26cf0da7017b35250a601c9c8d8f2928be206a23 Mon Sep 17 00:00:00 2001 From: Ludo Galabru Date: Wed, 8 May 2024 12:29:11 -0400 Subject: [PATCH 01/56] feat: given a MARF key constructed client side, retrieve clarity value --- stackslib/src/net/api/getclaritymarfvalue.rs | 234 +++++++++++++++++++ stackslib/src/net/api/mod.rs | 4 + stackslib/src/net/httpcore.rs | 11 + 3 files changed, 249 insertions(+) create mode 100644 stackslib/src/net/api/getclaritymarfvalue.rs diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs new file mode 100644 index 0000000000..d2369f2b31 --- /dev/null +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -0,0 +1,234 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::io::{Read, Write}; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClarityMarfValueResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetClarityMarfValueRequestHandler { + pub clarity_marf_key: Option, +} +impl RPCGetClarityMarfValueRequestHandler { + pub fn 
new() -> Self { + Self { + clarity_marf_key: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetClarityMarfValueRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + // TODO: write regex validating the following patterns + // format!("vm::{}::{}::{}", contract_identifier, data as u8, var_name) + // format!("vm::{}::{}::{}::{}", contract_identifier, data as u8, var_name, key_value) + // format!("vm-metadata::{}::{}", data as u8, var_name) + // format!("vm-epoch::epoch-version") + + Regex::new(&format!("^/v2/clarity_marf_value/(?.*)$",)).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v2/clarity_marf_value/:clarity_marf_key" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let marf_key = request::get_marf_key(captures, "clarity_marf_key")?; + + self.clarity_marf_key = Some(marf_key); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetClarityMarfValueRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.clarity_marf_key = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let clarity_marf_key = self.clarity_marf_key.take().ok_or(NetError::SendError( + "`clarity_marf_key` not set".to_string(), + ))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) 
=> { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let with_proof = contents.get_with_proof(); + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&clarity_marf_key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db + .get_data(&clarity_marf_key) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(ClarityMarfValueResponse { data, marf_proof }) + }) + }) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Data var not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetClarityMarfValueRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let marf_value: ClarityMarfValueResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(marf_value)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a data var + pub fn new_getclaritymarfvalue( + host: PeerHost, + clarity_marf_key: String, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/clarity_marf_value/{}", &clarity_marf_key), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_clarity_marf_value_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ClarityMarfValueResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index d256c15b97..fc7e1f64a3 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -42,6 +42,7 @@ pub mod getattachment; pub mod getattachmentsinv; pub mod getblock; pub mod getblock_v3; +pub mod getclaritymarfvalue; pub mod getconstantval; pub mod getcontractabi; pub mod getcontractsrc; @@ -90,6 +91,9 @@ impl StacksHttp { self.register_rpc_endpoint(getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new()); self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); self.register_rpc_endpoint(getblock_v3::RPCNakamotoBlockRequestHandler::new()); + self.register_rpc_endpoint( + getclaritymarfvalue::RPCGetClarityMarfValueRequestHandler::new(), + ); self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); diff --git a/stackslib/src/net/httpcore.rs 
b/stackslib/src/net/httpcore.rs index dec51df42a..fb92b03b18 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -247,6 +247,17 @@ pub mod request { Ok(txid) } + /// Get and parse a MARF key from a path's captures, given the name of the regex field. + pub fn get_marf_key(captures: &Captures, key: &str) -> Result { + let marf_key = if let Some(marf_key_str) = captures.name(key) { + marf_key_str.as_str().to_string() + } else { + return Err(HttpError::Http(404, format!("Missing `{}`", key))); + }; + + Ok(marf_key) + } + /// Get and parse a Clarity name from a path's captures, given the name of the regex field. pub fn get_clarity_name(captures: &Captures, key: &str) -> Result { let clarity_name = if let Some(name_str) = captures.name(key) { From 931f315d8a49df3a9c9dbc165d1f84f025bee6d6 Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Tue, 23 Jul 2024 15:52:45 +0200 Subject: [PATCH 02/56] feat: rpc endpoint to retrieve clarity metadata --- clarity/src/vm/representations.rs | 12 + stackslib/src/net/api/getclaritymarfvalue.rs | 90 +++----- stackslib/src/net/api/getclaritymetadata.rs | 217 ++++++++++++++++++ stackslib/src/net/api/mod.rs | 2 + .../src/net/api/tests/getclaritymarfvalue.rs | 180 +++++++++++++++ .../src/net/api/tests/getclaritymetadata.rs | 166 ++++++++++++++ stackslib/src/net/api/tests/mod.rs | 4 +- stackslib/src/net/httpcore.rs | 2 +- 8 files changed, 618 insertions(+), 55 deletions(-) create mode 100644 stackslib/src/net/api/getclaritymetadata.rs create mode 100644 stackslib/src/net/api/tests/getclaritymarfvalue.rs create mode 100644 stackslib/src/net/api/tests/getclaritymetadata.rs diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs index c80e3c7467..8f140a6a4d 100644 --- a/clarity/src/vm/representations.rs +++ b/clarity/src/vm/representations.rs @@ -64,6 +64,18 @@ lazy_static! 
{ Regex::new(format!("^{}$|^__transient$", CONTRACT_NAME_REGEX_STRING.as_str()).as_str()) .unwrap() }; + pub static ref MARF_KEY_FOR_TRIP_REGEX_STRING: String = format!( + r"vm::{}::\d+::.*", + *CONTRACT_PRINCIPAL_REGEX_STRING, + ); + pub static ref MARF_KEY_FOR_QUAD_REGEX_STRING: String = format!( + r"{}::.*", + *MARF_KEY_FOR_TRIP_REGEX_STRING, + ); + pub static ref METADATA_KEY_REGEX_STRING: String = format!( + r"vm-metadata::\d+::.*", + + ); } guarded_string!( diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs index d2369f2b31..b950f7ffcc 100644 --- a/stackslib/src/net/api/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,40 +14,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::io::{Read, Write}; - -use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::costs::LimitedCostTracker; -use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; use clarity::vm::representations::{ - CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, + MARF_KEY_FOR_QUAD_REGEX_STRING, MARF_KEY_FOR_TRIP_REGEX_STRING, }; -use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; -use clarity::vm::{ClarityName, ClarityVersion, ContractName}; use regex::{Captures, Regex}; -use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; -use stacks_common::util::hash::{to_hex, Sha256Sum}; - -use crate::burnchains::Burnchain; -use crate::chainstate::burn::db::sortdb::SortitionDB; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::Error as ChainError; -use crate::core::mempool::MemPoolDB; +use stacks_common::util::hash::to_hex; + use crate::net::http::{ parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, - HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, }; use crate::net::httpcore::{ - request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, }; -use crate::net::p2p::PeerNetwork; use crate::net::{Error as NetError, StacksNodeState, TipRequest}; -use crate::util_lib::boot::boot_code_id; -use crate::util_lib::db::Error as DBError; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ClarityMarfValueResponse { @@ -77,13 +60,11 @@ impl HttpRequest for 
RPCGetClarityMarfValueRequestHandler { } fn path_regex(&self) -> Regex { - // TODO: write regex validating the following patterns - // format!("vm::{}::{}::{}", contract_identifier, data as u8, var_name) - // format!("vm::{}::{}::{}::{}", contract_identifier, data as u8, var_name, key_value) - // format!("vm-metadata::{}::{}", data as u8, var_name) - // format!("vm-epoch::epoch-version") - - Regex::new(&format!("^/v2/clarity_marf_value/(?.*)$",)).unwrap() + Regex::new(&format!( + r"^/v2/clarity_marf_value/(?P(vm-epoch::epoch-version)|({})|({}))$", + *MARF_KEY_FOR_TRIP_REGEX_STRING, *MARF_KEY_FOR_QUAD_REGEX_STRING + )) + .unwrap() } fn metrics_identifier(&self) -> &str { @@ -105,7 +86,7 @@ impl HttpRequest for RPCGetClarityMarfValueRequestHandler { )); } - let marf_key = request::get_marf_key(captures, "clarity_marf_key")?; + let marf_key = request::get_key(captures, "clarity_marf_key")?; self.clarity_marf_key = Some(marf_key); @@ -142,26 +123,30 @@ impl RPCRequestHandler for RPCGetClarityMarfValueRequestHandler { let with_proof = contents.get_with_proof(); let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&clarity_marf_key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - clarity_db - .get_data(&clarity_marf_key) - .ok() - .flatten() - .map(|a| (a, None))? 
- }; - - let data = format!("0x{}", value_hex); - Some(ClarityMarfValueResponse { data, marf_proof }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&clarity_marf_key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db + .get_data(&clarity_marf_key) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(ClarityMarfValueResponse { data, marf_proof }) + }) + }, + ) }); let data_resp = match data_opt { @@ -169,7 +154,7 @@ impl RPCRequestHandler for RPCGetClarityMarfValueRequestHandler { Ok(Some(None)) => { return StacksHttpResponse::new_error( &preamble, - &HttpNotFound::new("Data var not found".to_string()), + &HttpNotFound::new("Marf key not found".to_string()), ) .try_into_contents() .map_err(NetError::from); @@ -204,7 +189,6 @@ impl HttpResponse for RPCGetClarityMarfValueRequestHandler { } impl StacksHttpRequest { - /// Make a new request for a data var pub fn new_getclaritymarfvalue( host: PeerHost, clarity_marf_key: String, diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs new file mode 100644 index 0000000000..2feb81613a --- /dev/null +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -0,0 +1,217 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, METADATA_KEY_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::ContractName; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClarityMetadataResponse { + pub data: String, +} + +#[derive(Clone)] +pub struct RPCGetClarityMetadataRequestHandler { + pub clarity_metadata_key: Option, + pub contract_identifier: Option, +} +impl RPCGetClarityMetadataRequestHandler { + pub fn new() -> Self { + Self { + clarity_metadata_key: None, + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetClarityMetadataRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r"^/v2/clarity_metadata/(?P
{})/(?P{})/(?P(analysis)|({}))$", + *STANDARD_PRINCIPAL_REGEX_STRING, + *CONTRACT_NAME_REGEX_STRING, + *METADATA_KEY_REGEX_STRING + )) + .unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v2/clarity_metadata/:principal/:contract_name/:clarity_metadata_key" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let metadata_key = request::get_key(captures, "clarity_metadata_key")?; + + self.contract_identifier = Some(contract_identifier); + self.clarity_metadata_key = Some(metadata_key); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetClarityMetadataRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.clarity_metadata_key = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let clarity_metadata_key = self.clarity_metadata_key.take().ok_or(NetError::SendError( + "`clarity_metadata_key` not set".to_string(), + ))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_opt = 
node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let data = clarity_db + .store + .get_metadata(&contract_identifier, &clarity_metadata_key) + .ok() + .flatten()?; + + Some(ClarityMetadataResponse { data }) + }) + }, + ) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Metadata not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetClarityMetadataRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let metadata: ClarityMetadataResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(metadata)?) 
+ } +} + +impl StacksHttpRequest { + pub fn new_getclaritymetadata( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + clarity_metadata_key: String, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/clarity_metadata/{}/{}/{}", + &contract_addr, &contract_name, &clarity_metadata_key + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_clarity_metadata_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ClarityMetadataResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index fc7e1f64a3..9e6be1bf11 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -43,6 +43,7 @@ pub mod getattachmentsinv; pub mod getblock; pub mod getblock_v3; pub mod getclaritymarfvalue; +pub mod getclaritymetadata; pub mod getconstantval; pub mod getcontractabi; pub mod getcontractsrc; @@ -94,6 +95,7 @@ impl StacksHttp { self.register_rpc_endpoint( getclaritymarfvalue::RPCGetClarityMarfValueRequestHandler::new(), ); + self.register_rpc_endpoint(getclaritymetadata::RPCGetClarityMetadataRequestHandler::new()); self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs new file mode 100644 index 0000000000..b4129c7d93 --- /dev/null +++ 
b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -0,0 +1,180 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let vm_key_epoch = "vm-epoch::epoch-version"; + let vm_key_trip = "vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"; + let vm_key_quad = "vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"; + let valid_keys = [vm_key_epoch, vm_key_trip, vm_key_quad]; + + for key in valid_keys { + 
let request = StacksHttpRequest::new_getclaritymarfvalue( + addr.into(), + key.to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymarfvalue::RPCGetClarityMarfValueRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.clarity_marf_key, Some(key.to_string())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_marf_key.is_none()); + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getclaritymarfvalue( + addr.into(), + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getclaritymarfvalue( + addr.into(), + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed::1::bar-unconfirmed" + .to_string(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant var + let request = StacksHttpRequest::new_getclaritymarfvalue( + addr.into(), + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist".to_string(), + 
TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getclaritymarfvalue( + addr.into(), + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist::1::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // existing data + let response = responses.remove(0); + println!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_data_var_response().unwrap(); + assert_eq!(resp.data, "0x0000000000000000000000000000000000"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_data_var_response().unwrap(); + assert_eq!(resp.data, "0x0100000000000000000000000000000001"); + assert!(resp.marf_proof.is_some()); + + // no such var + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs new file mode 100644 index 0000000000..3de5949a87 --- /dev/null +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -0,0 +1,166 @@ 
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + 
TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.clarity_metadata_key, + Some("vm-metadata::9::contract-size".to_string()) + ); + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_metadata_key.is_none()); +} + +#[test] +fn test_try_parse_request_for_analysis() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "analysis".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes 
headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.clarity_metadata_key, Some("analysis".to_string())); + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_metadata_key.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_clarity_metadata_response().unwrap(); + assert_eq!(resp.data, "1432"); +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index f0a537d045..72800164f3 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -60,6 +60,8 @@ mod getattachment; mod getattachmentsinv; mod getblock; mod getblock_v3; +mod getclaritymarfvalue; +mod getclaritymetadata; mod getconstantval; mod getcontractabi; mod getcontractsrc; @@ -117,7 +119,7 @@ const TEST_CONTRACT: &'static str = " (ok 1))) (begin (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 })) - + (define-read-only (ro-confirmed) u1) (define-public (do-test) (ok u0)) diff --git 
a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index fb92b03b18..71bcfff284 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -248,7 +248,7 @@ pub mod request { } /// Get and parse a MARF key from a path's captures, given the name of the regex field. - pub fn get_marf_key(captures: &Captures, key: &str) -> Result { + pub fn get_key(captures: &Captures, key: &str) -> Result { let marf_key = if let Some(marf_key_str) = captures.name(key) { marf_key_str.as_str().to_string() } else { From ccf60f18cadf4757a915ef5b91e37a6ed55a178c Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Tue, 23 Jul 2024 17:20:39 +0200 Subject: [PATCH 03/56] docs: add get_clarity_mark_value and get_clarity_metadata documentation --- docs/rpc-endpoints.md | 29 ++++++ .../get-clarity-marf-value.example.json | 4 + .../get-clarity-marf-value.schema.json | 17 ++++ .../get-clarity-metadata.example.json | 3 + .../get-clarity-metadata.schema.json | 13 +++ docs/rpc/openapi.yaml | 93 ++++++++++++++++++- 6 files changed, 156 insertions(+), 3 deletions(-) create mode 100644 docs/rpc/api/core-node/get-clarity-marf-value.example.json create mode 100644 docs/rpc/api/core-node/get-clarity-marf-value.schema.json create mode 100644 docs/rpc/api/core-node/get-clarity-metadata.example.json create mode 100644 docs/rpc/api/core-node/get-clarity-metadata.schema.json diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6163f27b75..117d74d0dc 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -172,6 +172,35 @@ Where data is the hex serialization of the variable value. This endpoint also accepts a querystring parameter `?proof=` which when supplied `0`, will return the JSON object _without_ the `proof` field. +### GET /v2/clarity_marf_value/[Clarity MARF Key] +Attempt to fetch the value of a MARF key. The key is identified with [Clarity MARF Key]. 
+ +Returns JSON data in the form: + +```json +{ + "data": "0x01ce...", + "proof": "0x01ab...", +} +``` + +Where data is the hex serialization of the value. + +### GET /v2/clarity_metadata/[Stacks Address]/[Contract Name]/[Clarity Metadata Key] +Attempt to fetch the metadata of a contract. + The contract is identified with [Stacks Address] and [Contract Name] in the URL path. + The metadata key is identified with [Clarity Metadata Key]. + +Returns JSON data in the form: + +```json +{ + "data": "'{\"contract_identifier\":{...}'", +} +``` + +Where data is the metadata formatted as a JSON string. + ### GET /v2/constant_val/[Stacks Address]/[Contract Name]/[Constant Name] Attempt to fetch a constant from a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The constant is identified with [Constant Name]. diff --git a/docs/rpc/api/core-node/get-clarity-marf-value.example.json b/docs/rpc/api/core-node/get-clarity-marf-value.example.json new file mode 100644 index 0000000000..d0e233416f --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-marf-value.example.json @@ -0,0 +1,4 @@ +{ + "data": "0x0a0c000000010a6d6f6e737465722d69640100000000000000000000000000000001", + "proof": "0x123..." 
+} diff --git a/docs/rpc/api/core-node/get-clarity-marf-value.schema.json b/docs/rpc/api/core-node/get-clarity-marf-value.schema.json new file mode 100644 index 0000000000..ea7e7894fb --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-marf-value.schema.json @@ -0,0 +1,17 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Response of get Clarity MARF value request", + "title": "ClarityMARFValueResponse", + "type": "object", + "required": ["data"], + "properties": { + "data": { + "type": "string", + "description": "Hex-encoded string" + }, + "proof": { + "type": "string", + "description": "Hex-encoded string of the MARF proof for the data" + } + } +} diff --git a/docs/rpc/api/core-node/get-clarity-metadata.example.json b/docs/rpc/api/core-node/get-clarity-metadata.example.json new file mode 100644 index 0000000000..5bb4bd5c47 --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-metadata.example.json @@ -0,0 +1,3 @@ +{ + "data": "'{\"contract_identifier\":{...}, \"private_function_types\":{...}'" +} diff --git a/docs/rpc/api/core-node/get-clarity-metadata.schema.json b/docs/rpc/api/core-node/get-clarity-metadata.schema.json new file mode 100644 index 0000000000..3c0104fa41 --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-metadata.schema.json @@ -0,0 +1,13 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Response of get clarity metadata request", + "title": "ClarityMetadataResponse", + "type": "object", + "required": ["data"], + "properties": { + "data": { + "type": "string", + "description": "Metadata value formatted as a JSON string" + } + } +} diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index f33e0dca73..e77fc31ade 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -519,7 +519,94 @@ paths: description: | The Stacks chain tip to query from. If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). 
- If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). + If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). + + /v2/clarity_marf_value/{clarity_marf_key}: + post: + summary: Get the MARF value for a given key + tags: + - Smart Contracts + operationId: get_clarity_marf_value + description: | + Attempt to fetch the value of a MARF key. The key is identified with [Clarity MARF Key]. + + In the response, `data` is the hex serialization of the value. + responses: + 200: + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-clarity-marf-value.schema.json + example: + $ref: ./api/core-node/get-clarity-marf-value.example.json + 400: + description: Failed to retrieve MARF key + parameters: + - name: clarity_marf_key + in: path + required: true + description: MARF key + schema: + type: string + - name: proof + in: query + description: Returns object without the proof field when set to 0 + schema: + type: integer + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). + + /v2/clarity_metadata/{contract_address}/{contract_name}/{clarity_metadata_key}: + post: + summary: Get the contract metadata for the metadata key + tags: + - Smart Contracts + operationId: get_clarity_metadata_key + description: | + Attempt to fetch the metadata of a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. + + In the response, `data` is formatted as JSON. 
+ responses: + 200: + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-clarity-metadata.schema.json + example: + $ref: ./api/core-node/get-clarity-metadata.example.json + 400: + description: Failed to retrieve constant value from contract + parameters: + - name: contract_address + in: path + required: true + description: Stacks address + schema: + type: string + - name: contract_name + in: path + required: true + description: Contract name + schema: + type: string + - name: clarity_metadata_key + in: path + required: true + description: Metadata key + schema: + type: string + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: post: @@ -633,7 +720,7 @@ paths: /v3/blocks/{block_id}: get: - summary: Fetch a Nakamoto block + summary: Fetch a Nakamoto block tags: - Blocks operationId: get_block_v3 @@ -674,7 +761,7 @@ paths: application/json: example: $ref: ./api/core-node/get_tenure_info.json - + /v3/tenures/{block_id}: get: summary: Fetch a sequence of Nakamoto blocks in a tenure From 26c448727def84bbef34ba4d14ec4fd5244a2390 Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Tue, 23 Jul 2024 18:12:47 +0200 Subject: [PATCH 04/56] refactor: improve clarity_mark_key and clarity_metadata request parsing --- clarity/src/vm/representations.rs | 11 +++++++---- stackslib/src/net/api/getclaritymarfvalue.rs | 2 +- stackslib/src/net/api/getclaritymetadata.rs | 2 +- stackslib/src/net/api/tests/getclaritymarfvalue.rs | 2 +- stackslib/src/net/httpcore.rs | 12 ++++++------ 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs index 8f140a6a4d..ee3387d629 100644 --- a/clarity/src/vm/representations.rs +++ 
b/clarity/src/vm/representations.rs @@ -51,6 +51,8 @@ lazy_static! { "({})|({})", *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_PRINCIPAL_REGEX_STRING ); + static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = + "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); pub static ref CLARITY_NAME_REGEX_STRING: String = "^[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*$|^[-+=/*]$|^[<>]=?$".into(); pub static ref CLARITY_NAME_REGEX: Regex = @@ -65,16 +67,17 @@ lazy_static! { .unwrap() }; pub static ref MARF_KEY_FOR_TRIP_REGEX_STRING: String = format!( - r"vm::{}::\d+::.*", + r"vm::{}::\d+::({})", *CONTRACT_PRINCIPAL_REGEX_STRING, + *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, ); pub static ref MARF_KEY_FOR_QUAD_REGEX_STRING: String = format!( - r"{}::.*", + r"{}::[0-9a-fA-F]+", *MARF_KEY_FOR_TRIP_REGEX_STRING, ); pub static ref METADATA_KEY_REGEX_STRING: String = format!( - r"vm-metadata::\d+::.*", - + r"vm-metadata::\d+::(contract|contract-size|contract-src|contract-data-size|({}))", + *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, ); } diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs index b950f7ffcc..470c5872ba 100644 --- a/stackslib/src/net/api/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -86,7 +86,7 @@ impl HttpRequest for RPCGetClarityMarfValueRequestHandler { )); } - let marf_key = request::get_key(captures, "clarity_marf_key")?; + let marf_key = request::get_clarity_key(captures, "clarity_marf_key")?; self.clarity_marf_key = Some(marf_key); diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs index 2feb81613a..8a08a48503 100644 --- a/stackslib/src/net/api/getclaritymetadata.rs +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -89,7 +89,7 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { } let contract_identifier = request::get_contract_address(captures, "address", "contract")?; - let metadata_key = 
request::get_key(captures, "clarity_metadata_key")?; + let metadata_key = request::get_clarity_key(captures, "clarity_metadata_key")?; self.contract_identifier = Some(contract_identifier); self.clarity_metadata_key = Some(metadata_key); diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index b4129c7d93..40e73c41e4 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -128,7 +128,7 @@ fn test_try_make_response() { // existing data let response = responses.remove(0); - println!( + debug!( "Response:\n{}\n", std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() ); diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 71bcfff284..731b5fb771 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -247,15 +247,15 @@ pub mod request { Ok(txid) } - /// Get and parse a MARF key from a path's captures, given the name of the regex field. - pub fn get_key(captures: &Captures, key: &str) -> Result { - let marf_key = if let Some(marf_key_str) = captures.name(key) { - marf_key_str.as_str().to_string() + /// Get a clarity key (MARF or Metadata) from a path's captures, given the name of the regex field. + pub fn get_clarity_key(captures: &Captures, clarity_key: &str) -> Result { + let key = if let Some(key_str) = captures.name(clarity_key) { + key_str.as_str().to_string() } else { - return Err(HttpError::Http(404, format!("Missing `{}`", key))); + return Err(HttpError::Http(404, format!("Missing `{}`", clarity_key))); }; - Ok(marf_key) + Ok(key) } /// Get and parse a Clarity name from a path's captures, given the name of the regex field. 
From 71cad36a21b3e1d6c47b57645a8857f61e938af9 Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:29:10 +0200 Subject: [PATCH 05/56] refactor: rename get clarity marf value and metadata endpoints --- clarity/src/vm/representations.rs | 15 ------ stackslib/src/net/api/getclaritymarfvalue.rs | 50 ++++++++++++------- stackslib/src/net/api/getclaritymetadata.rs | 30 ++++++++--- stackslib/src/net/api/mod.rs | 4 +- .../src/net/api/tests/getclaritymarfvalue.rs | 12 ++--- stackslib/src/net/httpcore.rs | 11 ---- 6 files changed, 62 insertions(+), 60 deletions(-) diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs index ee3387d629..c80e3c7467 100644 --- a/clarity/src/vm/representations.rs +++ b/clarity/src/vm/representations.rs @@ -51,8 +51,6 @@ lazy_static! { "({})|({})", *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_PRINCIPAL_REGEX_STRING ); - static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = - "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); pub static ref CLARITY_NAME_REGEX_STRING: String = "^[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*$|^[-+=/*]$|^[<>]=?$".into(); pub static ref CLARITY_NAME_REGEX: Regex = @@ -66,19 +64,6 @@ lazy_static! 
{ Regex::new(format!("^{}$|^__transient$", CONTRACT_NAME_REGEX_STRING.as_str()).as_str()) .unwrap() }; - pub static ref MARF_KEY_FOR_TRIP_REGEX_STRING: String = format!( - r"vm::{}::\d+::({})", - *CONTRACT_PRINCIPAL_REGEX_STRING, - *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, - ); - pub static ref MARF_KEY_FOR_QUAD_REGEX_STRING: String = format!( - r"{}::[0-9a-fA-F]+", - *MARF_KEY_FOR_TRIP_REGEX_STRING, - ); - pub static ref METADATA_KEY_REGEX_STRING: String = format!( - r"vm-metadata::\d+::(contract|contract-size|contract-src|contract-data-size|({}))", - *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, - ); } guarded_string!( diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs index 470c5872ba..ff584a0ccf 100644 --- a/stackslib/src/net/api/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -15,9 +15,8 @@ // along with this program. If not, see . use clarity::vm::clarity::ClarityConnection; -use clarity::vm::representations::{ - MARF_KEY_FOR_QUAD_REGEX_STRING, MARF_KEY_FOR_TRIP_REGEX_STRING, -}; +use clarity::vm::representations::CONTRACT_PRINCIPAL_REGEX_STRING; +use lazy_static::lazy_static; use regex::{Captures, Regex}; use stacks_common::types::net::PeerHost; use stacks_common::util::hash::to_hex; @@ -32,8 +31,19 @@ use crate::net::httpcore::{ }; use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +lazy_static! 
{ + static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = + "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); + static ref MARF_KEY_FOR_TRIP_REGEX_STRING: String = format!( + r"vm::{}::\d+::({})", + *CONTRACT_PRINCIPAL_REGEX_STRING, *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, + ); + static ref MARF_KEY_FOR_QUAD_REGEX_STRING: String = + format!(r"{}::[0-9a-fA-F]+", *MARF_KEY_FOR_TRIP_REGEX_STRING,); +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct ClarityMarfValueResponse { +pub struct ClarityMarfResponse { pub data: String, #[serde(rename = "proof")] #[serde(default)] @@ -42,10 +52,10 @@ pub struct ClarityMarfValueResponse { } #[derive(Clone)] -pub struct RPCGetClarityMarfValueRequestHandler { +pub struct RPCGetClarityMarfRequestHandler { pub clarity_marf_key: Option, } -impl RPCGetClarityMarfValueRequestHandler { +impl RPCGetClarityMarfRequestHandler { pub fn new() -> Self { Self { clarity_marf_key: None, @@ -54,21 +64,21 @@ impl RPCGetClarityMarfValueRequestHandler { } /// Decode the HTTP request -impl HttpRequest for RPCGetClarityMarfValueRequestHandler { +impl HttpRequest for RPCGetClarityMarfRequestHandler { fn verb(&self) -> &'static str { "GET" } fn path_regex(&self) -> Regex { Regex::new(&format!( - r"^/v2/clarity_marf_value/(?P(vm-epoch::epoch-version)|({})|({}))$", + r"^/v2/clarity/marf/(?P(vm-epoch::epoch-version)|({})|({}))$", *MARF_KEY_FOR_TRIP_REGEX_STRING, *MARF_KEY_FOR_QUAD_REGEX_STRING )) .unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/clarity_marf_value/:clarity_marf_key" + "/v2/clarity/marf/:clarity_marf_key" } /// Try to decode this request. 
@@ -86,7 +96,11 @@ impl HttpRequest for RPCGetClarityMarfValueRequestHandler { )); } - let marf_key = request::get_clarity_key(captures, "clarity_marf_key")?; + let marf_key = if let Some(key_str) = captures.name("clarity_marf_key") { + key_str.as_str().to_string() + } else { + return Err(Error::Http(404, "Missing `clarity_marf_key`".to_string())); + }; self.clarity_marf_key = Some(marf_key); @@ -96,7 +110,7 @@ impl HttpRequest for RPCGetClarityMarfValueRequestHandler { } /// Handle the HTTP request -impl RPCRequestHandler for RPCGetClarityMarfValueRequestHandler { +impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { /// Reset internal state fn restart(&mut self) { self.clarity_marf_key = None; @@ -143,7 +157,7 @@ impl RPCRequestHandler for RPCGetClarityMarfValueRequestHandler { }; let data = format!("0x{}", value_hex); - Some(ClarityMarfValueResponse { data, marf_proof }) + Some(ClarityMarfResponse { data, marf_proof }) }) }, ) @@ -177,19 +191,19 @@ impl RPCRequestHandler for RPCGetClarityMarfValueRequestHandler { } /// Decode the HTTP response -impl HttpResponse for RPCGetClarityMarfValueRequestHandler { +impl HttpResponse for RPCGetClarityMarfRequestHandler { fn try_parse_response( &self, preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let marf_value: ClarityMarfValueResponse = parse_json(preamble, body)?; + let marf_value: ClarityMarfResponse = parse_json(preamble, body)?; Ok(HttpResponsePayload::try_from_json(marf_value)?) 
} } impl StacksHttpRequest { - pub fn new_getclaritymarfvalue( + pub fn new_getclaritymarf( host: PeerHost, clarity_marf_key: String, tip_req: TipRequest, @@ -198,7 +212,7 @@ impl StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v2/clarity_marf_value/{}", &clarity_marf_key), + format!("/v2/clarity/marf/{}", &clarity_marf_key), HttpRequestContents::new() .for_tip(tip_req) .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), @@ -208,10 +222,10 @@ impl StacksHttpRequest { } impl StacksHttpResponse { - pub fn decode_clarity_marf_value_response(self) -> Result { + pub fn decode_clarity_marf_response(self) -> Result { let contents = self.get_http_payload_ok()?; let contents_json: serde_json::Value = contents.try_into()?; - let resp: ClarityMarfValueResponse = serde_json::from_value(contents_json) + let resp: ClarityMarfResponse = serde_json::from_value(contents_json) .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; Ok(resp) } diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs index 8a08a48503..5ef3feee6e 100644 --- a/stackslib/src/net/api/getclaritymetadata.rs +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -15,11 +15,10 @@ // along with this program. If not, see . use clarity::vm::clarity::ClarityConnection; -use clarity::vm::representations::{ - CONTRACT_NAME_REGEX_STRING, METADATA_KEY_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, -}; +use clarity::vm::representations::{CONTRACT_NAME_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING}; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; +use lazy_static::lazy_static; use regex::{Captures, Regex}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerHost; @@ -34,6 +33,15 @@ use crate::net::httpcore::{ }; use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +lazy_static! 
{ + static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = + "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); + static ref METADATA_KEY_REGEX_STRING: String = format!( + r"vm-metadata::\d+::(contract|contract-size|contract-src|contract-data-size|({}))", + *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, + ); +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ClarityMetadataResponse { pub data: String, @@ -61,7 +69,7 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { fn path_regex(&self) -> Regex { Regex::new(&format!( - r"^/v2/clarity_metadata/(?P
{})/(?P{})/(?P(analysis)|({}))$", + r"^/v2/clarity/metadata/(?P
{})/(?P{})/(?P(analysis)|({}))$", *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *METADATA_KEY_REGEX_STRING @@ -70,7 +78,7 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { } fn metrics_identifier(&self) -> &str { - "/v2/clarity_metadata/:principal/:contract_name/:clarity_metadata_key" + "/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key" } /// Try to decode this request. @@ -89,7 +97,15 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { } let contract_identifier = request::get_contract_address(captures, "address", "contract")?; - let metadata_key = request::get_clarity_key(captures, "clarity_metadata_key")?; + + let metadata_key = if let Some(key_str) = captures.name("clarity_metadata_key") { + key_str.as_str().to_string() + } else { + return Err(Error::Http( + 404, + "Missing `clarity_metadata_key`".to_string(), + )); + }; self.contract_identifier = Some(contract_identifier); self.clarity_metadata_key = Some(metadata_key); @@ -197,7 +213,7 @@ impl StacksHttpRequest { host, "GET".into(), format!( - "/v2/clarity_metadata/{}/{}/{}", + "/v2/clarity/metadata/{}/{}/{}", &contract_addr, &contract_name, &clarity_metadata_key ), HttpRequestContents::new().for_tip(tip_req), diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 9e6be1bf11..0057f9047d 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -92,9 +92,7 @@ impl StacksHttp { self.register_rpc_endpoint(getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new()); self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); self.register_rpc_endpoint(getblock_v3::RPCNakamotoBlockRequestHandler::new()); - self.register_rpc_endpoint( - getclaritymarfvalue::RPCGetClarityMarfValueRequestHandler::new(), - ); + self.register_rpc_endpoint(getclaritymarfvalue::RPCGetClarityMarfRequestHandler::new()); self.register_rpc_endpoint(getclaritymetadata::RPCGetClarityMetadataRequestHandler::new()); 
self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index 40e73c41e4..ce342b7442 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -43,7 +43,7 @@ fn test_try_parse_request() { let valid_keys = [vm_key_epoch, vm_key_trip, vm_key_quad]; for key in valid_keys { - let request = StacksHttpRequest::new_getclaritymarfvalue( + let request = StacksHttpRequest::new_getclaritymarf( addr.into(), key.to_string(), TipRequest::SpecificTip(StacksBlockId([0x22; 32])), @@ -58,7 +58,7 @@ fn test_try_parse_request() { let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = getclaritymarfvalue::RPCGetClarityMarfValueRequestHandler::new(); + let mut handler = getclaritymarfvalue::RPCGetClarityMarfRequestHandler::new(); let mut parsed_request = http .handle_try_parse_request( &mut handler, @@ -88,7 +88,7 @@ fn test_try_make_response() { let mut requests = vec![]; // query existing - let request = StacksHttpRequest::new_getclaritymarfvalue( + let request = StacksHttpRequest::new_getclaritymarf( addr.into(), "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar".to_string(), TipRequest::UseLatestAnchoredTip, @@ -97,7 +97,7 @@ fn test_try_make_response() { requests.push(request); // query existing unconfirmed - let request = StacksHttpRequest::new_getclaritymarfvalue( + let request = StacksHttpRequest::new_getclaritymarf( addr.into(), "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed::1::bar-unconfirmed" .to_string(), @@ -107,7 +107,7 @@ fn test_try_make_response() { requests.push(request); // query non-existant var - let request = StacksHttpRequest::new_getclaritymarfvalue( + 
let request = StacksHttpRequest::new_getclaritymarf( addr.into(), "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist".to_string(), TipRequest::UseLatestAnchoredTip, @@ -116,7 +116,7 @@ fn test_try_make_response() { requests.push(request); // query non-existant contract - let request = StacksHttpRequest::new_getclaritymarfvalue( + let request = StacksHttpRequest::new_getclaritymarf( addr.into(), "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist::1::bar".to_string(), TipRequest::UseLatestAnchoredTip, diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 731b5fb771..dec51df42a 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -247,17 +247,6 @@ pub mod request { Ok(txid) } - /// Get a clarity key (MARF or Metadata) from a path's captures, given the name of the regex field. - pub fn get_clarity_key(captures: &Captures, clarity_key: &str) -> Result { - let key = if let Some(key_str) = captures.name(clarity_key) { - key_str.as_str().to_string() - } else { - return Err(HttpError::Http(404, format!("Missing `{}`", clarity_key))); - }; - - Ok(key) - } - /// Get and parse a Clarity name from a path's captures, given the name of the regex field. 
pub fn get_clarity_name(captures: &Captures, key: &str) -> Result { let clarity_name = if let Some(name_str) = captures.name(key) { From 74ac33fed796250db7642a3c1a3492a84fa856f8 Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Tue, 5 Nov 2024 18:49:23 +0100 Subject: [PATCH 06/56] feat: get data from hash --- clarity/src/vm/database/clarity_store.rs | 13 ++++- clarity/src/vm/database/sqlite.rs | 9 ++- stackslib/src/chainstate/stacks/index/marf.rs | 58 +++++++++++++++++++ stackslib/src/clarity_vm/database/marf.rs | 51 ++++++++++++++++ stackslib/src/clarity_vm/database/mod.rs | 9 +++ 5 files changed, 138 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index b6a45ee764..6e8f878f6e 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -18,7 +18,7 @@ use std::path::PathBuf; #[cfg(feature = "canonical")] use rusqlite::Connection; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; @@ -67,6 +67,10 @@ pub trait ClarityBackingStore { /// fetch K-V out of the committed datastore, along with the byte representation /// of the Merkle proof for that key-value pair fn get_data_with_proof(&mut self, key: &str) -> Result)>>; + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> Result)>>; fn has_entry(&mut self, key: &str) -> Result { Ok(self.get_data(key)?.is_some()) } @@ -213,6 +217,13 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } + fn get_data_with_proof_from_path( + &mut self, + _hash: &TrieHash, + ) -> Result)>> { + panic!("NullBackingStore can't retrieve data") + } + #[cfg(feature = 
"canonical")] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index dc3ad4f5bd..92a932676d 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -19,7 +19,7 @@ use rusqlite::{ params, Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, Savepoint, }; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::db_common::tx_busy_handler; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -328,6 +328,13 @@ impl ClarityBackingStore for MemoryBackingStore { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> Result)>> { + self.get_data_with_proof(&hash.to_string()) + } + fn get_side_store(&mut self) -> &Connection { &self.side_store } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index d5dd77c51f..81506a08c3 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -142,6 +142,22 @@ pub trait MarfConnection { }) } + fn get_with_proof_from_hash( + &mut self, + block_hash: &T, + hash: &TrieHash, + ) -> Result)>, Error> { + self.with_conn(|conn| { + let path = TriePath::from_bytes(hash.as_bytes()).ok_or(Error::BadSeekValue)?; + let marf_value = match MARF::get_by_path(conn, block_hash, &path)? 
{ + None => return Ok(None), + Some(x) => x, + }; + let proof = TrieMerkleProof::from_path(conn, &path, &marf_value, block_hash)?; + Ok(Some((marf_value, proof))) + }) + } + fn get_block_at_height(&mut self, height: u32, tip: &T) -> Result, Error> { self.with_conn(|c| MARF::get_block_at_height(c, height, tip)) } @@ -1123,6 +1139,33 @@ impl MARF { Ok(MARF::from_storage(file_storage)) } + pub fn get_by_path( + storage: &mut TrieStorageConnection, + block_hash: &T, + path: &TriePath, + ) -> Result, Error> { + let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); + + let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }); + + // restore + storage + .open_block_maybe_id(&cur_block_hash, cur_block_id) + .map_err(|e| { + warn!( + "Failed to re-open {} {:?}: {:?}", + &cur_block_hash, cur_block_id, &e + ); + warn!("Result of failed path lookup '{}': {:?}", path, &result); + e + })?; + + result.map(|option_result| option_result.map(|leaf| leaf.data)) + } + pub fn get_by_key( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1320,6 +1363,21 @@ impl MARF { Ok(Some((marf_value, proof))) } + pub fn get_with_proof_from_hash( + &mut self, + block_hash: &T, + hash: &TrieHash, + ) -> Result)>, Error> { + let mut conn = self.storage.connection(); + let path = TriePath::from_bytes(hash.as_bytes()).ok_or(Error::BadSeekValue)?; + let marf_value = match MARF::get_by_path(&mut conn, block_hash, &path)? 
{ + None => return Ok(None), + Some(x) => x, + }; + let proof = TrieMerkleProof::from_path(&mut conn, &path, &marf_value, block_hash)?; + Ok(Some((marf_value, proof))) + } + pub fn get_bhh_at_height(&mut self, block_hash: &T, height: u32) -> Result, Error> { MARF::get_block_at_height(&mut self.storage.connection(), height, block_hash) } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index fed0e70e95..be0f60ff56 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -20,6 +20,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MarfTransaction, MARF}; +use crate::chainstate::stacks::index::node::TriePath; use crate::chainstate::stacks::index::{ ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieMerkleProof, }; @@ -422,6 +423,31 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .transpose() } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> { + self.marf + .get_with_proof_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
+ .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } + fn get_data(&mut self, key: &str) -> InterpreterResult> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf @@ -653,6 +679,31 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .transpose() } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> { + self.marf + .get_with_proof_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? + .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } + fn get_side_store(&mut self) -> &Connection { self.marf.sqlite_tx() } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 81f0bac43c..49a29773bd 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1,5 +1,6 @@ use std::ops::{Deref, DerefMut}; +use clarity::types::chainstate::TrieHash; use clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; use clarity::vm::database::sqlite::{ @@ -1138,6 +1139,14 @@ impl ClarityBackingStore for MemoryBackingStore { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } + fn get_data_with_proof_from_path( + &mut self, + key: &TrieHash, + ) -> 
InterpreterResult)>> { + // Ok(SqliteConnection::get(self.get_side_store(), )?.map(|x| (x, vec![]))) + Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) + } + fn get_side_store(&mut self) -> &Connection { &self.side_store } From 14535c41085c6681ede71404d9efb691b2166050 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 8 Nov 2024 15:07:47 -0500 Subject: [PATCH 07/56] chore: remove needless TriePath type and use TrieHash everywhere. Also, add `MARF::get_by_path()` to look up `MARFValue`s by `TrieHash` instead of by `&str` keys, and add the relevant `ClarityBackingStore` implementation to the stackslib's read-only and writeable MARF stores --- stacks-common/src/types/chainstate.rs | 62 +++++++++++++++ stackslib/src/burnchains/tests/burnchain.rs | 1 - .../src/chainstate/burn/db/processing.rs | 1 - stackslib/src/chainstate/burn/db/sortdb.rs | 1 - stackslib/src/chainstate/burn/distribution.rs | 1 - stackslib/src/chainstate/burn/mod.rs | 1 - .../burn/operations/leader_block_commit.rs | 1 - .../burn/operations/leader_key_register.rs | 1 - stackslib/src/chainstate/burn/sortition.rs | 2 +- stackslib/src/chainstate/stacks/index/bits.rs | 10 +-- .../src/chainstate/stacks/index/cache.rs | 6 +- stackslib/src/chainstate/stacks/index/file.rs | 2 +- stackslib/src/chainstate/stacks/index/marf.rs | 74 +++++++++++++----- stackslib/src/chainstate/stacks/index/mod.rs | 65 ---------------- stackslib/src/chainstate/stacks/index/node.rs | 23 +----- .../src/chainstate/stacks/index/proofs.rs | 20 ++--- .../src/chainstate/stacks/index/storage.rs | 4 +- .../src/chainstate/stacks/index/test/cache.rs | 4 +- .../src/chainstate/stacks/index/test/file.rs | 4 +- .../src/chainstate/stacks/index/test/marf.rs | 66 ++++++++-------- .../src/chainstate/stacks/index/test/mod.rs | 8 +- .../src/chainstate/stacks/index/test/node.rs | 20 ++--- .../chainstate/stacks/index/test/proofs.rs | 10 +-- .../chainstate/stacks/index/test/storage.rs | 6 +- 
.../src/chainstate/stacks/index/test/trie.rs | 76 +++++++++---------- stackslib/src/chainstate/stacks/index/trie.rs | 2 +- .../src/chainstate/stacks/index/trie_sql.rs | 2 +- stackslib/src/core/tests/mod.rs | 2 +- 28 files changed, 241 insertions(+), 234 deletions(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 47d6c3c499..59347ed36a 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -30,6 +30,68 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; +impl TrieHash { + pub fn from_key(k: &str) -> Self { + Self::from_data(k.as_bytes()) + } + + /// TrieHash of zero bytes + pub fn from_empty_data() -> TrieHash { + // sha2-512/256 hash of empty string. + // this is used so frequently it helps performance if we just have a constant for it. + TrieHash([ + 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, + 0x14, 0x06, 0x9b, 0xdd, 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, 0x1e, + 0xce, 0xf0, 0x96, 0x7a, + ]) + } + + /// TrieHash from bytes + pub fn from_data(data: &[u8]) -> TrieHash { + if data.len() == 0 { + return TrieHash::from_empty_data(); + } + + let mut tmp = [0u8; 32]; + + let mut hasher = Sha512_256::new(); + hasher.update(data); + tmp.copy_from_slice(hasher.finalize().as_slice()); + + TrieHash(tmp) + } + + pub fn from_data_array>(data: &[B]) -> TrieHash { + if data.len() == 0 { + return TrieHash::from_empty_data(); + } + + let mut tmp = [0u8; 32]; + + let mut hasher = Sha512_256::new(); + + for item in data.iter() { + hasher.update(item); + } + tmp.copy_from_slice(hasher.finalize().as_slice()); + TrieHash(tmp) + } + + /// Convert to a String that can be used in e.g. 
sqlite + pub fn to_string(&self) -> String { + let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", + self.0[0], self.0[1], self.0[2], self.0[3], + self.0[4], self.0[5], self.0[6], self.0[7], + self.0[8], self.0[9], self.0[10], self.0[11], + self.0[12], self.0[13], self.0[14], self.0[15], + self.0[16], self.0[17], self.0[18], self.0[19], + self.0[20], self.0[21], self.0[22], self.0[23], + self.0[24], self.0[25], self.0[26], self.0[27], + self.0[28], self.0[29], self.0[30], self.0[31]); + s + } +} + #[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index b08d7a097e..1dd8d283ef 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -44,7 +44,6 @@ use crate::chainstate::burn::{ BlockSnapshot, ConsensusHash, ConsensusHashExtensions, OpsHash, SortitionHash, }; use crate::chainstate::stacks::address::StacksAddressExtensions; -use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::util_lib::db::Error as db_error; diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 82318bfe37..4aa0d64929 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -353,7 +353,6 @@ mod tests { use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegisterOp}; use crate::chainstate::burn::*; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::MICROSTACKS_PER_STACKS; diff 
--git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 53dc2d0547..7e697a7dff 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -6594,7 +6594,6 @@ pub mod tests { BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::burn::ConsensusHash; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index ed01ae014b..59c335cd58 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -450,7 +450,6 @@ mod tests { }; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::MINING_COMMITMENT_WINDOW; diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index be92c3088f..4552210f44 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -432,7 +432,6 @@ mod tests { use crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::chainstate::burn::db::sortdb::*; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::util_lib::db::Error as db_error; #[test] diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 910315f082..31643a33d9 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -1172,7 +1172,6 @@ mod tests { use 
crate::chainstate::burn::operations::*; use crate::chainstate::burn::{ConsensusHash, *}; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::{ StacksEpoch, StacksEpochExtension, StacksEpochId, PEER_VERSION_EPOCH_1_0, diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 44402adc0c..5608b6739d 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -253,7 +253,6 @@ pub mod tests { }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::core::StacksEpochId; pub struct OpFixture { diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index b0221f1439..63a7b7feb3 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::{ SortitionHash, }; use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId, TrieHashExtension}; +use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId}; use crate::core::*; use crate::util_lib::db::Error as db_error; diff --git a/stackslib/src/chainstate/stacks/index/bits.rs b/stackslib/src/chainstate/stacks/index/bits.rs index e212b03299..6397cee3a3 100644 --- a/stackslib/src/chainstate/stacks/index/bits.rs +++ b/stackslib/src/chainstate/stacks/index/bits.rs @@ -29,7 +29,7 @@ use stacks_common::util::macros::is_trace; use crate::chainstate::stacks::index::node::{ clear_backptr, ConsensusSerializable, TrieNode, TrieNode16, TrieNode256, TrieNode4, 
TrieNode48, - TrieNodeID, TrieNodeType, TriePtr, TRIEPATH_MAX_LEN, TRIEPTR_SIZE, + TrieNodeID, TrieNodeType, TriePtr, TRIEPTR_SIZE, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::{BlockMap, Error, MarfTrieId, TrieLeaf}; @@ -55,15 +55,15 @@ pub fn path_from_bytes(r: &mut R) -> Result, Error> { } })?; - if lenbuf[0] as usize > TRIEPATH_MAX_LEN { + if lenbuf[0] as usize > TRIEHASH_ENCODED_SIZE { trace!( "Path length is {} (expected <= {})", lenbuf[0], - TRIEPATH_MAX_LEN + TRIEHASH_ENCODED_SIZE ); return Err(Error::CorruptionError(format!( "Node path is longer than {} bytes (got {})", - TRIEPATH_MAX_LEN, lenbuf[0] + TRIEHASH_ENCODED_SIZE, lenbuf[0] ))); } @@ -326,7 +326,7 @@ pub fn read_nodetype_at_head_nohash( /// node hash id ptrs & ptr data path /// /// X is fixed and determined by the TrieNodeType variant. -/// Y is variable, but no more than TriePath::len(). +/// Y is variable, but no more than TrieHash::len(). /// /// If `read_hash` is false, then the contents of the node hash are undefined. 
fn inner_read_nodetype_at_head( diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index 7f92efdd8b..7547fd6d80 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -40,7 +40,7 @@ use crate::chainstate::stacks::index::bits::{ }; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::{trie_sql, ClarityMarfTrieId, Error, MarfTrieId, TrieLeaf}; use crate::util_lib::db::{ @@ -420,7 +420,7 @@ pub mod test { } } else { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -443,7 +443,7 @@ pub mod test { for (i, block_data) in data.iter().enumerate() { test_debug!("Read block {}", i); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let read_time = SystemTime::now(); diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 4123b1310a..5a7da69e52 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -42,7 +42,7 @@ use crate::chainstate::stacks::index::bits::{ }; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{NodeHashReader, TrieStorageConnection}; use 
crate::chainstate::stacks::index::{trie_sql, ClarityMarfTrieId, Error, MarfTrieId, TrieLeaf}; diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 81506a08c3..427cde29fc 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -28,14 +28,14 @@ use stacks_common::util::log; use crate::chainstate::stacks::index::bits::{get_leaf_hash, get_node_hash, read_root_hash}; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, CursorError, TrieCursor, TrieNode, TrieNode16, - TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, TRIEPTR_SIZE, + TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, TRIEPTR_SIZE, }; use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, TrieStorageTransaction, }; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHashExtension, TrieLeaf, TrieMerkleProof, + ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof, }; use crate::util_lib::db::Error as db_error; @@ -126,6 +126,11 @@ pub trait MarfConnection { fn get(&mut self, block_hash: &T, key: &str) -> Result, Error> { self.with_conn(|c| MARF::get_by_key(c, block_hash, key)) } + + /// Resolve a TrieHash from the MARF to a MARFValue with respect to the given block height. + fn get_from_hash(&mut self, block_hash: &T, th: &TrieHash) -> Result, Error> { + self.with_conn(|c| MARF::get_by_hash(c, block_hash, th)) + } fn get_with_proof( &mut self, @@ -148,12 +153,11 @@ pub trait MarfConnection { hash: &TrieHash, ) -> Result)>, Error> { self.with_conn(|conn| { - let path = TriePath::from_bytes(hash.as_bytes()).ok_or(Error::BadSeekValue)?; - let marf_value = match MARF::get_by_path(conn, block_hash, &path)? 
{ + let marf_value = match MARF::get_by_path(conn, block_hash, hash)? { None => return Ok(None), Some(x) => x, }; - let proof = TrieMerkleProof::from_path(conn, &path, &marf_value, block_hash)?; + let proof = TrieMerkleProof::from_path(conn, hash, &marf_value, block_hash)?; Ok(Some((marf_value, proof))) }) } @@ -797,7 +801,7 @@ impl MARF { fn walk_cow( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result, Error> { let block_id = storage.get_block_identifier(block_hash); MARF::extend_trie(storage, block_hash)?; @@ -902,7 +906,7 @@ impl MARF { fn walk( storage: &mut TrieStorageConnection, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result<(TrieCursor, TrieNodeType), Error> { storage.open_block(block_hash)?; @@ -1010,7 +1014,7 @@ impl MARF { pub fn get_path( storage: &mut TrieStorageConnection, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result, Error> { trace!("MARF::get_path({:?}) {:?}", block_hash, path); @@ -1061,7 +1065,7 @@ impl MARF { fn do_insert_leaf( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, leaf_value: &TrieLeaf, update_skiplist: bool, ) -> Result<(), Error> { @@ -1092,7 +1096,7 @@ impl MARF { pub fn insert_leaf( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, value: &TrieLeaf, ) -> Result<(), Error> { if storage.readonly() { @@ -1105,7 +1109,7 @@ impl MARF { pub fn insert_leaf_in_batch( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, value: &TrieLeaf, ) -> Result<(), Error> { if storage.readonly() { @@ -1142,7 +1146,7 @@ impl MARF { pub fn get_by_path( storage: &mut TrieStorageConnection, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result, Error> { let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); @@ -1166,6 +1170,8 @@ impl MARF { result.map(|option_result| option_result.map(|leaf| leaf.data)) } + /// Load 
up a MARF value by key, given a handle to the storage connection and a tip to work off + /// of. pub fn get_by_key( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1173,7 +1179,7 @@ impl MARF { ) -> Result, Error> { let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { Error::NotFoundError => Ok(None), @@ -1195,6 +1201,35 @@ impl MARF { result.map(|option_result| option_result.map(|leaf| leaf.data)) } + /// Load up a MARF value by TrieHash, given a handle to the storage connection and a tip to + /// work off of. + pub fn get_by_hash( + storage: &mut TrieStorageConnection, + block_hash: &T, + path: &TrieHash, + ) -> Result, Error> { + let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); + + let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }); + + // restore + storage + .open_block_maybe_id(&cur_block_hash, cur_block_id) + .map_err(|e| { + warn!( + "Failed to re-open {} {:?}: {:?}", + &cur_block_hash, cur_block_id, &e + ); + warn!("Result of failed hash lookup '{}': {:?}", path, &result); + e + })?; + + result.map(|option_result| option_result.map(|leaf| leaf.data)) + } + pub fn get_block_height_miner_tip( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1305,7 +1340,7 @@ impl MARF { .zip(values[0..last].iter()) .try_for_each(|((index, key), value)| { let marf_leaf = TrieLeaf::from_value(&[], value.clone()); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); if eta_enabled { let updated_progress = 100 * index / last; @@ -1323,7 +1358,7 @@ impl MARF { if result.is_ok() { // last insert updates the root with the skiplist hash let marf_leaf = TrieLeaf::from_value(&[], values[last].clone()); - let path = TriePath::from_key(&keys[last]); + let path = 
TrieHash::from_key(&keys[last]); result = MARF::insert_leaf(conn, block_hash, &path, &marf_leaf); } @@ -1362,14 +1397,13 @@ impl MARF { let proof = TrieMerkleProof::from_raw_entry(&mut conn, key, &marf_value, block_hash)?; Ok(Some((marf_value, proof))) } - + pub fn get_with_proof_from_hash( &mut self, block_hash: &T, - hash: &TrieHash, + path: &TrieHash, ) -> Result)>, Error> { let mut conn = self.storage.connection(); - let path = TriePath::from_bytes(hash.as_bytes()).ok_or(Error::BadSeekValue)?; let marf_value = match MARF::get_by_path(&mut conn, block_hash, &path)? { None => return Ok(None), Some(x) => x, @@ -1414,14 +1448,14 @@ impl MARF { return Err(Error::ReadOnlyError); } let marf_leaf = TrieLeaf::from_value(&[], value); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); self.insert_raw(path, marf_leaf) } /// Insert the given (key, value) pair into the MARF. Inserting the same key twice silently /// overwrites the existing key. Succeeds if there are no storage errors. 
/// Must be called after a call to .begin() (will fail otherwise) - pub fn insert_raw(&mut self, path: TriePath, marf_leaf: TrieLeaf) -> Result<(), Error> { + pub fn insert_raw(&mut self, path: TrieHash, marf_leaf: TrieLeaf) -> Result<(), Error> { if self.storage.readonly() { return Err(Error::ReadOnlyError); } diff --git a/stackslib/src/chainstate/stacks/index/mod.rs b/stackslib/src/chainstate/stacks/index/mod.rs index eb082747c5..9fee7ab2d6 100644 --- a/stackslib/src/chainstate/stacks/index/mod.rs +++ b/stackslib/src/chainstate/stacks/index/mod.rs @@ -151,71 +151,6 @@ impl MarfTrieId for BurnchainHeaderHash {} #[cfg(test)] impl MarfTrieId for BlockHeaderHash {} -pub trait TrieHashExtension { - fn from_empty_data() -> TrieHash; - fn from_data(data: &[u8]) -> TrieHash; - fn from_data_array>(data: &[B]) -> TrieHash; - fn to_string(&self) -> String; -} - -impl TrieHashExtension for TrieHash { - /// TrieHash of zero bytes - fn from_empty_data() -> TrieHash { - // sha2-512/256 hash of empty string. - // this is used so frequently it helps performance if we just have a constant for it. - TrieHash([ - 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, - 0x14, 0x06, 0x9b, 0xdd, 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, 0x1e, - 0xce, 0xf0, 0x96, 0x7a, - ]) - } - - /// TrieHash from bytes - fn from_data(data: &[u8]) -> TrieHash { - if data.len() == 0 { - return TrieHash::from_empty_data(); - } - - let mut tmp = [0u8; 32]; - - let mut hasher = TrieHasher::new(); - hasher.update(data); - tmp.copy_from_slice(hasher.finalize().as_slice()); - - TrieHash(tmp) - } - - fn from_data_array>(data: &[B]) -> TrieHash { - if data.len() == 0 { - return TrieHash::from_empty_data(); - } - - let mut tmp = [0u8; 32]; - - let mut hasher = TrieHasher::new(); - - for item in data.iter() { - hasher.update(item); - } - tmp.copy_from_slice(hasher.finalize().as_slice()); - TrieHash(tmp) - } - - /// Convert to a String that can be used in e.g. 
sqlite - fn to_string(&self) -> String { - let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", - self.0[0], self.0[1], self.0[2], self.0[3], - self.0[4], self.0[5], self.0[6], self.0[7], - self.0[8], self.0[9], self.0[10], self.0[11], - self.0[12], self.0[13], self.0[14], self.0[15], - self.0[16], self.0[17], self.0[18], self.0[19], - self.0[20], self.0[21], self.0[22], self.0[23], - self.0[24], self.0[25], self.0[26], self.0[27], - self.0[28], self.0[29], self.0[30], self.0[31]); - s - } -} - /// Structure that holds the actual data in a MARF leaf node. /// It only stores the hash of some value string, but we add 8 extra bytes for future extensions. /// If not used (the rule today), then they should all be 0. diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 19e8aa327f..4436d7f239 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -32,7 +32,7 @@ use crate::chainstate::stacks::index::bits::{ get_path_byte_len, get_ptrs_byte_len, path_from_bytes, ptrs_from_bytes, write_path_to_bytes, }; use crate::chainstate::stacks::index::{ - BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHashExtension, TrieHasher, + BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHasher, TrieLeaf, MARF_VALUE_ENCODED_SIZE, }; @@ -106,23 +106,6 @@ fn ptrs_consensus_hash( Ok(()) } -/// A path in the Trie is the SHA2-512/256 hash of its key. 
-pub struct TriePath([u8; 32]); -impl_array_newtype!(TriePath, u8, 32); -impl_array_hexstring_fmt!(TriePath); -impl_byte_array_newtype!(TriePath, u8, 32); - -pub const TRIEPATH_MAX_LEN: usize = 32; - -impl TriePath { - pub fn from_key(k: &str) -> TriePath { - let h = TrieHash::from_data(k.as_bytes()); - let mut hb = [0u8; TRIEPATH_MAX_LEN]; - hb.copy_from_slice(h.as_bytes()); - TriePath(hb) - } -} - /// All Trie nodes implement the following methods: pub trait TrieNode { /// Node ID for encoding/decoding @@ -339,7 +322,7 @@ impl TriePtr { /// nodes to visit when updating the root node hash. #[derive(Debug, Clone, PartialEq)] pub struct TrieCursor { - pub path: TriePath, // the path to walk + pub path: TrieHash, // the path to walk pub index: usize, // index into the path pub node_path_index: usize, // index into the currently-visited node's compressed path pub nodes: Vec, // list of nodes this cursor visits @@ -349,7 +332,7 @@ pub struct TrieCursor { } impl TrieCursor { - pub fn new(path: &TriePath, root_ptr: TriePtr) -> TrieCursor { + pub fn new(path: &TrieHash, root_ptr: TriePtr) -> TrieCursor { TrieCursor { path: path.clone(), index: 0, diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 815def9c91..aae802334c 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -35,14 +35,14 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::marf::MARF; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, ConsensusSerializable, CursorError, TrieCursor, - TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePath, + TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::trie::Trie; use 
crate::chainstate::stacks::index::{ BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, ProofTrieNode, ProofTriePtr, - TrieHashExtension, TrieLeaf, TrieMerkleProof, TrieMerkleProofType, + TrieLeaf, TrieMerkleProof, TrieMerkleProofType, }; impl ConsensusSerializable<()> for ProofTrieNode { @@ -1004,7 +1004,7 @@ impl TrieMerkleProof { /// * segment proof i+1 must be a prefix of segment proof i /// * segment proof 0 must end in a leaf /// * all segment proofs must end in a Node256 (a root) - fn is_proof_well_formed(proof: &Vec>, expected_path: &TriePath) -> bool { + fn is_proof_well_formed(proof: &Vec>, expected_path: &TrieHash) -> bool { if proof.len() == 0 { trace!("Proof is empty"); return false; @@ -1048,7 +1048,7 @@ impl TrieMerkleProof { } }; - // first path bytes must be the expected TriePath + // first path bytes must be the expected TrieHash if expected_path.as_bytes().to_vec() != path_bytes { trace!( "Invalid proof -- path bytes {:?} differs from the expected path {:?}", @@ -1121,7 +1121,7 @@ impl TrieMerkleProof { /// NOTE: Trie root hashes are globally unique by design, even if they represent the same contents, so the root_to_block map is bijective with high probability. 
pub fn verify_proof( proof: &Vec>, - path: &TriePath, + path: &TrieHash, value: &MARFValue, root_hash: &TrieHash, root_to_block: &HashMap, @@ -1351,7 +1351,7 @@ impl TrieMerkleProof { /// Verify this proof pub fn verify( &self, - path: &TriePath, + path: &TrieHash, marf_value: &MARFValue, root_hash: &TrieHash, root_to_block: &HashMap, @@ -1362,7 +1362,7 @@ impl TrieMerkleProof { /// Walk down the trie pointed to by s until we reach a backptr or a leaf fn walk_to_leaf_or_backptr( storage: &mut TrieStorageConnection, - path: &TriePath, + path: &TrieHash, ) -> Result<(TrieCursor, TrieNodeType, TriePtr), Error> { trace!( "Walk path {:?} from {:?} to the first backptr", @@ -1438,7 +1438,7 @@ impl TrieMerkleProof { /// If the path doesn't resolve, return an error (NotFoundError) pub fn from_path( storage: &mut TrieStorageConnection, - path: &TriePath, + path: &TrieHash, expected_value: &MARFValue, root_block_header: &T, ) -> Result, Error> { @@ -1562,7 +1562,7 @@ impl TrieMerkleProof { root_block_header: &T, ) -> Result, Error> { let marf_value = MARFValue::from_value(value); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); TrieMerkleProof::from_path(storage, &path, &marf_value, root_block_header) } @@ -1572,7 +1572,7 @@ impl TrieMerkleProof { value: &MARFValue, root_block_header: &T, ) -> Result, Error> { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); TrieMerkleProof::from_path(storage, &path, value, root_block_header) } } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 6994c7ad05..170430c74c 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -46,12 +46,12 @@ use crate::chainstate::stacks::index::file::{TrieFile, TrieFileNodeHashReader}; use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, 
set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::profile::TrieBenchmark; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHashExtension, TrieHasher, + trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHasher, TrieLeaf, }; use crate::util_lib::db::{ diff --git a/stackslib/src/chainstate/stacks/index/test/cache.rs b/stackslib/src/chainstate/stacks/index/test/cache.rs index 5a0bc41d00..1abd0e741a 100644 --- a/stackslib/src/chainstate/stacks/index/test/cache.rs +++ b/stackslib/src/chainstate/stacks/index/test/cache.rs @@ -105,7 +105,7 @@ fn test_marf_with_cache( } } else { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -128,7 +128,7 @@ fn test_marf_with_cache( for (i, block_data) in data.iter().enumerate() { test_debug!("Read block {}", i); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let read_time = SystemTime::now(); diff --git a/stackslib/src/chainstate/stacks/index/test/file.rs b/stackslib/src/chainstate/stacks/index/test/file.rs index 499198aca5..19ac5e60e4 100644 --- a/stackslib/src/chainstate/stacks/index/test/file.rs +++ b/stackslib/src/chainstate/stacks/index/test/file.rs @@ -106,7 +106,7 @@ fn test_migrate_existing_trie_blobs() { marf.begin(&last_block_header, &block_header).unwrap(); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); 
} @@ -147,7 +147,7 @@ fn test_migrate_existing_trie_blobs() { // verify that we can read everything from the blobs for (i, block_data) in data.iter().enumerate() { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let leaf = MARF::get_path( diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index b66fc4dd8a..f4f4dd0de0 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -33,7 +33,7 @@ use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::test::*; use crate::chainstate::stacks::index::trie::*; use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, TrieHashExtension, TrieLeaf, + ClarityMarfTrieId, Error, MARFValue, TrieLeaf, }; #[test] @@ -52,7 +52,7 @@ fn marf_insert_different_leaf_same_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); @@ -117,7 +117,7 @@ fn marf_insert_different_leaf_different_path_different_block_100() { marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &block_header) .unwrap(); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -140,7 +140,7 @@ fn marf_insert_different_leaf_different_path_different_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, i as u8, ]; - let path = 
TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); let leaf = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &path) @@ -189,7 +189,7 @@ fn marf_insert_same_leaf_different_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); @@ -197,7 +197,7 @@ fn marf_insert_same_leaf_different_block_100() { marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &next_block_header) .unwrap(); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -271,7 +271,7 @@ fn marf_insert_leaf_sequence_2() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let prior_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); marf.commit().unwrap(); @@ -294,7 +294,7 @@ fn marf_insert_leaf_sequence_2() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); let leaf = MARF::get_path( @@ -348,7 +348,7 @@ fn marf_insert_leaf_sequence_100() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); marf.commit().unwrap(); let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); @@ -372,7 +372,7 @@ fn marf_insert_leaf_sequence_100() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); eprintln!("Finding value inserted at {}", &next_block_header); @@ -567,7 +567,7 @@ where let next_path = path_gen(i, path.clone()); - let triepath = TriePath::from_bytes(&next_path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&next_path[..]).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); debug!("----------------"); @@ -582,7 +582,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, - &TriePath::from_bytes(&next_path[..]).unwrap(), + &TrieHash::from_bytes(&next_path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -603,7 +603,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, - &TriePath::from_bytes(&prev_path[..]).unwrap(), + &TrieHash::from_bytes(&prev_path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -675,7 +675,7 @@ where // add a leaf at the end of the path let next_path = path_gen(i, path.clone()); - let triepath = TriePath::from_bytes(&next_path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&next_path[..]).unwrap(); let value = MARFValue([i as u8; 40]); assert_eq!( @@ -847,7 +847,7 @@ fn marf_merkle_verify_backptrs() { marf.commit().unwrap(); marf.begin(&block_header_1, &block_header_2).unwrap(); marf.insert_raw( - TriePath::from_bytes(&path_2[..]).unwrap(), + 
TrieHash::from_bytes(&path_2[..]).unwrap(), TrieLeaf::new(&vec![], &[20 as u8; 40].to_vec()), ) .unwrap(); @@ -865,7 +865,7 @@ fn marf_merkle_verify_backptrs() { marf.commit().unwrap(); marf.begin(&block_header_2, &block_header_3).unwrap(); marf.insert_raw( - TriePath::from_bytes(&path_3[..]).unwrap(), + TrieHash::from_bytes(&path_3[..]).unwrap(), TrieLeaf::new(&vec![], &[21 as u8; 40].to_vec()), ) .unwrap(); @@ -922,7 +922,7 @@ where let (path, next_block_header) = path_gen(i); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -944,7 +944,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -998,7 +998,7 @@ where let i1 = i % 256; let (path, _next_block_header) = path_gen(i); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1011,7 +1011,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -1139,7 +1139,7 @@ fn marf_split_leaf_path() { .unwrap(); let path = [0u8; 32]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new(&vec![], &[0u8; 40].to_vec()); debug!("----------------"); @@ -1161,7 +1161,7 @@ fn marf_split_leaf_path() { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_2 = TrieLeaf::new(&vec![], &[1u8; 40].to_vec()); debug!("----------------"); @@ -1602,7 
+1602,7 @@ fn marf_read_random_1048576_4096_file_storage() { let path = TrieHash::from_data(&seed[..]).as_bytes()[0..32].to_vec(); seed = path.clone(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1615,7 +1615,7 @@ fn marf_read_random_1048576_4096_file_storage() { let read_value = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -1896,7 +1896,7 @@ fn marf_insert_flush_to_different_block() { None }; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1919,7 +1919,7 @@ fn marf_insert_flush_to_different_block() { let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &target_block, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -2017,7 +2017,7 @@ fn marf_insert_flush_to_different_block() { 24, 25, 26, 27, 28, 29, i0 as u8, i1 as u8, ]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -2037,7 +2037,7 @@ fn marf_insert_flush_to_different_block() { let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &read_from_block, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -2074,7 +2074,7 @@ fn test_marf_read_only() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let leaf = TrieLeaf::new( &vec![], &[ @@ -2138,13 +2138,13 @@ fn test_marf_begin_from_sentinel_twice() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_1 = TriePath::from_bytes(&path_1[..]).unwrap(); + let triepath_1 = TrieHash::from_bytes(&path_1[..]).unwrap(); let path_2 = [ 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_1 = TrieLeaf::new(&vec![], &vec![1u8; 40]); let value_2 = TrieLeaf::new(&vec![], &vec![2u8; 40]); @@ -2210,14 +2210,14 @@ fn test_marf_unconfirmed() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_1 = TriePath::from_bytes(&path_1[..]).unwrap(); + let triepath_1 = TrieHash::from_bytes(&path_1[..]).unwrap(); let value_1 = TrieLeaf::new(&vec![], &vec![1u8; 40]); let path_2 = [ 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_2 = TrieLeaf::new(&vec![], &vec![2u8; 40]); let block_header = StacksBlockId([0x33u8; 32]); diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index 2c3b04698c..9dbaa0959d 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -32,7 +32,7 @@ use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::trie::*; use crate::chainstate::stacks::index::{ - MARFValue, MarfTrieId, TrieHashExtension, TrieLeaf, TrieMerkleProof, + MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof, }; use crate::chainstate::stacks::{BlockHeaderHash, TrieHash}; @@ -108,7 +108,7 @@ pub fn 
merkle_test( value: &Vec, ) -> () { let (_, root_hash) = Trie::read_root(s).unwrap(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let block_header = BlockHeaderHash([0u8; 32]); s.open_block(&block_header).unwrap(); @@ -147,7 +147,7 @@ pub fn merkle_test_marf( s.open_block(header).unwrap(); let (_, root_hash) = Trie::read_root(s).unwrap(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let mut marf_value = [0u8; 40]; marf_value.copy_from_slice(&value[0..40]); @@ -199,7 +199,7 @@ pub fn merkle_test_marf_key_value( test_debug!("---------"); let root_to_block = root_to_block.unwrap_or_else(|| s.read_root_to_block_table().unwrap()); - let triepath = TriePath::from_key(key); + let triepath = TrieHash::from_key(key); let marf_value = MARFValue::from_value(value); assert!(proof.verify(&triepath, &marf_value, &root_hash, &root_to_block)); diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs b/stackslib/src/chainstate/stacks/index/test/node.rs index a98491595d..227adda439 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -4215,7 +4215,7 @@ fn trie_cursor_walk_full() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4313,7 +4313,7 @@ fn trie_cursor_walk_1() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4406,7 +4406,7 @@ fn trie_cursor_walk_2() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); 
@@ -4496,7 +4496,7 @@ fn trie_cursor_walk_3() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4585,7 +4585,7 @@ fn trie_cursor_walk_4() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4673,7 +4673,7 @@ fn trie_cursor_walk_5() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4760,7 +4760,7 @@ fn trie_cursor_walk_6() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4845,7 +4845,7 @@ fn trie_cursor_walk_10() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4937,7 +4937,7 @@ fn trie_cursor_walk_20() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -5028,7 +5028,7 @@ fn trie_cursor_walk_32() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let walk_point = nodes[0].clone(); diff --git a/stackslib/src/chainstate/stacks/index/test/proofs.rs b/stackslib/src/chainstate/stacks/index/test/proofs.rs index 9642bfcdc5..9bd24af548 100644 --- a/stackslib/src/chainstate/stacks/index/test/proofs.rs +++ 
b/stackslib/src/chainstate/stacks/index/test/proofs.rs @@ -59,7 +59,7 @@ fn verifier_catches_stale_proof() { let new_value = m.get(&block_2, &k1).unwrap().unwrap(); test_debug!("NEW: {:?}", new_value); - let path = TriePath::from_key(&k1); + let path = TrieHash::from_key(&k1); merkle_test_marf_key_value(&mut m.borrow_storage_backend(), &block_2, &k1, &new_v, None); @@ -75,7 +75,7 @@ fn verifier_catches_stale_proof() { .unwrap(); // the verifier should not allow a proof from k1 to old_v from block_2 - let triepath_2 = TriePath::from_key(&k1); + let triepath_2 = TrieHash::from_key(&k1); let marf_value_2 = MARFValue::from_value(&old_v); assert!(!proof_2.verify(&triepath_2, &marf_value_2, &root_hash_2, &root_to_block)); @@ -86,7 +86,7 @@ fn verifier_catches_stale_proof() { .unwrap(); // the verifier should allow a proof from k1 to old_v from block_1 - let triepath_1 = TriePath::from_key(&k1); + let triepath_1 = TrieHash::from_key(&k1); let marf_value_1 = MARFValue::from_value(&old_v); assert!(proof_1.verify(&triepath_1, &marf_value_1, &root_hash_1, &root_to_block)); } @@ -169,7 +169,7 @@ fn ncc_verifier_catches_stale_proof() { TrieMerkleProof::from_entry(&mut m.borrow_storage_backend(), &k1, &another_v, &block_5) .unwrap(); - let triepath_4 = TriePath::from_key(&k1); + let triepath_4 = TrieHash::from_key(&k1); let marf_value_4 = MARFValue::from_value(&another_v); let root_to_block = { m.borrow_storage_backend() @@ -186,7 +186,7 @@ fn ncc_verifier_catches_stale_proof() { TrieMerkleProof::from_entry(&mut m.borrow_storage_backend(), &k1, &old_v, &block_2) .unwrap(); - let triepath_4 = TriePath::from_key(&k1); + let triepath_4 = TrieHash::from_key(&k1); let marf_value_4 = MARFValue::from_value(&old_v); let root_to_block = { m.borrow_storage_backend() diff --git a/stackslib/src/chainstate/stacks/index/test/storage.rs b/stackslib/src/chainstate/stacks/index/test/storage.rs index a996bc7186..fdd3e30191 100644 --- a/stackslib/src/chainstate/stacks/index/test/storage.rs +++ 
b/stackslib/src/chainstate/stacks/index/test/storage.rs @@ -164,7 +164,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { ]; path_bytes[24..32].copy_from_slice(&i.to_be_bytes()); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); confirmed_marf.insert_raw(path.clone(), value).unwrap(); } @@ -213,7 +213,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { ]; path_bytes[24..32].copy_from_slice(&i.to_be_bytes()); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); // NOTE: may have been overwritten; just check for presence assert!( @@ -235,7 +235,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { path_bytes[16..24].copy_from_slice(&j.to_be_bytes()); } - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[(i + 128) as u8; 40].to_vec()); new_inserted.push((path.clone(), value.clone())); diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index ca2c0ced65..9bac45508c 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -137,7 +137,7 @@ fn trie_cursor_try_attach_leaf() { path[i] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); // end of path -- cursor points to the insertion point. 
@@ -164,7 +164,7 @@ fn trie_cursor_try_attach_leaf() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -194,7 +194,7 @@ fn trie_cursor_try_attach_leaf() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -250,7 +250,7 @@ fn trie_cursor_promote_leaf_to_node4() { // add a single leaf let mut c = TrieCursor::new( - &TriePath::from_bytes(&[ + &TrieHash::from_bytes(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]) @@ -275,7 +275,7 @@ fn trie_cursor_promote_leaf_to_node4() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&[ + &TrieHash::from_bytes(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ]) @@ -317,7 +317,7 @@ fn trie_cursor_promote_leaf_to_node4() { path[i] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, node, node_hash) = walk_to_insertion_point(&mut f, &mut c); // end of path -- cursor points to the insertion point @@ -342,7 +342,7 @@ fn trie_cursor_promote_leaf_to_node4() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -372,7 +372,7 @@ fn trie_cursor_promote_leaf_to_node4() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -467,7 +467,7 @@ fn trie_cursor_promote_node4_to_node16() { path[k] = j + 32; let mut c = - 
TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); f.open_block(&block_header).unwrap(); @@ -486,7 +486,7 @@ fn trie_cursor_promote_node4_to_node16() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -515,7 +515,7 @@ fn trie_cursor_promote_node4_to_node16() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -536,7 +536,7 @@ fn trie_cursor_promote_node4_to_node16() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -627,7 +627,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -648,7 +648,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -677,7 +677,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -698,7 +698,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, 
&block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -734,7 +734,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = j + 40; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -755,7 +755,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -784,7 +784,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = 129; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -806,7 +806,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -897,7 +897,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -918,7 +918,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -947,7 +947,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), 
f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -968,7 +968,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1004,7 +1004,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 40; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1024,7 +1024,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1053,7 +1053,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 129; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1074,7 +1074,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1110,7 +1110,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 90; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1131,7 +1131,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1160,7 +1160,7 @@ fn 
trie_cursor_promote_node48_to_node256() { path[k] = 130; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1181,7 +1181,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1256,7 +1256,7 @@ fn trie_cursor_splice_leaf_4() { path[5 * k + 2] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); test_debug!("Start splice-insert at {:?}", &c); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1283,7 +1283,7 @@ fn trie_cursor_splice_leaf_4() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1349,7 +1349,7 @@ fn trie_cursor_splice_leaf_2() { path[3 * k + 1] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); test_debug!("Start splice-insert at {:?}", &c); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1372,7 +1372,7 @@ fn trie_cursor_splice_leaf_2() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1413,7 +1413,7 @@ where for i in 0..count { eprintln!("{}", i); let path = path_gen(i); - let triepath = TriePath::from_bytes(&path).unwrap(); + let triepath = TrieHash::from_bytes(&path).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1519,7 +1519,7 @@ where for i in 0..count { let path = path_gen(i); - 
let triepath = TriePath::from_bytes(&path).unwrap(); + let triepath = TrieHash::from_bytes(&path).unwrap(); let value = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &triepath) .unwrap() diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 6c7cc7a08a..5c6f53ab65 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -40,7 +40,7 @@ use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, }; use crate::chainstate::stacks::index::{ - Error, MarfTrieId, TrieHashExtension, TrieHasher, TrieLeaf, + Error, MarfTrieId, TrieHasher, TrieLeaf, }; /// We don't actually instantiate a Trie, but we still need to pass a type parameter for the diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index c9d3b40dce..8134db9d44 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -45,7 +45,7 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::file::TrieFile; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::{trie_sql, BlockMap, Error, MarfTrieId, TrieLeaf}; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 01fcac9e89..7945a9331d 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -48,7 +48,7 @@ use crate::chainstate::stacks::db::test::{ }; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use 
crate::chainstate::stacks::events::StacksTransactionReceipt; -use crate::chainstate::stacks::index::{MarfTrieId, TrieHashExtension}; +use crate::chainstate::stacks::index::{MarfTrieId}; use crate::chainstate::stacks::miner::TransactionResult; use crate::chainstate::stacks::test::codec_all_transactions; use crate::chainstate::stacks::{ From 2cdf090ced19ec74b7c479051d2ee99ea0f3f89f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 8 Nov 2024 15:08:40 -0500 Subject: [PATCH 08/56] chore: provide access to MARF'ed data via `MARF::get_by_hash` and `ClarityBackingStore::get_data_from_path` --- clarity/src/vm/database/clarity_db.rs | 15 +++++ clarity/src/vm/database/clarity_store.rs | 6 ++ clarity/src/vm/database/key_value_wrapper.rs | 30 ++++++++++ clarity/src/vm/database/sqlite.rs | 4 ++ stackslib/src/clarity_vm/database/marf.rs | 61 +++++++++++++++++++- stackslib/src/clarity_vm/database/mod.rs | 7 ++- 6 files changed, 120 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 50715fd98f..993746b8b8 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -26,6 +26,7 @@ use stacks_common::types::chainstate::{ VRFSeed, }; use stacks_common::types::{Address, StacksEpoch as GenericStacksEpoch, StacksEpochId}; +use stacks_common::types::chainstate::TrieHash; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; use super::clarity_store::SpecialCaseHandler; @@ -464,6 +465,13 @@ impl<'a> ClarityDatabase<'a> { { self.store.get_data::(key) } + + pub fn get_data_by_hash(&mut self, hash: &TrieHash) -> Result> + where + T: ClarityDeserializable, + { + self.store.get_data_by_hash::(hash) + } pub fn put_value(&mut self, key: &str, value: Value, epoch: &StacksEpochId) -> Result<()> { self.put_value_with_size(key, value, epoch)?; @@ -521,6 +529,13 @@ impl<'a> ClarityDatabase<'a> { { self.store.get_data_with_proof(key) } + + pub fn 
get_data_with_proof_by_hash(&mut self, hash: &TrieHash) -> Result)>> + where + T: ClarityDeserializable, + { + self.store.get_data_with_proof_by_hash(hash) + } pub fn make_key_for_trip( contract_identifier: &QualifiedContractIdentifier, diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index 6e8f878f6e..68c788626b 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -64,6 +64,8 @@ pub trait ClarityBackingStore { fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()>; /// fetch K-V out of the committed datastore fn get_data(&mut self, key: &str) -> Result>; + /// fetch Hash(K)-V out of the commmitted datastore + fn get_data_from_path(&mut self, hash: &TrieHash) -> Result>; /// fetch K-V out of the committed datastore, along with the byte representation /// of the Merkle proof for that key-value pair fn get_data_with_proof(&mut self, key: &str) -> Result)>>; @@ -212,6 +214,10 @@ impl ClarityBackingStore for NullBackingStore { fn get_data(&mut self, _key: &str) -> Result> { panic!("NullBackingStore can't retrieve data") } + + fn get_data_from_path(&mut self, _hash: &TrieHash) -> Result> { + panic!("NullBackingStore can't retrieve data") + } fn get_data_with_proof(&mut self, _key: &str) -> Result)>> { panic!("NullBackingStore can't retrieve data") diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 3fd845f92f..d319603d73 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -18,6 +18,7 @@ use std::hash::Hash; use hashbrown::HashMap; use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::TrieHash; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -368,6 +369,18 @@ impl<'a> RollbackWrapper<'a> { .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) 
.transpose() } + + /// this function will only return commitment proofs for values _already_ materialized + /// in the underlying store. otherwise it returns None. + pub fn get_data_with_proof_by_hash(&mut self, hash: &TrieHash) -> InterpreterResult)>> + where + T: ClarityDeserializable, + { + self.store + .get_data_with_proof_from_path(hash)? + .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) + .transpose() + } pub fn get_data(&mut self, key: &str) -> InterpreterResult> where @@ -391,6 +404,23 @@ impl<'a> RollbackWrapper<'a> { .map(|x| T::deserialize(&x)) .transpose() } + + /// DO NOT USE IN CONSENSUS CODE. + /// + /// Load data directly from the underlying store, given its trie hash. The lookup map will not + /// be used. + /// + /// This should never be called from within the Clarity VM, or via block-processing. It's only + /// meant to be used by the RPC system. + pub fn get_data_by_hash(&mut self, hash: &TrieHash) -> InterpreterResult> + where + T: ClarityDeserializable, + { + self.store + .get_data_from_path(hash)? 
+ .map(|x| T::deserialize(&x)) + .transpose() + } pub fn deserialize_value( value_hex: &str, diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 05c1939444..664bfddceb 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -324,6 +324,10 @@ impl ClarityBackingStore for MemoryBackingStore { SqliteConnection::get(self.get_side_store(), key) } + fn get_data_from_path(&mut self, hash: &TrieHash) -> Result> { + SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) + } + fn get_data_with_proof(&mut self, key: &str) -> Result)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index be0f60ff56..38537002f8 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -20,7 +20,6 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MarfTransaction, MARF}; -use crate::chainstate::stacks::index::node::TriePath; use crate::chainstate::stacks::index::{ ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieMerkleProof, }; @@ -477,6 +476,36 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { }) .transpose() } + + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); + self.marf + .get_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "MarfedKV get {:?} off of {:?}: not found", + hash, + &self.chain_tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
+ .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } fn put_all_data(&mut self, _items: Vec<(String, String)>) -> InterpreterResult<()> { error!("Attempted to commit changes to read-only MARF"); @@ -656,6 +685,36 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { }) .transpose() } + + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); + self.marf + .get_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "MarfedKV get {:?} off of {:?}: not found", + hash, + &self.chain_tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
+ .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { self.marf diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 51bfc57690..81c132f6fa 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1232,6 +1232,10 @@ impl ClarityBackingStore for MemoryBackingStore { fn get_data(&mut self, key: &str) -> InterpreterResult> { SqliteConnection::get(self.get_side_store(), key) } + + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) + } fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) @@ -1241,8 +1245,7 @@ impl ClarityBackingStore for MemoryBackingStore { &mut self, key: &TrieHash, ) -> InterpreterResult)>> { - // Ok(SqliteConnection::get(self.get_side_store(), )?.map(|x| (x, vec![]))) - Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) + Ok(SqliteConnection::get(self.get_side_store(), key.to_string().as_str())?.map(|x| (x, vec![]))) } fn get_side_store(&mut self) -> &Connection { From 4dacdd4ef0f9272f964fb0b66e6ee90cb49a3bef Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 8 Nov 2024 15:09:20 -0500 Subject: [PATCH 09/56] chore: use new ClarityDB `get_data_from_path()` to load MARF data by key hash, instead of by key --- stackslib/src/net/api/getclaritymarfvalue.rs | 49 +++++++------------ .../src/net/api/tests/getclaritymarfvalue.rs | 22 ++++----- 2 files changed, 29 insertions(+), 
42 deletions(-) diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs index ff584a0ccf..c1d6146157 100644 --- a/stackslib/src/net/api/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -19,6 +19,7 @@ use clarity::vm::representations::CONTRACT_PRINCIPAL_REGEX_STRING; use lazy_static::lazy_static; use regex::{Captures, Regex}; use stacks_common::types::net::PeerHost; +use stacks_common::types::chainstate::TrieHash; use stacks_common::util::hash::to_hex; use crate::net::http::{ @@ -31,17 +32,6 @@ use crate::net::httpcore::{ }; use crate::net::{Error as NetError, StacksNodeState, TipRequest}; -lazy_static! { - static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = - "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); - static ref MARF_KEY_FOR_TRIP_REGEX_STRING: String = format!( - r"vm::{}::\d+::({})", - *CONTRACT_PRINCIPAL_REGEX_STRING, *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, - ); - static ref MARF_KEY_FOR_QUAD_REGEX_STRING: String = - format!(r"{}::[0-9a-fA-F]+", *MARF_KEY_FOR_TRIP_REGEX_STRING,); -} - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ClarityMarfResponse { pub data: String, @@ -53,12 +43,12 @@ pub struct ClarityMarfResponse { #[derive(Clone)] pub struct RPCGetClarityMarfRequestHandler { - pub clarity_marf_key: Option, + pub marf_key_hash: Option, } impl RPCGetClarityMarfRequestHandler { pub fn new() -> Self { Self { - clarity_marf_key: None, + marf_key_hash: None, } } } @@ -70,15 +60,11 @@ impl HttpRequest for RPCGetClarityMarfRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(&format!( - r"^/v2/clarity/marf/(?P(vm-epoch::epoch-version)|({})|({}))$", - *MARF_KEY_FOR_TRIP_REGEX_STRING, *MARF_KEY_FOR_QUAD_REGEX_STRING - )) - .unwrap() + Regex::new(r#"^/v2/clarity/marf/(?P[0-9a-f]{64})$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/clarity/marf/:clarity_marf_key" + "/v2/clarity/marf/:marf_key_hash" } /// Try to 
decode this request. @@ -96,13 +82,14 @@ impl HttpRequest for RPCGetClarityMarfRequestHandler { )); } - let marf_key = if let Some(key_str) = captures.name("clarity_marf_key") { - key_str.as_str().to_string() + let marf_key = if let Some(key_str) = captures.name("marf_key_hash") { + TrieHash::from_hex(key_str.as_str()) + .map_err(|e| Error::Http(400, format!("Invalid hash string: {e:?}")))? } else { - return Err(Error::Http(404, "Missing `clarity_marf_key`".to_string())); + return Err(Error::Http(404, "Missing `marf_key_hash`".to_string())); }; - self.clarity_marf_key = Some(marf_key); + self.marf_key_hash = Some(marf_key); let contents = HttpRequestContents::new().query_string(query); Ok(contents) @@ -113,7 +100,7 @@ impl HttpRequest for RPCGetClarityMarfRequestHandler { impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { /// Reset internal state fn restart(&mut self) { - self.clarity_marf_key = None; + self.marf_key_hash = None; } /// Make the response @@ -123,8 +110,8 @@ impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let clarity_marf_key = self.clarity_marf_key.take().ok_or(NetError::SendError( - "`clarity_marf_key` not set".to_string(), + let marf_key_hash = self.marf_key_hash.take().ok_or(NetError::SendError( + "`marf_key_hash` not set".to_string(), ))?; let tip = match node.load_stacks_chain_tip(&preamble, &contents) { @@ -144,13 +131,13 @@ impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { clarity_tx.with_clarity_db_readonly(|clarity_db| { let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_data_with_proof(&clarity_marf_key) + .get_data_with_proof_by_hash(&marf_key_hash) .ok() .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { clarity_db - .get_data(&clarity_marf_key) + .get_data_by_hash(&marf_key_hash) .ok() .flatten() .map(|a| (a, None))? 
@@ -168,7 +155,7 @@ impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { Ok(Some(None)) => { return StacksHttpResponse::new_error( &preamble, - &HttpNotFound::new("Marf key not found".to_string()), + &HttpNotFound::new("Marf key hash not found".to_string()), ) .try_into_contents() .map_err(NetError::from); @@ -205,14 +192,14 @@ impl HttpResponse for RPCGetClarityMarfRequestHandler { impl StacksHttpRequest { pub fn new_getclaritymarf( host: PeerHost, - clarity_marf_key: String, + marf_key_hash: TrieHash, tip_req: TipRequest, with_proof: bool, ) -> StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v2/clarity/marf/{}", &clarity_marf_key), + format!("/v2/clarity/marf/{}", &marf_key_hash), HttpRequestContents::new() .for_tip(tip_req) .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index ce342b7442..f1e47fa377 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -20,6 +20,7 @@ use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::TrieHash; use stacks_common::types::net::PeerHost; use stacks_common::types::Address; @@ -37,15 +38,15 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let vm_key_epoch = "vm-epoch::epoch-version"; - let vm_key_trip = "vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"; - let vm_key_quad = "vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"; + let vm_key_epoch = TrieHash::from_key("vm-epoch::epoch-version"); + let 
vm_key_trip = TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"); + let vm_key_quad = TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"); let valid_keys = [vm_key_epoch, vm_key_trip, vm_key_quad]; for key in valid_keys { let request = StacksHttpRequest::new_getclaritymarf( addr.into(), - key.to_string(), + key, TipRequest::SpecificTip(StacksBlockId([0x22; 32])), true, ); @@ -72,12 +73,12 @@ fn test_try_parse_request() { let (preamble, contents) = parsed_request.destruct(); // consumed path args - assert_eq!(handler.clarity_marf_key, Some(key.to_string())); + assert_eq!(handler.marf_key_hash, Some(key.clone())); assert_eq!(&preamble, request.preamble()); handler.restart(); - assert!(handler.clarity_marf_key.is_none()); + assert!(handler.marf_key_hash.is_none()); } } @@ -90,7 +91,7 @@ fn test_try_make_response() { // query existing let request = StacksHttpRequest::new_getclaritymarf( addr.into(), - "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar".to_string(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar"), TipRequest::UseLatestAnchoredTip, true, ); @@ -99,8 +100,7 @@ fn test_try_make_response() { // query existing unconfirmed let request = StacksHttpRequest::new_getclaritymarf( addr.into(), - "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed::1::bar-unconfirmed" - .to_string(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed::1::bar-unconfirmed"), TipRequest::UseLatestUnconfirmedTip, true, ); @@ -109,7 +109,7 @@ fn test_try_make_response() { // query non-existant var let request = StacksHttpRequest::new_getclaritymarf( addr.into(), - "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist".to_string(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist"), TipRequest::UseLatestAnchoredTip, true, ); @@ 
-118,7 +118,7 @@ fn test_try_make_response() { // query non-existant contract let request = StacksHttpRequest::new_getclaritymarf( addr.into(), - "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist::1::bar".to_string(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist::1::bar"), TipRequest::UseLatestAnchoredTip, true, ); From 4a7f9e982307f379125080d2746ef60e7b2d0aed Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 8 Nov 2024 15:10:49 -0500 Subject: [PATCH 10/56] chore: cargo fmt --- clarity/src/vm/database/clarity_db.rs | 12 +++++++----- clarity/src/vm/database/clarity_store.rs | 2 +- clarity/src/vm/database/key_value_wrapper.rs | 12 +++++++----- clarity/src/vm/database/sqlite.rs | 2 +- stackslib/src/chainstate/stacks/index/marf.rs | 4 ++-- stackslib/src/chainstate/stacks/index/node.rs | 4 ++-- stackslib/src/chainstate/stacks/index/proofs.rs | 3 +-- stackslib/src/chainstate/stacks/index/storage.rs | 3 +-- stackslib/src/chainstate/stacks/index/test/marf.rs | 4 +--- stackslib/src/chainstate/stacks/index/test/mod.rs | 4 +--- stackslib/src/chainstate/stacks/index/trie.rs | 4 +--- stackslib/src/clarity_vm/database/marf.rs | 4 ++-- stackslib/src/clarity_vm/database/mod.rs | 7 +++++-- stackslib/src/core/tests/mod.rs | 2 +- stackslib/src/net/api/getclaritymarfvalue.rs | 9 +++++---- stackslib/src/net/api/tests/getclaritymarfvalue.rs | 13 ++++++++----- 16 files changed, 46 insertions(+), 43 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 993746b8b8..ff9bdfaf1a 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -23,10 +23,9 @@ use stacks_common::consts::{ }; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, - VRFSeed, + TrieHash, VRFSeed, }; use stacks_common::types::{Address, StacksEpoch as GenericStacksEpoch, StacksEpochId}; -use 
stacks_common::types::chainstate::TrieHash; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; use super::clarity_store::SpecialCaseHandler; @@ -465,7 +464,7 @@ impl<'a> ClarityDatabase<'a> { { self.store.get_data::(key) } - + pub fn get_data_by_hash(&mut self, hash: &TrieHash) -> Result> where T: ClarityDeserializable, @@ -529,8 +528,11 @@ impl<'a> ClarityDatabase<'a> { { self.store.get_data_with_proof(key) } - - pub fn get_data_with_proof_by_hash(&mut self, hash: &TrieHash) -> Result)>> + + pub fn get_data_with_proof_by_hash( + &mut self, + hash: &TrieHash, + ) -> Result)>> where T: ClarityDeserializable, { diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index 68c788626b..07d48c9504 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -214,7 +214,7 @@ impl ClarityBackingStore for NullBackingStore { fn get_data(&mut self, _key: &str) -> Result> { panic!("NullBackingStore can't retrieve data") } - + fn get_data_from_path(&mut self, _hash: &TrieHash) -> Result> { panic!("NullBackingStore can't retrieve data") } diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index d319603d73..c444aa553e 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -17,8 +17,7 @@ use std::hash::Hash; use hashbrown::HashMap; -use stacks_common::types::chainstate::StacksBlockId; -use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -369,10 +368,13 @@ impl<'a> RollbackWrapper<'a> { .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) .transpose() } - + /// this function will only return commitment proofs for values _already_ materialized /// in the underlying store. otherwise it returns None. 
- pub fn get_data_with_proof_by_hash(&mut self, hash: &TrieHash) -> InterpreterResult)>> + pub fn get_data_with_proof_by_hash( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> where T: ClarityDeserializable, { @@ -404,7 +406,7 @@ impl<'a> RollbackWrapper<'a> { .map(|x| T::deserialize(&x)) .transpose() } - + /// DO NOT USE IN CONSENSUS CODE. /// /// Load data directly from the underlying store, given its trie hash. The lookup map will not diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 664bfddceb..65b4dfaea5 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -326,7 +326,7 @@ impl ClarityBackingStore for MemoryBackingStore { fn get_data_from_path(&mut self, hash: &TrieHash) -> Result> { SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) - } + } fn get_data_with_proof(&mut self, key: &str) -> Result)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 427cde29fc..ffe9af2174 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -126,7 +126,7 @@ pub trait MarfConnection { fn get(&mut self, block_hash: &T, key: &str) -> Result, Error> { self.with_conn(|c| MARF::get_by_key(c, block_hash, key)) } - + /// Resolve a TrieHash from the MARF to a MARFValue with respect to the given block height. 
fn get_from_hash(&mut self, block_hash: &T, th: &TrieHash) -> Result, Error> { self.with_conn(|c| MARF::get_by_hash(c, block_hash, th)) @@ -1397,7 +1397,7 @@ impl MARF { let proof = TrieMerkleProof::from_raw_entry(&mut conn, key, &marf_value, block_hash)?; Ok(Some((marf_value, proof))) } - + pub fn get_with_proof_from_hash( &mut self, block_hash: &T, diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 4436d7f239..da9fc8bbd2 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -32,8 +32,8 @@ use crate::chainstate::stacks::index::bits::{ get_path_byte_len, get_ptrs_byte_len, path_from_bytes, ptrs_from_bytes, write_path_to_bytes, }; use crate::chainstate::stacks::index::{ - BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHasher, - TrieLeaf, MARF_VALUE_ENCODED_SIZE, + BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHasher, TrieLeaf, + MARF_VALUE_ENCODED_SIZE, }; #[derive(Debug, Clone, PartialEq)] diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index aae802334c..85e91ebefb 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -35,8 +35,7 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::marf::MARF; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, ConsensusSerializable, CursorError, TrieCursor, - TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, - TriePtr, + TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::trie::Trie; diff --git a/stackslib/src/chainstate/stacks/index/storage.rs 
b/stackslib/src/chainstate/stacks/index/storage.rs index 170430c74c..6e7ca815c9 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -51,8 +51,7 @@ use crate::chainstate::stacks::index::node::{ use crate::chainstate::stacks::index::profile::TrieBenchmark; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHasher, - TrieLeaf, + trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHasher, TrieLeaf, }; use crate::util_lib::db::{ sql_pragma, sqlite_open, tx_begin_immediate, tx_busy_handler, Error as db_error, diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index f4f4dd0de0..e7535e9553 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -32,9 +32,7 @@ use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::test::*; use crate::chainstate::stacks::index::trie::*; -use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, TrieLeaf, -}; +use crate::chainstate::stacks::index::{ClarityMarfTrieId, Error, MARFValue, TrieLeaf}; #[test] fn marf_insert_different_leaf_same_block_100() { diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index 9dbaa0959d..0ccdffa78b 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -31,9 +31,7 @@ use crate::chainstate::stacks::index::node::*; use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::trie::*; -use crate::chainstate::stacks::index::{ - MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof, -}; +use 
crate::chainstate::stacks::index::{MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof}; use crate::chainstate::stacks::{BlockHeaderHash, TrieHash}; pub mod cache; diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 5c6f53ab65..65e41cf3ed 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -39,9 +39,7 @@ use crate::chainstate::stacks::index::node::{ use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, }; -use crate::chainstate::stacks::index::{ - Error, MarfTrieId, TrieHasher, TrieLeaf, -}; +use crate::chainstate::stacks::index::{Error, MarfTrieId, TrieHasher, TrieLeaf}; /// We don't actually instantiate a Trie, but we still need to pass a type parameter for the /// storage implementation. diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 38537002f8..3a8636b3b5 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -476,7 +476,7 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { }) .transpose() } - + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); self.marf @@ -685,7 +685,7 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { }) .transpose() } - + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); self.marf diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 81c132f6fa..0bce54dcfb 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1232,7 +1232,7 @@ impl ClarityBackingStore for MemoryBackingStore { fn get_data(&mut self, key: &str) -> InterpreterResult> { 
SqliteConnection::get(self.get_side_store(), key) } - + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) } @@ -1245,7 +1245,10 @@ impl ClarityBackingStore for MemoryBackingStore { &mut self, key: &TrieHash, ) -> InterpreterResult)>> { - Ok(SqliteConnection::get(self.get_side_store(), key.to_string().as_str())?.map(|x| (x, vec![]))) + Ok( + SqliteConnection::get(self.get_side_store(), key.to_string().as_str())? + .map(|x| (x, vec![])), + ) } fn get_side_store(&mut self) -> &Connection { diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 7945a9331d..03447e9bf4 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -48,7 +48,7 @@ use crate::chainstate::stacks::db::test::{ }; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; -use crate::chainstate::stacks::index::{MarfTrieId}; +use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::miner::TransactionResult; use crate::chainstate::stacks::test::codec_all_transactions; use crate::chainstate::stacks::{ diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs index c1d6146157..44d2e4dc7f 100644 --- a/stackslib/src/net/api/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -18,8 +18,8 @@ use clarity::vm::clarity::ClarityConnection; use clarity::vm::representations::CONTRACT_PRINCIPAL_REGEX_STRING; use lazy_static::lazy_static; use regex::{Captures, Regex}; -use stacks_common::types::net::PeerHost; use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::to_hex; use crate::net::http::{ @@ -110,9 +110,10 @@ impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { contents: HttpRequestContents, node: &mut 
StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let marf_key_hash = self.marf_key_hash.take().ok_or(NetError::SendError( - "`marf_key_hash` not set".to_string(), - ))?; + let marf_key_hash = self + .marf_key_hash + .take() + .ok_or(NetError::SendError("`marf_key_hash` not set".to_string()))?; let tip = match node.load_stacks_chain_tip(&preamble, &contents) { Ok(tip) => tip, diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index f1e47fa377..3b1453c212 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -19,8 +19,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName}; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::StacksAddress; -use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::chainstate::{StacksAddress, TrieHash}; use stacks_common::types::net::PeerHost; use stacks_common::types::Address; @@ -39,8 +38,10 @@ fn test_try_parse_request() { let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); let vm_key_epoch = TrieHash::from_key("vm-epoch::epoch-version"); - let vm_key_trip = TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"); - let vm_key_quad = TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"); + let vm_key_trip = + TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"); + let vm_key_quad = + TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"); let valid_keys = [vm_key_epoch, vm_key_trip, vm_key_quad]; for key in valid_keys { @@ -109,7 +110,9 @@ fn test_try_make_response() { // query non-existant var let request = 
StacksHttpRequest::new_getclaritymarf( addr.into(), - TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist"), + TrieHash::from_key( + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist", + ), TipRequest::UseLatestAnchoredTip, true, ); From 48a03bbff9ae308df6d7f3d38f946553d09257fb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 8 Nov 2024 15:41:49 -0500 Subject: [PATCH 11/56] chore: test get_by_hash() by loading both the value in get_by_key() and the same value by get_by_hash(hash(key)) --- stackslib/src/chainstate/stacks/index/marf.rs | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index ffe9af2174..a4082627fd 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -122,8 +122,31 @@ pub trait MarfConnection { fn sqlite_conn(&self) -> &Connection; + /// Get and check a value against get_from_hash + /// (test only) + #[cfg(test)] + fn get_and_check_with_hash(&mut self, block_hash: &T, key: &str) { + let res = self.with_conn(|c| MARF::get_by_key(c, block_hash, key)); + let res_with_hash = + self.with_conn(|c| MARF::get_by_hash(c, block_hash, &TrieHash::from_key(key))); + match (res, res_with_hash) { + (Ok(Some(x)), Ok(Some(y))) => { + assert_eq!(x, y); + } + (Ok(None), Ok(None)) => {} + (Err(_), Err(_)) => {} + (x, y) => { + panic!("Inconsistency: {x:?} != {y:?}"); + } + } + } + + #[cfg(not(test))] + fn get_and_check_with_hash(&mut self, _block_hash: &T, _key: &str) {} + /// Resolve a key from the MARF to a MARFValue with respect to the given block height. 
fn get(&mut self, block_hash: &T, key: &str) -> Result, Error> { + self.get_and_check_with_hash(block_hash, key); self.with_conn(|c| MARF::get_by_key(c, block_hash, key)) } From 27a19cc71f0455dc800870ea733a5212f0028591 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 15 Nov 2024 00:03:51 -0500 Subject: [PATCH 12/56] feat: add Epoch 3.1 and SIP-029 coinbase to go with it --- stacks-common/src/types/mod.rs | 310 ++++++++++++++++++++++++++- stacks-common/src/types/tests.rs | 352 +++++++++++++++++++++++++++++++ 2 files changed, 654 insertions(+), 8 deletions(-) create mode 100644 stacks-common/src/types/tests.rs diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 23f2b006db..dfd3287239 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -1,3 +1,20 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; @@ -10,6 +27,7 @@ use crate::address::{ C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use crate::consts::MICROSTACKS_PER_STACKS; use crate::deps_common::bitcoin::blockdata::transaction::TxOut; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; @@ -18,6 +36,9 @@ use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; pub mod chainstate; pub mod net; +#[cfg(test)] +pub mod tests; + /// A container for public keys (compressed secp256k1 public keys) pub struct StacksPublicKeyBuffer(pub [u8; 33]); impl_array_newtype!(StacksPublicKeyBuffer, u8, 33); @@ -80,6 +101,7 @@ pub enum StacksEpochId { Epoch24 = 0x02019, Epoch25 = 0x0201a, Epoch30 = 0x03000, + Epoch31 = 0x03001, } #[derive(Debug)] @@ -88,9 +110,153 @@ pub enum MempoolCollectionBehavior { ByReceiveTime, } +/// Struct describing an interval of time (measured in burnchain blocks) during which a coinbase is +/// allotted. Applies to SIP-029 code paths and later. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CoinbaseInterval { + /// amount of uSTX to award + pub coinbase: u128, + /// height of the chain after Stacks chain genesis at which this coinbase interval starts + pub effective_start_height: u64, +} + +/// From SIP-029: +/// +/// | Coinbase Interval | Bitcoin Height | Offset Height | Approx. 
Supply | STX Reward | Annual Inflation | +/// |--------------------|----------------|---------------------|------------------|------------|------------------| +/// | Current | - | - | 1,552,452,847 | 1000 | - | +/// | 1st | 945,000 | 278,950 | 1,627,352,847 | 500 (50%) | 3.23% | +/// | 2nd | 1,050,000 | 383,950 | 1,679,852,847 | 250 (50%) | 1.57% | +/// | 3rd | 1,260,000 | 593,950 | 1,732,352,847 | 125 (50%) | 0.76% | +/// | 4th | 1,470,000 | 803,950 | 1,758,602,847 | 62.5 (50%) | 0.37% | +/// | - | 2,197,560 | 1,531,510 | 1,804,075,347 | 62.5 (0%) | 0.18% | +/// +/// The above is for mainnet, which has a burnchain year of 52596 blocks and starts at burnchain height 666050. +/// The `Offset Height` column is simply the difference between `Bitcoin Height` and 666050. + +/// Mainnet coinbase intervals, as of SIP-029 +pub const COINBASE_INTERVALS_MAINNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { + let emissions_schedule = [ + CoinbaseInterval { + coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 278_950, + }, + CoinbaseInterval { + coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 383_950, + }, + CoinbaseInterval { + coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 593_950, + }, + CoinbaseInterval { + coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, + effective_start_height: 803_950, + }, + ]; + assert!(CoinbaseInterval::check_order(&emissions_schedule)); + emissions_schedule +}); + +/// Testnet coinbase intervals, as of SIP-029 +pub const COINBASE_INTERVALS_TESTNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { + let emissions_schedule = [ + CoinbaseInterval { + coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), + 
effective_start_height: 1000, + }, + CoinbaseInterval { + coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 2000, + }, + CoinbaseInterval { + coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 3000, + }, + CoinbaseInterval { + coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, + effective_start_height: 4000, + }, + ]; + assert!(CoinbaseInterval::check_order(&emissions_schedule)); + emissions_schedule +}); + +/// Used for testing to substitute a coinbase schedule +#[cfg(any(test, feature = "testing"))] +pub static COINBASE_INTERVALS_TEST: std::sync::Mutex>> = + std::sync::Mutex::new(None); + +#[cfg(any(test, feature = "testing"))] +pub fn set_test_coinbase_schedule(coinbase_schedule: Option>) { + match COINBASE_INTERVALS_TEST.lock() { + Ok(mut schedule_guard) => { + *schedule_guard = coinbase_schedule; + } + Err(_e) => { + panic!("COINBASE_INTERVALS_TEST mutex poisoned"); + } + } +} + +impl CoinbaseInterval { + /// Look up the value of a coinbase at an effective height. 
+ /// Precondition: `intervals` must be sorted in ascending order by `effective_start_height` + pub fn get_coinbase_at_effective_height( + intervals: &[CoinbaseInterval], + effective_height: u64, + ) -> u128 { + if intervals.len() == 0 { + return 0; + } + if intervals.len() == 1 { + if intervals[0].effective_start_height <= effective_height { + return intervals[0].coinbase; + } else { + return 0; + } + } + + for i in 0..(intervals.len() - 1) { + if intervals[i].effective_start_height <= effective_height + && effective_height < intervals[i + 1].effective_start_height + { + return intervals[i].coinbase; + } + } + + // in last interval, which per the above checks is guaranteed to exist + intervals.last().unwrap_or_else(|| unreachable!()).coinbase + } + + /// Verify that a list of intervals is sorted in ascending order by `effective_start_height` + pub fn check_order(intervals: &[CoinbaseInterval]) -> bool { + if intervals.len() < 2 { + return true; + } + + let mut ht = intervals[0].effective_start_height; + for i in 1..intervals.len() { + if intervals[i].effective_start_height < ht { + return false; + } + ht = intervals[i].effective_start_height; + } + true + } +} + impl StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch30 + StacksEpochId::Epoch31 } /// In this epoch, how should the mempool perform garbage collection? 
@@ -104,7 +270,9 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => MempoolCollectionBehavior::ByStacksHeight, - StacksEpochId::Epoch30 => MempoolCollectionBehavior::ByReceiveTime, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + MempoolCollectionBehavior::ByReceiveTime + } } } @@ -119,7 +287,7 @@ impl StacksEpochId { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => false, - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -133,7 +301,10 @@ impl StacksEpochId { | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => false, - StacksEpochId::Epoch24 | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => true, } } @@ -149,7 +320,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -165,7 +336,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -196,7 +367,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => 0, - StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } @@ -232,7 +403,128 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + cur_reward_cycle > first_epoch30_reward_cycle + } + } + } + + /// 
What is the coinbase (in uSTX) to award for the given burnchain height? + /// Applies prior to SIP-029 + fn coinbase_reward_pre_sip029( + &self, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + /* + From https://forum.stacks.org/t/pox-consensus-and-stx-future-supply + + """ + + 1000 STX for years 0-4 + 500 STX for years 4-8 + 250 STX for years 8-12 + 125 STX in perpetuity + + + From the Token Whitepaper: + + We expect that once native mining goes live, approximately 4383 blocks will be pro- + cessed per month, or approximately 52,596 blocks will be processed per year. + + """ + */ + // this is saturating subtraction for the initial reward calculation + // where we are computing the coinbase reward for blocks that occur *before* + // the `first_burn_block_height` + let effective_ht = current_burnchain_height.saturating_sub(first_burnchain_height); + let blocks_per_year = 52596; + let stx_reward = if effective_ht < blocks_per_year * 4 { + 1000 + } else if effective_ht < blocks_per_year * 8 { + 500 + } else if effective_ht < blocks_per_year * 12 { + 250 + } else { + 125 + }; + + stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) + } + + /// Get the coinbase intervals to use. + /// Can be overriden by tests + #[cfg(any(test, feature = "testing"))] + pub(crate) fn get_coinbase_intervals(mainnet: bool) -> Vec { + match COINBASE_INTERVALS_TEST.lock() { + Ok(schedule_opt) => { + if let Some(schedule) = (*schedule_opt).as_ref() { + return schedule.clone(); + } + } + Err(_e) => { + panic!("COINBASE_INTERVALS_TEST mutex poisoned"); + } + } + + if mainnet { + COINBASE_INTERVALS_MAINNET.to_vec() + } else { + COINBASE_INTERVALS_TESTNET.to_vec() + } + } + + #[cfg(not(any(test, feature = "testing")))] + pub(crate) fn get_coinbase_intervals(mainnet: bool) -> Vec { + if mainnet { + COINBASE_INTERVALS_MAINNET.to_vec() + } else { + COINBASE_INTERVALS_TESTNET.to_vec() + } + } + + /// what are the offsets after chain-start when coinbase reductions occur? 
+ /// Applies at and after SIP-029. + /// Uses coinbase intervals defined by COINBASE_INTERVALS_MAINNET, unless overridden by a unit + /// or integration test. + fn coinbase_reward_sip029( + &self, + mainnet: bool, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + let effective_ht = current_burnchain_height.saturating_sub(first_burnchain_height); + let coinbase_intervals = Self::get_coinbase_intervals(mainnet); + CoinbaseInterval::get_coinbase_at_effective_height(&coinbase_intervals, effective_ht) + } + + /// What is the coinbase to award? + pub fn coinbase_reward( + &self, + mainnet: bool, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + match self { + StacksEpochId::Epoch10 => { + // Stacks is not active + 0 + } + StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => { + self.coinbase_reward_pre_sip029(first_burnchain_height, current_burnchain_height) + } + StacksEpochId::Epoch31 => self.coinbase_reward_sip029( + mainnet, + first_burnchain_height, + current_burnchain_height, + ), } } } @@ -249,6 +541,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch24 => write!(f, "2.4"), StacksEpochId::Epoch25 => write!(f, "2.5"), StacksEpochId::Epoch30 => write!(f, "3.0"), + StacksEpochId::Epoch31 => write!(f, "3.1"), } } } @@ -267,6 +560,7 @@ impl TryFrom for StacksEpochId { x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), x if x == StacksEpochId::Epoch25 as u32 => Ok(StacksEpochId::Epoch25), x if x == StacksEpochId::Epoch30 as u32 => Ok(StacksEpochId::Epoch30), + x if x == StacksEpochId::Epoch31 as u32 => Ok(StacksEpochId::Epoch31), _ => Err("Invalid epoch"), } } diff --git a/stacks-common/src/types/tests.rs b/stacks-common/src/types/tests.rs new file mode 100644 index 0000000000..20676999e7 --- /dev/null +++ 
b/stacks-common/src/types/tests.rs @@ -0,0 +1,352 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::{ + set_test_coinbase_schedule, CoinbaseInterval, StacksEpochId, COINBASE_INTERVALS_MAINNET, + COINBASE_INTERVALS_TESTNET, +}; + +#[test] +fn test_mainnet_coinbase_emissions() { + assert_eq!(COINBASE_INTERVALS_MAINNET.len(), 5); + assert_eq!(COINBASE_INTERVALS_MAINNET[0].coinbase, 1_000_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[1].coinbase, 500_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[2].coinbase, 250_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[3].coinbase, 125_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[4].coinbase, 62_500_000); + + // heights from SIP-029 + assert_eq!( + COINBASE_INTERVALS_MAINNET[0].effective_start_height, + 666_050 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[1].effective_start_height, + 945_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[2].effective_start_height, + 1_050_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[3].effective_start_height, + 1_260_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[4].effective_start_height, + 1_470_000 - 666_050 + ); +} + +#[test] +fn test_get_coinbase_at_effective_height() { + 
assert!(CoinbaseInterval::check_order(&*COINBASE_INTERVALS_MAINNET)); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 666050 - 666050 + ), + 1_000_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 666051 - 666050 + ), + 1_000_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 944_999 - 666050 + ), + 1_000_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 945_000 - 666050 + ), + 500_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 945_001 - 666050 + ), + 500_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_049_999 - 666050 + ), + 500_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_050_000 - 666050 + ), + 250_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_050_001 - 666050 + ), + 250_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_259_999 - 666050 + ), + 250_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_260_000 - 666050 + ), + 125_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_260_001 - 666050 + ), + 125_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_469_999 - 666050 + ), + 125_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_470_000 - 666050 + ), + 62_500_000 + ); + assert_eq!( + 
CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_470_001 - 666050 + ), + 62_500_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_559 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_560 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_561 - 666050 + ), + 62_500_000 + ); +} + +#[test] +fn test_epoch_coinbase_reward() { + // new coinbase schedule + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 666050), + 1_000_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 666051), + 1_000_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 944_999), + 1_000_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 945_000), + 500_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 945_001), + 500_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_049_999), + 500_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_050_000), + 250_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_050_001), + 250_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_259_999), + 250_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_260_000), + 125_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_260_001), + 125_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_469_999), + 125_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_470_000), + 62_500_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 
666050, 1_470_001), + 62_500_000 + ); + + // old coinbase schedule + for epoch in [ + StacksEpochId::Epoch20, + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + ] + .iter() + { + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4 - 1), + 1_000_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4), + 500_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4 + 1), + 500_000_000 + ); + + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8 - 1), + 500_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8), + 250_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8 + 1), + 250_000_000 + ); + + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12 - 1), + 250_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12), + 125_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12 + 1), + 125_000_000 + ); + } +} + +/// Verifies that the test facility for setting a coinbase schedule in a unit or integration test +/// actually works. 
+#[test] +fn test_set_coinbase_intervals() { + let new_sched = vec![ + CoinbaseInterval { + coinbase: 1, + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 2, + effective_start_height: 1, + }, + CoinbaseInterval { + coinbase: 3, + effective_start_height: 2, + }, + CoinbaseInterval { + coinbase: 4, + effective_start_height: 3, + }, + CoinbaseInterval { + coinbase: 5, + effective_start_height: 4, + }, + ]; + + assert_eq!( + StacksEpochId::get_coinbase_intervals(true), + *COINBASE_INTERVALS_MAINNET + ); + assert_eq!( + StacksEpochId::get_coinbase_intervals(false), + *COINBASE_INTERVALS_TESTNET + ); + + set_test_coinbase_schedule(Some(new_sched.clone())); + + assert_eq!(StacksEpochId::get_coinbase_intervals(true), new_sched); + assert_eq!(StacksEpochId::get_coinbase_intervals(false), new_sched); + + set_test_coinbase_schedule(None); + + assert_eq!( + StacksEpochId::get_coinbase_intervals(true), + *COINBASE_INTERVALS_MAINNET + ); + assert_eq!( + StacksEpochId::get_coinbase_intervals(false), + *COINBASE_INTERVALS_TESTNET + ); +} From 34f43745324390a401462b8bc93c1f253793c502 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 15 Nov 2024 00:04:16 -0500 Subject: [PATCH 13/56] chore: backfill Epoch31 match cases --- clarity/src/vm/analysis/mod.rs | 3 +- clarity/src/vm/analysis/type_checker/mod.rs | 6 +- .../analysis/type_checker/v2_1/tests/mod.rs | 1 + clarity/src/vm/costs/mod.rs | 3 +- clarity/src/vm/functions/mod.rs | 2 + clarity/src/vm/test_util/mod.rs | 3 +- clarity/src/vm/tests/mod.rs | 4 + clarity/src/vm/types/signatures.rs | 9 +- clarity/src/vm/version.rs | 1 + stacks-common/src/libcommon.rs | 6 +- stackslib/src/burnchains/burnchain.rs | 5 +- stackslib/src/burnchains/tests/burnchain.rs | 5 + stackslib/src/burnchains/tests/mod.rs | 1 + .../src/chainstate/burn/db/processing.rs | 14 ++ stackslib/src/chainstate/burn/db/sortdb.rs | 3 + .../burn/operations/leader_block_commit.rs | 6 +- stackslib/src/chainstate/burn/sortition.rs | 6 + 
stackslib/src/chainstate/coordinator/mod.rs | 5 +- .../chainstate/nakamoto/coordinator/mod.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/chainstate/nakamoto/tenure.rs | 2 + stackslib/src/chainstate/stacks/db/blocks.rs | 54 ++--- stackslib/src/chainstate/stacks/db/mod.rs | 1 + .../src/chainstate/stacks/db/transactions.rs | 1 + stackslib/src/core/mod.rs | 192 +++++++++++++++++- stackslib/src/cost_estimates/pessimistic.rs | 2 + stackslib/src/main.rs | 3 + stackslib/src/net/api/postblock_proposal.rs | 11 +- stackslib/src/net/chat.rs | 2 + 29 files changed, 289 insertions(+), 65 deletions(-) diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 6a8f64f1b2..d563dce6e8 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -148,7 +148,8 @@ pub fn run_analysis( | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db, build_type_map) } StacksEpochId::Epoch10 => { diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index 800347d0f0..b4f6557c2e 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -52,7 +52,8 @@ impl FunctionType { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.check_args_2_1(accounting, args, clarity_version), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => { return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) } @@ -75,7 +76,8 @@ impl FunctionType { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { 
self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 12597c88fa..6050c7d600 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -48,6 +48,7 @@ pub mod contracts; /// Backwards-compatibility shim for type_checker tests. Runs at latest Clarity version. pub fn mem_type_check(exp: &str) -> CheckResult<(Option, ContractAnalysis)> { + // TODO (question for reviewers): This uses Clarity 3 with Epoch 2.1. Is this a problem? mem_run_analysis( exp, crate::vm::ClarityVersion::latest(), diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 0751822ed0..51dbf95ef5 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -775,7 +775,8 @@ impl LimitedCostTracker { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => COSTS_3_NAME.to_string(), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => COSTS_3_NAME.to_string(), }; Ok(result) } diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 833ed4baf8..6482493a29 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -63,6 +63,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch25 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 3.0. StacksEpochId::Epoch30 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 3.1. 
+ StacksEpochId::Epoch31 => $Epoch205Version(args, env, context), } } }; diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index f2b6d4dd09..295909859f 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -52,7 +52,8 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => UnitTestBurnStateDB { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::PrecheckSize, }, diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 2c6f23ef42..5fa58b507b 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -122,6 +122,7 @@ epochs_template! { Epoch24, Epoch25, Epoch30, + Epoch31, } clarity_template! { @@ -140,6 +141,9 @@ clarity_template! { (Epoch30, Clarity1), (Epoch30, Clarity2), (Epoch30, Clarity3), + (Epoch31, Clarity1), + (Epoch31, Clarity2), + (Epoch31, Clarity3), } #[cfg(test)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 280258e026..b3984c5251 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -587,7 +587,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.admits_type_v2_1(other), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => { return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) } @@ -800,7 +801,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.canonicalize_v2_1(), } } @@ -1158,7 +1160,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | 
StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => Self::least_supertype_v2_1(a, b), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => { return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) } diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 4c437d52cc..7050d5dbd9 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -40,6 +40,7 @@ impl ClarityVersion { StacksEpochId::Epoch24 => ClarityVersion::Clarity2, StacksEpochId::Epoch25 => ClarityVersion::Clarity2, StacksEpochId::Epoch30 => ClarityVersion::Clarity3, + StacksEpochId::Epoch31 => ClarityVersion::Clarity3, } } } diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 1a13aa02ed..04c3acc1ea 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -81,10 +81,11 @@ pub mod consts { pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; + pub const PEER_VERSION_EPOCH_3_1: u8 = 0x0c; /// this should be updated to the latest network epoch version supported by /// this node. this will be checked by the `validate_epochs()` method. - pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; + pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_1 as u32; /// set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; @@ -93,6 +94,9 @@ pub mod consts { /// network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; pub const NETWORK_ID_TESTNET: u32 = 0xff000000; + + /// number of uSTX per STX + pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; } /// This test asserts that the constant above doesn't change. 
diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index c1d07994d7..58b0edee0d 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -1082,7 +1082,9 @@ impl Burnchain { } /// Hand off the block to the ChainsCoordinator _and_ process the sortition - /// *only* to be used by legacy stacks node interfaces, like the Helium node + /// *only* to be used by legacy stacks node interfaces, like the Helium node. + /// + /// It does not work on mainnet. fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, @@ -1119,6 +1121,7 @@ impl Burnchain { // method is deprecated and only used in defunct helium nodes db.evaluate_sortition( + false, &header, blockstack_txs, burnchain, diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index b08d7a097e..17a11cb469 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -478,6 +478,7 @@ fn test_process_block_ops() { let (sn121, _) = tx .process_block_ops( + false, &burnchain, &initial_snapshot, &header, @@ -500,6 +501,7 @@ fn test_process_block_ops() { let (sn122, _) = tx .process_block_ops( + false, &burnchain, &block_121_snapshot, &header, @@ -521,6 +523,7 @@ fn test_process_block_ops() { let mut tx = SortitionHandleTx::begin(&mut db, &block_122_snapshot.sortition_id).unwrap(); let (sn123, _) = tx .process_block_ops( + false, &burnchain, &block_122_snapshot, &header, @@ -632,6 +635,7 @@ fn test_process_block_ops() { SortitionHandleTx::begin(&mut db, &block_123_snapshot.sortition_id).unwrap(); let (sn124, _) = tx .process_block_ops( + false, &burnchain, &block_123_snapshot, &header, @@ -873,6 +877,7 @@ fn test_burn_snapshot_sequence() { let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); let (sn, _) = tx .process_block_ops( + false, &burnchain, &prev_snapshot, &header, diff 
--git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index e7fa51a89c..72a3ef898f 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -571,6 +571,7 @@ impl TestBurnchainBlock { let new_snapshot = sortition_db_handle .process_block_txs( + false, &parent_snapshot, &header, burnchain, diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 82318bfe37..47dd44934c 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -112,6 +112,7 @@ impl<'a> SortitionHandleTx<'a> { /// * return the snapshot (and sortition results) fn process_checked_block_ops( &mut self, + mainnet: bool, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, @@ -141,6 +142,7 @@ impl<'a> SortitionHandleTx<'a> { // do the cryptographic sortition and pick the next winning block. let mut snapshot = BlockSnapshot::make_snapshot( + mainnet, self, burnchain, &next_sortition_id, @@ -158,6 +160,11 @@ impl<'a> SortitionHandleTx<'a> { BurnchainError::DBError(e) })?; + let snapshot_epoch = SortitionDB::get_stacks_epoch(self, snapshot.block_height)? + .unwrap_or_else(|| { + panic!("FATAL: no epoch defined for snapshot"); + }); + // was this snapshot the first with mining? // compute the initial block rewards. let initialize_bonus = if snapshot.sortition && parent_snapshot.total_burn == 0 { @@ -166,6 +173,8 @@ impl<'a> SortitionHandleTx<'a> { let mut total_reward = 0; for burn_block_height in burnchain.initial_reward_start_block..snapshot.block_height { total_reward += StacksChainState::get_coinbase_reward( + snapshot_epoch.epoch_id, + mainnet, burn_block_height, self.context.first_block_height, ); @@ -227,6 +236,7 @@ impl<'a> SortitionHandleTx<'a> { /// Returns the BlockSnapshot created from this block. 
pub fn process_block_ops( &mut self, + mainnet: bool, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, @@ -279,6 +289,7 @@ impl<'a> SortitionHandleTx<'a> { // process them let res = self .process_checked_block_ops( + mainnet, burnchain, parent_snapshot, block_header, @@ -305,6 +316,7 @@ impl<'a> SortitionHandleTx<'a> { /// list of blockstack transactions. pub fn process_block_txs( &mut self, + mainnet: bool, parent_snapshot: &BlockSnapshot, this_block_header: &BurnchainBlockHeader, burnchain: &Burnchain, @@ -324,6 +336,7 @@ impl<'a> SortitionHandleTx<'a> { ); let new_snapshot = self.process_block_ops( + mainnet, burnchain, &parent_snapshot, &this_block_header, @@ -432,6 +445,7 @@ mod tests { let processed = ic .process_block_ops( + false, &burnchain, &snapshot, &next_block_header, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 53dc2d0547..664eed1108 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3163,6 +3163,7 @@ impl SortitionDB { StacksEpochId::Epoch24 => version_u32 >= 3, StacksEpochId::Epoch25 => version_u32 >= 3, StacksEpochId::Epoch30 => version_u32 >= 3, + StacksEpochId::Epoch31 => version_u32 >= 3, } } @@ -4281,6 +4282,7 @@ impl SortitionDB { /// commits its results. This is used to post the calculated reward set to an event observer. 
pub fn evaluate_sortition) -> ()>( &mut self, + mainnet: bool, burn_header: &BurnchainBlockHeader, ops: Vec, burnchain: &Burnchain, @@ -4358,6 +4360,7 @@ impl SortitionDB { }; let new_snapshot = sortition_db_handle.process_block_txs( + mainnet, &parent_snapshot, burn_header, burnchain, diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 910315f082..ba87907348 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -42,7 +42,7 @@ use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::core::{ StacksEpoch, StacksEpochId, STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER, STACKS_EPOCH_2_2_MARKER, STACKS_EPOCH_2_3_MARKER, STACKS_EPOCH_2_4_MARKER, - STACKS_EPOCH_2_5_MARKER, STACKS_EPOCH_3_0_MARKER, + STACKS_EPOCH_2_5_MARKER, STACKS_EPOCH_3_0_MARKER, STACKS_EPOCH_3_1_MARKER, }; use crate::net::Error as net_error; @@ -869,6 +869,7 @@ impl LeaderBlockCommitOp { StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER), StacksEpochId::Epoch25 => self.check_epoch_commit_marker(STACKS_EPOCH_2_5_MARKER), StacksEpochId::Epoch30 => self.check_epoch_commit_marker(STACKS_EPOCH_3_0_MARKER), + StacksEpochId::Epoch31 => self.check_epoch_commit_marker(STACKS_EPOCH_3_1_MARKER), } } @@ -888,7 +889,8 @@ impl LeaderBlockCommitOp { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self .block_height diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index b0221f1439..1239d0f338 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -498,6 
+498,7 @@ impl BlockSnapshot { /// /// Call this *after* you store all of the block's transactions to the burn db. pub fn make_snapshot( + mainnet: bool, sort_tx: &mut SortitionHandleTx, burnchain: &Burnchain, my_sortition_id: &SortitionId, @@ -518,6 +519,7 @@ impl BlockSnapshot { .epoch_id; Self::make_snapshot_in_epoch( + mainnet, sort_tx, burnchain, my_sortition_id, @@ -531,6 +533,7 @@ impl BlockSnapshot { } pub fn make_snapshot_in_epoch( + mainnet: bool, sort_tx: &mut SortitionHandleTx, burnchain: &Burnchain, my_sortition_id: &SortitionId, @@ -561,6 +564,8 @@ impl BlockSnapshot { initial_mining_bonus_ustx } else { let missed_coinbase = StacksChainState::get_coinbase_reward( + epoch_id, + mainnet, parent_snapshot.block_height, first_block_height, ); @@ -788,6 +793,7 @@ mod test { burnchain_state_transition: &BurnchainStateTransition, ) -> Result { BlockSnapshot::make_snapshot( + false, sort_tx, burnchain, my_sortition_id, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 5b7c7e89b6..139a666098 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -419,8 +419,8 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { return Ok(RewardSet::empty()); } } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { - // Epoch 2.5 and 3.0 compute reward sets, but *only* if PoX-4 is active + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + // Epoch 2.5, 3.0, and 3.1 compute reward sets, but *only* if PoX-4 is active if burnchain .pox_constants .active_pox_contract(current_burn_height) @@ -2674,6 +2674,7 @@ impl< let (next_snapshot, _) = self .sortition_db .evaluate_sortition( + self.chain_state_db.mainnet, &header, ops, &self.burnchain, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index cb1966d806..2fb8ec193a 100644 --- 
a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -1137,6 +1137,7 @@ impl< let (next_snapshot, _) = self .sortition_db .evaluate_sortition( + self.chain_state_db.mainnet, &header, ops, &self.burnchain, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d67de8e987..3e01d6f810 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -130,7 +130,7 @@ pub use self::staging_blocks::{ NakamotoStagingBlocksConn, NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx, }; -pub const NAKAMOTO_BLOCK_VERSION: u8 = 0; +pub const NAKAMOTO_BLOCK_VERSION: u8 = 1; define_named_enum!(HeaderTypeNames { Nakamoto("nakamoto"), diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 4b7734653c..0035c29eb9 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -957,6 +957,8 @@ impl NakamotoChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( + evaluated_epoch, + chainstate_tx.config.mainnet, chain_tip_burn_header_height, burn_dbconn.context.first_block_height, ); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 115678ada8..ea837eb7bc 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -3612,41 +3612,13 @@ impl StacksChainState { } /// Get the coinbase at this burn block height, in microSTX - pub fn get_coinbase_reward(burn_block_height: u64, first_burn_block_height: u64) -> u128 { - /* - From https://forum.stacks.org/t/pox-consensus-and-stx-future-supply - - """ - - 1000 STX for years 0-4 - 500 STX for years 4-8 - 250 STX for years 8-12 - 125 STX in perpetuity - - - From the Token Whitepaper: - - We expect that once native mining goes live, 
approximately 4383 blocks will be pro- - cessed per month, or approximately 52,596 blocks will be processed per year. - - """ - */ - // this is saturating subtraction for the initial reward calculation - // where we are computing the coinbase reward for blocks that occur *before* - // the `first_burn_block_height` - let effective_ht = burn_block_height.saturating_sub(first_burn_block_height); - let blocks_per_year = 52596; - let stx_reward = if effective_ht < blocks_per_year * 4 { - 1000 - } else if effective_ht < blocks_per_year * 8 { - 500 - } else if effective_ht < blocks_per_year * 12 { - 250 - } else { - 125 - }; - - stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) + pub fn get_coinbase_reward( + epoch: StacksEpochId, + mainnet: bool, + burn_block_height: u64, + first_burn_block_height: u64, + ) -> u128 { + epoch.coinbase_reward(mainnet, burn_block_height, first_burn_block_height) } /// Create the block reward. @@ -4132,6 +4104,11 @@ impl StacksChainState { current_epoch = StacksEpochId::Epoch30; } StacksEpochId::Epoch30 => { + // no special initialization is needed, since only the coinbase emission + // schedule is changing. 
+ current_epoch = StacksEpochId::Epoch31; + } + StacksEpochId::Epoch31 => { panic!("No defined transition from Epoch30 forward") } } @@ -4942,8 +4919,7 @@ impl StacksChainState { )?; Ok((stack_ops, transfer_ops, delegate_ops, vec![])) } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { - // TODO: sbtc ops in epoch 3.0 + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, @@ -5033,7 +5009,7 @@ impl StacksChainState { pox_reward_cycle, pox_start_cycle_info, ), - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { Self::handle_pox_cycle_start_pox_4( clarity_tx, pox_reward_cycle, @@ -5758,6 +5734,8 @@ impl StacksChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( + evaluated_epoch, + mainnet, u64::from(chain_tip_burn_header_height), burn_dbconn.context.first_block_height, ); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6b6f523f88..7fed3e9a46 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -307,6 +307,7 @@ impl DBConfig { StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch31 => version_u32 >= 3 && version_u32 <= 8, } } } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e9de9139a2..aef9627d15 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -8725,6 +8725,7 @@ pub mod test { StacksEpochId::Epoch24 => self.get_stacks_epoch(5), StacksEpochId::Epoch25 => self.get_stacks_epoch(6), 
StacksEpochId::Epoch30 => self.get_stacks_epoch(7), + StacksEpochId::Epoch31 => self.get_stacks_epoch(8), } } fn get_pox_payout_addrs( diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 491ba21ca0..3dd2d8cb7f 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -18,6 +18,7 @@ use std::collections::HashSet; use clarity::vm::costs::ExecutionCost; use lazy_static::lazy_static; +pub use stacks_common::consts::MICROSTACKS_PER_STACKS; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::StacksEpoch as GenericStacksEpoch; pub use stacks_common::types::StacksEpochId; @@ -45,7 +46,7 @@ pub use stacks_common::consts::{ NETWORK_ID_TESTNET, PEER_NETWORK_EPOCH, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, - PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_3_1, PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, PEER_VERSION_TESTNET_MAJOR, STACKS_EPOCH_MAX, }; @@ -98,7 +99,11 @@ pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 840_360; /// This is Epoch-3.0, activation height proposed in SIP-021 pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 867_867; +/// This is Epoch-3.1, activation height proposed in SIP-029 +pub const BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT: u64 = 875_000; +/// Bitcoin mainline testnet3 activation heights. +/// TODO: No longer used since testnet3 is dead, so remove. 
pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = @@ -110,6 +115,7 @@ pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 2_583_893; pub const BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT: u64 = 30_000_000; +pub const BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT: u64 = 30_000_001; /// This constant sets the approximate testnet bitcoin height at which 2.5 Xenon /// was reorged back to 2.5 instantiation. This is only used to calculate the @@ -132,8 +138,6 @@ lazy_static! { pub const BOOT_BLOCK_HASH: BlockHeaderHash = BlockHeaderHash([0xff; 32]); pub const BURNCHAIN_BOOT_CONSENSUS_HASH: ConsensusHash = ConsensusHash([0xff; 20]); -pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; - pub const POX_SUNSET_START: u64 = 100_000; pub const POX_SUNSET_END: u64 = POX_SUNSET_START + 400_000; @@ -237,7 +241,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 9] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 10] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -297,15 +301,22 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]; } lazy_static! 
{ - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 9] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 10] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -365,15 +376,22 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 9] = [ + pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 10] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -433,10 +451,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 7001, - end_height: STACKS_EPOCH_MAX, + end_height: 8001, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: 8001, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]; } @@ -468,6 +493,10 @@ pub static STACKS_EPOCH_2_5_MARKER: u8 = 0x0a; /// *or greater*. pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0b; +/// Stacks 3.1 epoch marker. All block-commits in 3.1 must have a memo bitfield with this value +/// *or greater*. 
+pub static STACKS_EPOCH_3_1_MARKER: u8 = 0x0c; + #[test] fn test_ord_for_stacks_epoch() { let epochs = STACKS_EPOCHS_MAINNET.clone(); @@ -515,6 +544,15 @@ fn test_ord_for_stacks_epoch() { assert_eq!(epochs[8].cmp(&epochs[5]), Ordering::Greater); assert_eq!(epochs[8].cmp(&epochs[6]), Ordering::Greater); assert_eq!(epochs[8].cmp(&epochs[7]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[3]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[4]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[5]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[6]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[7]), Ordering::Greater); + assert_eq!(epochs[9].cmp(&epochs[8]), Ordering::Greater); } #[test] @@ -578,6 +616,8 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_3_1(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; @@ -1217,6 +1257,135 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_3_1(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_3_1 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 
first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: first_burnchain_height + 24, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: first_burnchain_height + 28, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: 
first_burnchain_height + 28, + end_height: first_burnchain_height + 32, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 32, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + ] + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -1355,6 +1524,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), StacksEpochId::Epoch25 => StacksEpoch::unit_test_2_5(first_burnchain_height), StacksEpochId::Epoch30 => StacksEpoch::unit_test_3_0(first_burnchain_height), + StacksEpochId::Epoch31 => StacksEpoch::unit_test_3_1(first_burnchain_height), } } @@ -1409,8 +1579,8 @@ impl StacksEpochExtension for StacksEpoch { .iter() .max() .expect("FATAL: expect at least one epoch"); - if max_epoch.epoch_id == StacksEpochId::Epoch30 { - assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_2_5)); + if max_epoch.epoch_id == StacksEpochId::Epoch31 { + assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_3_0)); } else { assert!( max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH, diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index bb1cf48f38..cdb3ceb7da 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -230,6 +230,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch25 => ":2.1", // reuse cost estimates in Epoch30 StacksEpochId::Epoch30 => ":2.1", + // reuse cost estimates in Epoch31 + StacksEpochId::Epoch31 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff 
--git a/stackslib/src/main.rs b/stackslib/src/main.rs index 98315cffa8..e403fb9b9b 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1345,6 +1345,7 @@ simulating a miner. SortitionDB::get_canonical_burn_chain_tip(new_sortition_db.conn()).unwrap(); new_sortition_db .evaluate_sortition( + false, &burn_block_header, blockstack_txs, &burnchain, @@ -1813,6 +1814,7 @@ fn analyze_sortition_mev(argv: Vec) { debug!("Re-evaluate sortition at height {}", height); let (next_sn, state_transition) = sortdb .evaluate_sortition( + true, &burn_block.header, burn_block.ops.clone(), &burnchain, @@ -1828,6 +1830,7 @@ fn analyze_sortition_mev(argv: Vec) { let mut sort_tx = sortdb.tx_begin_at_tip(); let tip_pox_id = sort_tx.get_pox_id().unwrap(); let next_sn_nakamoto = BlockSnapshot::make_snapshot_in_epoch( + true, &mut sort_tx, &burnchain, &ancestor_sn.sortition_id, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index b67b6166aa..59d9248aa4 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -38,7 +38,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NAKAMOTO_BLOCK_VERSION}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; @@ -374,6 +374,15 @@ impl NakamotoBlockProposal { }); } + // Check block version. 
If it's less than the compiled-in version, just emit a warning + // because there's a new version of the node / signer binary available that really ought to + // be used (hint, hint) + if self.block.header.version != NAKAMOTO_BLOCK_VERSION { + warn!("Proposed block has unexpected version. Upgrade your node and/or signer ASAP."; + "block.header.version" => %self.block.header.version, + "expected" => %NAKAMOTO_BLOCK_VERSION); + } + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); let mut db_handle = sortdb.index_handle(&sort_tip); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 273c1c7335..b32be090ca 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -6376,6 +6376,8 @@ mod test { } } + // TODO: test for has_acceptable_epoch() + #[test] fn convo_process_relayers() { let conn_opts = ConnectionOptions::default(); From 4b1e3e59bfe43d7c2945e713b06a30d7f6d83512 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 15 Nov 2024 00:13:38 -0500 Subject: [PATCH 14/56] chore: API sync --- testnet/stacks-node/src/burnchains/mocknet_controller.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index a626cfb443..d36bfa01f5 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -274,6 +274,7 @@ impl BurnchainController for MocknetController { .unwrap(); let new_chain_tip = burn_tx .process_block_ops( + false, &self.burnchain, &chain_tip.block_snapshot, &next_block_header, From 878a8a308f021ac900ae0750999d38f8243f5681 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Fri, 15 Nov 2024 12:56:20 +0100 Subject: [PATCH 15/56] feat: validate get clarity metadata key format --- 
clarity/src/vm/database/clarity_db.rs | 112 ++++++++++++++++-- stackslib/src/net/api/getclaritymetadata.rs | 50 ++++++-- .../src/net/api/tests/getclaritymarfvalue.rs | 2 +- .../src/net/api/tests/getclaritymetadata.rs | 73 +++++++++++- 4 files changed, 215 insertions(+), 22 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index ff9bdfaf1a..4f6f3f7781 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -76,6 +76,68 @@ pub enum StoreType { PoxUnlockHeight = 0x15, } +impl TryFrom<&str> for StoreType { + type Error = String; + + fn try_from(value: &str) -> core::result::Result { + use self::StoreType::*; + + let hex_value = u8::from_str_radix(value, 10).map_err(|e| e.to_string())?; + match hex_value { + 0x00 => Ok(DataMap), + 0x01 => Ok(Variable), + 0x02 => Ok(FungibleToken), + 0x03 => Ok(CirculatingSupply), + 0x04 => Ok(NonFungibleToken), + 0x05 => Ok(DataMapMeta), + 0x06 => Ok(VariableMeta), + 0x07 => Ok(FungibleTokenMeta), + 0x08 => Ok(NonFungibleTokenMeta), + 0x09 => Ok(Contract), + 0x10 => Ok(SimmedBlock), + 0x11 => Ok(SimmedBlockHeight), + 0x12 => Ok(Nonce), + 0x13 => Ok(STXBalance), + 0x14 => Ok(PoxSTXLockup), + 0x15 => Ok(PoxUnlockHeight), + _ => Err("Invalid StoreType".into()), + } + } +} + +pub enum ContractDataVarName { + Contract, + ContractSize, + ContractSrc, + ContractDataSize, +} + +impl ContractDataVarName { + pub fn as_str(&self) -> &str { + match self { + Self::Contract => "contract", + Self::ContractSize => "contract-size", + Self::ContractSrc => "contract-src", + Self::ContractDataSize => "contract-data-size", + } + } +} + +impl TryFrom<&str> for ContractDataVarName { + type Error = String; + + fn try_from(value: &str) -> core::result::Result { + use self::ContractDataVarName::*; + match value { + "contract" => Ok(Contract), + "contract-size" => Ok(ContractSize), + "contract-src" => Ok(ContractSrc), + "contract-data-size" => Ok(ContractDataSize), + _ 
=> Err("Invalid ContractDataVarName".into()), + } + } +} + pub struct ClarityDatabase<'a> { pub store: RollbackWrapper<'a>, headers_db: &'a dyn HeadersDB, @@ -576,12 +638,18 @@ impl<'a> ClarityDatabase<'a> { self.store .prepare_for_contract_metadata(contract_identifier, hash)?; // insert contract-size - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); self.insert_metadata(contract_identifier, &key, &(contract_content.len() as u64))?; // insert contract-src if STORE_CONTRACT_SRC_INTERFACE { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSrc.as_str(), + ); self.insert_metadata(contract_identifier, &key, &contract_content.to_string())?; } Ok(()) @@ -591,7 +659,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Option { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSrc.as_str(), + ); self.fetch_metadata(contract_identifier, &key) .ok() .flatten() @@ -700,7 +771,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { @@ -708,7 +782,10 @@ impl<'a> ClarityDatabase<'a> { "Failed to read non-consensus contract metadata, even though contract exists in MARF." 
.into()) })?; - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractDataSize.as_str(), + ); let data_size: u64 = self .fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { @@ -727,7 +804,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, data_size: u64, ) -> Result<()> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { @@ -737,7 +817,10 @@ impl<'a> ClarityDatabase<'a> { })?; contract_size.cost_overflow_add(data_size)?; - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractDataSize.as_str(), + ); self.insert_metadata(contract_identifier, &key, &data_size)?; Ok(()) } @@ -747,13 +830,19 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, contract: Contract, ) -> Result<()> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); self.insert_metadata(contract_identifier, &key, &contract)?; Ok(()) } pub fn has_contract(&mut self, contract_identifier: &QualifiedContractIdentifier) -> bool { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); self.store.has_metadata_entry(contract_identifier, &key) } @@ -761,7 +850,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: 
&QualifiedContractIdentifier, ) -> Result { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); let mut data: Contract = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| InterpreterError::Expect( "Failed to read non-consensus contract metadata, even though contract exists in MARF." diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs index 5ef3feee6e..5700e87af9 100644 --- a/stackslib/src/net/api/getclaritymetadata.rs +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -15,6 +15,8 @@ // along with this program. If not, see . use clarity::vm::clarity::ClarityConnection; +use clarity::vm::database::clarity_db::ContractDataVarName; +use clarity::vm::database::StoreType; use clarity::vm::representations::{CONTRACT_NAME_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING}; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; @@ -37,7 +39,7 @@ lazy_static! { static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); static ref METADATA_KEY_REGEX_STRING: String = format!( - r"vm-metadata::\d+::(contract|contract-size|contract-src|contract-data-size|({}))", + r"vm-metadata::(?P(\d{{1,2}}))::(?P(contract|contract-size|contract-src|contract-data-size|({})))", *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, ); } @@ -81,8 +83,6 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { "/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key" } - /// Try to decode this request. - /// There's nothing to load here, so just make sure the request is well-formed. 
fn try_parse_request( &mut self, preamble: &HttpRequestPreamble, @@ -98,13 +98,43 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { let contract_identifier = request::get_contract_address(captures, "address", "contract")?; - let metadata_key = if let Some(key_str) = captures.name("clarity_metadata_key") { - key_str.as_str().to_string() - } else { - return Err(Error::Http( - 404, - "Missing `clarity_metadata_key`".to_string(), - )); + // Validate that the metadata key is well-formed. It must be of data type: + // DataMapMeta (5) | VariableMeta (6) | FungibleTokenMeta (7) | NonFungibleTokenMeta (8) + // or Contract (9) followed by a valid contract metadata name + match captures + .name("data_type") + .and_then(|data_type| StoreType::try_from(data_type.as_str()).ok()) + { + Some(data_type) => match data_type { + StoreType::DataMapMeta + | StoreType::VariableMeta + | StoreType::FungibleTokenMeta + | StoreType::NonFungibleTokenMeta => {} + StoreType::Contract => { + if captures + .name("var_name") + .and_then(|var_name| ContractDataVarName::try_from(var_name.as_str()).ok()) + .is_none() + { + return Err(Error::DecodeError("Invalid metadata var name".to_string())); + } + } + _ => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + }, + None => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + } + + let metadata_key = match captures.name("clarity_metadata_key") { + Some(key_str) => key_str.as_str().to_string(), + None => { + return Err(Error::DecodeError( + "Missing `clarity_metadata_key`".to_string(), + )); + } }; self.contract_identifier = Some(contract_identifier); diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index 3b1453c212..e360b7a72c 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -89,7 +89,7 @@ fn test_try_make_response() { let mut requests = vec![]; - 
// query existing + // query existing marf value let request = StacksHttpRequest::new_getclaritymarf( addr.into(), TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar"), diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs index 3de5949a87..57baf705ce 100644 --- a/stackslib/src/net/api/tests/getclaritymetadata.rs +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -26,11 +26,12 @@ use stacks_common::types::Address; use super::test_rpc; use crate::net::api::*; use crate::net::connection::ConnectionOptions; +use crate::net::http::Error as HttpError; use crate::net::httpcore::{ HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, }; -use crate::net::{ProtocolFamily, TipRequest}; +use crate::net::{Error as NetError, ProtocolFamily, TipRequest}; #[test] fn test_try_parse_request() { @@ -85,6 +86,76 @@ fn test_try_parse_request() { assert!(handler.clarity_metadata_key.is_none()); } +#[test] +fn test_try_parse_invalid_store_type() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::contract-size".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let parsed_request_err = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + 
&bytes[offset..], + ) + .unwrap_err(); + + assert_eq!( + parsed_request_err, + HttpError::DecodeError("Invalid metadata type".to_string()).into() + ); + handler.restart(); +} + +#[test] +fn test_try_parse_invalid_contract_metadata_var_name() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-invalid-key".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let parsed_request_err = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap_err(); + + assert_eq!( + parsed_request_err, + HttpError::DecodeError("Invalid metadata var name".to_string()).into() + ); + handler.restart(); +} + #[test] fn test_try_parse_request_for_analysis() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); From f27c84fee83cd744ec963b9b7606046a774cd457 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Fri, 15 Nov 2024 14:00:31 +0100 Subject: [PATCH 16/56] docs: update openapi.yaml and rpc-endpoint.md --- docs/rpc-endpoints.md | 4 ++-- docs/rpc/openapi.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 973e3478d4..63a086ca1b 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -172,7 +172,7 @@ Where data is the 
hex serialization of the variable value. This endpoint also accepts a querystring parameter `?proof=` which when supplied `0`, will return the JSON object _without_ the `proof` field. -### GET /v2/clarity_marf_value/[Clarity MARF Key] +### GET /v2/clarity/marf/[Clarity MARF Key] Attempt to fetch the value of a MARF key. The key is identified with [Clarity MARF Key]. Returns JSON data in the form: @@ -186,7 +186,7 @@ Returns JSON data in the form: Where data is the hex serialization of the value. -### GET /v2/clarity_metadata/[Stacks Address]/[Contract Name]/[Clarity Metadata Key] +### GET /v2/clarity/metadata/[Stacks Address]/[Contract Name]/[Clarity Metadata Key] Attempt to fetch the metadata of a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 2b73198511..c4b1ec7b63 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -486,7 +486,7 @@ paths: If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). - /v2/clarity_marf_value/{clarity_marf_key}: + /v2/clarity/marf/{clarity_marf_key}: post: summary: Get the MARF value for a given key tags: @@ -526,7 +526,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). 
- /v2/clarity_metadata/{contract_address}/{contract_name}/{clarity_metadata_key}: + /v2/clarity/metadata/{contract_address}/{contract_name}/{clarity_metadata_key}: post: summary: Get the contract metadata for the metadata key tags: From 7f86352f185fefca963f20b4d3567d98bcefb2e9 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Fri, 15 Nov 2024 16:07:02 +0100 Subject: [PATCH 17/56] fix: metadata key validation for contract analysis --- stackslib/src/net/api/getclaritymetadata.rs | 66 +++++++++++---------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs index 5700e87af9..29ba1d6f4e 100644 --- a/stackslib/src/net/api/getclaritymetadata.rs +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -98,36 +98,6 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { let contract_identifier = request::get_contract_address(captures, "address", "contract")?; - // Validate that the metadata key is well-formed. 
It must be of data type: - // DataMapMeta (5) | VariableMeta (6) | FungibleTokenMeta (7) | NonFungibleTokenMeta (8) - // or Contract (9) followed by a valid contract metadata name - match captures - .name("data_type") - .and_then(|data_type| StoreType::try_from(data_type.as_str()).ok()) - { - Some(data_type) => match data_type { - StoreType::DataMapMeta - | StoreType::VariableMeta - | StoreType::FungibleTokenMeta - | StoreType::NonFungibleTokenMeta => {} - StoreType::Contract => { - if captures - .name("var_name") - .and_then(|var_name| ContractDataVarName::try_from(var_name.as_str()).ok()) - .is_none() - { - return Err(Error::DecodeError("Invalid metadata var name".to_string())); - } - } - _ => { - return Err(Error::DecodeError("Invalid metadata type".to_string())); - } - }, - None => { - return Err(Error::DecodeError("Invalid metadata type".to_string())); - } - } - let metadata_key = match captures.name("clarity_metadata_key") { Some(key_str) => key_str.as_str().to_string(), None => { @@ -137,6 +107,42 @@ impl HttpRequest for RPCGetClarityMetadataRequestHandler { } }; + if metadata_key != "analysis" { + // Validate that the metadata key is well-formed. 
It must be of data type: + // DataMapMeta (5) | VariableMeta (6) | FungibleTokenMeta (7) | NonFungibleTokenMeta (8) + // or Contract (9) followed by a valid contract metadata name + match captures + .name("data_type") + .and_then(|data_type| StoreType::try_from(data_type.as_str()).ok()) + { + Some(data_type) => match data_type { + StoreType::DataMapMeta + | StoreType::VariableMeta + | StoreType::FungibleTokenMeta + | StoreType::NonFungibleTokenMeta => {} + StoreType::Contract => { + if captures + .name("var_name") + .and_then(|var_name| { + ContractDataVarName::try_from(var_name.as_str()).ok() + }) + .is_none() + { + return Err(Error::DecodeError( + "Invalid metadata var name".to_string(), + )); + } + } + _ => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + }, + None => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + } + } + self.contract_identifier = Some(contract_identifier); self.clarity_metadata_key = Some(metadata_key); From 198843e8e18f47334317768b38a7e3e79ce95638 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 15 Nov 2024 15:31:48 -0500 Subject: [PATCH 18/56] chore: fix failing tests --- stackslib/src/clarity_vm/tests/large_contract.rs | 3 ++- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 8db6b3043a..e7d8faff0c 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -168,7 +168,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { let (ast, _analysis) = tx .analyze_smart_contract( &boot_code_id("costs-3", false), diff --git 
a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6ae34fce42..f82faad154 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3807,8 +3807,13 @@ fn follower_bootup_across_multiple_cycles() { .reward_cycle_length * 2 { + let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); } info!("Nakamoto miner has advanced two reward cycles"); From 9429c8e309a09de689e895f1271b533d030d6abf Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Fri, 15 Nov 2024 22:40:19 +0100 Subject: [PATCH 19/56] docs: address review --- docs/rpc/openapi.yaml | 4 ++-- stackslib/src/net/api/getclaritymarfvalue.rs | 3 +-- stackslib/src/net/api/getclaritymetadata.rs | 3 +-- stackslib/src/net/api/tests/getclaritymarfvalue.rs | 3 +-- stackslib/src/net/api/tests/getclaritymetadata.rs | 3 +-- 5 files changed, 6 insertions(+), 10 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index c4b1ec7b63..606db3a453 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -493,7 +493,7 @@ paths: - Smart Contracts operationId: get_clarity_marf_value description: | - Attempt to fetch the value of a MARF key. The key is identified with [Clarity MARF Key]. + Attempt to fetch the value of a MARF key. In the response, `data` is the hex serialization of the value. responses: @@ -533,7 +533,7 @@ paths: - Smart Contracts operationId: get_clarity_metadata_key description: | - Attempt to fetch the metadata of a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. 
+ Attempt to fetch the metadata of a contract. The contract is identified with [Contract Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. In the response, `data` is formatted as JSON. responses: diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs index 44d2e4dc7f..678d4fa46b 100644 --- a/stackslib/src/net/api/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs index 29ba1d6f4e..a6606fe62a 100644 --- a/stackslib/src/net/api/getclaritymetadata.rs +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index e360b7a72c..e36b13ca74 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the 
terms of the GNU General Public License as published by diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs index 57baf705ce..de52b27f2d 100644 --- a/stackslib/src/net/api/tests/getclaritymetadata.rs +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by From 74397d0841f8b842af55972d7b5e77d8611e3e34 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 18 Nov 2024 12:43:37 +0100 Subject: [PATCH 20/56] refactor: fix unbounded regex and add tests --- stackslib/src/net/api/getclaritymetadata.rs | 12 +++-- .../src/net/api/tests/getclaritymetadata.rs | 47 +++++++++++++++++-- 2 files changed, 50 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs index a6606fe62a..ee6ec96567 100644 --- a/stackslib/src/net/api/getclaritymetadata.rs +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -16,7 +16,9 @@ use clarity::vm::clarity::ClarityConnection; use clarity::vm::database::clarity_db::ContractDataVarName; use clarity::vm::database::StoreType; -use clarity::vm::representations::{CONTRACT_NAME_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, MAX_STRING_LEN, STANDARD_PRINCIPAL_REGEX_STRING, +}; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use lazy_static::lazy_static; @@ -35,10 +37,12 @@ use crate::net::httpcore::{ use crate::net::{Error as NetError, StacksNodeState, TipRequest}; lazy_static! 
{ - static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = - "[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?".into(); + static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = format!( + "([a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?){{1,{}}}", + MAX_STRING_LEN + ); static ref METADATA_KEY_REGEX_STRING: String = format!( - r"vm-metadata::(?P(\d{{1,2}}))::(?P(contract|contract-size|contract-src|contract-data-size|({})))", + r"vm-metadata::(?P(\d{{1,2}}))::(?P(contract|contract-size|contract-src|contract-data-size|{}))", *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, ); } diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs index de52b27f2d..47249eb290 100644 --- a/stackslib/src/net/api/tests/getclaritymetadata.rs +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -15,8 +15,10 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::database::{ClaritySerializable, DataMapMetadata, DataVariableMetadata}; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; use clarity::vm::{ClarityName, ContractName}; +use serde_json::json; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerHost; @@ -211,7 +213,7 @@ fn test_try_make_response() { let mut requests = vec![]; - // query existing + // query existing contract size metadata let request = StacksHttpRequest::new_getclaritymetadata( addr.into(), StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), @@ -221,16 +223,51 @@ fn test_try_make_response() { ); requests.push(request); + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + 
"hello-world".try_into().unwrap(), + "vm-metadata::5::test-map".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data map metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + let mut responses = test_rpc(function_name!(), requests); - // latest data + // contract size metadata let response = responses.remove(0); - assert_eq!( response.preamble().get_canonical_stacks_tip_height(), Some(1) ); - let resp = response.decode_clarity_metadata_response().unwrap(); assert_eq!(resp.data, "1432"); + + // data map metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataMapMetadata { + key_type: TypeSignature::UIntType, + value_type: TypeSignature::UIntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); } From a94abfda195ca653b6344924eaa62fba7bf2e250 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 18 Nov 2024 19:30:39 +0100 Subject: [PATCH 21/56] test: add getclaritymetadata test case for errors --- .../src/net/api/tests/getclaritymetadata.rs | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs index 47249eb290..3bac8daf5d 100644 --- a/stackslib/src/net/api/tests/getclaritymetadata.rs +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -243,6 +243,26 @@ fn 
test_try_make_response() { ); requests.push(request); + // query undeclared var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::non-existing-var".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + let mut responses = test_rpc(function_name!(), requests); // contract size metadata @@ -270,4 +290,14 @@ fn test_try_make_response() { value_type: TypeSignature::IntType, }; assert_eq!(resp.data, expected.serialize()); + + // invalid metadata key + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // unknwnon data var + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); } From c9d5295f1e668405609eb1c77ab3c363867c0581 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 18 Nov 2024 20:50:06 +0100 Subject: [PATCH 22/56] tests: add failing case --- stackslib/src/net/api/tests/getclaritymetadata.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs index 3bac8daf5d..9ef7e678d2 100644 --- a/stackslib/src/net/api/tests/getclaritymetadata.rs +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -263,6 +263,16 @@ fn test_try_make_response() { ); requests.push(request); + // query existing contract size metadata + let 
request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + let mut responses = test_rpc(function_name!(), requests); // contract size metadata From 7568bf569bd7cc4e3cc5305602fbd87a0e930eeb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 18 Nov 2024 18:01:21 -0500 Subject: [PATCH 23/56] chore: bugfixes and SIP-029 integration test --- .github/workflows/bitcoin-tests.yml | 1 + stacks-common/src/types/mod.rs | 18 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- .../stacks-node/src/nakamoto_node/relayer.rs | 4 +- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 236 +++++++++++++++++- 6 files changed, 249 insertions(+), 14 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 52f46fbc49..57c159eac1 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -140,6 +140,7 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint - tests::nakamoto_integrations::signer_chainstate + - tests::nakamoto_integrations::sip029_coinbase_change # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index dfd3287239..be94511de2 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -459,6 +459,7 @@ impl StacksEpochId { match COINBASE_INTERVALS_TEST.lock() { Ok(schedule_opt) => { if let Some(schedule) = (*schedule_opt).as_ref() { + info!("Use overridden coinbase schedule {:?}", &schedule); return schedule.clone(); } } @@ -520,11 +521,18 @@ impl 
StacksEpochId { | StacksEpochId::Epoch30 => { self.coinbase_reward_pre_sip029(first_burnchain_height, current_burnchain_height) } - StacksEpochId::Epoch31 => self.coinbase_reward_sip029( - mainnet, - first_burnchain_height, - current_burnchain_height, - ), + StacksEpochId::Epoch31 => { + let cb = self.coinbase_reward_sip029( + mainnet, + first_burnchain_height, + current_burnchain_height, + ); + info!( + "Epoch31 coinbase at ({},{}) is {}", + first_burnchain_height, current_burnchain_height, cb + ); + cb + } } } } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index ea837eb7bc..d5d38f34e1 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -3618,7 +3618,7 @@ impl StacksChainState { burn_block_height: u64, first_burn_block_height: u64, ) -> u128 { - epoch.coinbase_reward(mainnet, burn_block_height, first_burn_block_height) + epoch.coinbase_reward(mainnet, first_burn_block_height, burn_block_height) } /// Create the block reward. 
diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 7c8dc6f2c5..6873e79a98 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -38,7 +38,7 @@ use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::core::STACKS_EPOCH_3_1_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; use stacks::net::p2p::NetworkHandle; @@ -689,7 +689,7 @@ impl RelayerThread { key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), - memo: vec![STACKS_EPOCH_3_0_MARKER], + memo: vec![STACKS_EPOCH_3_1_MARKER], new_seed: VRFSeed::from_proof(&tip_vrf_proof), parent_block_ptr: u32::try_from(commit_parent_block_burn_height) .expect("FATAL: burn block height exceeded u32"), diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 34083fb22a..bc30e51528 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::{next_block_and, wait_for}; +use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f82faad154..23d2384643 100644 --- 
a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,6 +29,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; +use rusqlite::OptionalExtension; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -60,7 +61,7 @@ use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, - PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_TESTNET, }; use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; @@ -70,7 +71,7 @@ use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; -use stacks::types::chainstate::StacksBlockId; +use stacks::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -84,7 +85,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, }; -use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::types::{set_test_coinbase_schedule, CoinbaseInterval, StacksPublicKeyBuffer}; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use 
stacks_common::util::{get_epoch_time_secs, sleep_ms}; @@ -114,7 +115,7 @@ pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { - pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ + pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 10] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -174,10 +175,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 231, - end_height: STACKS_EPOCH_MAX, + end_height: 241, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: 241, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]; } @@ -9500,3 +9508,221 @@ fn skip_mining_long_tx() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// Integration test for SIP-029 +fn sip029_coinbase_change() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let new_sched = vec![ + CoinbaseInterval { + coinbase: 1_000_000_000, + effective_start_height: 0, + }, + // NOTE: epoch 3.1 goes into effect at 241 + CoinbaseInterval { + coinbase: 500_000_000, + effective_start_height: 245, + }, + CoinbaseInterval { + coinbase: 125_000_000, + effective_start_height: 255, + }, + CoinbaseInterval { + coinbase: 62_500_000, + effective_start_height: 265, + }, + ]; + + set_test_coinbase_schedule(Some(new_sched.clone())); + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 180; + naka_conf.burnchain.max_rbf = 10_000_000; + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = 
TestSigners::new(vec![sender_signer_sk]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // mine until burnchain height 270 + loop { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + + let node_info = get_chain_info_opt(&naka_conf).unwrap(); + if node_info.burn_block_height >= 270 { + break; + } + } + + info!("Nakamoto miner has advanced to burn height 270"); + + // inspect `payments` table to see that coinbase was applied + let all_snapshots = sortdb.get_all_snapshots().unwrap(); + + // whether or not the last snapshot had a sortition + let mut prev_sortition = false; + + // whether or not we witnessed the requisite coinbases + let mut witnessed_1000 = false; + let mut witnessed_500 = false; + let mut witnessed_125 = false; + let mut witnessed_62_5 = false; + + // initial mining bonus + let initial_mining_bonus = 20400000; + + for sn in all_snapshots { + if !sn.sortition { + prev_sortition = false; + continue; + } + if 
sn.consensus_hash == ConsensusHash([0x00; 20]) { + continue; + } + let coinbase = { + let sql = "SELECT coinbase FROM payments WHERE consensus_hash = ?1"; + let args = rusqlite::params![&sn.consensus_hash]; + let Some(coinbase) = chainstate + .db() + .query_row(sql, args, |r| { + let coinbase_txt: String = r.get_unwrap(0); + let coinbase: u64 = coinbase_txt.parse().unwrap(); + Ok(coinbase) + }) + .optional() + .unwrap() + else { + info!("No coinbase for {} {}", sn.block_height, &sn.consensus_hash); + continue; + }; + + coinbase + }; + + info!( + "Coinbase at {} {}: {}", + sn.block_height, &sn.consensus_hash, coinbase + ); + // use >= for coinbases since a missed sortition can lead to coinbase accumulation + if sn.block_height < 245 { + if prev_sortition { + assert_eq!(coinbase, 1_000_000_000 + initial_mining_bonus); + witnessed_1000 = true; + } else { + assert!(coinbase >= 1_000_000_000 + initial_mining_bonus); + } + } else if sn.block_height < 255 { + if prev_sortition { + assert_eq!(coinbase, 500_000_000 + initial_mining_bonus); + witnessed_500 = true; + } else { + assert!(coinbase >= 500_000_000 + initial_mining_bonus); + } + } else if sn.block_height < 265 { + if prev_sortition { + assert_eq!(coinbase, 125_000_000 + initial_mining_bonus); + witnessed_125 = true; + } else { + assert!(coinbase >= 125_000_000 + initial_mining_bonus); + } + } else { + if prev_sortition { + assert_eq!(coinbase, 62_500_000 + initial_mining_bonus); + witnessed_62_5 = true; + } else { + assert!(coinbase >= 62_500_000 + initial_mining_bonus); + } + } + + prev_sortition = true; + } + + assert!(witnessed_1000); + assert!(witnessed_500); + assert!(witnessed_125); + assert!(witnessed_62_5); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From abfe0aabee29b8eec95901f3215f69dbd787bf2e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 19 Nov 2024 17:51:17 -0500 
Subject: [PATCH 24/56] chore: remove testing output --- stacks-common/src/types/mod.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 1e3fc5b407..f5b58b5c36 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -528,10 +528,6 @@ impl StacksEpochId { first_burnchain_height, current_burnchain_height, ); - info!( - "Epoch31 coinbase at ({},{}) is {}", - first_burnchain_height, current_burnchain_height, cb - ); cb } } From 3401b036eea6ba2707fcbdbba5f7b9ed70df04e1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 20 Nov 2024 11:29:12 -0500 Subject: [PATCH 25/56] chore: remove accidentally-added files --- stackslib/src/net/api/getdiagnostics.rs | 170 ----------------------- stackslib/src/net/api/setloglevel.rs.bak | 164 ---------------------- 2 files changed, 334 deletions(-) delete mode 100644 stackslib/src/net/api/getdiagnostics.rs delete mode 100644 stackslib/src/net/api/setloglevel.rs.bak diff --git a/stackslib/src/net/api/getdiagnostics.rs b/stackslib/src/net/api/getdiagnostics.rs deleted file mode 100644 index 34391b2c0c..0000000000 --- a/stackslib/src/net/api/getdiagnostics.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::io::{Read, Write}; - -use regex::{Captures, Regex}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, -}; -use stacks_common::types::net::PeerHost; -use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::hash::{Hash160, Sha256Sum}; - -use crate::burnchains::affirmation::AffirmationMap; -use crate::burnchains::Txid; -use crate::chainstate::burn::db::sortdb::SortitionDB; -use crate::chainstate::nakamoto::NakamotoChainState; -use crate::chainstate::stacks::db::StacksChainState; -use crate::core::mempool::MemPoolDB; -use crate::net::http::{ - parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, -}; -use crate::net::httpcore::{ - HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, -}; -use crate::net::p2p::PeerNetwork; -use crate::net::{Error as NetError, StacksNodeState}; - -/// The request to GET /v3/diagnostics -#[derive(Clone)] -pub struct RPCDiagnosticsHandler {} -impl RPCDiagnosticsHandler { - pub fn new() -> Self { - Self {} - } -} - -/// Node diagnostics -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCDiagnosticsData { -} - -impl RPCDiagnosticsData { - pub fn from_network( - network: &PeerNetwork, - chainstate: &StacksChainState, - ) -> RPCDiagnosticsData { - - } -} - -/// Decode the HTTP request -impl HttpRequest for RPCDiagnosticsHandler { - fn verb(&self) -> &'static str { - "GET" - } - - fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/diagnostics$"#).unwrap() - } - - fn metrics_identifier(&self) -> &str { - "/v3/diagnostics" - } - - /// Try to decode this request. - /// There's nothing to load here, so just make sure the request is well-formed. 
- fn try_parse_request( - &mut self, - preamble: &HttpRequestPreamble, - _captures: &Captures, - query: Option<&str>, - _body: &[u8], - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(Error::DecodeError( - "Invalid Http request: expected 0-length body for GetInfo".to_string(), - )); - } - Ok(HttpRequestContents::new().query_string(query)) - } -} - -impl RPCRequestHandler for RPCDiagnosticsHandler { - /// Reset internal state - fn restart(&mut self) {} - - /// Make the response - fn try_handle_request( - &mut self, - preamble: HttpRequestPreamble, - _contents: HttpRequestContents, - node: &mut StacksNodeState, - ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - - let rpc_peer_info: Result = - node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { - let coinbase_height = network.stacks_tip.coinbase_height; - - Ok(RPCDiagnosticsData::from_network( - network, - chainstate, - )) - }); - - let rpc_diagnostics_info = match rpc_diagnostics_info { - Ok(rpc_diagnostics_info) => rpc_diagnostics_info, - Err(response) => { - return response.try_into_contents().map_err(NetError::from); - } - }; - - let mut preamble = HttpResponsePreamble::ok_json(&preamble); - preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); - let body = HttpResponseContents::try_from_json(&rpc_peer_info)?; - Ok((preamble, body)) - } -} - -/// Decode the HTTP response -impl HttpResponse for RPCDiagnosticsHandler { - fn try_parse_response( - &self, - preamble: &HttpResponsePreamble, - body: &[u8], - ) -> Result { - let peer_info: RPCPeerInfoData = parse_json(preamble, body)?; - Ok(HttpResponsePayload::try_from_json(peer_info)?) 
- } -} - -impl StacksHttpRequest { - /// Make a new getdiagnostics request to this endpoint - pub fn new_getdiagnostics(host: PeerHost) -> StacksHttpRequest { - let mut req = StacksHttpRequest::new_for_peer( - host, - "GET".into(), - "/v3/diagnostics".into(), - HttpRequestContents::new(), - ) - .expect("FATAL: failed to construct request from infallible data"); - req.preamble_mut() - .set_canonical_stacks_tip_height(stacks_height); - req - } -} - -impl StacksHttpResponse { - pub fn decode_diagnostics_data(self) -> Result { - let contents = self.get_http_payload_ok()?; - let response_json: serde_json::Value = contents.try_into()?; - let diagnostics_info: RPCDiagnosticsData = serde_json::from_value(response_json) - .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; - Ok(diagnostics_info) - } -} diff --git a/stackslib/src/net/api/setloglevel.rs.bak b/stackslib/src/net/api/setloglevel.rs.bak deleted file mode 100644 index e9d138f100..0000000000 --- a/stackslib/src/net/api/setloglevel.rs.bak +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::io::{Read, Write}; - -use clarity::vm::costs::ExecutionCost; -use regex::{Captures, Regex}; -use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; -use stacks_common::types::net::PeerHost; - -use crate::net::http::{ - parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, - HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, - HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, -}; -use crate::net::httpcore::{ - HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, -}; - -use slog; - -#[derive(Clone)] -pub struct RPCSetLogLevelRequestHandler { - pub loglevel: Option; - pub password: Option, -} - -impl RPCPostTransactionRequestHandler { - pub fn new(password: Option) -> Self { - Self { - level: None, - password - } - } -} - -/// Decode the HTTP request -impl HttpRequest for RPCPostTransactionRequestHandler { - fn verb(&self) -> &'static str { - "POST" - } - - fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/node/loglevel$"#).unwrap() - } - - fn metrics_identifier(&self) -> &str { - "/v3/node/loglevel" - } - - /// Try to decode this request. - /// There's nothing to load here, so just make sure the request is well-formed. 
- fn try_parse_request( - &mut self, - preamble: &HttpRequestPreamble, - _captures: &Captures, - query: Option<&str>, - body: &[u8], - ) -> Result { - // If no authorization is set, then the block proposal endpoint is not enabled - let Some(password) = &self.auth else { - return Err(Error::Http(400, "Bad Request.".into())); - }; - let Some(auth_header) = preamble.headers.get("authorization") else { - return Err(Error::Http(401, "Unauthorized".into())); - }; - if auth_header != password { - return Err(Error::Http(401, "Unauthorized".into())); - } - - if preamble.get_content_length() == 0 { - return Err(Error::DecodeError( - "Invalid Http request: expected non-zero-length body for PostTransaction" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(Error::DecodeError( - "Invalid Http request: PostTransaction body is too big".to_string(), - )); - } - - match preamble.content_type { - None => { - return Err(Error::DecodeError( - "Missing Content-Type for transaction".to_string(), - )); - } - Some(HttpContentType::Text) => { - // TODO - - } - _ => { - return Err(Error::DecodeError( - "Wrong Content-Type for loglevel; expected tex/plain".to_string(), - )); - } - } - - Ok(HttpRequestContents::new().query_string(query)) - } -} - -impl RPCRequestHandler for RPCSetLogLevelRequestHandler { - /// Reset internal state - fn restart(&mut self) { - self.loglevel = None; - } - - /// Make the response - fn try_handle_request( - &mut self, - preamble: HttpRequestPreamble, - _contents: HttpRequestContents, - node: &mut StacksNodeState, - ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - - // TODO - - let mut preamble = HttpResponsePreamble::ok_json(&preamble); - preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); - let body = HttpResponseContents::try_from_json(&txid)?; - Ok((preamble, body)) - } -} - -/// Decode the HTTP response -impl HttpResponse for RPCSetLogLevelRequestHandler { - fn 
try_parse_response( - &self, - preamble: &HttpResponsePreamble, - body: &[u8], - ) -> Result { - Ok(HttpResponsePayload::from_ram(body.to_vec())) - } -} - -impl StacksHttpRequest { - /// Make a new post-transaction request - pub fn new_set_loglevel(host: PeerHost, level: slog::Level) -> StacksHttpRequest { - StacksHttpRequest::new_for_peer( - host, - "POST".into(), - "/v3/node/loglevel".to_string(), - HttpRequestContents::new().payload_text(Self::loglevel_to_string(level)) - ) - .expect("FATAL: failed to construct request from infallible data") - } -} - From 7b62238bcf5d708ed14850678072aaa2066443d5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 20 Nov 2024 11:34:11 -0500 Subject: [PATCH 26/56] chore: fix merge bug --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6ac2131b04..5edf6b83b2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -9824,6 +9824,11 @@ fn clarity_cost_spend_down() { ); info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); let small_contract = format!( r#" From 5fbe0af2b63d6925fa302561927c9d4d86cbaa06 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 20 Nov 2024 11:34:47 -0500 Subject: [PATCH 27/56] chore: cargo fmt --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5edf6b83b2..e28215b34b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs 
@@ -9824,7 +9824,7 @@ fn clarity_cost_spend_down() { ); info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); - + info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); From 79ea22782552089f975a0331bca556d5d33ea5d2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 20 Nov 2024 17:29:59 -0500 Subject: [PATCH 28/56] chore: PR feedback --- clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs | 1 - stacks-common/src/types/mod.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 6050c7d600..12597c88fa 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -48,7 +48,6 @@ pub mod contracts; /// Backwards-compatibility shim for type_checker tests. Runs at latest Clarity version. pub fn mem_type_check(exp: &str) -> CheckResult<(Option, ContractAnalysis)> { - // TODO (question for reviewers): This uses Clarity 3 with Epoch 2.1. Is this a problem? 
mem_run_analysis( exp, crate::vm::ClarityVersion::latest(), diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index f5b58b5c36..10e30a4318 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -215,7 +215,7 @@ impl CoinbaseInterval { intervals: &[CoinbaseInterval], effective_height: u64, ) -> u128 { - if intervals.len() == 0 { + if intervals.is_empty() { return 0; } if intervals.len() == 1 { diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 00fdfbca91..791f2064dc 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4109,7 +4109,7 @@ impl StacksChainState { current_epoch = StacksEpochId::Epoch31; } StacksEpochId::Epoch31 => { - panic!("No defined transition from Epoch30 forward") + panic!("No defined transition from Epoch31 forward") } } } From 8e8fd17efc14359100b9cb2aab8091485af57ee6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 21 Nov 2024 12:50:43 -0500 Subject: [PATCH 29/56] chore: expand test coverage to include errors --- .../src/net/api/tests/getclaritymetadata.rs | 72 +++++++++++++++++-- stackslib/src/net/api/tests/mod.rs | 58 +++++++++------ 2 files changed, 103 insertions(+), 27 deletions(-) diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs index 9ef7e678d2..495bbb514f 100644 --- a/stackslib/src/net/api/tests/getclaritymetadata.rs +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -213,6 +213,16 @@ fn test_try_make_response() { let mut requests = vec![]; + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + // 
query existing contract size metadata let request = StacksHttpRequest::new_getclaritymetadata( addr.into(), @@ -223,7 +233,7 @@ fn test_try_make_response() { ); requests.push(request); - // query existing data var metadata + // query existing data map metadata let request = StacksHttpRequest::new_getclaritymetadata( addr.into(), StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), @@ -233,7 +243,7 @@ fn test_try_make_response() { ); requests.push(request); - // query existing data map metadata + // query existing data var metadata let request = StacksHttpRequest::new_getclaritymetadata( addr.into(), StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), @@ -243,22 +253,32 @@ fn test_try_make_response() { ); requests.push(request); - // query undeclared var metadata + // query existing data var metadata let request = StacksHttpRequest::new_getclaritymetadata( addr.into(), StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), "hello-world".try_into().unwrap(), - "vm-metadata::6::non-existing-var".to_string(), + "vm-metadata::6::bar".to_string(), TipRequest::UseLatestAnchoredTip, ); requests.push(request); - // query invalid metadata key (wrong store type) + // query existing data var metadata let request = StacksHttpRequest::new_getclaritymetadata( addr.into(), StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), "hello-world".try_into().unwrap(), - "vm-metadata::2::bar".to_string(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query undeclared var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::non-existing-var".to_string(), TipRequest::UseLatestAnchoredTip, ); requests.push(request); @@ -273,8 +293,23 @@ fn 
test_try_make_response() { ); requests.push(request); + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + let mut responses = test_rpc(function_name!(), requests); + // unknwnon data var + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); + // contract size metadata let response = responses.remove(0); assert_eq!( @@ -301,11 +336,36 @@ fn test_try_make_response() { }; assert_eq!(resp.data, expected.serialize()); + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + // invalid metadata key let response = responses.remove(0); let (preamble, body) = response.destruct(); assert_eq!(preamble.status_code, 404); + // contract size metadata + let response = responses.remove(0); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_clarity_metadata_response().unwrap(); + assert_eq!(resp.data, "1432"); + // unknwnon data var let response = responses.remove(0); let (preamble, body) = response.destruct(); diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 7c191ca674..d63bec8c4d 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ 
b/stackslib/src/net/api/tests/mod.rs @@ -1024,7 +1024,7 @@ impl<'a> TestRPC<'a> { peer_2.sortdb = Some(peer_2_sortdb); peer_2.stacks_node = Some(peer_2_stacks_node); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + peer_2.mempool = Some(peer_2_mempool); convo_send_recv(&mut convo_2, &mut convo_1); @@ -1033,8 +1033,6 @@ impl<'a> TestRPC<'a> { // hack around the borrow-checker convo_send_recv(&mut convo_1, &mut convo_2); - peer_2.mempool = Some(peer_2_mempool); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); @@ -1056,27 +1054,45 @@ impl<'a> TestRPC<'a> { .unwrap(); } - { - let rpc_args = RPCHandlerArgs::default(); - let mut node_state = StacksNodeState::new( - &mut peer_1.network, - &peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - &mut peer_1_mempool, - &rpc_args, - false, - ); - convo_1.chat(&mut node_state).unwrap(); - } - - convo_1.try_flush().unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); peer_1.stacks_node = Some(peer_1_stacks_node); - peer_1.mempool = Some(peer_1_mempool); - // should have gotten a reply - let resp_opt = convo_1.try_get_response(); + let resp_opt = loop { + debug!("Peer 1 try get response"); + convo_send_recv(&mut convo_1, &mut convo_2); + { + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + false, + ); + + convo_1.chat(&mut node_state).unwrap(); + + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + } + + convo_1.try_flush().unwrap(); + + info!("Try get response from request {:?}", &request); + + // should have gotten a reply + let resp_opt = 
convo_1.try_get_response(); + if resp_opt.is_some() { + break resp_opt; + } + }; + assert!(resp_opt.is_some()); let resp = resp_opt.unwrap(); From f3bd41de2dc388577cc045bacf4bf21d4bb75677 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Thu, 21 Nov 2024 20:36:42 +0100 Subject: [PATCH 30/56] tests: add rpc get marf value vm-account test case --- .../src/net/api/tests/getclaritymarfvalue.rs | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index e36b13ca74..8536fd563e 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -15,8 +15,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; -use clarity::vm::{ClarityName, ContractName}; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; +use clarity::vm::{ClarityName, ContractName, Value}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, TrieHash}; use stacks_common::types::net::PeerHost; @@ -126,6 +126,15 @@ fn test_try_make_response() { ); requests.push(request); + // query vm-account balance + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm-account::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R::19"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + let mut responses = test_rpc(function_name!(), requests); // existing data @@ -179,4 +188,16 @@ fn test_try_make_response() { let (preamble, body) = response.destruct(); assert_eq!(preamble.status_code, 404); + + // vm-account blaance + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let 
resp = response.decode_data_var_response().unwrap(); + let balance = Value::try_deserialize_hex(&resp.data[2..], &TypeSignature::IntType, false); + assert_eq!(balance, Ok(Value::Int(256_000_000_000))); + assert!(resp.marf_proof.is_some()); } From f486e55c31013742adef131adfd91af0787de011 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 21 Nov 2024 17:57:07 -0500 Subject: [PATCH 31/56] chore: fix failing integration tests --- testnet/stacks-node/src/tests/signer/v0.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b55b9bafe6..1b7ff4627b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1222,8 +1222,18 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = - SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + epochs[StacksEpochId::Epoch30].end_height = 3_015; + epochs[StacksEpochId::Epoch31].start_height = 3_015; + }, + None, + None, + ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -3025,6 +3035,8 @@ fn mock_sign_epoch_25() { let epochs = node_config.burnchain.epochs.as_mut().unwrap(); epochs[StacksEpochId::Epoch25].end_height = 251; epochs[StacksEpochId::Epoch30].start_height = 251; + epochs[StacksEpochId::Epoch30].end_height = 265; + epochs[StacksEpochId::Epoch31].start_height = 265; }, None, None, @@ -3123,7 +3135,7 @@ fn mock_sign_epoch_25() { ); } assert!( - main_poll_time.elapsed() <= 
Duration::from_secs(45), + main_poll_time.elapsed() <= Duration::from_secs(145), "Timed out waiting to advance epoch 3.0 boundary" ); } @@ -3185,6 +3197,8 @@ fn multiple_miners_mock_sign_epoch_25() { let epochs = config.burnchain.epochs.as_mut().unwrap(); epochs[StacksEpochId::Epoch25].end_height = 251; epochs[StacksEpochId::Epoch30].start_height = 251; + epochs[StacksEpochId::Epoch30].end_height = 265; + epochs[StacksEpochId::Epoch31].start_height = 265; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { warn!( From 9f86cfacde652fddd7cf43ce5e51655777f2ce90 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 22 Nov 2024 23:36:01 +0200 Subject: [PATCH 32/56] miner auto-set mining key as node.seed --- testnet/stacks-node/src/config.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 785ce057e5..5300faf2e8 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -833,7 +833,12 @@ impl Config { } let miner = match config_file.miner { - Some(miner) => miner.into_config_default(miner_default_config)?, + Some(mut miner) => { + if miner.mining_key.is_none() && !node.seed.is_empty() { + miner.mining_key = Some(to_hex(&node.seed)); + } + miner.into_config_default(miner_default_config)? 
+ } None => miner_default_config, }; @@ -2546,6 +2551,13 @@ pub struct MinerConfigFile { impl MinerConfigFile { fn into_config_default(self, miner_default_config: MinerConfig) -> Result { + match &self.mining_key { + Some(_) => {} + None => { + panic!("mining key not set"); + } + } + let mining_key = self .mining_key .as_ref() From bd5199b244d1d40ee1fa32f7ead2416cb3612d0e Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 22 Nov 2024 23:45:07 +0200 Subject: [PATCH 33/56] import to_hex --- testnet/stacks-node/src/config.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 5300faf2e8..2a9816fea4 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -53,6 +53,7 @@ use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; use stacks::types::chainstate::BurnchainHeaderHash; use stacks::types::EpochList; +use stacks::util::hash::to_hex; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; From 4198e2ce796eae112d5c3621e3946b857c1a178f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 26 Nov 2024 11:09:12 -0500 Subject: [PATCH 34/56] chore: add more logging in `StacksClient` --- stacks-signer/src/client/stacks_client.rs | 48 ++++++++++++++++++----- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cae6a210b7..4676738629 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -173,6 +173,9 @@ impl StacksClient { &self, consensus_hash: &ConsensusHash, ) -> Result { + debug!("StacksClient: Getting tenure tip"; + "consensus_hash" => %consensus_hash, + ); let send_request = || { self.stacks_node_client .get(self.tenure_tip_path(consensus_hash)) @@ -192,6 +195,7 @@ impl StacksClient { /// Get the last 
set reward cycle stored within the stackerdb contract pub fn get_last_set_cycle(&self) -> Result { + debug!("StacksClient: Getting last set cycle"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); let function_name_str = "get-last-set-cycle"; let function_name = ClarityName::from(function_name_str); @@ -210,6 +214,10 @@ impl StacksClient { stackerdb_contract: &QualifiedContractIdentifier, page: u32, ) -> Result, ClientError> { + debug!("StacksClient: Getting signer slots"; + "stackerdb_contract" => %stackerdb_contract, + "page" => page, + ); let function_name_str = "stackerdb-get-signer-slots-page"; let function_name = ClarityName::from(function_name_str); let function_args = &[ClarityValue::UInt(page.into())]; @@ -250,6 +258,9 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result, ClientError> { + debug!("StacksClient: Getting parsed signer slots"; + "reward_cycle" => reward_cycle, + ); let signer_set = u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); @@ -272,6 +283,7 @@ impl StacksClient { /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { + debug!("StacksClient: Getting node epoch"); let pox_info = self.get_pox_data()?; let burn_block_height = self.get_burn_block_height()?; @@ -302,7 +314,7 @@ impl StacksClient { /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. 
pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { - debug!("stacks_node_client: Submitting block for validation..."; + debug!("StacksClient: Submitting block for validation"; "signer_sighash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, @@ -337,6 +349,10 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { + debug!("StacksClient: Getting tenure forking info"; + "chosen_parent" => %chosen_parent, + "last_sortition" => %last_sortition, + ); let mut tenures: VecDeque = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; if tenures.is_empty() { @@ -373,7 +389,7 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { - debug!("stacks_node_client: Getting tenure forking info..."; + debug!("StacksClient: Getting tenure forking info"; "chosen_parent" => %chosen_parent, "last_sortition" => %last_sortition, ); @@ -402,7 +418,7 @@ impl StacksClient { /// Get the current winning sortition and the last winning sortition pub fn get_current_and_last_sortition(&self) -> Result { - debug!("stacks_node_client: Getting current and prior sortition..."); + debug!("StacksClient: Getting current and prior sortition"); let path = format!("{}/latest_and_last", self.sortition_info_path()); let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { @@ -443,7 +459,7 @@ impl StacksClient { /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { - debug!("stacks_node_client: Getting peer info..."); + debug!("StacksClient: Getting peer info"); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { @@ -466,7 +482,9 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result>, ClientError> { - 
debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); + debug!("StacksClient: Getting reward set signers"; + "reward_cycle" => reward_cycle, + ); let timer = crate::monitoring::new_rpc_call_timer( &format!("{}/v3/stacker_set/:reward_cycle", self.http_origin), &self.http_origin, @@ -502,7 +520,7 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { - debug!("stacks_node_client: Getting pox data..."); + debug!("StacksClient: Getting pox data"); let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client @@ -521,11 +539,13 @@ impl StacksClient { /// Helper function to retrieve the burn tip height from the stacks node fn get_burn_block_height(&self) -> Result { + debug!("StacksClient: Getting burn block height"); self.get_peer_info().map(|info| info.burn_block_height) } /// Get the current reward cycle info from the stacks node pub fn get_current_reward_cycle_info(&self) -> Result { + debug!("StacksClient: Getting current reward cycle info"); let pox_data = self.get_pox_data()?; let blocks_mined = pox_data .current_burnchain_block_height @@ -548,7 +568,9 @@ impl StacksClient { &self, address: &StacksAddress, ) -> Result { - debug!("stacks_node_client: Getting account info..."); + debug!("StacksClient: Getting account info"; + "address" => %address, + ); let timer_label = format!("{}/v2/accounts/:principal", self.http_origin); let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { @@ -570,6 +592,11 @@ impl StacksClient { /// /// In tests, this panics if the retry takes longer than 30 seconds. 
pub fn post_block_until_ok(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { + debug!("StacksClient: Posting block to stacks node"; + "signer_sighash" => %block.header.signer_signature_hash(), + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, + ); let start_time = Instant::now(); loop { match self.post_block(block) { @@ -595,7 +622,8 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { - debug!("stacks_node_client: Posting block to the stacks node..."; + debug!("StacksClient: Posting block to the stacks node"; + "signer_sighash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -630,7 +658,9 @@ impl StacksClient { function_name: &ClarityName, function_args: &[ClarityValue], ) -> Result { - debug!("stacks_node_client: Calling read-only function {function_name} with args {function_args:?}..."); + debug!( + "StacksClient: Calling read-only function {function_name} with args {function_args:?}" + ); let args = function_args .iter() .filter_map(|arg| arg.serialize_to_hex().ok()) From fc6db45cc063abf8029bdf6de19a03a6707a8771 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Nov 2024 13:22:10 -0500 Subject: [PATCH 35/56] chore: fix compile error from merge --- stacks-common/src/types/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index db94f41a21..4a1f34cbc7 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -352,7 +352,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } From b2bfb5ee3ec5e0a39330b57b7e704d38c0af7242 Mon Sep 17 00:00:00 2001 From: Jude 
Nelson Date: Tue, 26 Nov 2024 13:30:58 -0500 Subject: [PATCH 36/56] chore: use STXBalance --- .../src/net/api/tests/getclaritymarfvalue.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs index 8536fd563e..7255d1ee99 100644 --- a/stackslib/src/net/api/tests/getclaritymarfvalue.rs +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -15,6 +15,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use clarity::vm::database::{ClarityDeserializable, STXBalance}; use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; use clarity::vm::{ClarityName, ContractName, Value}; use stacks_common::codec::StacksMessageCodec; @@ -149,7 +150,7 @@ fn test_try_make_response() { Some(1) ); - let resp = response.decode_data_var_response().unwrap(); + let resp = response.decode_clarity_marf_response().unwrap(); assert_eq!(resp.data, "0x0000000000000000000000000000000000"); assert!(resp.marf_proof.is_some()); @@ -165,7 +166,7 @@ fn test_try_make_response() { Some(1) ); - let resp = response.decode_data_var_response().unwrap(); + let resp = response.decode_clarity_marf_response().unwrap(); assert_eq!(resp.data, "0x0100000000000000000000000000000001"); assert!(resp.marf_proof.is_some()); @@ -189,15 +190,16 @@ fn test_try_make_response() { let (preamble, body) = response.destruct(); assert_eq!(preamble.status_code, 404); - // vm-account blaance + // vm-account balance let response = responses.remove(0); debug!( "Response:\n{}\n", std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() ); - let resp = response.decode_data_var_response().unwrap(); - let balance = Value::try_deserialize_hex(&resp.data[2..], &TypeSignature::IntType, false); - assert_eq!(balance, Ok(Value::Int(256_000_000_000))); - assert!(resp.marf_proof.is_some()); + let resp = response.decode_clarity_marf_response().unwrap(); + let balance 
= STXBalance::deserialize(&resp.data[2..]).unwrap(); + + assert_eq!(balance.amount_unlocked(), 1_000_000_000); + assert_eq!(balance.amount_locked(), 0); } From 7af0a11b493a9a462c29dd894aaf9e6844f9bf2e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Nov 2024 22:07:00 -0500 Subject: [PATCH 37/56] hotfix: load the BurnStateDB implementation from the block's header's consensus_hash field, not the canonical tip --- stackslib/src/net/api/postblock_proposal.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 517105515c..cb009d0621 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -363,9 +363,17 @@ impl NakamotoBlockProposal { }); } - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; - let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); - let mut db_handle = sortdb.index_handle(&sort_tip); + let sort_tip = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &self.block.header.consensus_hash, + )? 
+ .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::NoSuchTenure, + reason: "Failed to find sortition for block tenure".to_string(), + })?; + + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip.sortition_id); + let mut db_handle = sortdb.index_handle(&sort_tip.sortition_id); // (For the signer) // Verify that the block's tenure is on the canonical sortition history From 622536657bc5230568cde91fe26cb7e8cd9f3987 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Nov 2024 22:32:51 -0500 Subject: [PATCH 38/56] chore: questions for reviewers --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a08c0ab353..7b56a987f5 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1049,12 +1049,14 @@ impl BlockMinerThread { // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, + // TODO (for reviewers): should this be `self.burn_election_block.consensus_hash`? &burn_db .index_handle_at_ch(&self.burn_block.consensus_hash) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_election_block.consensus_hash, + // TODO (for reviewers): should this be `self.burn_election_block.total_burn`? 
self.burn_block.total_burn, tenure_start_info, self.config From f71a949f111a4cb2af03f68645f1edc2ac989e56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Nov 2024 22:38:35 -0500 Subject: [PATCH 39/56] chore: fix burn views --- testnet/stacks-node/src/nakamoto_node/miner.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7b56a987f5..d51c23119d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1049,15 +1049,13 @@ impl BlockMinerThread { // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, - // TODO (for reviewers): should this be `self.burn_election_block.consensus_hash`? &burn_db - .index_handle_at_ch(&self.burn_block.consensus_hash) + .index_handle_at_ch(&self.burn_election_block.consensus_hash) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_election_block.consensus_hash, - // TODO (for reviewers): should this be `self.burn_election_block.total_burn`? 
- self.burn_block.total_burn, + self.burn_election_block.total_burn, tenure_start_info, self.config .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), From 40d3563a380c5d0932a83914da94323cdcb47c5b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Nov 2024 22:50:51 -0500 Subject: [PATCH 40/56] fix; revert change to Nakamoto block builder from PR feedback --- testnet/stacks-node/src/nakamoto_node/miner.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d51c23119d..d08fe9c25a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -139,7 +139,11 @@ pub struct BlockMinerThread { registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner burn_election_block: BlockSnapshot, - /// Current burnchain tip + /// Current burnchain tip as of the last TenureChange + /// * if the last tenure-change was a BlockFound, then this is the same as the + /// `burn_election_block`. 
+ /// * otherwise, if the last tenure-change is an Extend, then this is the sortition of the burn + /// view consensus hash in the TenureChange burn_block: BlockSnapshot, /// The start of the parent tenure for this tenure parent_tenure_id: StacksBlockId, @@ -1050,12 +1054,12 @@ impl BlockMinerThread { let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, &burn_db - .index_handle_at_ch(&self.burn_election_block.consensus_hash) + .index_handle_at_ch(&self.burn_block.consensus_hash) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_election_block.consensus_hash, - self.burn_election_block.total_burn, + self.burn_block.total_burn, tenure_start_info, self.config .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), From 31073de553c147e606bbb4dcc710a25501f90117 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:01:40 +0100 Subject: [PATCH 41/56] docs: add 2 new clarity endpoints to the unreleased changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 046ca667a0..5225a99c5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,10 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added +- New RPC endpoints + - `/v2/clarity/marf/:marf_key_hash` + - `/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key` + ### Changed From f56bfc79735cae6f328de017febf871746463a6f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 27 Nov 2024 13:41:22 -0500 Subject: [PATCH 42/56] chore: use the same burn view loader in both block validation and block processing --- stackslib/src/chainstate/nakamoto/mod.rs | 125 +++++++++++--------- stackslib/src/net/api/postblock_proposal.rs | 31 +++-- 2 files changed, 87 insertions(+), 69 deletions(-) diff --git 
a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b8d0441591..6266543e2a 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1763,6 +1763,73 @@ impl NakamotoChainState { } } + /// Get the current burnchain view + /// This is either: + /// (1) set by the tenure change tx if one exists + /// (2) the same as parent block id + pub fn get_block_burn_view( + sort_db: &SortitionDB, + next_ready_block: &NakamotoBlock, + parent_header_info: &StacksHeaderInfo, + ) -> Result { + let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { + if let Some(ref parent_burn_view) = parent_header_info.burn_view { + // check that the tenure_change's burn view descends from the parent + let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + parent_burn_view, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? 
+ .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } + tenure_change.burn_view_consensus_hash + } else { + parent_header_info.burn_view.clone().ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? + }; + Ok(burnchain_view) + } + /// Process the next ready block. /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the /// receipt. Otherwise, it returns Ok(None). 
@@ -1882,62 +1949,8 @@ impl NakamotoChainState { // this is either: // (1) set by the tenure change tx if one exists // (2) the same as parent block id - - let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { - if let Some(ref parent_burn_view) = parent_header_info.burn_view { - // check that the tenure_change's burn view descends from the parent - let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - parent_burn_view, - )? - .ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: could not find parent block's burnchain view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })?; - let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; - let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? 
- .ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: could not find parent block's burnchain view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })?; - if connected_sort_id != parent_burn_view_sn.sortition_id { - warn!( - "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - return Err(ChainstateError::InvalidStacksBlock( - "Does not connect to burn view of parent block ID".into(), - )); - } - } - tenure_change.burn_view_consensus_hash - } else { - parent_header_info.burn_view.clone().ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })? - }; + let burnchain_view = + Self::get_block_burn_view(sort_db, &next_ready_block, &parent_header_info)?; let Some(burnchain_view_sn) = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? 
else { diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index cb009d0621..0d1cf2ebf9 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -363,15 +363,28 @@ impl NakamotoBlockProposal { }); } - let sort_tip = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &self.block.header.consensus_hash, + // open sortition view to the current burn view. + // If the block has a TenureChange with an Extend cause, then the burn view is whatever is + // indicated in the TenureChange. + // Otherwise, it's the same as the block's parent's burn view. + let parent_stacks_header = NakamotoChainState::get_block_header( + chainstate.db(), + &self.block.header.parent_block_id, )? .ok_or_else(|| BlockValidateRejectReason { - reason_code: ValidateRejectCode::NoSuchTenure, - reason: "Failed to find sortition for block tenure".to_string(), + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Invalid parent block".into(), })?; + let burn_view_consensus_hash = + NakamotoChainState::get_block_burn_view(sortdb, &self.block, &parent_stacks_header)?; + let sort_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &burn_view_consensus_hash)? + .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::NoSuchTenure, + reason: "Failed to find sortition for block tenure".to_string(), + })?; + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip.sortition_id); let mut db_handle = sortdb.index_handle(&sort_tip.sortition_id); @@ -409,14 +422,6 @@ impl NakamotoBlockProposal { )?; // Validate txs against chainstate - let parent_stacks_header = NakamotoChainState::get_block_header( - chainstate.db(), - &self.block.header.parent_block_id, - )? - .ok_or_else(|| BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, - reason: "Invalid parent block".into(), - })?; // Validate the block's timestamp. 
It must be: // - Greater than the parent block's timestamp From dbee3fad3735145e2e9961c7e0124a87db8c707d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 27 Nov 2024 11:37:03 -0800 Subject: [PATCH 43/56] fix: block proposal test assertion --- testnet/stacks-node/src/tests/signer/v0.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2486043ccc..cd3488bd07 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -506,7 +506,7 @@ fn block_proposal_rejection() { signer_test.wait_for_validate_reject_response(short_timeout, block_signer_signature_hash_2); assert!(matches!( reject.reason_code, - ValidateRejectCode::UnknownParent + ValidateRejectCode::InvalidBlock )); let start_polling = Instant::now(); @@ -532,7 +532,10 @@ fn block_proposal_rejection() { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); } else if signer_signature_hash == block_signer_signature_hash_2 { found_signer_signature_hash_2 = true; - assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + assert!(matches!( + reason_code, + RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock) + )); } else { continue; } From bd3943502b8032b2f7ebf7530cf6fb7e5ebd22e1 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 2 Dec 2024 16:31:00 +0200 Subject: [PATCH 44/56] fix v3 update block to support no trailing --- stackslib/src/net/api/postblock_v3.rs | 2 +- stackslib/src/net/api/tests/postblock_v3.rs | 60 +++++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 9bd174d322..aff20d962f 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -70,7 +70,7 @@ impl HttpRequest for RPCPostBlockRequestHandler { } fn path_regex(&self) -> Regex { - 
Regex::new(&format!("^{PATH}$")).unwrap() + Regex::new(&format!("^{}(/)?$", PATH.trim_end_matches('/'))).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index 5cc652fc83..0b0a95f3a4 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -214,6 +214,66 @@ fn handle_req_accepted() { assert_eq!(resp.stacks_block_id, next_block_id); } +#[test] +fn handle_req_without_trailing_accepted() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let path_without_slash: &str = "/v3/blocks/upload"; + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + let (next_block, ..) = rpc_test.peer_1.single_block_tenure( + &rpc_test.privk1, + |_| {}, + |burn_ops| { + rpc_test.peer_2.next_burnchain_block(burn_ops.clone()); + }, + |_| true, + ); + let next_block_id = next_block.block_id(); + let mut requests = vec![]; + + // post the block + requests.push( + StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + path_without_slash.into(), + HttpRequestContents::new().payload_stacks(&next_block), + ) + .unwrap(), + ); + + // idempotent + requests.push( + StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + path_without_slash.into(), + HttpRequestContents::new().payload_stacks(&next_block), + ) + .unwrap(), + ); + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + info!( + "Response for the request that has the path without the last '/': {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, next_block_id); + + let response = responses.remove(0); + info!( + "Response for the request that has the path without the last '/': {}", + 
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, next_block_id); +} + #[test] fn handle_req_unknown_burn_block() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); From e3002238c8cfc6a5da083a314b6b04fbd511cb66 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Mon, 2 Dec 2024 18:41:59 +0100 Subject: [PATCH 45/56] added config options for sip29 on testnet --- testnet/stacks-node/src/config.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 785ce057e5..5918c1fe9b 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -659,6 +659,8 @@ impl Config { Ok(StacksEpochId::Epoch25) } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) + } else if epoch_name == EPOCH_CONFIG_3_1_0 { + Ok(StacksEpochId::Epoch31) } else { Err(format!("Unknown epoch name specified: {epoch_name}")) }?; @@ -685,6 +687,7 @@ impl Config { StacksEpochId::Epoch24, StacksEpochId::Epoch25, StacksEpochId::Epoch30, + StacksEpochId::Epoch31, ]; for (expected_epoch, configured_epoch) in expected_list .iter() @@ -1291,6 +1294,7 @@ pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; +pub const EPOCH_CONFIG_3_1_0: &str = "3.1"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { From f50c75ee0da9b14141f5e69764e576687c4ad0af Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Mon, 2 Dec 2024 19:47:24 +0100 Subject: [PATCH 46/56] updated testnet config --- stackslib/src/core/mod.rs | 2 +- testnet/stacks-node/conf/testnet-follower-conf.toml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/core/mod.rs 
b/stackslib/src/core/mod.rs index 636b71c902..ba4dbf14d2 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -457,7 +457,7 @@ lazy_static! { network_epoch: PEER_VERSION_EPOCH_3_0 }, StacksEpoch { - epoch_id: StacksEpochId::Epoch30, + epoch_id: StacksEpochId::Epoch31, start_height: 8001, end_height: STACKS_EPOCH_MAX, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 80226c5b89..f44be0172a 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -78,3 +78,7 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" start_height = 56_457 + +[[burnchain.epochs]] +epoch_name = "3.1" +start_height = 67_568 From 30ae1b709e0502c92e1467bda9f71c2f43964e79 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 15:51:01 -0500 Subject: [PATCH 47/56] chore: downgrade noisy log --- stackslib/src/chainstate/burn/db/sortdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 54a804a2fe..5cc06f54b1 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3986,7 +3986,7 @@ impl<'a> SortitionDBConn<'a> { tip, reward_cycle_id, )?; - info!("Fetching preprocessed reward set"; + debug!("Fetching preprocessed reward set"; "tip_sortition_id" => %tip, "reward_cycle_id" => reward_cycle_id, "prepare_phase_start_sortition_id" => %first_sortition, From 6bc6612b30d9e55bc87b3fc2d5ba4c83ea6dc3fb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 16:12:18 -0500 Subject: [PATCH 48/56] chore: remove unused function --- stacks-signer/src/signerdb.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9fcaa1fa1b..c0bd679a54 100644 --- 
a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -871,16 +871,6 @@ where .map_err(DBError::SerializationError) } -#[cfg(test)] -pub fn test_signer_db(db_path: &str) -> SignerDb { - use std::fs; - - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - SignerDb::new(db_path).expect("Failed to create signer db") -} - #[cfg(test)] mod tests { use std::fs; From 1a002800cd21891f5a3b8042b5446af3dba208c4 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 2 Dec 2024 13:34:48 -0800 Subject: [PATCH 49/56] Update changelogs for 3.0.0.0.4 --- CHANGELOG.md | 7 +++++++ stacks-signer/CHANGELOG.md | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 046ca667a0..9466e16447 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,13 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [3.0.0.0.4] + +### Added + +### Changed + +- Load the BurnStateDB implementation from the block's header's `consensus_hash` field ## [3.0.0.0.3] diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 46e25b285f..5170723b32 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [3.0.0.0.4.0] + +### Added + +### Changed + +- Load the BurnStateDB implementation from the block's header's `consensus_hash` field + ## [3.0.0.0.3.0] ### Added From 64fc88b94671ff732b093ee35b7fd22121a60f5c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 15:51:01 -0500 Subject: [PATCH 50/56] chore: downgrade noisy log --- stackslib/src/chainstate/burn/db/sortdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index dd543ac7f7..661342ace5 100644 --- 
a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3986,7 +3986,7 @@ impl<'a> SortitionDBConn<'a> { tip, reward_cycle_id, )?; - info!("Fetching preprocessed reward set"; + debug!("Fetching preprocessed reward set"; "tip_sortition_id" => %tip, "reward_cycle_id" => reward_cycle_id, "prepare_phase_start_sortition_id" => %first_sortition, From c15d6ffc0ea06b77fef2361212969378bd090b79 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 16:12:18 -0500 Subject: [PATCH 51/56] chore: remove unused function --- stacks-signer/src/signerdb.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9fcaa1fa1b..c0bd679a54 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -871,16 +871,6 @@ where .map_err(DBError::SerializationError) } -#[cfg(test)] -pub fn test_signer_db(db_path: &str) -> SignerDb { - use std::fs; - - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - SignerDb::new(db_path).expect("Failed to create signer db") -} - #[cfg(test)] mod tests { use std::fs; From ad7ac2ba0eb84cc08be5aba882f5de646669287a Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 2 Dec 2024 15:51:44 -0800 Subject: [PATCH 52/56] Update per PR comments --- CHANGELOG.md | 2 +- stacks-signer/CHANGELOG.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9466e16447..b7ba323582 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed -- Load the BurnStateDB implementation from the block's header's `consensus_hash` field +- Use the same burn view loader in both block validation and block processing ## [3.0.0.0.3] diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 5170723b32..a332b344ce 100644 --- 
a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -17,7 +17,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed -- Load the BurnStateDB implementation from the block's header's `consensus_hash` field +- Use the same burn view loader in both block validation and block processing ## [3.0.0.0.3.0] From f861f520bc5d1938f1586521c46d86b634e0a51c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 15:51:01 -0500 Subject: [PATCH 53/56] chore: downgrade noisy log --- stackslib/src/chainstate/burn/db/sortdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index cde7ca2d0c..e399121e07 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3987,7 +3987,7 @@ impl<'a> SortitionDBConn<'a> { tip, reward_cycle_id, )?; - info!("Fetching preprocessed reward set"; + debug!("Fetching preprocessed reward set"; "tip_sortition_id" => %tip, "reward_cycle_id" => reward_cycle_id, "prepare_phase_start_sortition_id" => %first_sortition, From edb352b6038d4d04248078a3064ae341dd26e826 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 2 Dec 2024 16:12:18 -0500 Subject: [PATCH 54/56] chore: remove unused function --- stacks-signer/src/signerdb.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9fcaa1fa1b..c0bd679a54 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -871,16 +871,6 @@ where .map_err(DBError::SerializationError) } -#[cfg(test)] -pub fn test_signer_db(db_path: &str) -> SignerDb { - use std::fs; - - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - SignerDb::new(db_path).expect("Failed to create signer db") -} - #[cfg(test)] mod tests { use std::fs; From e1a09dd03672bcab86f2fda146575f20491b0023 Mon Sep 17 
00:00:00 2001 From: Roberto De Ioris Date: Tue, 3 Dec 2024 16:50:12 +0100 Subject: [PATCH 55/56] removed config for 3.1 --- testnet/stacks-node/conf/testnet-follower-conf.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index f44be0172a..80226c5b89 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -78,7 +78,3 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" start_height = 56_457 - -[[burnchain.epochs]] -epoch_name = "3.1" -start_height = 67_568 From c679dccb463cb0a4cb1021661c9cd15c87bcc1fd Mon Sep 17 00:00:00 2001 From: lvyaoting Date: Thu, 5 Dec 2024 23:48:57 +0800 Subject: [PATCH 56/56] chore: remove redundant words in comment Signed-off-by: lvyaoting --- CONTRIBUTING.md | 2 +- clarity/src/vm/docs/mod.rs | 2 +- clarity/src/vm/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8d6c3aabba..b8c63abc2c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -579,7 +579,7 @@ _Do_ document things that are not clear, e.g.: Keep in mind that better variable names can reduce the need for comments, e.g.: - `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks +- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to explain that the inputs are microblocks - `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment # Licensing and contributor license agreement diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 9075c55e71..b23e356dea 100644 --- a/clarity/src/vm/docs/mod.rs +++ 
b/clarity/src/vm/docs/mod.rs @@ -529,7 +529,7 @@ const LOG2_API: SimpleFunctionAPI = SimpleFunctionAPI { snippet: "log2 ${1:expr-1}", signature: "(log2 n)", description: - "Returns the power to which the number 2 must be raised to to obtain the value `n`, rounded + "Returns the power to which the number 2 must be raised to obtain the value `n`, rounded down to the nearest integer. Fails on a negative numbers. ", example: "(log2 u8) ;; Returns u3 diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index d64b207522..ff991f5513 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -575,7 +575,7 @@ pub fn execute(program: &str) -> Result> { ) } -/// Execute for test in in Clarity2, Epoch21, testnet. +/// Execute for test in Clarity2, Epoch21, testnet. #[cfg(any(test, feature = "testing"))] pub fn execute_v2(program: &str) -> Result> { execute_with_parameters(