diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index adab04a104..6dec7efb11 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -133,6 +133,7 @@ jobs: - tests::signer::v0::block_validation_response_timeout - tests::signer::v0::tenure_extend_after_bad_commit - tests::signer::v0::block_proposal_max_age_rejections + - tests::signer::v0::global_acceptance_depends_on_block_announcement - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state @@ -143,6 +144,7 @@ jobs: - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles + - tests::nakamoto_integrations::nakamoto_lockup_events - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index 83cf240815..4be5785f3d 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -37,4 +37,4 @@ jobs: uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} - args: -p libstackerdb -p stacks-signer -p pox-locking --no-deps --tests --all-features -- -D warnings \ No newline at end of file + args: -p libstackerdb -p stacks-signer -p pox-locking -p clarity -p libsigner --no-deps --tests --all-features -- -D warnings \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 387cc1d9eb..d9474493fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502) + ## 
[3.1.0.0.1] ### Added diff --git a/clarity/src/vm/analysis/analysis_db.rs b/clarity/src/vm/analysis/analysis_db.rs index 1bef2834a8..36e1f8c970 100644 --- a/clarity/src/vm/analysis/analysis_db.rs +++ b/clarity/src/vm/analysis/analysis_db.rs @@ -50,11 +50,11 @@ impl<'a> AnalysisDatabase<'a> { self.begin(); let result = f(self).or_else(|e| { self.roll_back() - .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Err(e) })?; self.commit() - .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Ok(result) } @@ -130,9 +130,9 @@ impl<'a> AnalysisDatabase<'a> { .map_err(|_| CheckErrors::Expects("Bad data deserialized from DB".into())) }) .transpose()? - .and_then(|mut x| { + .map(|mut x| { x.canonicalize_types(epoch); - Some(x) + x })) } diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index aa69f650f0..429907b4c6 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -68,7 +68,7 @@ impl std::fmt::Display for Error { } } -impl<'a> ArithmeticOnlyChecker<'a> { +impl ArithmeticOnlyChecker<'_> { pub fn check_contract_cost_eligible(contract_analysis: &mut ContractAnalysis) { let is_eligible = ArithmeticOnlyChecker::run(contract_analysis).is_ok(); contract_analysis.is_cost_contract_eligible = is_eligible; diff --git a/clarity/src/vm/analysis/contract_interface_builder/mod.rs b/clarity/src/vm/analysis/contract_interface_builder/mod.rs index 6d91f33b1c..4e0aa9a0cb 100644 --- a/clarity/src/vm/analysis/contract_interface_builder/mod.rs +++ b/clarity/src/vm/analysis/contract_interface_builder/mod.rs @@ -276,7 +276,7 @@ impl ContractInterfaceFunction { outputs: ContractInterfaceFunctionOutput { type_f: match function_type { FunctionType::Fixed(FixedFunction { returns, .. 
}) => { - ContractInterfaceAtomType::from_type_signature(&returns) + ContractInterfaceAtomType::from_type_signature(returns) } _ => return Err(CheckErrors::Expects( "Contract functions should only have fixed function return types!" @@ -287,7 +287,7 @@ impl ContractInterfaceFunction { }, args: match function_type { FunctionType::Fixed(FixedFunction { args, .. }) => { - ContractInterfaceFunctionArg::from_function_args(&args) + ContractInterfaceFunctionArg::from_function_args(args) } _ => { return Err(CheckErrors::Expects( diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index f86308f8d9..5c3f68c7f9 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -207,10 +207,10 @@ impl CheckErrors { /// Does this check error indicate that the transaction should be /// rejected? pub fn rejectable(&self) -> bool { - match &self { - CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) => true, - _ => false, - } + matches!( + self, + CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) + ) } } @@ -323,7 +323,7 @@ pub fn check_arguments_at_most(expected: usize, args: &[T]) -> Result<(), Che } } -fn formatted_expected_types(expected_types: &Vec) -> String { +fn formatted_expected_types(expected_types: &[TypeSignature]) -> String { let mut expected_types_joined = format!("'{}'", expected_types[0]); if expected_types.len() > 2 { diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index d563dce6e8..8dde917df9 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -17,7 +17,6 @@ pub mod analysis_db; pub mod arithmetic_checker; pub mod contract_interface_builder; -#[allow(clippy::result_large_err)] pub mod errors; pub mod read_only_checker; pub mod trait_checker; @@ -52,7 +51,7 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let mut contract = 
build_ast_with_rules( + let contract = build_ast_with_rules( &contract_identifier, snippet, &mut (), @@ -68,7 +67,7 @@ pub fn mem_type_check( let cost_tracker = LimitedCostTracker::new_free(); match run_analysis( &QualifiedContractIdentifier::transient(), - &mut contract, + &contract, &mut analysis_db, false, cost_tracker, @@ -120,6 +119,7 @@ pub fn type_check( .map_err(|(e, _cost_tracker)| e) } +#[allow(clippy::too_many_arguments)] pub fn run_analysis( contract_identifier: &QualifiedContractIdentifier, expressions: &[SymbolicExpression], diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs index 006b4f0cfe..f60ce11a44 100644 --- a/clarity/src/vm/analysis/read_only_checker/mod.rs +++ b/clarity/src/vm/analysis/read_only_checker/mod.rs @@ -50,7 +50,7 @@ pub struct ReadOnlyChecker<'a, 'b> { clarity_version: ClarityVersion, } -impl<'a, 'b> AnalysisPass for ReadOnlyChecker<'a, 'b> { +impl AnalysisPass for ReadOnlyChecker<'_, '_> { fn run_pass( epoch: &StacksEpochId, contract_analysis: &mut ContractAnalysis, @@ -250,13 +250,12 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { Ok(result) } - /// Checks the native function application of the function named by the - /// string `function` to `args` to determine whether it is read-only - /// compliant. + /// Checks the native function application of the function named by the string `function` + /// to `args` to determine whether it is read-only compliant. /// /// - Returns `None` if there is no native function named `function`. - /// - If there is such a native function, returns `true` iff this function application is - /// read-only. + /// - If there is such a native function, returns `true` iff this function + /// application is read-only. /// /// # Errors /// - Contract parsing errors @@ -414,15 +413,15 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { } } - /// Checks the native and user-defined function applications implied by `expressions`. 
The - /// first expression is used as the function name, and the tail expressions are used as the - /// arguments. + /// Checks the native and user-defined function applications implied by `expressions`. + /// + /// The first expression is used as the function name, and the tail expressions are used as the arguments. /// /// Returns `true` iff the function application is read-only. /// /// # Errors /// - `CheckErrors::NonFunctionApplication` if there is no first expression, or if the first - /// expression is not a `ClarityName`. + /// expression is not a `ClarityName`. /// - `CheckErrors::UnknownFunction` if the first expression does not name a known function. fn check_expression_application_is_read_only( &mut self, diff --git a/clarity/src/vm/analysis/type_checker/contexts.rs b/clarity/src/vm/analysis/type_checker/contexts.rs index 936cc47bc4..356ebf5944 100644 --- a/clarity/src/vm/analysis/type_checker/contexts.rs +++ b/clarity/src/vm/analysis/type_checker/contexts.rs @@ -92,7 +92,7 @@ impl TypeMap { } } -impl<'a> TypingContext<'a> { +impl TypingContext<'_> { pub fn new(epoch: StacksEpochId, clarity_version: ClarityVersion) -> TypingContext<'static> { TypingContext { epoch, diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index b4f6557c2e..36aa2519cc 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -55,7 +55,7 @@ impl FunctionType { | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) + Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) } } } @@ -81,17 +81,14 @@ impl FunctionType { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) + 
Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) } } } } fn is_reserved_word_v3(word: &str) -> bool { - match word { - "block-height" => true, - _ => false, - } + word == "block-height" } /// Is this a reserved word that should trigger an analysis error for the given diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index 2b913a3ac9..77083b88cf 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -239,10 +239,7 @@ impl FunctionType { Ok(TypeSignature::BoolType) } FunctionType::Binary(_, _, _) => { - return Err(CheckErrors::Expects( - "Binary type should not be reached in 2.05".into(), - ) - .into()) + Err(CheckErrors::Expects("Binary type should not be reached in 2.05".into()).into()) } } } @@ -286,8 +283,8 @@ impl FunctionType { )?; } (expected_type, value) => { - if !expected_type.admits(&StacksEpochId::Epoch2_05, &value)? { - let actual_type = TypeSignature::type_of(&value)?; + if !expected_type.admits(&StacksEpochId::Epoch2_05, value)? { + let actual_type = TypeSignature::type_of(value)?; return Err( CheckErrors::TypeError(expected_type.clone(), actual_type).into() ); @@ -438,41 +435,39 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, expected_type: &TypeSignature, ) -> TypeResult { - match (&expr.expr, expected_type) { - ( - LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), - TypeSignature::TraitReferenceType(trait_identifier), - ) => { - let contract_to_check = self - .db - .load_contract(&contract_identifier, &StacksEpochId::Epoch2_05)? - .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; - - let contract_defining_trait = self - .db - .load_contract( - &trait_identifier.contract_identifier, - &StacksEpochId::Epoch2_05, - )? 
- .ok_or(CheckErrors::NoSuchContract( - trait_identifier.contract_identifier.to_string(), - ))?; - - let trait_definition = contract_defining_trait - .get_defined_trait(&trait_identifier.name) - .ok_or(CheckErrors::NoSuchTrait( - trait_identifier.contract_identifier.to_string(), - trait_identifier.name.to_string(), - ))?; - - contract_to_check.check_trait_compliance( + if let ( + LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), + TypeSignature::TraitReferenceType(trait_identifier), + ) = (&expr.expr, expected_type) + { + let contract_to_check = self + .db + .load_contract(contract_identifier, &StacksEpochId::Epoch2_05)? + .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; + + let contract_defining_trait = self + .db + .load_contract( + &trait_identifier.contract_identifier, &StacksEpochId::Epoch2_05, - trait_identifier, - trait_definition, - )?; - return Ok(expected_type.clone()); - } - (_, _) => {} + )? + .ok_or(CheckErrors::NoSuchContract( + trait_identifier.contract_identifier.to_string(), + ))?; + + let trait_definition = contract_defining_trait + .get_defined_trait(&trait_identifier.name) + .ok_or(CheckErrors::NoSuchTrait( + trait_identifier.contract_identifier.to_string(), + trait_identifier.name.to_string(), + ))?; + + contract_to_check.check_trait_compliance( + &StacksEpochId::Epoch2_05, + trait_identifier, + trait_definition, + )?; + return Ok(expected_type.clone()); } let actual_type = self.type_check(expr, context)?; diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 201c307986..3c5ab99029 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -776,8 +776,7 @@ impl TypedNativeFunction { | ReplaceAt | GetStacksBlockInfo | GetTenureInfo => { return Err(CheckErrors::Expects( "Clarity 2+ keywords should not show up in 2.05".into(), - ) - 
.into()) + )) } }; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 7caf775c19..7899b3e27d 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -247,7 +247,7 @@ impl FunctionType { Err(CheckErrors::IncorrectArgumentCount(arg_types.len(), arg_index).into()), ); } - return (None, Ok(None)); + (None, Ok(None)) } // For the following function types, the visitor will just // tell the processor that any results greater than len 1 or 2 @@ -260,7 +260,7 @@ impl FunctionType { Err(CheckErrors::IncorrectArgumentCount(1, arg_index).into()), ); } - return (None, Ok(None)); + (None, Ok(None)) } FunctionType::ArithmeticBinary | FunctionType::ArithmeticComparison @@ -271,7 +271,7 @@ impl FunctionType { Err(CheckErrors::IncorrectArgumentCount(2, arg_index).into()), ); } - return (None, Ok(None)); + (None, Ok(None)) } } } @@ -576,8 +576,8 @@ impl FunctionType { )?; } (expected_type, value) => { - if !expected_type.admits(&StacksEpochId::Epoch21, &value)? { - let actual_type = TypeSignature::type_of(&value)?; + if !expected_type.admits(&StacksEpochId::Epoch21, value)? { + let actual_type = TypeSignature::type_of(value)?; return Err( CheckErrors::TypeError(expected_type.clone(), actual_type).into() ); @@ -854,7 +854,7 @@ fn clarity2_inner_type_check_type( TypeSignature::CallableType(CallableSubtype::Trait(expected_trait_id)), ) => { let contract_to_check = match db - .load_contract(&contract_identifier, &StacksEpochId::Epoch21)? + .load_contract(contract_identifier, &StacksEpochId::Epoch21)? 
{ Some(contract) => { runtime_cost( @@ -1014,7 +1014,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { build_type_map: bool, ) -> TypeChecker<'a, 'b> { Self { - epoch: epoch.clone(), + epoch: *epoch, db, cost_track, contract_context: ContractContext::new(contract_identifier.clone(), *clarity_version), @@ -1240,6 +1240,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { .cloned() } + #[allow(clippy::unnecessary_lazy_evaluations)] fn type_check_define_function( &mut self, signature: &[SymbolicExpression], @@ -1440,41 +1441,39 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, expected_type: &TypeSignature, ) -> TypeResult { - match (&expr.expr, expected_type) { - ( - LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), - TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)), - ) => { - let contract_to_check = self - .db - .load_contract(&contract_identifier, &StacksEpochId::Epoch21)? - .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; - - let contract_defining_trait = self - .db - .load_contract( - &trait_identifier.contract_identifier, - &StacksEpochId::Epoch21, - )? - .ok_or(CheckErrors::NoSuchContract( - trait_identifier.contract_identifier.to_string(), - ))?; - - let trait_definition = contract_defining_trait - .get_defined_trait(&trait_identifier.name) - .ok_or(CheckErrors::NoSuchTrait( - trait_identifier.contract_identifier.to_string(), - trait_identifier.name.to_string(), - ))?; - - contract_to_check.check_trait_compliance( + if let ( + LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), + TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)), + ) = (&expr.expr, expected_type) + { + let contract_to_check = self + .db + .load_contract(contract_identifier, &StacksEpochId::Epoch21)? 
+ .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; + + let contract_defining_trait = self + .db + .load_contract( + &trait_identifier.contract_identifier, &StacksEpochId::Epoch21, - trait_identifier, - &trait_definition, - )?; - return Ok(expected_type.clone()); - } - (_, _) => {} + )? + .ok_or(CheckErrors::NoSuchContract( + trait_identifier.contract_identifier.to_string(), + ))?; + + let trait_definition = contract_defining_trait + .get_defined_trait(&trait_identifier.name) + .ok_or(CheckErrors::NoSuchTrait( + trait_identifier.contract_identifier.to_string(), + trait_identifier.name.to_string(), + ))?; + + contract_to_check.check_trait_compliance( + &StacksEpochId::Epoch21, + trait_identifier, + trait_definition, + )?; + return Ok(expected_type.clone()); } let actual_type = self.type_check(expr, context)?; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs index 9876062241..95fe6f9bf9 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs @@ -7,9 +7,10 @@ use crate::vm::analysis::CheckError; use crate::vm::types::{BufferLength, SequenceSubtype, TypeSignature}; use crate::vm::SymbolicExpression; -/// to-consensus-buff? admits exactly one argument: -/// * the Clarity value to serialize -/// it returns an `(optional (buff x))` where `x` is the maximum possible +/// `to-consensus-buff?` admits exactly one argument: +/// * the Clarity value to serialize +/// +/// It returns an `(optional (buff x))`, where `x` is the maximum possible /// consensus buffer length based on the inferred type of the supplied value. pub fn check_special_to_consensus_buff( checker: &mut TypeChecker, @@ -25,10 +26,11 @@ pub fn check_special_to_consensus_buff( .map_err(CheckError::from) } -/// from-consensus-buff? 
admits exactly two arguments: -/// * a type signature indicating the expected return type `t1` -/// * a buffer (of up to max length) -/// it returns an `(optional t1)` +/// `from-consensus-buff?` admits exactly two arguments: +/// * a type signature indicating the expected return type `t1` +/// * a buffer (of up to max length) +/// +/// It returns an `(optional t1)` pub fn check_special_from_consensus_buff( checker: &mut TypeChecker, args: &[SymbolicExpression], diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index b576277a5b..7769652d25 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -79,7 +79,7 @@ fn check_special_list_cons( }); costs.push(cost); - if let Some(cur_size) = entries_size.clone() { + if let Some(cur_size) = entries_size { entries_size = cur_size.checked_add(checked.size()?); } if let Some(cur_size) = entries_size { @@ -263,6 +263,7 @@ pub fn check_special_tuple_cons( Ok(TypeSignature::TupleType(tuple_signature)) } +#[allow(clippy::unnecessary_lazy_evaluations)] fn check_special_let( checker: &mut TypeChecker, args: &[SymbolicExpression], @@ -1016,7 +1017,7 @@ impl TypedNativeFunction { /// The return type of `principal-destruct` is a Response, in which the success /// and error types are the same. fn parse_principal_basic_type() -> Result { - Ok(TupleTypeSignature::try_from(vec![ + TupleTypeSignature::try_from(vec![ ("version".into(), BUFF_1.clone()), ("hash-bytes".into(), BUFF_20.clone()), ( @@ -1032,7 +1033,7 @@ impl TypedNativeFunction { "FAIL: PrincipalDestruct failed to initialize type signature" .into(), ) - })?) 
+ }) } TypeSignature::ResponseType(Box::new(( parse_principal_basic_type()?.into(), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs index 6a097a8cd6..772bdd32a4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs @@ -274,6 +274,7 @@ pub fn check_special_unwrap_err( inner_unwrap_err(input, checker) } +#[allow(clippy::unnecessary_lazy_evaluations)] fn eval_with_new_binding( body: &SymbolicExpression, bind_name: ClarityName, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 12597c88fa..498b52dcb0 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -3411,7 +3411,7 @@ fn test_trait_args() { }, TraitIdentifier { name: ClarityName::from("trait-bar"), - contract_identifier: contract_identifier, + contract_identifier, }, )]; diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs index a5a551298c..bd611851b6 100644 --- a/clarity/src/vm/ast/definition_sorter/mod.rs +++ b/clarity/src/vm/ast/definition_sorter/mod.rs @@ -420,7 +420,7 @@ impl Graph { let list = self .adjacency_list .get_mut(src_expr_index) - .ok_or_else(|| ParseErrors::InterpreterFailure)?; + .ok_or(ParseErrors::InterpreterFailure)?; list.push(dst_expr_index); Ok(()) } @@ -491,7 +491,7 @@ impl GraphWalker { fn get_cycling_dependencies( &mut self, graph: &Graph, - sorted_indexes: &Vec, + sorted_indexes: &[usize], ) -> Option> { let mut tainted: HashSet = HashSet::new(); diff --git a/clarity/src/vm/ast/errors.rs b/clarity/src/vm/ast/errors.rs index c1a0914b5f..56f8e40f86 100644 --- a/clarity/src/vm/ast/errors.rs +++ b/clarity/src/vm/ast/errors.rs @@ -113,10 +113,7 @@ impl ParseError { } pub fn rejectable(&self) -> bool { - match 
self.err { - ParseErrors::InterpreterFailure => true, - _ => false, - } + matches!(self.err, ParseErrors::InterpreterFailure) } pub fn has_pre_expression(&self) -> bool { diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs index 5c2715e9f7..4cef2e5411 100644 --- a/clarity/src/vm/ast/parser/v1.rs +++ b/clarity/src/vm/ast/parser/v1.rs @@ -219,9 +219,7 @@ fn inner_lex(input: &str, max_nesting: u64) -> ParseResult { if !args.is_empty() { self.probe_for_generics( - args[1..].to_vec().into_iter(), + args[1..].iter().copied(), &mut referenced_traits, false, )?; diff --git a/clarity/src/vm/ast/types.rs b/clarity/src/vm/ast/types.rs index aedd31eae3..2071130131 100644 --- a/clarity/src/vm/ast/types.rs +++ b/clarity/src/vm/ast/types.rs @@ -96,6 +96,10 @@ impl PreExpressionsDrain { pub fn len(&self) -> usize { self.len } + + pub fn is_empty(&self) -> bool { + self.len == 0 + } } impl Iterator for PreExpressionsDrain { diff --git a/clarity/src/vm/callables.rs b/clarity/src/vm/callables.rs index 9cd991ec97..4691025a8d 100644 --- a/clarity/src/vm/callables.rs +++ b/clarity/src/vm/callables.rs @@ -37,6 +37,7 @@ use crate::vm::types::{ }; use crate::vm::{eval, Environment, LocalContext, Value}; +#[allow(clippy::type_complexity)] pub enum CallableType { UserFunction(DefinedFunction), NativeFunction(&'static str, NativeHandle, ClarityCostFunction), @@ -244,7 +245,11 @@ impl DefinedFunction { ) .into()); } - if let Some(_) = context.variables.insert(name.clone(), value.clone()) { + if context + .variables + .insert(name.clone(), value.clone()) + .is_some() + { return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into()); } } @@ -286,7 +291,7 @@ impl DefinedFunction { } } - if let Some(_) = context.variables.insert(name.clone(), cast_value) { + if context.variables.insert(name.clone(), cast_value).is_some() { return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into()); } } @@ -323,7 +328,7 @@ impl DefinedFunction { self.name.to_string(), ))?; - 
let args = self.arg_types.iter().map(|a| a.clone()).collect(); + let args = self.arg_types.to_vec(); if !expected_sig.check_args_trait_compliance(epoch, args)? { return Err( CheckErrors::BadTraitImplementation(trait_name, self.name.to_string()).into(), @@ -393,16 +398,12 @@ impl CallableType { impl FunctionIdentifier { fn new_native_function(name: &str) -> FunctionIdentifier { let identifier = format!("_native_:{}", name); - FunctionIdentifier { - identifier: identifier, - } + FunctionIdentifier { identifier } } fn new_user_function(name: &str, context: &str) -> FunctionIdentifier { let identifier = format!("{}:{}", context, name); - FunctionIdentifier { - identifier: identifier, - } + FunctionIdentifier { identifier } } } @@ -636,12 +637,9 @@ mod test { let cast_list = clarity2_implicit_cast(&list_opt_ty, &list_opt_contract).unwrap(); let items = cast_list.expect_list().unwrap(); for item in items { - match item.expect_optional().unwrap() { - Some(cast_opt) => { - let cast_trait = cast_opt.expect_callable().unwrap(); - assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); - } - None => (), + if let Some(cast_opt) = item.expect_optional().unwrap() { + let cast_trait = cast_opt.expect_callable().unwrap(); + assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); } } diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 11145ab11a..1e503d1425 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -113,6 +113,7 @@ pub trait ClarityConnection { self.with_clarity_db_readonly_owned(|mut db| (to_do(&mut db), db)) } + #[allow(clippy::too_many_arguments)] fn with_readonly_clarity_env( &mut self, mainnet: bool, @@ -151,12 +152,15 @@ pub trait ClarityConnection { pub trait TransactionConnection: ClarityConnection { /// Do something with this connection's Clarity environment that can be aborted - /// with `abort_call_back`. + /// with `abort_call_back`. 
+ /// /// This returns the return value of `to_do`: - /// * the generic term `R` - /// * the asset changes during `to_do` in an `AssetMap` - /// * the Stacks events during the transaction - /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort + /// * the generic term `R` + /// * the asset changes during `to_do` in an `AssetMap` + /// * the Stacks events during the transaction + /// + /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort. + /// /// If `to_do` returns an `Err` variant, then the changes are aborted. fn with_abort_callback( &mut self, @@ -197,14 +201,14 @@ pub trait TransactionConnection: ClarityConnection { ast_rules, ); - let mut contract_ast = match ast_result { + let contract_ast = match ast_result { Ok(x) => x, Err(e) => return (cost_track, Err(e.into())), }; let result = analysis::run_analysis( identifier, - &mut contract_ast.expressions, + &contract_ast.expressions, db, false, cost_track, @@ -272,7 +276,7 @@ pub trait TransactionConnection: ClarityConnection { }, |_, _| false, ) - .and_then(|(value, assets, events, _)| Ok((value, assets, events))) + .map(|(value, assets, events, _)| (value, assets, events)) } /// Execute a contract call in the current block. 
diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index eedc2857fa..b3eb8c9fe5 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -180,7 +180,7 @@ impl AssetMap { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct EventBatch { pub events: Vec, } @@ -243,6 +243,12 @@ pub type StackTrace = Vec; pub const TRANSIENT_CONTRACT_NAME: &str = "__transient"; +impl Default for AssetMap { + fn default() -> Self { + Self::new() + } +} + impl AssetMap { pub fn new() -> AssetMap { AssetMap { @@ -276,11 +282,11 @@ impl AssetMap { asset: &AssetIdentifier, amount: u128, ) -> Result { - let current_amount = match self.token_map.get(principal) { - Some(principal_map) => *principal_map.get(asset).unwrap_or(&0), - None => 0, - }; - + let current_amount = self + .token_map + .get(principal) + .and_then(|x| x.get(asset)) + .unwrap_or(&0); current_amount .checked_add(amount) .ok_or(RuntimeErrorType::ArithmeticOverflow.into()) @@ -393,17 +399,14 @@ impl AssetMap { for (principal, stx_amount) in self.stx_map.drain() { let output_map = map.entry(principal.clone()).or_default(); - output_map.insert( - AssetIdentifier::STX(), - AssetMapEntry::STX(stx_amount as u128), - ); + output_map.insert(AssetIdentifier::STX(), AssetMapEntry::STX(stx_amount)); } for (principal, stx_burned_amount) in self.burn_map.drain() { let output_map = map.entry(principal.clone()).or_default(); output_map.insert( AssetIdentifier::STX_burned(), - AssetMapEntry::Burn(stx_burned_amount as u128), + AssetMapEntry::Burn(stx_burned_amount), ); } @@ -414,7 +417,7 @@ impl AssetMap { } } - return map; + map } pub fn get_stx(&self, principal: &PrincipalData) -> Option { @@ -440,13 +443,8 @@ impl AssetMap { principal: &PrincipalData, asset_identifier: &AssetIdentifier, ) -> Option { - match self.token_map.get(principal) { - Some(ref assets) => match assets.get(asset_identifier) { - Some(value) => Some(*value), - None => None, - }, - None => None, - } + let assets = 
self.token_map.get(principal)?; + assets.get(asset_identifier).copied() } pub fn get_nonfungible_tokens( @@ -454,13 +452,8 @@ impl AssetMap { principal: &PrincipalData, asset_identifier: &AssetIdentifier, ) -> Option<&Vec> { - match self.asset_map.get(principal) { - Some(ref assets) => match assets.get(asset_identifier) { - Some(values) => Some(values), - None => None, - }, - None => None, - } + let assets = self.asset_map.get(principal)?; + assets.get(asset_identifier) } } @@ -469,23 +462,23 @@ impl fmt::Display for AssetMap { write!(f, "[")?; for (principal, principal_map) in self.token_map.iter() { for (asset, amount) in principal_map.iter() { - write!(f, "{} spent {} {}\n", principal, amount, asset)?; + writeln!(f, "{principal} spent {amount} {asset}")?; } } for (principal, principal_map) in self.asset_map.iter() { for (asset, transfer) in principal_map.iter() { - write!(f, "{} transfered [", principal)?; + write!(f, "{principal} transfered [")?; for t in transfer { - write!(f, "{}, ", t)?; + write!(f, "{t}, ")?; } - write!(f, "] {}\n", asset)?; + writeln!(f, "] {asset}")?; } } for (principal, stx_amount) in self.stx_map.iter() { - write!(f, "{} spent {} microSTX\n", principal, stx_amount)?; + writeln!(f, "{principal} spent {stx_amount} microSTX")?; } for (principal, stx_burn_amount) in self.burn_map.iter() { - write!(f, "{} burned {} microSTX\n", principal, stx_burn_amount)?; + writeln!(f, "{principal} burned {stx_burn_amount} microSTX")?; } write!(f, "]") } @@ -493,7 +486,7 @@ impl fmt::Display for AssetMap { impl EventBatch { pub fn new() -> EventBatch { - EventBatch { events: vec![] } + EventBatch::default() } } @@ -614,12 +607,11 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { self.begin(); let result = { - let mut initial_context = initial_context.unwrap_or(ContractContext::new( + let initial_context = initial_context.unwrap_or(ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity1, )); - let mut exec_env = - 
self.get_exec_environment(Some(sender), sponsor, &mut initial_context); + let mut exec_env = self.get_exec_environment(Some(sender), sponsor, &initial_context); f(&mut exec_env) }; @@ -737,7 +729,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { let mut snapshot = env .global_context .database - .get_stx_balance_snapshot(&recipient) + .get_stx_balance_snapshot(recipient) .unwrap(); snapshot.credit(amount).unwrap(); @@ -949,7 +941,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { program: &str, rules: ast::ASTRules, ) -> Result { - let clarity_version = self.contract_context.clarity_version.clone(); + let clarity_version = self.contract_context.clarity_version; let parsed = ast::build_ast_with_rules( contract_identifier, @@ -961,7 +953,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { )? .expressions; - if parsed.len() < 1 { + if parsed.is_empty() { return Err(RuntimeErrorType::ParseError( "Expected a program of at least length 1".to_string(), ) @@ -981,7 +973,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let result = { let mut nested_env = Environment::new( - &mut self.global_context, + self.global_context, &contract.contract_context, self.call_stack, self.sender.clone(), @@ -1008,7 +1000,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { pub fn eval_raw_with_rules(&mut self, program: &str, rules: ast::ASTRules) -> Result { let contract_id = QualifiedContractIdentifier::transient(); - let clarity_version = self.contract_context.clarity_version.clone(); + let clarity_version = self.contract_context.clarity_version; let parsed = ast::build_ast_with_rules( &contract_id, @@ -1020,15 +1012,14 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { )? 
.expressions; - if parsed.len() < 1 { + if parsed.is_empty() { return Err(RuntimeErrorType::ParseError( "Expected a program of at least length 1".to_string(), ) .into()); } let local_context = LocalContext::new(); - let result = { eval(&parsed[0], self, &local_context) }; - result + eval(&parsed[0], self, &local_context) } #[cfg(any(test, feature = "testing"))] @@ -1150,7 +1141,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { Ok(value) => { if let Some(handler) = self.global_context.database.get_cc_special_cases_handler() { handler( - &mut self.global_context, + self.global_context, self.sender.as_ref(), self.sponsor.as_ref(), contract_identifier, @@ -1185,7 +1176,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let result = { let mut nested_env = Environment::new( - &mut self.global_context, + self.global_context, next_contract_context, self.call_stack, self.sender.clone(), @@ -1240,7 +1231,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_content: &str, ast_rules: ASTRules, ) -> Result<()> { - let clarity_version = self.contract_context.clarity_version.clone(); + let clarity_version = self.contract_context.clarity_version; let contract_ast = ast::build_ast_with_rules( &contract_identifier, @@ -1254,7 +1245,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_identifier, clarity_version, &contract_ast, - &contract_content, + contract_content, ) } @@ -1299,7 +1290,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_identifier.clone(), contract_content, self.sponsor.clone(), - &mut self.global_context, + self.global_context, contract_version, ); self.drop_memory(memory_use)?; @@ -1561,7 +1552,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { } pub fn is_top_level(&self) -> bool { - self.asset_maps.len() == 0 + self.asset_maps.is_empty() } fn get_asset_map(&mut self) -> Result<&mut AssetMap> { @@ -1841,6 +1832,12 @@ impl ContractContext { } } +impl Default for LocalContext<'_> { + fn default() -> Self { + 
Self::new() + } +} + impl<'a> LocalContext<'a> { pub fn new() -> LocalContext<'a> { LocalContext { @@ -1898,6 +1895,12 @@ impl<'a> LocalContext<'a> { } } +impl Default for CallStack { + fn default() -> Self { + Self::new() + } +} + impl CallStack { pub fn new() -> CallStack { CallStack { @@ -1946,10 +1949,10 @@ impl CallStack { } Ok(()) } else { - return Err(InterpreterError::InterpreterError( + Err(InterpreterError::InterpreterError( "Tried to remove item from empty call stack.".to_string(), ) - .into()); + .into()) } } @@ -2149,8 +2152,8 @@ mod test { // not simply rollback the tx and squelch the error as includable. let e = env .stx_transfer( - &PrincipalData::from(u1.clone()), - &PrincipalData::from(u2.clone()), + &PrincipalData::from(u1), + &PrincipalData::from(u2), 1000, &BuffData::empty(), ) diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 2113ec6479..a3c7fa7140 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -46,9 +46,9 @@ type Result = std::result::Result; pub const CLARITY_MEMORY_LIMIT: u64 = 100 * 1000 * 1000; // TODO: factor out into a boot lib? -pub const COSTS_1_NAME: &'static str = "costs"; -pub const COSTS_2_NAME: &'static str = "costs-2"; -pub const COSTS_3_NAME: &'static str = "costs-3"; +pub const COSTS_1_NAME: &str = "costs"; +pub const COSTS_2_NAME: &str = "costs-2"; +pub const COSTS_3_NAME: &str = "costs-3"; lazy_static! 
{ static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = { @@ -254,6 +254,7 @@ pub struct TrackerData { } #[derive(Clone)] +#[allow(clippy::large_enum_variant)] pub enum LimitedCostTracker { Limited(TrackerData), Free, @@ -334,11 +335,7 @@ pub enum CostErrors { impl CostErrors { fn rejectable(&self) -> bool { - match self { - CostErrors::InterpreterFailure => true, - CostErrors::Expect(_) => true, - _ => false, - } + matches!(self, CostErrors::InterpreterFailure | CostErrors::Expect(_)) } } @@ -650,7 +647,7 @@ fn load_cost_functions( continue; } for arg in &cost_func_type.args { - if &arg.signature != &TypeSignature::UIntType { + if arg.signature != TypeSignature::UIntType { warn!("Confirmed cost proposal invalid: contains non uint argument"; "confirmed_proposal_id" => confirmed_proposal, ); @@ -872,7 +869,7 @@ impl TrackerData { .map_err(|e| CostErrors::Expect(e.to_string()))?; } - return Ok(()); + Ok(()) } } @@ -884,7 +881,7 @@ impl LimitedCostTracker { } } #[allow(clippy::panic)] - pub fn set_total(&mut self, total: ExecutionCost) -> () { + pub fn set_total(&mut self, total: ExecutionCost) { // used by the miner to "undo" the cost of a transaction when trying to pack a block. match self { Self::Limited(ref mut data) => data.total = total, @@ -982,8 +979,7 @@ fn compute_cost( .cost_contracts .get_mut(&cost_function_reference.contract_id) .ok_or(CostErrors::CostComputationFailed(format!( - "CostFunction not found: {}", - &cost_function_reference + "CostFunction not found: {cost_function_reference}" )))?; let mut program = vec![SymbolicExpression::atom( @@ -1050,7 +1046,7 @@ impl CostTracker for LimitedCostTracker { match self { Self::Free => { // tracker is free, return zero! 
- return Ok(ExecutionCost::ZERO); + Ok(ExecutionCost::ZERO) } Self::Limited(ref mut data) => { if cost_function == ClarityCostFunction::Unimplemented { @@ -1062,8 +1058,7 @@ impl CostTracker for LimitedCostTracker { .cost_function_references .get(&cost_function) .ok_or(CostErrors::CostComputationFailed(format!( - "CostFunction not defined: {}", - &cost_function + "CostFunction not defined: {cost_function}" )))? .clone(); @@ -1177,20 +1172,16 @@ pub trait CostOverflowingMath { impl CostOverflowingMath for u64 { fn cost_overflow_mul(self, other: u64) -> Result { - self.checked_mul(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_mul(other).ok_or(CostErrors::CostOverflow) } fn cost_overflow_add(self, other: u64) -> Result { - self.checked_add(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_add(other).ok_or(CostErrors::CostOverflow) } fn cost_overflow_sub(self, other: u64) -> Result { - self.checked_sub(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_sub(other).ok_or(CostErrors::CostOverflow) } fn cost_overflow_div(self, other: u64) -> Result { - self.checked_div(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_div(other).ok_or(CostErrors::CostOverflow) } } @@ -1207,7 +1198,7 @@ impl ExecutionCost { pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 { // max() should always return because there are > 0 elements #[allow(clippy::expect_used)] - [ + *[ numerator.runtime / cmp::max(1, self.runtime / 100), numerator.write_length / cmp::max(1, self.write_length / 100), numerator.write_count / cmp::max(1, self.write_count / 100), @@ -1217,7 +1208,6 @@ impl ExecutionCost { .iter() .max() .expect("BUG: should find maximum") - .clone() } /// Returns the dot product of this execution cost with `resolution`/block_limit diff --git a/clarity/src/vm/coverage.rs b/clarity/src/vm/coverage.rs index be8a647e9c..862c035f98 100644 --- a/clarity/src/vm/coverage.rs +++ b/clarity/src/vm/coverage.rs @@ 
-26,6 +26,12 @@ struct CoverageFileInfo { coverage: HashMap>, } +impl Default for CoverageReporter { + fn default() -> Self { + Self::new() + } +} + impl CoverageReporter { pub fn new() -> CoverageReporter { CoverageReporter { diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4f6f3f7781..cbb8bcb4de 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -82,7 +82,7 @@ impl TryFrom<&str> for StoreType { fn try_from(value: &str) -> core::result::Result { use self::StoreType::*; - let hex_value = u8::from_str_radix(value, 10).map_err(|e| e.to_string())?; + let hex_value = value.parse::().map_err(|e| e.to_string())?; match hex_value { 0x00 => Ok(DataMap), 0x01 => Ok(Variable), @@ -506,7 +506,7 @@ impl<'a> ClarityDatabase<'a> { } pub fn put_data(&mut self, key: &str, value: &T) -> Result<()> { - self.store.put_data(&key, &value.serialize()) + self.store.put_data(key, &value.serialize()) } /// Like `put()`, but returns the serialized byte size of the stored value @@ -516,7 +516,7 @@ impl<'a> ClarityDatabase<'a> { value: &T, ) -> Result { let serialized = value.serialize(); - self.store.put_data(&key, &serialized)?; + self.store.put_data(key, &serialized)?; Ok(byte_len_of_serialization(&serialized)) } @@ -568,7 +568,7 @@ impl<'a> ClarityDatabase<'a> { let size = serialized.len() as u64; let hex_serialized = to_hex(serialized.as_slice()); - self.store.put_data(&key, &hex_serialized)?; + self.store.put_data(key, &hex_serialized)?; Ok(pre_sanitized_size.unwrap_or(size)) } @@ -755,16 +755,14 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result> { - let x_opt = self - .store + self.store .get_metadata(contract_identifier, AnalysisDatabase::storage_key()) // treat NoSuchContract error thrown by get_metadata as an Option::None -- // the analysis will propagate that as a CheckError anyways. 
- .ok(); - match x_opt.flatten() { - None => Ok(None), - Some(x) => ContractAnalysis::deserialize(&x).map(|out| Some(out)), - } + .ok() + .flatten() + .map(|x| ContractAnalysis::deserialize(&x)) + .transpose() } pub fn get_contract_size( @@ -978,7 +976,7 @@ impl<'a> ClarityDatabase<'a> { // Get block information -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { /// Returns the ID of a *Stacks* block, by a *Stacks* block height. /// /// Fails if `block_height` >= the "currently" under construction Stacks block height. @@ -1066,7 +1064,7 @@ impl<'a> ClarityDatabase<'a> { let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; Ok(self .headers_db - .get_stacks_height_for_tenure_height(&query_tip, tenure_height.into())) + .get_stacks_height_for_tenure_height(&query_tip, tenure_height)) } /// Get the last-known burnchain block height. @@ -1158,7 +1156,7 @@ impl<'a> ClarityDatabase<'a> { /// This is the highest Stacks block in this fork whose consensus hash is known. /// 3. Resolve the parent StacksBlockId to its consensus hash /// 4. Resolve the consensus hash to the associated SortitionId - /// In Epoch 3+: + /// In Epoch 3+: /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { if !self @@ -1276,8 +1274,7 @@ impl<'a> ClarityDatabase<'a> { InterpreterError::Expect( "FATAL: no winning burnchain token spend record for block".into(), ) - })? - .into()) + })?) } pub fn get_miner_spend_total(&mut self, block_height: u32) -> Result { @@ -1294,8 +1291,7 @@ impl<'a> ClarityDatabase<'a> { InterpreterError::Expect( "FATAL: no total burnchain token spend record for block".into(), ) - })? - .into()) + })?) 
} pub fn get_block_reward(&mut self, block_height: u32) -> Result> { @@ -1316,7 +1312,6 @@ impl<'a> ClarityDatabase<'a> { let reward: u128 = self .headers_db .get_tokens_earned_for_block(&id_bhh, &epoch) - .map(|x| x.into()) .ok_or_else(|| { InterpreterError::Expect("FATAL: matured block has no recorded reward".into()) })?; @@ -1337,7 +1332,7 @@ impl<'a> ClarityDatabase<'a> { // poison-microblock -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn make_microblock_pubkey_height_key(pubkey_hash: &Hash160) -> String { format!("microblock-pubkey-hash::{}", pubkey_hash) } @@ -1360,6 +1355,7 @@ impl<'a> ClarityDatabase<'a> { self.store.get_cc_special_cases_handler() } + #[allow(clippy::unnecessary_fallible_conversions)] pub fn insert_microblock_poison( &mut self, height: u32, @@ -1451,11 +1447,11 @@ impl<'a> ClarityDatabase<'a> { if let PrincipalData::Standard(principal_data) = reporter_principal { Ok((principal_data, seq)) } else { - return Err(InterpreterError::Expect( + Err(InterpreterError::Expect( "BUG: poison-microblock report principal is not a standard principal" .into(), ) - .into()); + .into()) } }) .transpose() @@ -1472,7 +1468,7 @@ fn map_no_contract_as_none(res: Result>) -> Result> { } // Variable Functions... 
-impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn create_variable( &mut self, contract_identifier: &QualifiedContractIdentifier, @@ -1605,7 +1601,7 @@ impl<'a> ClarityDatabase<'a> { } // Data Map Functions -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn create_map( &mut self, contract_identifier: &QualifiedContractIdentifier, @@ -1951,7 +1947,7 @@ impl<'a> ClarityDatabase<'a> { // Asset Functions -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn create_fungible_token( &mut self, contract_identifier: &QualifiedContractIdentifier, @@ -2294,19 +2290,13 @@ impl<'a> ClarityDatabase<'a> { let key = ClarityDatabase::make_key_for_account_balance(principal); debug!("Fetching account balance"; "principal" => %principal.to_string()); let result = self.get_data(&key)?; - Ok(match result { - None => STXBalance::zero(), - Some(balance) => balance, - }) + Ok(result.unwrap_or_default()) } pub fn get_account_nonce(&mut self, principal: &PrincipalData) -> Result { let key = ClarityDatabase::make_key_for_account_nonce(principal); let result = self.get_data(&key)?; - Ok(match result { - None => 0, - Some(nonce) => nonce, - }) + Ok(result.unwrap_or_default()) } pub fn set_account_nonce(&mut self, principal: &PrincipalData, nonce: u64) -> Result<()> { @@ -2316,7 +2306,7 @@ impl<'a> ClarityDatabase<'a> { } // access burnchain state -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { self.burn_state_db.get_burn_block_height(sortition_id) } @@ -2328,7 +2318,7 @@ impl<'a> ClarityDatabase<'a> { } pub fn get_stacks_epoch_for_block(&self, id_bhh: &StacksBlockId) -> Result { - let burn_block = self.get_burnchain_block_height(&id_bhh).ok_or_else(|| { + let burn_block = self.get_burnchain_block_height(id_bhh).ok_or_else(|| { InterpreterError::Expect(format!( "FATAL: no burnchain block height found for Stacks block {}", id_bhh diff --git 
a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index c0b75be83f..eecbe092ea 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -76,11 +76,11 @@ fn rollback_value_check(value: &String, check: &RollbackValueCheck) { assert_eq!(value, check) } #[cfg(feature = "rollback_value_check")] -fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &String) +fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &str) where T: Eq + Hash + Clone, { - edits.push((key, value.clone())); + edits.push((key, value.to_owned())); } // this function is used to check the lookup map when committing at the "bottom" of the // wrapper -- i.e., when committing to the underlying store. @@ -88,7 +88,7 @@ where fn rollback_check_pre_bottom_commit( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap>, -) -> Vec<(T, String)> +) -> Result, InterpreterError> where T: Eq + Hash + Clone, { @@ -96,10 +96,10 @@ where edit_history.reverse(); } for (key, value) in edits.iter() { - rollback_lookup_map(key, &value, lookup_map); + let _ = rollback_lookup_map(key, value, lookup_map); } assert!(lookup_map.is_empty()); - edits + Ok(edits) } /// Result structure for fetched values from the @@ -283,7 +283,7 @@ impl<'a> RollbackWrapper<'a> { // stack is empty, committing to the backing store let all_edits = rollback_check_pre_bottom_commit(last_item.edits, &mut self.lookup_map)?; - if all_edits.len() > 0 { + if !all_edits.is_empty() { self.store.put_all_data(all_edits).map_err(|e| { InterpreterError::Expect(format!( "ERROR: Failed to commit data to sql store: {e:?}" @@ -295,7 +295,7 @@ impl<'a> RollbackWrapper<'a> { last_item.metadata_edits, &mut self.metadata_lookup_map, )?; - if metadata_edits.len() > 0 { + if !metadata_edits.is_empty() { self.store.put_all_metadata(metadata_edits).map_err(|e| { InterpreterError::Expect(format!( "ERROR: Failed to 
commit data to sql store: {e:?}" @@ -316,12 +316,12 @@ fn inner_put_data( ) where T: Eq + Hash + Clone, { - let key_edit_deque = lookup_map.entry(key.clone()).or_insert_with(|| Vec::new()); + let key_edit_deque = lookup_map.entry(key.clone()).or_default(); rollback_edits_push(edits, key, &value); key_edit_deque.push(value); } -impl<'a> RollbackWrapper<'a> { +impl RollbackWrapper<'_> { pub fn put_data(&mut self, key: &str, value: &str) -> InterpreterResult<()> { let current = self.stack.last_mut().ok_or_else(|| { InterpreterError::Expect( @@ -329,12 +329,13 @@ impl<'a> RollbackWrapper<'a> { ) })?; - Ok(inner_put_data( + inner_put_data( &mut self.lookup_map, &mut current.edits, key.to_string(), value.to_string(), - )) + ); + Ok(()) } /// @@ -347,13 +348,12 @@ impl<'a> RollbackWrapper<'a> { bhh: StacksBlockId, query_pending_data: bool, ) -> InterpreterResult { - self.store.set_block_hash(bhh).map(|x| { + self.store.set_block_hash(bhh).inspect(|_| { // use and_then so that query_pending_data is only set once set_block_hash succeeds // this doesn't matter in practice, because a set_block_hash failure always aborts // the transaction with a runtime error (destroying its environment), but it's much // better practice to do this, especially if the abort behavior changes in the future. self.query_pending_data = query_pending_data; - x }) } @@ -501,12 +501,13 @@ impl<'a> RollbackWrapper<'a> { let metadata_key = (contract.clone(), key.to_string()); - Ok(inner_put_data( + inner_put_data( &mut self.metadata_lookup_map, &mut current.metadata_edits, metadata_key, value.to_string(), - )) + ); + Ok(()) } // Throws a NoSuchContract error if contract doesn't exist, diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index d16d944d55..a9c2182806 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
- use hashbrown::HashMap; #[cfg(feature = "canonical")] pub use sqlite::MemoryBackingStore; diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index e4fab929bd..b88420ff6a 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -257,7 +257,7 @@ impl ClaritySerializable for STXBalance { impl ClarityDeserializable for STXBalance { fn deserialize(input: &str) -> Result { - let bytes = hex_bytes(&input).map_err(|_| { + let bytes = hex_bytes(input).map_err(|_| { InterpreterError::Expect("STXBalance deserialization: failed decoding bytes.".into()) })?; let result = if bytes.len() == STXBalance::unlocked_and_v1_size { @@ -555,7 +555,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ); } - if !(self.balance.amount_locked() <= new_total_locked) { + if self.balance.amount_locked() > new_total_locked { return Err(InterpreterError::Expect( "FATAL: account must lock more after `increase_lock_v2`".into(), ) @@ -623,7 +623,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { } // caller needs to have checked this - if !(amount_to_lock > 0) { + if amount_to_lock == 0 { return Err(InterpreterError::Expect("BUG: cannot lock 0 tokens".into()).into()); } @@ -980,6 +980,12 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { } } +impl Default for STXBalance { + fn default() -> Self { + STXBalance::zero() + } +} + // NOTE: do _not_ add mutation methods to this struct. Put them in STXBalanceSnapshot! 
impl STXBalance { pub const unlocked_and_v1_size: usize = 40; diff --git a/clarity/src/vm/diagnostic.rs b/clarity/src/vm/diagnostic.rs index 81939237d7..164875151f 100644 --- a/clarity/src/vm/diagnostic.rs +++ b/clarity/src/vm/diagnostic.rs @@ -66,24 +66,26 @@ impl Diagnostic { impl fmt::Display for Diagnostic { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.level)?; - if self.spans.len() == 1 { - write!( + match self.spans.len().cmp(&1) { + std::cmp::Ordering::Equal => write!( f, " (line {}, column {})", self.spans[0].start_line, self.spans[0].start_column - )?; - } else if self.spans.len() > 1 { - let lines: Vec = self - .spans - .iter() - .map(|s| format!("line: {}", s.start_line)) - .collect(); - write!(f, " ({})", lines.join(", "))?; + )?, + std::cmp::Ordering::Greater => { + let lines: Vec = self + .spans + .iter() + .map(|s| format!("line: {}", s.start_line)) + .collect(); + write!(f, " ({})", lines.join(", "))?; + } + _ => {} } write!(f, ": {}.", &self.message)?; if let Some(suggestion) = &self.suggestion { - write!(f, "\n{}", suggestion)?; + write!(f, "\n{suggestion}")?; } - write!(f, "\n") + writeln!(f) } } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index b23e356dea..8c9a48f006 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -814,19 +814,19 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String { FunctionType::Binary(left, right, ref out_sig) => match out_sig { FunctionReturnsSignature::Fixed(out_type) => format!("{}", out_type), FunctionReturnsSignature::TypeOfArgAtPosition(pos) => { - let arg_sig: &FunctionArgSignature; - match pos { - 0 => arg_sig = left, - 1 => arg_sig = right, - _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary can only handle two arguments, zero-indexed (0 or 1).") - } + let arg_sig = match pos { + 0 => left, + 1 => right, + _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary 
can only handle two arguments, zero-indexed (0 or 1).") + }; + match arg_sig { - FunctionArgSignature::Single(arg_type) => format!("{}", arg_type), - FunctionArgSignature::Union(arg_types) => { - let out_types: Vec = - arg_types.iter().map(|x| format!("{}", x)).collect(); - out_types.join(" | ") - } + FunctionArgSignature::Single(arg_type) => arg_type.to_string(), + FunctionArgSignature::Union(arg_types) => arg_types + .iter() + .map(ToString::to_string) + .collect::>() + .join(" | "), } } }, @@ -835,15 +835,12 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String { pub fn get_signature(function_name: &str, function_type: &FunctionType) -> Option { if let FunctionType::Fixed(FixedFunction { ref args, .. }) = function_type { - let in_names: Vec = args - .iter() - .map(|x| format!("{}", x.name.as_str())) - .collect(); + let in_names: Vec = args.iter().map(|x| x.name.to_string()).collect(); let arg_examples = in_names.join(" "); Some(format!( "({}{}{})", function_name, - if arg_examples.len() == 0 { "" } else { " " }, + if arg_examples.is_empty() { "" } else { " " }, arg_examples )) } else { @@ -860,7 +857,7 @@ fn make_for_simple_native( ) -> FunctionAPI { let (input_type, output_type) = { if let TypedNativeFunction::Simple(SimpleNativeFunction(function_type)) = - TypedNativeFunction::type_native_function(&function) + TypedNativeFunction::type_native_function(function) .expect("Failed to type a native function") { let input_type = get_input_type_string(&function_type); @@ -877,8 +874,8 @@ fn make_for_simple_native( FunctionAPI { name: api.name.map_or(name, |x| x.to_string()), snippet: api.snippet.to_string(), - input_type: input_type, - output_type: output_type, + input_type, + output_type, signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), @@ -2526,35 +2523,35 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { use crate::vm::functions::NativeFunctions::*; let name 
= function.get_name(); match function { - Add => make_for_simple_native(&ADD_API, &function, name), - ToUInt => make_for_simple_native(&TO_UINT_API, &function, name), - ToInt => make_for_simple_native(&TO_INT_API, &function, name), - Subtract => make_for_simple_native(&SUB_API, &function, name), - Multiply => make_for_simple_native(&MUL_API, &function, name), - Divide => make_for_simple_native(&DIV_API, &function, name), - BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &function, name), - BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &function, name), - BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &function, name), - BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &function, name), - IsStandard => make_for_simple_native(&IS_STANDARD_API, &function, name), - PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, &function, name), - PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, &function), - StringToInt => make_for_simple_native(&STRING_TO_INT_API, &function, name), - StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &function, name), - IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &function, name), - IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &function, name), - CmpGeq => make_for_simple_native(&GEQ_API, &function, name), - CmpLeq => make_for_simple_native(&LEQ_API, &function, name), - CmpLess => make_for_simple_native(&LESS_API, &function, name), - CmpGreater => make_for_simple_native(&GREATER_API, &function, name), - Modulo => make_for_simple_native(&MOD_API, &function, name), - Power => make_for_simple_native(&POW_API, &function, name), - Sqrti => make_for_simple_native(&SQRTI_API, &function, name), - Log2 => make_for_simple_native(&LOG2_API, &function, name), - BitwiseXor => make_for_simple_native(&XOR_API, &function, name), - And => make_for_simple_native(&AND_API, &function, name), - Or => make_for_simple_native(&OR_API, &function, name), - 
Not => make_for_simple_native(&NOT_API, &function, name), + Add => make_for_simple_native(&ADD_API, function, name), + ToUInt => make_for_simple_native(&TO_UINT_API, function, name), + ToInt => make_for_simple_native(&TO_INT_API, function, name), + Subtract => make_for_simple_native(&SUB_API, function, name), + Multiply => make_for_simple_native(&MUL_API, function, name), + Divide => make_for_simple_native(&DIV_API, function, name), + BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, function, name), + BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, function, name), + BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, function, name), + BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, function, name), + IsStandard => make_for_simple_native(&IS_STANDARD_API, function, name), + PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, function, name), + PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, function), + StringToInt => make_for_simple_native(&STRING_TO_INT_API, function, name), + StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, function, name), + IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, function, name), + IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, function, name), + CmpGeq => make_for_simple_native(&GEQ_API, function, name), + CmpLeq => make_for_simple_native(&LEQ_API, function, name), + CmpLess => make_for_simple_native(&LESS_API, function, name), + CmpGreater => make_for_simple_native(&GREATER_API, function, name), + Modulo => make_for_simple_native(&MOD_API, function, name), + Power => make_for_simple_native(&POW_API, function, name), + Sqrti => make_for_simple_native(&SQRTI_API, function, name), + Log2 => make_for_simple_native(&LOG2_API, function, name), + BitwiseXor => make_for_simple_native(&XOR_API, function, name), + And => make_for_simple_native(&AND_API, function, name), + Or => make_for_simple_native(&OR_API, function, name), + Not => 
make_for_simple_native(&NOT_API, function, name), Equals => make_for_special(&EQUALS_API, function), If => make_for_special(&IF_API, function), Let => make_for_special(&LET_API, function), @@ -2620,20 +2617,20 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { BurnAsset => make_for_special(&BURN_ASSET, function), GetTokenSupply => make_for_special(&GET_TOKEN_SUPPLY, function), AtBlock => make_for_special(&AT_BLOCK, function), - GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &function, name), - StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &function, name), + GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, function, name), + StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, function, name), StxTransfer => make_for_special(&STX_TRANSFER, function), StxTransferMemo => make_for_special(&STX_TRANSFER_MEMO, function), - StxBurn => make_for_simple_native(&STX_BURN, &function, name), + StxBurn => make_for_simple_native(&STX_BURN, function, name), ToConsensusBuff => make_for_special(&TO_CONSENSUS_BUFF, function), FromConsensusBuff => make_for_special(&FROM_CONSENSUS_BUFF, function), ReplaceAt => make_for_special(&REPLACE_AT, function), - BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &function, name), - BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &function, name), - BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &function, name), - BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &function, name), - BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &function, name), - BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &function, name), + BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, function, name), + BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, function, name), + BitwiseOr => make_for_simple_native(&BITWISE_OR_API, function, name), + BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, function, name), + BitwiseLShift => 
make_for_simple_native(&BITWISE_LEFT_SHIFT_API, function, name), + BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, function, name), } } @@ -2726,7 +2723,7 @@ fn make_all_api_reference() -> ReferenceAPIs { .filter_map(make_keyword_reference) .collect(); - keywords.sort_by(|x, y| x.name.cmp(&y.name)); + keywords.sort_by_key(|x| x.name); ReferenceAPIs { functions, @@ -2737,10 +2734,9 @@ fn make_all_api_reference() -> ReferenceAPIs { #[allow(clippy::expect_used)] pub fn make_json_api_reference() -> String { let api_out = make_all_api_reference(); - format!( - "{}", - serde_json::to_string(&api_out).expect("Failed to serialize documentation") - ) + serde_json::to_string(&api_out) + .expect("Failed to serialize documentation") + .to_string() } #[cfg(test)] @@ -2777,7 +2773,7 @@ mod test { const DOC_HEADER_DB: DocHeadersDB = DocHeadersDB {}; impl MemoryBackingStore { - pub fn as_docs_clarity_db<'a>(&'a mut self) -> ClarityDatabase<'a> { + pub fn as_docs_clarity_db(&mut self) -> ClarityDatabase<'_> { ClarityDatabase::new(self, &DOC_HEADER_DB, &DOC_POX_STATE_DB) } } @@ -3001,13 +2997,13 @@ mod test { let mut current_segment: String = "".into(); for line in program.lines() { current_segment.push_str(line); - current_segment.push_str("\n"); + current_segment.push('\n'); if line.contains(";;") && line.contains("Returns ") { segments.push(current_segment); current_segment = "".into(); } } - if current_segment.len() > 0 { + if !current_segment.is_empty() { segments.push(current_segment); } @@ -3067,7 +3063,7 @@ mod test { .type_map .as_ref() .unwrap() - .get_type_expected(&analysis.expressions.last().unwrap()) + .get_type_expected(analysis.expressions.last().unwrap()) .cloned(), ); } @@ -3162,7 +3158,7 @@ mod test { let mut analysis_db = store.as_analysis_db(); let mut parsed = ast::build_ast( &contract_id, - &token_contract_content, + token_contract_content, &mut (), ClarityVersion::latest(), StacksEpochId::latest(), @@ -3232,7 +3228,7 @@ mod test { 
env.initialize_contract( contract_id, - &token_contract_content, + token_contract_content, None, ASTRules::PrecheckSize, ) diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index b3b0ca5fea..911465d4ba 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -37,6 +37,7 @@ pub struct IncomparableError { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] pub enum Error { /// UncheckedErrors are errors that *should* be caught by the /// TypeChecker and other check passes. Test executions may @@ -117,7 +118,7 @@ pub type InterpreterResult = Result; impl PartialEq> for IncomparableError { fn eq(&self, _other: &IncomparableError) -> bool { - return false; + false } } @@ -137,19 +138,16 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Runtime(ref err, ref stack) => { - match err { - _ => write!(f, "{}", err), - }?; - + write!(f, "{err}")?; if let Some(ref stack_trace) = stack { - write!(f, "\n Stack Trace: \n")?; + writeln!(f, "\n Stack Trace: ")?; for item in stack_trace.iter() { - write!(f, "{}\n", item)?; + writeln!(f, "{item}")?; } } Ok(()) } - _ => write!(f, "{:?}", self), + _ => write!(f, "{self:?}"), } } } @@ -226,9 +224,9 @@ impl From for () { fn from(err: Error) -> Self {} } -impl Into for ShortReturnType { - fn into(self) -> Value { - match self { +impl From for Value { + fn from(val: ShortReturnType) -> Self { + match val { ShortReturnType::ExpectedValue(v) => v, ShortReturnType::AssertionFailed(v) => v, } diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index 0d004a846a..3dca730928 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -210,6 +210,7 @@ pub fn special_stx_transfer_memo( } } +#[allow(clippy::unnecessary_fallible_conversions)] pub fn special_stx_account( args: &[SymbolicExpression], env: &mut Environment, @@ -286,10 +287,7 @@ pub fn special_stx_burn( 
env.add_memory(TypeSignature::PrincipalType.size()? as u64)?; env.add_memory(STXBalance::unlocked_and_v1_size as u64)?; - let mut burner_snapshot = env - .global_context - .database - .get_stx_balance_snapshot(&from)?; + let mut burner_snapshot = env.global_context.database.get_stx_balance_snapshot(from)?; if !burner_snapshot.can_transfer(amount)? { return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE); } diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 090f0d2107..142c1308eb 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -57,13 +57,13 @@ pub fn buff_to_int_generic( > BufferLength::try_from(16_u32) .map_err(|_| InterpreterError::Expect("Failed to construct".into()))? { - return Err(CheckErrors::TypeValueError( + Err(CheckErrors::TypeValueError( SequenceType(BufferType(BufferLength::try_from(16_u32).map_err( |_| InterpreterError::Expect("Failed to construct".into()), )?)), value, ) - .into()); + .into()) } else { let mut transfer_buffer = [0u8; 16]; let original_slice = sequence_data.as_slice(); @@ -82,15 +82,13 @@ pub fn buff_to_int_generic( Ok(value) } } - _ => { - return Err(CheckErrors::TypeValueError( - SequenceType(BufferType(BufferLength::try_from(16_u32).map_err( - |_| InterpreterError::Expect("Failed to construct".into()), - )?)), - value, - ) - .into()) - } + _ => Err(CheckErrors::TypeValueError( + SequenceType(BufferType(BufferLength::try_from(16_u32).map_err( + |_| InterpreterError::Expect("Failed to construct".into()), + )?)), + value, + ) + .into()), } } diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index dd55f3a56f..86d92283ca 100644 --- a/clarity/src/vm/functions/crypto.rs +++ b/clarity/src/vm/functions/crypto.rs @@ -126,8 +126,8 @@ pub fn special_principal_of( pubkey_to_address_v1(pub_key)? 
}; let principal = addr.to_account_principal(); - return Ok(Value::okay(Value::Principal(principal)) - .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?); + Ok(Value::okay(Value::Principal(principal)) + .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?) } else { Ok(Value::err_uint(1)) } @@ -169,17 +169,14 @@ pub fn special_secp256k1_recover( _ => return Err(CheckErrors::TypeValueError(BUFF_65.clone(), param1).into()), }; - match secp256k1_recover(&message, &signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature) - { - Ok(pubkey) => { - return Ok(Value::okay( - Value::buff_from(pubkey.to_vec()) - .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?, - ) - .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?) - } - _ => return Ok(Value::err_uint(1)), - }; + match secp256k1_recover(message, signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature) { + Ok(pubkey) => Ok(Value::okay( + Value::buff_from(pubkey.to_vec()) + .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?, + ) + .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?), + _ => Ok(Value::err_uint(1)), + } } pub fn special_secp256k1_verify( diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index ff14507ead..12fb1cd3da 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -730,14 +730,12 @@ pub fn special_delete_entry_v205( /// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the block at `block-height` /// - `miner-spend-total` returns the total number of satoshis spent by all miners for the block at `block-height` /// - `block-reward` returns the block reward for the block at `block-height` - /// /// # Errors: /// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments. 
/// - CheckErrors::GetStacksBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName. /// - CheckErrors::NoSuchStacksBlockInfoProperty if `args[0]` isn't a StacksBlockInfoProperty. /// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`. - pub fn special_get_block_info( args: &[SymbolicExpression], env: &mut Environment, diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 6482493a29..a8971b3fa0 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -79,7 +79,6 @@ mod boolean; mod conversions; mod crypto; mod database; -#[allow(clippy::result_large_err)] pub mod define; mod options; pub mod principals; diff --git a/clarity/src/vm/functions/options.rs b/clarity/src/vm/functions/options.rs index 26829618af..e3305395a5 100644 --- a/clarity/src/vm/functions/options.rs +++ b/clarity/src/vm/functions/options.rs @@ -212,7 +212,7 @@ pub fn special_match( match input { Value::Response(data) => special_match_resp(data, &args[1..], env, context), Value::Optional(data) => special_match_opt(data, &args[1..], env, context), - _ => return Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()), + _ => Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()), } } diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index ff991f5513..8680c06224 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(clippy::result_large_err)] pub mod diagnostic; pub mod errors; @@ -172,33 +173,31 @@ fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) -> name )) .into()) + } else if let Some(value) = variables::lookup_reserved_variable(name, context, env)? { + Ok(value) } else { - if let Some(value) = variables::lookup_reserved_variable(name, context, env)? 
{ + runtime_cost( + ClarityCostFunction::LookupVariableDepth, + env, + context.depth(), + )?; + if let Some(value) = context.lookup_variable(name) { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; + Ok(value.clone()) + } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; + let (value, _) = + Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; Ok(value) - } else { - runtime_cost( - ClarityCostFunction::LookupVariableDepth, - env, - context.depth(), - )?; - if let Some(value) = context.lookup_variable(name) { - runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; - Ok(value.clone()) - } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { - runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; - let (value, _) = - Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value) - .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; - Ok(value) - } else if let Some(callable_data) = context.lookup_callable_contract(name) { - if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { - Ok(callable_data.contract_identifier.clone().into()) - } else { - Ok(Value::CallableContract(callable_data.clone())) - } + } else if let Some(callable_data) = context.lookup_callable_contract(name) { + if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { + Ok(callable_data.contract_identifier.clone().into()) } else { - Err(CheckErrors::UndefinedVariable(name.to_string()).into()) + Ok(Value::CallableContract(callable_data.clone())) } + } else { + Err(CheckErrors::UndefinedVariable(name.to_string()).into()) } } } @@ -238,11 +237,7 @@ pub fn apply( // only enough to do recursion detection. // do recursion check on user functions. 
- let track_recursion = match function { - CallableType::UserFunction(_) => true, - _ => false, - }; - + let track_recursion = matches!(function, CallableType::UserFunction(_)); if track_recursion && env.call_stack.contains(&identifier) { return Err(CheckErrors::CircularReference(vec![identifier.to_string()]).into()); } @@ -311,9 +306,9 @@ pub fn apply( } } -pub fn eval<'a>( +pub fn eval( exp: &SymbolicExpression, - env: &'a mut Environment, + env: &mut Environment, context: &LocalContext, ) -> Result { use crate::vm::representations::SymbolicExpressionType::{ @@ -329,7 +324,7 @@ pub fn eval<'a>( let res = match exp.expr { AtomValue(ref value) | LiteralValue(ref value) => Ok(value.clone()), - Atom(ref value) => lookup_variable(&value, context, env), + Atom(ref value) => lookup_variable(value, context, env), List(ref children) => { let (function_variable, rest) = children .split_first() @@ -338,8 +333,8 @@ pub fn eval<'a>( let function_name = function_variable .match_atom() .ok_or(CheckErrors::BadFunctionName)?; - let f = lookup_function(&function_name, env)?; - apply(&f, &rest, env, context) + let f = lookup_function(function_name, env)?; + apply(&f, rest, env, context) } TraitReference(_, _) | Field(_) => { return Err(InterpreterError::BadSymbolicRepresentation( @@ -360,13 +355,8 @@ pub fn eval<'a>( } pub fn is_reserved(name: &str, version: &ClarityVersion) -> bool { - if let Some(_result) = functions::lookup_reserved_functions(name, version) { - true - } else if variables::is_reserved_name(name, version) { - true - } else { - false - } + functions::lookup_reserved_functions(name, version).is_some() + || variables::is_reserved_name(name, version) } /// This function evaluates a list of expressions, sharing a global context. 
@@ -629,7 +619,7 @@ mod test { func_body, DefineType::Private, &"do_work".into(), - &"", + "", ); let context = LocalContext::new(); diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs index c80e3c7467..0f779b479f 100644 --- a/clarity/src/vm/representations.rs +++ b/clarity/src/vm/representations.rs @@ -125,8 +125,8 @@ impl StacksMessageCodec for ClarityName { impl StacksMessageCodec for ContractName { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH as usize - || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH as usize + if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH + || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH { return Err(codec_error::SerializeError(format!( "Failed to serialize contract name: too short or too long: {}", diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 295909859f..861c88ad0a 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -70,7 +70,7 @@ pub fn execute_on_network(s: &str, use_mainnet: bool) -> Value { pub fn symbols_from_values(vec: Vec) -> Vec { vec.into_iter() - .map(|value| SymbolicExpression::atom_value(value)) + .map(SymbolicExpression::atom_value) .collect() } diff --git a/clarity/src/vm/tests/assets.rs b/clarity/src/vm/tests/assets.rs index e42f2c59da..e332f72d46 100644 --- a/clarity/src/vm/tests/assets.rs +++ b/clarity/src/vm/tests/assets.rs @@ -1006,7 +1006,7 @@ fn test_simple_naming_system( _ => panic!(), }; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let tokens_contract_id = @@ -1107,7 +1107,7 @@ fn test_simple_naming_system( assert!(is_committed(&result)); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); assert_eq!( 
env.eval_read_only(&names_contract_id.clone(), "(nft-get-owner? names 1)") .unwrap(), @@ -1378,7 +1378,7 @@ fn test_simple_naming_system( assert_eq!(asset_map.to_table().len(), 0); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); assert_eq!( env.eval_read_only(&names_contract_id.clone(), "(nft-get-owner? names 5)") .unwrap(), diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 9cb5aea4b1..94433958c4 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -119,7 +119,7 @@ fn test_get_block_info_eval( Ok(Value::none()), ]; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -138,7 +138,7 @@ fn test_get_block_info_eval( ) .unwrap(); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); eprintln!("{}", contracts[i]); let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); match expected[i] { @@ -172,13 +172,13 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment (as-contract (contract-call? 
.contract-a get-caller)))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -200,7 +200,7 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -312,7 +312,7 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener .expect_principal() .unwrap(); let p2 = execute("'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -324,11 +324,8 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener }; { - let mut env = owned_env.get_exec_environment( - Some(p1.clone()), - sponsor.clone(), - &mut placeholder_context, - ); + let mut env = + owned_env.get_exec_environment(Some(p1.clone()), sponsor.clone(), &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -345,11 +342,8 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener // Sponsor is equal to some(principal) in this code block. 
{ - let mut env = owned_env.get_exec_environment( - Some(p1.clone()), - sponsor.clone(), - &mut placeholder_context, - ); + let mut env = + owned_env.get_exec_environment(Some(p1.clone()), sponsor.clone(), &placeholder_context); tx_sponsor_contract_asserts(&mut env, sponsor); } @@ -357,7 +351,7 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener { let sponsor = None; let mut env = - owned_env.get_exec_environment(Some(p1), sponsor.clone(), &mut placeholder_context); + owned_env.get_exec_environment(Some(p1), sponsor.clone(), &placeholder_context); tx_sponsor_contract_asserts(&mut env, sponsor); } } @@ -381,13 +375,13 @@ fn test_fully_qualified_contract_call( (as-contract (contract-call? .contract-a get-caller)))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -409,7 +403,7 @@ fn test_fully_qualified_contract_call( let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -520,13 +514,13 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let name_hash_expensive_0 = execute("(hash160 1)"); let name_hash_expensive_1 = execute("(hash160 2)"); let name_hash_cheap_0 = execute("(hash160 100001)"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, 
None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("tokens").unwrap(); env.initialize_contract(contract_identifier, tokens_contract, ASTRules::PrecheckSize) @@ -541,7 +535,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_err_code( @@ -560,7 +554,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -588,7 +582,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_err_code( &env.execute_contract( @@ -607,7 +601,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -625,7 +619,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -690,7 +684,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro (contract-call? 
.factorial-contract compute 8008)) "; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -698,7 +692,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(get_principal().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let contract_identifier = QualifiedContractIdentifier::local("factorial-contract").unwrap(); @@ -776,12 +770,12 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator (contract-call? .contract-1 modify-data 105 105) (err 1))) "; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("contract-1").unwrap(); env.initialize_contract(contract_identifier, contract_1, ASTRules::PrecheckSize) @@ -890,12 +884,12 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator fn test_factorial_contract(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { let mut owned_env = env_factory.get_env(epoch); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("factorial").unwrap(); env.initialize_contract( @@ -1092,9 +1086,9 @@ fn test_cc_stack_depth( 1) 1) 1) 1) 1) 
1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1)) (bar) "; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) @@ -1133,9 +1127,9 @@ fn test_cc_trait_stack_depth( (bar .c-foo) "; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) @@ -1156,7 +1150,7 @@ fn test_eval_with_non_existing_contract( ) { let mut owned_env = env_factory.get_env(epoch); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -1164,7 +1158,7 @@ fn test_eval_with_non_existing_contract( let mut env = owned_env.get_exec_environment( Some(get_principal().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let result = env.eval_read_only( diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 5fa58b507b..cada7e973b 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -36,7 +36,7 @@ mod traits; mod variables; #[cfg(any(test, feature = "testing"))] -impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { +impl OwnedEnvironment<'_, '_> { pub fn 
set_tenure_height(&mut self, tenure_height: u32) { self.context.database.begin(); self.context diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 44f3447bad..98db149273 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -711,7 +711,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(22, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -735,7 +735,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(20, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -799,7 +799,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(26, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -823,7 +823,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(21, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -854,7 +854,7 @@ fn create_principal_from_strings( // contract principal requested Value::Principal(PrincipalData::Contract(QualifiedContractIdentifier::new( StandardPrincipalData(version_array[0], principal_array), - name.try_into().unwrap(), + name.into(), ))) } else { // standard principal requested diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index d9e52c0222..f6dbd87090 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -73,7 +73,7 @@ fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId (+ z y)) x))"; let 
contract_id = QualifiedContractIdentifier::transient(); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); if let Ok(parsed_program) = parse(&contract_id, program, version, epoch) { let context = LocalContext::new(); @@ -84,7 +84,7 @@ fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId Ok(Value::Int(7)), eval( &parsed_program[0], - &mut env.get_exec_environment(None, None, &mut placeholder_context), + &mut env.get_exec_environment(None, None, &placeholder_context), &context ) ); diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 97c4292b0d..d3fcfb7779 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -40,11 +40,11 @@ fn test_dynamic_dispatch_by_defining_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -66,7 +66,7 @@ fn test_dynamic_dispatch_by_defining_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -98,11 +98,11 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = 
owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -124,7 +124,7 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -155,11 +155,11 @@ fn test_dynamic_dispatch_pass_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -181,7 +181,7 @@ fn test_dynamic_dispatch_pass_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -211,11 +211,11 @@ fn test_dynamic_dispatch_intra_contract_call( (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -237,7 +237,7 @@ fn 
test_dynamic_dispatch_intra_contract_call( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -270,11 +270,11 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -302,7 +302,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -335,11 +335,11 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( (define-public (get-2 (x uint)) (ok u2))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -367,7 +367,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -397,11 
+397,11 @@ fn test_dynamic_dispatch_by_importing_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -429,7 +429,7 @@ fn test_dynamic_dispatch_by_importing_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -466,11 +466,11 @@ fn test_dynamic_dispatch_including_nested_trait( let target_nested_contract = "(define-public (get-a (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-nested-trait").unwrap(), contract_defining_nested_trait, @@ -513,7 +513,7 @@ fn test_dynamic_dispatch_including_nested_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -542,11 +542,11 @@ fn test_dynamic_dispatch_mismatched_args( let target_contract = "(define-public (get-1 (x int)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let 
placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -568,7 +568,7 @@ fn test_dynamic_dispatch_mismatched_args( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -599,11 +599,11 @@ fn test_dynamic_dispatch_mismatched_returned( let target_contract = "(define-public (get-1 (x uint)) (ok 1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -625,7 +625,7 @@ fn test_dynamic_dispatch_mismatched_returned( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -659,11 +659,11 @@ fn test_reentrant_dynamic_dispatch( "(define-public (get-1 (x uint)) (contract-call? 
.dispatching-contract wrapped-get-1 .target-contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -685,7 +685,7 @@ fn test_reentrant_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -716,11 +716,11 @@ fn test_readwrite_dynamic_dispatch( let target_contract = "(define-read-only (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -742,7 +742,7 @@ fn test_readwrite_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -773,11 +773,11 @@ fn test_readwrite_violation_dynamic_dispatch( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = 
owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -799,7 +799,7 @@ fn test_readwrite_violation_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -837,11 +837,11 @@ fn test_bad_call_with_trait( (contract-call? .dispatch wrapped-get-1 contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -872,7 +872,7 @@ fn test_bad_call_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -905,11 +905,11 @@ fn test_good_call_with_trait( (contract-call? 
.dispatch wrapped-get-1 .implem))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -940,7 +940,7 @@ fn test_good_call_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -974,11 +974,11 @@ fn test_good_call_2_with_trait( (contract-call? .dispatch wrapped-get-1 contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1012,7 +1012,7 @@ fn test_good_call_2_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1045,11 +1045,11 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = 
owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -1077,7 +1077,7 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1108,11 +1108,11 @@ fn test_contract_of_value( (define-public (get-1 (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1141,7 +1141,7 @@ fn test_contract_of_value( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1175,11 +1175,11 @@ fn test_contract_of_no_impl( (define-public (get-1 (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1208,7 +1208,7 @@ fn test_contract_of_no_impl( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + 
&placeholder_context, ); assert_eq!( @@ -1240,11 +1240,11 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1266,7 +1266,7 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1297,11 +1297,11 @@ fn test_return_trait_with_contract_of_wrapped_in_let( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1323,7 +1323,7 @@ fn test_return_trait_with_contract_of_wrapped_in_let( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1352,11 +1352,11 @@ fn test_return_trait_with_contract_of( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - 
let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1378,7 +1378,7 @@ fn test_return_trait_with_contract_of( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1414,13 +1414,13 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1443,7 +1443,7 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1476,13 +1476,13 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), 
ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1506,7 +1506,7 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1549,13 +1549,13 @@ fn test_pass_embedded_trait_to_subtrait_optional( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1578,7 +1578,7 @@ fn test_pass_embedded_trait_to_subtrait_optional( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1621,13 +1621,13 @@ fn test_pass_embedded_trait_to_subtrait_ok( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, 
&placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1650,7 +1650,7 @@ fn test_pass_embedded_trait_to_subtrait_ok( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1693,13 +1693,13 @@ fn test_pass_embedded_trait_to_subtrait_err( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1722,7 +1722,7 @@ fn test_pass_embedded_trait_to_subtrait_err( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1765,13 +1765,13 @@ fn test_pass_embedded_trait_to_subtrait_list( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1794,7 +1794,7 @@ fn test_pass_embedded_trait_to_subtrait_list( let mut env = 
owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1840,13 +1840,13 @@ fn test_pass_embedded_trait_to_subtrait_list_option( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1869,7 +1869,7 @@ fn test_pass_embedded_trait_to_subtrait_list_option( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1915,13 +1915,13 @@ fn test_pass_embedded_trait_to_subtrait_option_list( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1944,7 +1944,7 @@ fn test_pass_embedded_trait_to_subtrait_option_list( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1976,13 +1976,13 @@ fn 
test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2005,7 +2005,7 @@ fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -2041,13 +2041,13 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2070,7 +2070,7 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -2102,13 +2102,13 @@ fn test_pass_principal_literal_to_trait( let 
target_contract = "(define-public (get-1 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2131,7 +2131,7 @@ fn test_pass_principal_literal_to_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 5b392bb678..e862aeb0df 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -36,13 +36,13 @@ fn test_block_height( ) { let contract = "(define-read-only (test-func) block-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -70,7 +70,7 @@ fn test_block_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, 
"(test-func)"); @@ -94,13 +94,13 @@ fn test_stacks_block_height( ) { let contract = "(define-read-only (test-func) stacks-block-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -128,7 +128,7 @@ fn test_stacks_block_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); @@ -154,13 +154,13 @@ fn test_tenure_height( ) { let contract = "(define-read-only (test-func) tenure-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -188,7 +188,7 @@ fn test_tenure_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = 
env.eval_read_only(&contract_identifier, "(test-func)"); @@ -213,6 +213,7 @@ enum WhenError { } #[cfg(test)] +#[allow(clippy::type_complexity)] fn expect_contract_error( version: ClarityVersion, epoch: StacksEpochId, @@ -226,13 +227,13 @@ fn expect_contract_error( )], expected_success: Value, ) { - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::local(name).unwrap(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local(name).unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -280,7 +281,7 @@ fn expect_contract_error( } } - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); diff --git a/clarity/src/vm/tooling/mod.rs b/clarity/src/vm/tooling/mod.rs index f218b2ccab..5b89145588 100644 --- a/clarity/src/vm/tooling/mod.rs +++ b/clarity/src/vm/tooling/mod.rs @@ -21,7 +21,7 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let mut contract = build_ast_with_rules( + let contract = build_ast_with_rules( &contract_identifier, snippet, &mut (), @@ -37,7 +37,7 @@ pub fn mem_type_check( let cost_tracker = LimitedCostTracker::new_free(); match run_analysis( &QualifiedContractIdentifier::transient(), - &mut contract, + &contract, &mut analysis_db, false, cost_tracker, @@ -51,7 +51,7 @@ pub fn mem_type_check( .type_map .as_ref() .unwrap() - .get_type_expected(&x.expressions.last().unwrap()) + 
.get_type_expected(x.expressions.last().unwrap()) .cloned(); Ok((first_type, x)) } diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index e1837ee034..ef4b565834 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -14,9 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[allow(clippy::result_large_err)] pub mod serialization; -#[allow(clippy::result_large_err)] pub mod signatures; use std::collections::btree_map::Entry; @@ -279,6 +277,10 @@ impl SequenceData { } } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn element_at(self, index: usize) -> Result> { if self.len() <= index { return Ok(None); @@ -613,7 +615,7 @@ pub trait SequencedValue { fn atom_values(&mut self) -> Result> { self.drained_items() .iter() - .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(&item)?))) + .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(item)?))) .collect() } } @@ -751,11 +753,11 @@ define_named_enum!(TenureInfoProperty { impl OptionalData { pub fn type_signature(&self) -> std::result::Result { let type_result = match self.data { - Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(&v)?), + Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(v)?), None => TypeSignature::new_option(TypeSignature::NoType), }; type_result.map_err(|_| { - CheckErrors::Expects("Should not have constructed too large of a type.".into()).into() + CheckErrors::Expects("Should not have constructed too large of a type.".into()) }) } } @@ -773,7 +775,7 @@ impl ResponseData { ), }; type_result.map_err(|_| { - CheckErrors::Expects("Should not have constructed too large of a type.".into()).into() + CheckErrors::Expects("Should not have constructed too large of a type.".into()) }) } } @@ -1265,6 +1267,10 @@ impl ListData { .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into()) } + pub fn is_empty(&self) 
-> bool { + self.data.is_empty() + } + fn append(&mut self, epoch: &StacksEpochId, other_seq: ListData) -> Result<()> { let entry_type_a = self.type_signature.get_list_item_type(); let entry_type_b = other_seq.type_signature.get_list_item_type(); diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 7dcda788a8..48030519c8 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -782,14 +782,12 @@ impl Value { expected_type.unwrap(), )); } - } else { - if len as u64 != tuple_type.len() { - // unwrap is safe because of the match condition - #[allow(clippy::unwrap_used)] - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap(), - )); - } + } else if u64::from(len) != tuple_type.len() { + // unwrap is safe because of the match condition + #[allow(clippy::unwrap_used)] + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap(), + )); } Some(tuple_type) } @@ -1344,7 +1342,7 @@ impl ClaritySerializable for u32 { impl ClarityDeserializable for u32 { fn deserialize(input: &str) -> Result { - let bytes = hex_bytes(&input).map_err(|_| { + let bytes = hex_bytes(input).map_err(|_| { InterpreterError::Expect("u32 deserialization: failed decoding bytes.".into()) })?; assert_eq!(bytes.len(), 4); @@ -1419,13 +1417,10 @@ pub mod tests { } fn test_bad_expectation(v: Value, e: TypeSignature) { - assert!( - match Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ) + assert!(matches!( + Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err(), + SerializationError::DeserializeExpected(_) + )); } #[test] @@ -1704,40 +1699,37 @@ pub mod tests { ); // field number not equal to expectations - assert!(match Value::try_deserialize_hex( - &t_3.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_1).unwrap(), - false - ) - 
.unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_3.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); // field type mismatch - assert!(match Value::try_deserialize_hex( - &t_2.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_1).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_2.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); // field not-present in expected - assert!(match Value::try_deserialize_hex( - &t_1.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_4).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_1.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_4).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); } #[apply(test_clarity_versions)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index b3984c5251..a85c56ff3e 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -589,9 +589,7 @@ impl TypeSignature { | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => self.admits_type_v2_1(other), - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) - } + StacksEpochId::Epoch10 => Err(CheckErrors::Expects("epoch 1.0 not supported".into())), } } @@ -678,16 +676,12 @@ impl TypeSignature { } } NoType => Err(CheckErrors::CouldNotDetermineType), - CallableType(_) => { - return Err(CheckErrors::Expects( - "CallableType should not be 
used in epoch v2.0".into(), - )) - } - ListUnionType(_) => { - return Err(CheckErrors::Expects( - "ListUnionType should not be used in epoch v2.0".into(), - )) - } + CallableType(_) => Err(CheckErrors::Expects( + "CallableType should not be used in epoch v2.0".into(), + )), + ListUnionType(_) => Err(CheckErrors::Expects( + "ListUnionType should not be used in epoch v2.0".into(), + )), _ => Ok(other == self), } } @@ -1162,9 +1156,7 @@ impl TypeSignature { | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => Self::least_supertype_v2_1(a, b), - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) - } + StacksEpochId::Epoch10 => Err(CheckErrors::Expects("epoch 1.0 not supported".into())), } } @@ -1455,8 +1447,7 @@ impl TypeSignature { // Checks if resulting type signature is of valid size. pub fn construct_parent_list_type(args: &[Value]) -> Result { - let children_types: Result> = - args.iter().map(|x| TypeSignature::type_of(x)).collect(); + let children_types: Result> = args.iter().map(TypeSignature::type_of).collect(); TypeSignature::parent_list_type(&children_types?) } @@ -1660,7 +1651,7 @@ impl TypeSignature { ) -> Result> { let mut trait_signature: BTreeMap = BTreeMap::new(); let functions_types = type_args - .get(0) + .first() .ok_or_else(|| CheckErrors::InvalidTypeDescription)? 
.match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; @@ -1682,11 +1673,10 @@ impl TypeSignature { let fn_args_exprs = args[1] .match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; - let mut fn_args = Vec::with_capacity(fn_args_exprs.len()); - for arg_type in fn_args_exprs.into_iter() { - let arg_t = TypeSignature::parse_type_repr(epoch, arg_type, accounting)?; - fn_args.push(arg_t); - } + let fn_args = fn_args_exprs + .iter() + .map(|arg_type| TypeSignature::parse_type_repr(epoch, arg_type, accounting)) + .collect::>()?; // Extract function's type return - must be a response let fn_return = match TypeSignature::parse_type_repr(epoch, &args[2], accounting) { @@ -1766,7 +1756,6 @@ impl TypeSignature { "FAIL: .size() overflowed on too large of a type. construction should have failed!" .into(), ) - .into() }) } @@ -1885,9 +1874,8 @@ impl TupleTypeSignature { } pub fn size(&self) -> Result { - self.inner_size()?.ok_or_else(|| { - CheckErrors::Expects("size() overflowed on a constructed type.".into()).into() - }) + self.inner_size()? + .ok_or_else(|| CheckErrors::Expects("size() overflowed on a constructed type.".into())) } fn max_depth(&self) -> u8 { diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 1de0e34f09..52a77e2bb8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,6 +114,13 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, + /// A new processed Stacks block was received from the node with the given block hash + NewBlock { + /// The block header hash for the newly processed stacks block + block_hash: Sha512Trunc256Sum, + /// The block height for the newly processed stacks block + block_height: u64, + }, } /// Trait to implement a stop-signaler for the event receiver thread. 
@@ -298,29 +305,25 @@ impl EventReceiver for SignerEventReceiver { &request.method(), ))); } + debug!("Processing {} event", request.url()); if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_event::(request) } else if request.url() == "/proposal_response" { - process_proposal_response(request) + process_event::(request) } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) + process_event::(request) } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); - return Err(EventError::Terminated); + Err(EventError::Terminated) + } else if request.url() == "/new_block" { + process_event::(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. - if &url != "/new_block" { - debug!( - "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", - event_receiver.local_addr, - url - ); - } + debug!( + "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", + event_receiver.local_addr, + url + ); ack_dispatcher(request); Err(EventError::UnrecognizedEvent(url)) } @@ -385,12 +388,13 @@ fn ack_dispatcher(request: HttpRequest) { // TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] -/// Process a stackerdb event from the node -fn process_stackerdb_event( - local_addr: Option, - mut request: HttpRequest, -) -> Result, EventError> { +fn process_event(mut request: HttpRequest) -> Result, EventError> +where + T: SignerEventTrait, + E: serde::de::DeserializeOwned + TryInto, Error = EventError>, +{ let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); ack_dispatcher(request); 
@@ -399,27 +403,12 @@ fn process_stackerdb_event( &e ))); } - - debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); - let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) + // Regardless of whether we successfully deserialize, we should ack the dispatcher so they don't keep resending it + ack_dispatcher(request); + let json_event: E = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e); - } - Ok(x) => x, - }; - - ack_dispatcher(request); + let signer_event: SignerEvent = json_event.try_into()?; Ok(signer_event) } @@ -466,78 +455,69 @@ impl TryFrom for SignerEvent { } } -/// Process a proposal response from the node -fn process_proposal_response( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got proposal_response event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); + fn try_from(block_validate_response: BlockValidateResponse) -> Result { + Ok(SignerEvent::BlockValidationResponse( + block_validate_response, + )) } +} - let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; +#[derive(Debug, Deserialize)] +struct BurnBlockEvent { 
+ burn_block_hash: String, + burn_block_height: u64, + reward_recipients: Vec, + reward_slot_holders: Vec, + burn_amount: u64, +} - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; + + fn try_from(burn_block_event: BurnBlockEvent) -> Result { + let burn_header_hash = burn_block_event + .burn_block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + + Ok(SignerEvent::NewBurnBlock { + burn_height: burn_block_event.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }) } +} - Ok(SignerEvent::BlockValidationResponse(event)) +#[derive(Debug, Deserialize)] +struct BlockEvent { + block_hash: String, + block_height: u64, } -/// Process a new burn block event from the node -fn process_new_burn_block_event( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, - } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let burn_header_hash = temp - .burn_block_hash - .get(2..) 
- .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBurnBlock { - burn_height: temp.burn_block_height, - received_time: SystemTime::now(), - burn_header_hash, - }; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); + fn try_from(block_event: BlockEvent) -> Result { + let block_hash: Sha512Trunc256Sum = block_event + .block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + Ok(SignerEvent::NewBlock { + block_hash, + block_height: block_event.block_height, + }) } - Ok(event) } pub fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index 0a5ed49a6d..40a097088e 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -120,9 +120,8 @@ impl, R, T: SignerEventTrait> RunningSigner { pub fn join(self) -> Option { debug!("Try join event loop..."); // wait for event receiver join - let _ = self.event_join.join().map_err(|thread_panic| { + let _ = self.event_join.join().inspect_err(|thread_panic| { error!("Event thread panicked with: '{:?}'", &thread_panic); - thread_panic }); info!("Event receiver thread joined"); @@ -131,9 +130,8 @@ impl, R, T: SignerEventTrait> RunningSigner { let result_opt = self .signer_join .join() - .map_err(|thread_panic| { + .inspect_err(|thread_panic| { error!("Event thread panicked with: '{:?}'", &thread_panic); - thread_panic }) .unwrap_or(None); diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index a9dfc47806..5f733eddad 100644 --- a/stacks-common/src/util/mod.rs 
+++ b/stacks-common/src/util/mod.rs @@ -35,6 +35,9 @@ use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; +#[cfg(any(test, feature = "testing"))] +pub mod tests; + pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); let since_the_epoch = start diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs new file mode 100644 index 0000000000..b87e913718 --- /dev/null +++ b/stacks-common/src/util/tests.rs @@ -0,0 +1,99 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use std::sync::{Arc, Mutex}; +/// `TestFlag` is a thread-safe utility designed for managing shared state in testing scenarios. It wraps +/// a value of type `T` inside an `Arc<Mutex<Option<T>>>`, allowing you to set and retrieve a value +/// across different parts of your codebase while ensuring thread safety. +/// +/// This structure is particularly useful when: +/// - You need a global or static variable in tests. +/// - You want to control the execution of custom test code paths by setting and checking a shared value. +/// +/// # Type Parameter +/// - `T`: The type of the value managed by the `TestFlag`. It must implement the `Default` and `Clone` traits. 
+/// +/// # Examples +/// +/// ```rust +/// use stacks_common::util::tests::TestFlag; +/// use std::sync::{Arc, Mutex}; +/// +/// // Create a TestFlag instance +/// let test_flag = TestFlag::default(); +/// +/// // Set a value in the test flag +/// test_flag.set("test_value".to_string()); +/// +/// // Retrieve the value +/// assert_eq!(test_flag.get(), "test_value".to_string()); +/// +/// // Reset the value to default +/// test_flag.set("".to_string()); +/// assert_eq!(test_flag.get(), "".to_string()); +/// ``` +#[derive(Clone)] +pub struct TestFlag<T>(pub Arc<Mutex<Option<T>>>); + +impl<T> Default for TestFlag<T> { + fn default() -> Self { + Self(Arc::new(Mutex::new(None))) + } +} + +impl<T: Default + Clone> TestFlag<T> { + /// Sets the value of the test flag. + /// + /// This method updates the value stored inside the `TestFlag`, replacing any existing value. + /// + /// # Arguments + /// - `value`: The new value to set for the `TestFlag`. + /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// test_flag.set(42); + /// assert_eq!(test_flag.get(), 42); + /// ``` + pub fn set(&self, value: T) { + *self.0.lock().unwrap() = Some(value); + } + + /// Retrieves the current value of the test flag. + /// + /// If no value has been set, this method returns the default value for the type `T`. + /// + /// # Returns + /// - The current value of the test flag, or the default value of `T` if none has been set. 
+ /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// + /// // Get the default value + /// assert_eq!(test_flag.get(), 0); // For T = i32, default is 0 + /// + /// // Set a value + /// test_flag.set(123); + /// + /// // Get the updated value + /// assert_eq!(test_flag.get(), 123); + /// ``` + pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index cd2aac31ac..78cadc0c05 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## Changed - Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database +- Signers now listen to new block events from the stacks node to determine whether a block has been successfully appended to the chain tip ## [3.1.0.0.1.0] diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 8c3d8a5a35..22204daf97 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -36,7 +36,7 @@ serde_stacker = "0.1" slog = { version = "2.5.2", features = [ "max_level_trace" ] } slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" -stacks-common = { path = "../stacks-common" } +stacks-common = { path = "../stacks-common", features = ["testing"] } stackslib = { path = "../stackslib" } thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9c4c348f8e..4cdc61471a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -244,18 +244,10 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyAccepted - ) - } - BlockState::LocallyRejected 
=> { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyRejected - ) - } + BlockState::LocallyAccepted | BlockState::LocallyRejected => !matches!( + prev_state, + BlockState::GloballyRejected | BlockState::GloballyAccepted + ), BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), } @@ -942,12 +934,8 @@ impl SignerDb { block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = ?2 WHERE signer_signature_hash = ?3"; - let args = params![ - u64_to_sql(ts)?, - BlockState::GloballyAccepted.to_string(), - block_sighash - ]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE signer_signature_hash = ?2"; + let args = params![u64_to_sql(ts)?, block_sighash]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -1394,14 +1382,7 @@ mod tests { .expect("Unable to get block from db") .expect("Unable to get block from db") .state, - BlockState::GloballyAccepted - ); - assert_eq!( - db.get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) - .unwrap() - .unwrap() - .signer_signature_hash(), - block_info_1.block.header.signer_signature_hash() + BlockState::Unprocessed ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); @@ -1428,7 +1409,14 @@ mod tests { assert_eq!(block.state, BlockState::LocallyAccepted); assert!(!block.check_state(BlockState::Unprocessed)); assert!(block.check_state(BlockState::LocallyAccepted)); - assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyRejected).unwrap(); + 
assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); assert!(block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); @@ -1440,15 +1428,8 @@ mod tests { assert!(block.check_state(BlockState::GloballyAccepted)); assert!(!block.check_state(BlockState::GloballyRejected)); - // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted - block.state = BlockState::LocallyRejected; - assert!(!block.check_state(BlockState::Unprocessed)); - assert!(!block.check_state(BlockState::LocallyAccepted)); - assert!(block.check_state(BlockState::LocallyRejected)); - assert!(block.check_state(BlockState::GloballyAccepted)); - assert!(block.check_state(BlockState::GloballyRejected)); - - block.move_to(BlockState::GloballyRejected).unwrap(); + // Must manually override as will not be able to move from GloballyAccepted to GloballyRejected + block.state = BlockState::GloballyRejected; assert!(!block.check_state(BlockState::Unprocessed)); assert!(!block.check_state(BlockState::LocallyAccepted)); assert!(!block.check_state(BlockState::LocallyRejected)); diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 520fb36ca1..34b363311e 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -17,6 +17,10 @@ /// The signer module for processing events pub mod signer; +#[cfg(any(test, feature = "testing"))] +/// Test specific functions for the signer module +pub mod tests; + use libsigner::v0::messages::SignerMessage; use crate::v0::signer::Signer; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index df5a1208c3..5a5128cce4 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -44,29 +44,13 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use 
crate::Signer as SignerTrait; -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { + /// The private key of the signer + #[cfg(any(test, feature = "testing"))] + pub private_key: StacksPrivateKey, + #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client @@ -128,6 +112,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::NewBlock { .. 
}) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -170,21 +155,8 @@ impl SignerTrait for Signer { match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - if let Some(public_keys) = - &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() - { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Ignoring block proposal due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - continue; - } + if self.test_ignore_all_block_proposals(block_proposal) { + continue; } self.handle_block_proposal( stacks_client, @@ -248,6 +220,33 @@ impl SignerTrait for Signer { }); *sortition_state = None; } + SignerEvent::NewBlock { + block_hash, + block_height, + } => { + debug!( + "{self}: Received a new block event."; + "block_hash" => %block_hash, + "block_height" => block_height + ); + if let Ok(Some(mut block_info)) = self + .signer_db + .block_lookup(block_hash) + .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) + { + if block_info.state == BlockState::GloballyAccepted { + // We have already globally accepted this block. Do nothing. 
+ return; + } + if let Err(e) = block_info.mark_globally_accepted() { + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + return; + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state to globally accepted: {e:?}"); + } + } + } } } @@ -406,7 +405,10 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); + #[cfg(any(test, feature = "testing"))] let mut block_info = BlockInfo::from(block_proposal.clone()); + #[cfg(not(any(test, feature = "testing")))] + let block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -496,10 +498,7 @@ impl Signer { self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { - // We know proposal is invalid. Send rejection message, do not do further validation - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; + // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb @@ -906,7 +905,7 @@ impl Signer { // Not enough rejection signatures to make a decision return; } - debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}"); if let Err(e) = block_info.mark_globally_rejected() { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } @@ -1028,7 +1027,7 @@ impl Signer { return; }; // move block to LOCALLY accepted state. - // We only mark this GLOBALLY accepted if we manage to broadcast it... 
+ // It is only considered globally accepted IFF we receive a new block event confirming it OR see the chain tip of the node advance to it. if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold warn!("{self}: Failed to mark block as locally accepted: {e:?}"); @@ -1041,22 +1040,8 @@ impl Signer { panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] - { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } + self.test_pause_block_broadcast(&block_info); + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); if self .submitted_block_proposal @@ -1104,71 +1089,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - let block_hash = block.header.signer_signature_hash(); - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self - .signer_db - .set_block_broadcasted(&block_hash, get_epoch_time_secs()) - { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return true; - } - false - } - - #[cfg(any(test, feature = "testing"))] - fn 
test_reject_block_proposal( - &mut self, - block_proposal: &BlockProposal, - block_info: &mut BlockInfo, - block_response: Option, - ) -> Option { - let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { - return block_response; - }; - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; - // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject - // as invalid since we rejected in a prior round if this crops up again) - // in case this is the first time we saw this block. Safe to do since this is testing case only. 
- self.signer_db - .insert_block(block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } else { - None - } - } - /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -1183,7 +1103,7 @@ impl Signer { } /// Helper for logging insert_block error - fn handle_insert_block_error(&self, e: DBError) { + pub fn handle_insert_block_error(&self, e: DBError) { error!("{self}: Failed to insert block into signer-db: {e:?}"); panic!("{self} Failed to write block to signerdb: {e}"); } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs new file mode 100644 index 0000000000..0b9cdcc569 --- /dev/null +++ b/stacks-signer/src/v0/tests.rs @@ -0,0 +1,141 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::sync::LazyLock; + +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use libsigner::v0::messages::{BlockResponse, RejectCode}; +use libsigner::BlockProposal; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::tests::TestFlag; +use stacks_common::{info, warn}; + +use super::signer::Signer; +use crate::signerdb::BlockInfo; + +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list +pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to pause broadcasting the block to the network +pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +/// A global variable that can be used to skip broadcasting the block to the network +pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +impl Signer { + /// Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set + pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if TEST_SKIP_BLOCK_BROADCAST.get() { + let block_hash = block.header.signer_signature_hash(); + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + + /// Reject block proposals if the TEST_REJECT_ALL_BLOCK_PROPOSAL flag is set for 
the signer's public key + pub fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. + self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), + )) + } else { + block_response + } + } + + /// Pause the block broadcast if the TEST_PAUSE_BLOCK_BROADCAST flag is set + pub fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { + // Do an extra check just so we don't log EVERY time. 
+ warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while TEST_PAUSE_BLOCK_BROADCAST.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + + /// Ignore block proposals if the TEST_IGNORE_ALL_BLOCK_PROPOSALS flag is set for the signer's public key + pub fn test_ignore_all_block_proposals(&self, block_proposal: &BlockProposal) -> bool { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + return true; + } + false + } +} diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 139a666098..209c6b8ef0 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -198,6 +198,9 @@ pub trait BlockEventDispatcher { } pub struct ChainsCoordinatorConfig { + /// true: assume all anchor blocks are present, and block chain sync until they arrive + /// false: process sortitions in reward cycles without anchor blocks + pub assume_present_anchor_blocks: bool, /// true: use affirmation maps before 2.1 /// false: only use affirmation maps in 2.1 or later pub always_use_affirmation_maps: bool, @@ -209,8 +212,17 @@ pub struct ChainsCoordinatorConfig { impl ChainsCoordinatorConfig { pub fn new() -> ChainsCoordinatorConfig { ChainsCoordinatorConfig { - always_use_affirmation_maps: false, + 
always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, + } + } + + pub fn test_new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + require_affirmed_anchor_blocks: false, + assume_present_anchor_blocks: false, } } } @@ -700,7 +712,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader notifier: (), atlas_config, atlas_db: Some(atlas_db), - config: ChainsCoordinatorConfig::new(), + config: ChainsCoordinatorConfig::test_new(), burnchain_indexer, refresh_stacker_db: Arc::new(AtomicBool::new(false)), in_nakamoto_epoch: false, @@ -2336,6 +2348,20 @@ impl< panic!("BUG: no epoch defined at height {}", header.block_height) }); + if self.config.assume_present_anchor_blocks { + // anchor blocks are always assumed to be present in the chain history, + // so report its absence if we don't have it. + if let PoxAnchorBlockStatus::SelectedAndUnknown(missing_anchor_block, _) = + &rc_info.anchor_status + { + info!( + "Currently missing PoX anchor block {}, which is assumed to be present", + &missing_anchor_block + ); + return Ok(Some(missing_anchor_block.clone())); + } + } + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 || self.config.always_use_affirmation_maps { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 35f6e5d1e1..929d8dfe90 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -73,7 +73,8 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, TransactionPayload, + 
TenureChangeError, TenureChangePayload, TokenTransferMemo, TransactionPayload, + TransactionVersion, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -108,8 +109,7 @@ use crate::core::{ }; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; -use crate::util_lib::boot; -use crate::util_lib::boot::boot_code_id; +use crate::util_lib::boot::{self, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, @@ -2093,7 +2093,8 @@ impl NakamotoChainState { return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (mut receipt, clarity_commit, reward_set_data, phantom_unlock_events) = + ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), @@ -2147,6 +2148,20 @@ impl NakamotoChainState { &receipt.header.anchored_header.block_hash() ); + let tx_receipts = &mut receipt.tx_receipts; + if let Some(unlock_receipt) = + // For the event dispatcher, attach any STXMintEvents that + // could not be included in the block (e.g. because the + // block didn't have a Coinbase transaction). 
+ Self::generate_phantom_unlock_tx( + phantom_unlock_events, + &stacks_chain_state.config(), + next_ready_block.header.chain_length, + ) + { + tx_receipts.push(unlock_receipt); + } + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2157,7 +2172,7 @@ impl NakamotoChainState { dispatcher.announce_block( &block_event, &receipt.header.clone(), - &receipt.tx_receipts, + &tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, &receipt.matured_rewards, @@ -4193,11 +4208,13 @@ impl NakamotoChainState { applied_epoch_transition: bool, signers_updated: bool, coinbase_height: u64, + phantom_lockup_events: Vec, ) -> Result< ( StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4234,7 +4251,7 @@ impl NakamotoChainState { coinbase_height, }; - return Ok((epoch_receipt, clarity_commit, None)); + return Ok((epoch_receipt, clarity_commit, None, phantom_lockup_events)); } /// Append a Nakamoto Stacks block to the Stacks chain state. 
@@ -4260,6 +4277,7 @@ impl NakamotoChainState { StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4527,18 +4545,20 @@ impl NakamotoChainState { Ok(lockup_events) => lockup_events, }; - // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + // If any, append lockups events to the coinbase receipt + if let Some(receipt) = tx_receipts.get_mut(0) { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction - if let Some(receipt) = tx_receipts.get_mut(0) { - if receipt.is_coinbase_tx() { - receipt.events.append(&mut lockup_events); - } - } else { - warn!("Unable to attach lockups events, block's first transaction is not a coinbase transaction") + if receipt.is_coinbase_tx() { + receipt.events.append(&mut lockup_events); } } + + // If lockup_events still contains items, it means they weren't attached + if !lockup_events.is_empty() { + info!("Unable to attach lockup events, block's first transaction is not a coinbase transaction. Will attach as a phantom tx."); + } + // if any, append auto unlock events to the coinbase receipt if auto_unlock_events.len() > 0 { // Receipts are appended in order, so the first receipt should be @@ -4611,6 +4631,7 @@ impl NakamotoChainState { applied_epoch_transition, signer_set_calc.is_some(), coinbase_height, + lockup_events, ); } @@ -4724,7 +4745,12 @@ impl NakamotoChainState { coinbase_height, }; - Ok((epoch_receipt, clarity_commit, reward_set_data)) + Ok(( + epoch_receipt, + clarity_commit, + reward_set_data, + lockup_events, + )) } /// Create a StackerDB config for the .miners contract. @@ -4885,6 +4911,53 @@ impl NakamotoChainState { clarity.save_analysis(&contract_id, &analysis).unwrap(); }) } + + /// Generate a "phantom" transaction to include STXMintEvents for + /// lockups that could not be attached to a Coinbase transaction + /// (because the block doesn't have a Coinbase transaction). 
+ fn generate_phantom_unlock_tx( + events: Vec, + config: &ChainstateConfig, + stacks_block_height: u64, + ) -> Option { + if events.is_empty() { + return None; + } + info!("Generating phantom unlock tx"); + let version = if config.mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + // Make the txid unique -- the phantom tx payload should include something block-specific otherwise + // they will always have the same txid. In this case we use the block height in the memo. This also + // happens to give some indication of the purpose of this phantom tx, for anyone looking. + let memo = TokenTransferMemo({ + let str = format!("Block {} token unlocks", stacks_block_height); + let mut buf = [0u8; 34]; + buf[..str.len().min(34)].copy_from_slice(&str.as_bytes()[..]); + buf + }); + let boot_code_address = boot_code_addr(config.mainnet); + let boot_code_auth = boot_code_tx_auth(boot_code_address.clone()); + let unlock_tx = StacksTransaction::new( + version, + boot_code_auth, + TransactionPayload::TokenTransfer( + PrincipalData::Standard(boot_code_address.into()), + 0, + memo, + ), + ); + let unlock_receipt = StacksTransactionReceipt::from_stx_transfer( + unlock_tx, + events, + Value::okay_true(), + ExecutionCost::ZERO, + ); + Some(unlock_receipt) + } } impl StacksMessageCodec for NakamotoBlock { diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 42f72d5165..ffdea5a7dd 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2903,7 +2903,7 @@ pub mod test { // Just update the expected value assert_eq!( genesis_root_hash.to_string(), - "c771616ff6acb710051238c9f4a3c48020a6d70cda637d34b89f2311a7e27886" + "0eb3076f0635ccdfcdc048afb8dea9048c5180a2e2b2952874af1d18f06321e8" ); } diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index f0e732a7e7..42663372f6 100644 --- a/stackslib/src/config/mod.rs +++ 
b/stackslib/src/config/mod.rs @@ -181,7 +181,7 @@ impl ConfigFile { mode: Some("xenon".to_string()), rpc_port: Some(18332), peer_port: Some(18333), - peer_host: Some("bitcoind.testnet.stacks.co".to_string()), + peer_host: Some("0.0.0.0".to_string()), magic_bytes: Some("T2".into()), ..BurnchainConfigFile::default() }; @@ -227,9 +227,9 @@ impl ConfigFile { mode: Some("mainnet".to_string()), rpc_port: Some(8332), peer_port: Some(8333), - peer_host: Some("bitcoin.blockstack.com".to_string()), - username: Some("blockstack".to_string()), - password: Some("blockstacksystem".to_string()), + peer_host: Some("0.0.0.0".to_string()), + username: Some("bitcoin".to_string()), + password: Some("bitcoin".to_string()), magic_bytes: Some("X2".to_string()), ..BurnchainConfigFile::default() }; @@ -1656,6 +1656,7 @@ pub struct NodeConfig { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: bool, pub require_affirmed_anchor_blocks: bool, + pub assume_present_anchor_blocks: bool, /// Fault injection for failing to push blocks pub fault_injection_block_push_fail_probability: Option, // fault injection for hiding blocks. @@ -1939,6 +1940,7 @@ impl Default for NodeConfig { use_test_genesis_chainstate: None, always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, chain_liveness_poll_time_secs: 300, @@ -2428,6 +2430,7 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + pub assume_present_anchor_blocks: Option, /// At most, how often should the chain-liveness thread /// wake up the chains-coordinator. Defaults to 300s (5 min). 
pub chain_liveness_poll_time_secs: Option, @@ -2509,6 +2512,10 @@ impl NodeConfigFile { // miners should always try to mine, even if they don't have the anchored // blocks in the canonical affirmation map. Followers, however, can stall. require_affirmed_anchor_blocks: self.require_affirmed_anchor_blocks.unwrap_or(!miner), + // as of epoch 3.0, all prepare phases have anchor blocks. + // at the start of epoch 3.0, the chain stalls without anchor blocks. + // only set this to false if you're doing some very extreme testing. + assume_present_anchor_blocks: true, // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. fault_injection_hide_blocks: false, diff --git a/stx-genesis/chainstate-test.txt b/stx-genesis/chainstate-test.txt index 614cf3d9f4..6eedf241d1 100644 --- a/stx-genesis/chainstate-test.txt +++ b/stx-genesis/chainstate-test.txt @@ -69,4 +69,5 @@ SM1ZH700J7CEDSEHM5AJ4C4MKKWNESTS35DD3SZM5,13888889,2267 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,45467 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,6587 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,2267 +SP2CTPPV8BHBVSQR727A3MK00ZD85RNY903KAG9F3,12345678,35 -----END STX VESTING----- \ No newline at end of file diff --git a/stx-genesis/chainstate-test.txt.sha256 b/stx-genesis/chainstate-test.txt.sha256 index 56782ae494..69ac95c254 100644 --- a/stx-genesis/chainstate-test.txt.sha256 +++ b/stx-genesis/chainstate-test.txt.sha256 @@ -1 +1 @@ -014402b47d53b0716402c172fa746adf308b03a826ebea91944a5eb6a304a823 \ No newline at end of file +088c3caea982a8f6f74dda48ec5f06f51f7605def9760a971b1acd763ee6b7cf \ No newline at end of file diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 14382a059a..2f71838adb 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,8 @@ use 
clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; @@ -60,6 +62,8 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +#[cfg(any(test, feature = "testing"))] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -69,6 +73,12 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; +#[cfg(any(test, feature = "testing"))] +lazy_static! { + /// Do not announce a signed/mined block to the network when set to true. + pub static ref TEST_SKIP_BLOCK_ANNOUNCEMENT: TestFlag = TestFlag::default(); +} + #[derive(Debug, Clone)] struct EventObserver { /// Path to the database where pending payloads are stored. 
If `None`, then @@ -1298,6 +1308,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1694,6 +1709,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if TEST_SKIP_BLOCK_ANNOUNCEMENT.get() { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 00c21ec003..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -35,16 +35,16 @@ use stacks::types::PublicKey; use stacks::util::get_epoch_time_secs; use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::MessageSignature; +#[cfg(test)] +use stacks_common::util::tests::TestFlag; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -#[cfg(test)] -use crate::neon::TestFlag; #[cfg(test)] /// Fault injection flag to prevent the miner from seeing enough signer signatures. /// Used to test that the signers will broadcast a block if it gets enough signatures -pub static TEST_IGNORE_SIGNERS: LazyLock = LazyLock::new(TestFlag::default); +pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFlag::default); /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? 
diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 16f5a12b2d..335fb325d8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -319,6 +319,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index b2171b4e8b..4ecc84b73b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,8 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +#[cfg(test)] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -94,30 +96,6 @@ impl std::ops::Deref for RunLoopCounter { } } -#[cfg(test)] -#[derive(Clone)] -pub struct TestFlag(pub Arc>>); - -#[cfg(test)] -impl Default for TestFlag { - fn default() -> Self { - Self(Arc::new(std::sync::Mutex::new(None))) - } -} - -#[cfg(test)] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: bool) { - *self.0.lock().unwrap() = Some(value); - } - - /// Get the test flag value. Defaults to false if the flag is not set. 
- pub fn get(&self) -> bool { - self.0.lock().unwrap().unwrap_or(false) - } -} - #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -135,7 +113,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] - pub naka_skip_commit_op: TestFlag, + pub naka_skip_commit_op: TestFlag, } impl Counters { @@ -637,6 +615,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node @@ -1168,19 +1147,8 @@ impl RunLoop { let mut sortition_db_height = rc_aligned_height; let mut burnchain_height = sortition_db_height; - let mut num_sortitions_in_last_cycle = 1; // prepare to fetch the first reward cycle! - let mut target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(burnchain_height) - .expect("BUG: block height is not in a reward cycle") - + 1, - ), - burnchain.get_headers_height() - 1, - ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1208,17 +1176,13 @@ impl RunLoop { let remote_chain_height = burnchain.get_headers_height() - 1; - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // wait until it's okay to process the next reward cycle's sortitions - let ibd = match self.get_pox_watchdog().pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions. 
+ let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, remote_chain_height, - num_sortitions_in_last_cycle, ) { - Ok(ibd) => ibd, + Ok(x) => x, Err(e) => { debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; @@ -1232,9 +1196,6 @@ impl RunLoop { 0.0 }; - // will recalculate this in the following loop - num_sortitions_in_last_cycle = 0; - // Download each burnchain block and process their sortitions. This, in turn, will // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and // process them. This loop runs for one reward cycle, so that the next pass of the @@ -1282,8 +1243,6 @@ impl RunLoop { "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); - let mut sort_count = 0; - debug!("Runloop: block mining until we process all sortitions"); signal_mining_blocked(globals.get_miner_status()); @@ -1301,9 +1260,6 @@ impl RunLoop { "Failed to find block in fork processed by burnchain indexer", ) }; - if block.sortition { - sort_count += 1; - } let sortition_id = &block.sortition_id; @@ -1350,9 +1306,8 @@ impl RunLoop { debug!("Runloop: enable miner after processing sortitions"); signal_mining_ready(globals.get_miner_status()); - num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})" ); sortition_db_height = next_sortition_height; @@ -1371,22 +1326,6 @@ impl RunLoop { } } - // advance one reward cycle at a time. - // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
- // Otherwise, this is burnchain_tip + reward_cycle_len - let next_target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: burnchain height before system start") - + 1, - ), - remote_chain_height, - ); - - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); - target_burnchain_block_height = next_target_burnchain_block_height; - if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 395d829c8f..488234d21d 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -1,20 +1,28 @@ -use std::collections::VecDeque; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use stacks::burnchains::{Burnchain, Error as burnchain_error}; -use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::burnchains::BurnchainTip; use crate::Config; -// amount of time to wait for an inv or download sync to complete. -// These _really should_ complete before the PoX sync watchdog permits processing the next reward -// cycle, so this number is intentionally high (like, there's something really wrong with your -// network if your node is actualy waiting a day in-between reward cycles). -const SYNC_WAIT_SECS: u64 = 24 * 3600; - #[derive(Clone)] pub struct PoxSyncWatchdogComms { /// how many passes in the p2p state machine have taken place since startup? @@ -56,22 +64,6 @@ impl PoxSyncWatchdogComms { self.last_ibd.load(Ordering::SeqCst) } - /// Wait for at least one inv-sync state-machine passes - pub fn wait_for_inv_sync_pass(&self, timeout: u64) -> Result { - let current = self.get_inv_sync_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_inv_sync_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one inv-sync pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { let deadline = secs + get_epoch_time_secs(); while get_epoch_time_secs() < deadline { @@ -83,21 +75,6 @@ impl PoxSyncWatchdogComms { Ok(()) } - pub fn wait_for_download_pass(&self, timeout: u64) -> Result { - let current = self.get_download_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_download_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one download pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - 
Ok(true) - } - pub fn should_keep_running(&self) -> bool { self.should_keep_running.load(Ordering::SeqCst) } @@ -124,82 +101,25 @@ impl PoxSyncWatchdogComms { /// unless it's reasonably sure that it has processed all Stacks blocks for this reward cycle. /// This struct monitors the Stacks chainstate to make this determination. pub struct PoxSyncWatchdog { - /// number of attachable but unprocessed staging blocks over time - new_attachable_blocks: VecDeque, - /// number of newly-processed staging blocks over time - new_processed_blocks: VecDeque, - /// last time we asked for attachable blocks - last_attachable_query: u64, - /// last time we asked for processed blocks - last_processed_query: u64, - /// number of samples to take - max_samples: u64, - /// maximum number of blocks to count per query (affects performance!) - max_staging: u64, - /// when did we first start watching? - watch_start_ts: u64, - /// when did we first see a flatline in block-processing rate? - last_block_processed_ts: u64, - /// estimated time for a block to get downloaded. Used to infer how long to wait for the first - /// blocks to show up when waiting for this reward cycle. - estimated_block_download_time: f64, - /// estimated time for a block to get processed -- from when it shows up as attachable to when - /// it shows up as processed. Used to infer how long to wait for the last block to get - /// processed before unblocking burnchain sync for the next reward cycle. - estimated_block_process_time: f64, - /// time between burnchain syncs in stead state + /// time between burnchain syncs in steady state steady_state_burnchain_sync_interval: u64, - /// when to re-sync under steady state - steady_state_resync_ts: u64, - /// chainstate handle - chainstate: StacksChainState, /// handle to relayer thread that informs the watchdog when the P2P state-machine does stuff relayer_comms: PoxSyncWatchdogComms, /// should this sync watchdog always download? used in integration tests. 
unconditionally_download: bool, } -const PER_SAMPLE_WAIT_MS: u64 = 1000; - impl PoxSyncWatchdog { pub fn new( config: &Config, watchdog_comms: PoxSyncWatchdogComms, ) -> Result { - let mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let chainstate_path = config.get_chainstate_path_str(); let burnchain_poll_time = config.burnchain.poll_time_secs; - let download_timeout = config.connection_options.timeout; - let max_samples = config.node.pox_sync_sample_secs; let unconditionally_download = config.node.pox_sync_sample_secs == 0; - let marf_opts = config.node.get_marf_opts(); - - let (chainstate, _) = - match StacksChainState::open(mainnet, chain_id, &chainstate_path, Some(marf_opts)) { - Ok(cs) => cs, - Err(e) => { - return Err(format!( - "Failed to open chainstate at '{chainstate_path}': {e:?}" - )); - } - }; Ok(PoxSyncWatchdog { unconditionally_download, - new_attachable_blocks: VecDeque::new(), - new_processed_blocks: VecDeque::new(), - last_attachable_query: 0, - last_processed_query: 0, - max_samples, - max_staging: 10, - watch_start_ts: 0, - last_block_processed_ts: 0, - estimated_block_download_time: download_timeout as f64, - estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, - steady_state_resync_ts: 0, - chainstate, relayer_comms: watchdog_comms, }) } @@ -208,39 +128,9 @@ impl PoxSyncWatchdog { self.relayer_comms.clone() } - /// How many recently-added Stacks blocks are in an attachable state, up to $max_staging? 
- fn count_attachable_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_attachable_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_attachable_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_attachable_query = get_epoch_time_secs(); - Ok(cnt) - } - - /// How many recently-processed Stacks blocks are there, up to $max_staging? - /// ($max_staging is necessary to limit the runtime of this method, since the underlying SQL - /// uses COUNT(*), which in Sqlite is a _O(n)_ operation for _n_ rows) - fn count_processed_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_processed_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_processed_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_processed_query = get_epoch_time_secs(); - Ok(cnt) - } - /// Are we in the initial burnchain block download? i.e. is the burn tip snapshot far enough away /// from the burnchain height that we should be eagerly downloading snapshots? - pub fn infer_initial_burnchain_block_download( + fn infer_initial_burnchain_block_download( burnchain: &Burnchain, last_processed_height: u64, burnchain_height: u64, @@ -261,182 +151,23 @@ impl PoxSyncWatchdog { ibd } - /// Calculate the first derivative of a list of points - fn derivative(sample_list: &VecDeque) -> Vec { - let mut deltas = vec![]; - let mut prev = 0; - for (i, sample) in sample_list.iter().enumerate() { - if i == 0 { - prev = *sample; - continue; - } - let delta = *sample - prev; - prev = *sample; - deltas.push(delta); - } - deltas - } - - /// Is a derivative approximately flat, with a maximum absolute deviation from 0? 
- /// Return whether or not the sample is mostly flat, and how many points were over the given - /// error bar in either direction. - fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { - let mut total_deviates = 0; - let mut ret = true; - for d in deriv.iter() { - if d.abs() > error { - total_deviates += 1; - ret = false; - } - } - (ret, total_deviates) - } - - /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &[i64]) -> f64 { - // take average with low and high pass - let mut min = i64::MAX; - let mut max = i64::MIN; - for s in samples.iter() { - if *s < 0 { - // nonsensical result (e.g. due to clock drift?) - continue; - } - if *s < min { - min = *s; - } - if *s > max { - max = *s; - } - } - - let mut count = 0; - let mut sum = 0; - for s in samples.iter() { - if *s < 0 { - // nonsensical result - continue; - } - if *s == min { - continue; - } - if *s == max { - continue; - } - count += 1; - sum += *s; - } - - if count == 0 { - // no viable samples - 1.0 - } else { - (sum as f64) / (count as f64) - } - } - - /// estimate how long a block remains in an unprocessed state - fn estimate_block_process_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_wait_times = - StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) - .expect("BUG: failed to query chainstate block-processing times"); - - 
PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) - } - - /// estimate how long a block takes to download - fn estimate_block_download_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_download_times = StacksChainState::measure_block_download_time( - chainstate.db(), - start_height, - end_height, - ) - .expect("BUG: failed to query chainstate block-download times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_download_times) - } - - /// Reset internal state. Performed when it's okay to begin syncing the burnchain. - /// Updates estimate for block-processing time and block-downloading time. - fn reset(&mut self, burnchain: &Burnchain, tip_height: u64) { - // find the average (with low/high pass filter) time a block spends in the DB without being - // processed, during this reward cycle - self.estimated_block_process_time = - PoxSyncWatchdog::estimate_block_process_time(&self.chainstate, burnchain, tip_height); - - // find the average (with low/high pass filter) time a block spends downloading - self.estimated_block_download_time = - PoxSyncWatchdog::estimate_block_download_time(&self.chainstate, burnchain, tip_height); - - debug!( - "Estimated block download time: {}s. 
Estimated block processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time - ); - - self.new_attachable_blocks.clear(); - self.new_processed_blocks.clear(); - self.last_block_processed_ts = 0; - self.watch_start_ts = 0; - self.steady_state_resync_ts = 0; - } - - /// Wait until all of the Stacks blocks for the given reward cycle are seemingly downloaded and - /// processed. Do so by watching the _rate_ at which attachable Stacks blocks arrive and get - /// processed. - /// Returns whether or not we're still in the initial block download -- i.e. true if we're - /// still downloading burnchain blocks, or we haven't reached steady-state block-processing. + /// Wait until the next PoX anchor block arrives. + /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and + /// testnet. + /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, burnchain: &Burnchain, burnchain_tip: &BurnchainTip, // this is the highest burnchain snapshot we've sync'ed to burnchain_height: u64, // this is the absolute burnchain block height - num_sortitions_in_last_cycle: u64, - ) -> Result { - if self.watch_start_ts == 0 { - self.watch_start_ts = get_epoch_time_secs(); - } - if self.steady_state_resync_ts == 0 { - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - } + ) -> Result<(bool, u64), burnchain_error> { + let burnchain_rc = burnchain + .block_height_to_reward_cycle(burnchain_height) + .expect("FATAL: burnchain height is before system start"); + + let sortition_rc = burnchain + .block_height_to_reward_cycle(burnchain_tip.block_snapshot.block_height) + .expect("FATAL: sortition height is before system start"); let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( burnchain, @@ -444,220 +175,23 @@ impl PoxSyncWatchdog { burnchain_height, ); - // unconditionally download the first reward cycle - if 
burnchain_tip.block_snapshot.block_height - < burnchain.first_block_height + (burnchain.pox_constants.reward_cycle_length as u64) - { - debug!("PoX watchdog in first reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); + let max_sync_height = if sortition_rc < burnchain_rc { + burnchain + .reward_cycle_to_block_height(sortition_rc + 1) + .min(burnchain_height) + } else { + burnchain_tip + .block_snapshot + .block_height + .max(burnchain_height) + }; + self.relayer_comms.set_ibd(ibbd); + if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - - return Ok(ibbd); - } - - if self.unconditionally_download { - debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - - let mut waited = false; - if ibbd { - // we are far behind the burnchain tip (i.e. not in the last reward cycle), - // so make sure the downloader knows about blocks it doesn't have yet so we can go and - // fetch its blocks before proceeding. 
- if num_sortitions_in_last_cycle > 0 { - debug!("PoX watchdog: Wait for at least one inventory state-machine pass..."); - self.relayer_comms.wait_for_inv_sync_pass(SYNC_WAIT_SECS)?; - waited = true; - } else { - debug!("PoX watchdog: In initial block download, and no sortitions to consider in this reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - } else { - debug!("PoX watchdog: not in initial burn block download, so not waiting for an inventory state-machine pass"); } - if burnchain_tip.block_snapshot.block_height - + (burnchain.pox_constants.reward_cycle_length as u64) - >= burnchain_height - { - // unconditionally download if we're within the last reward cycle (after the poll timeout) - if !waited { - debug!( - "PoX watchdog in last reward cycle -- sync after {} seconds", - self.steady_state_burnchain_sync_interval - ); - self.relayer_comms.set_ibd(ibbd); - - self.relayer_comms - .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - } else { - debug!("PoX watchdog in last reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - } - return Ok(ibbd); - } - - // have we reached steady-state behavior? i.e. have we stopped processing both burnchain - // and Stacks blocks? 
- let mut steady_state = false; - debug!("PoX watchdog: Wait until chainstate reaches steady-state block-processing..."); - - let ibbd = loop { - if !self.relayer_comms.should_keep_running() { - break false; - } - let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( - burnchain, - burnchain_tip.block_snapshot.block_height, - burnchain_height, - ); - - let expected_first_block_deadline = - self.watch_start_ts + (self.estimated_block_download_time as u64); - let expected_last_block_deadline = self.last_block_processed_ts - + (self.estimated_block_download_time as u64) - + (self.estimated_block_process_time as u64); - - match ( - self.count_attachable_stacks_blocks(), - self.count_processed_stacks_blocks(), - ) { - (Ok(num_available), Ok(num_processed)) => { - self.new_attachable_blocks.push_back(num_available as i64); - self.new_processed_blocks.push_back(num_processed as i64); - - if (self.new_attachable_blocks.len() as u64) > self.max_samples { - self.new_attachable_blocks.pop_front(); - } - if (self.new_processed_blocks.len() as u64) > self.max_samples { - self.new_processed_blocks.pop_front(); - } - - if (self.new_attachable_blocks.len() as u64) < self.max_samples - || (self.new_processed_blocks.len() as u64) < self.max_samples - { - // still getting initial samples - if self.new_processed_blocks.len() % 10 == 0 { - debug!( - "PoX watchdog: Still warming up: {} out of {} samples...", - &self.new_attachable_blocks.len(), - &self.max_samples - ); - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && get_epoch_time_secs() < expected_first_block_deadline - { - // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && 
(self.new_attachable_blocks.len() as u64) < self.max_samples - && self.watch_start_ts - + self.max_samples - + self.steady_state_burnchain_sync_interval - * (burnchain.stable_confirmations as u64) - < get_epoch_time_secs() - { - debug!( - "PoX watchdog: could not calculate {} samples in {} seconds. Assuming suspend/resume, or assuming load is too high.", - self.max_samples, - self.max_samples + self.steady_state_burnchain_sync_interval * (burnchain.stable_confirmations as u64) - ); - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - - self.watch_start_ts = get_epoch_time_secs(); - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - continue; - } - - // take first derivative of samples -- see if the download and processing rate has gone to 0 - let attachable_delta = PoxSyncWatchdog::derivative(&self.new_attachable_blocks); - let processed_delta = PoxSyncWatchdog::derivative(&self.new_processed_blocks); - - let (flat_attachable, attachable_deviants) = - PoxSyncWatchdog::is_mostly_flat(&attachable_delta, 0); - let (flat_processed, processed_deviants) = - PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - - debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time); - - if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { - // we're flat-lining -- this may be the end of this cycle - self.last_block_processed_ts = get_epoch_time_secs(); - } - - if self.last_block_processed_ts > 0 - && get_epoch_time_secs() < expected_last_block_deadline - { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, self.estimated_block_process_time); - 
sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if ibbd { - // doing initial burnchain block download right now. - // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor - // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); - - if !flat_attachable || !flat_processed { - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - } else { - let now = get_epoch_time_secs(); - if now < self.steady_state_resync_ts { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } else { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state, but ready burnchain synchronization as of {}", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - } - } - } - (err_attach, err_processed) => { - // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); - panic!(); - } - }; - - if ibbd || !steady_state { - debug!("PoX watchdog: Wait for at least one downloader state-machine pass before resetting..."); - self.relayer_comms.wait_for_download_pass(SYNC_WAIT_SECS)?; - } else { - debug!("PoX watchdog: in steady-state, so not waiting for download pass"); - } - - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - break ibbd; - }; - - let ret = ibbd || !steady_state; - self.relayer_comms.set_ibd(ret); - Ok(ret) + Ok((ibbd, max_sync_height)) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 
59a95576bc..13923a847a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3404,7 +3404,7 @@ fn vote_for_aggregate_key_burn_op() { /// This test boots a follower node using the block downloader #[test] #[ignore] -fn follower_bootup() { +fn follower_bootup_simple() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -9422,6 +9422,178 @@ fn v3_blockbyheight_api_endpoint() { run_loop_thread.join().unwrap(); } +/// Verify that lockup events are attached to a phantom tx receipt +/// if the block does not have a coinbase tx +#[test] +#[ignore] +fn nakamoto_lockup_events() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::new(); + let signer_addr = tests::to_addr(&signer_sk); + let _signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * 100, + ); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // only subscribe to the block proposal events + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut 
btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + blind_signer(&conf, &signers, proposals_submitted); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + info!("------------------------- Setup finished, run test -------------------------"); + + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let get_stacks_height = || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + tip.stacks_block_height + }; + let initial_block_height = get_stacks_height(); + + // This matches the data in `stx-genesis/chainstate-test.txt` + // Recipient: ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D + let unlock_recipient = "ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D"; + let 
unlock_height = 35_u64; + let interims_to_mine = unlock_height - initial_block_height; + + info!( + "----- Mining to unlock height -----"; + "unlock_height" => unlock_height, + "initial_height" => initial_block_height, + "interims_to_mine" => interims_to_mine, + ); + + // submit a tx so that the miner will mine an extra stacks block + let mut sender_nonce = 0; + + for _ in 0..interims_to_mine { + let height_before = get_stacks_height(); + info!("----- Mining interim block -----"; + "height" => %height_before, + "nonce" => %sender_nonce, + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + + wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap(); + } + + let blocks = test_observer::get_blocks(); + let block = blocks.last().unwrap(); + assert_eq!( + block.get("block_height").unwrap().as_u64().unwrap(), + unlock_height + ); + + let events = block.get("events").unwrap().as_array().unwrap(); + let mut found_event = false; + for event in events { + let mint_event = event.get("stx_mint_event"); + if mint_event.is_some() { + found_event = true; + let mint_event = mint_event.unwrap(); + let recipient = mint_event.get("recipient").unwrap().as_str().unwrap(); + assert_eq!(recipient, unlock_recipient); + let amount = mint_event.get("amount").unwrap().as_str().unwrap(); + assert_eq!(amount, "12345678"); + let txid = event.get("txid").unwrap().as_str().unwrap(); + assert_eq!( + txid, + "0x63dd5773338782755e4947a05a336539137dfe13b19a0eac5154306850aca8ef" + ); + } + } + assert!(found_event); + + info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon 
node. diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 07b69e14f9..432b990667 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -56,13 +56,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::tests::TestFlag; use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::neon::{Counters, RunLoopCounter, TestFlag}; +use crate::neon::{Counters, RunLoopCounter}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -90,7 +91,7 @@ pub struct RunningNodes { pub nakamoto_blocks_mined: RunLoopCounter, pub nakamoto_blocks_rejected: RunLoopCounter, pub nakamoto_blocks_signer_pushed: RunLoopCounter, - pub nakamoto_test_skip_commit_op: TestFlag, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4002899cf1..5200883667 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -60,7 +60,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{ +use stacks_signer::v0::tests::{ 
TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; @@ -69,7 +69,7 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; @@ -386,7 +386,7 @@ impl SignerTest { } } - /// Propose an invalid block to the signers + /// Propose a block to the signers fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = @@ -396,6 +396,7 @@ impl SignerTest { .btc_regtest_controller .get_headers_height(); let reward_cycle = self.get_current_reward_cycle(); + let signer_signature_hash = block.header.signer_signature_hash(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ -412,7 +413,7 @@ impl SignerTest { let mut version = 0; let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; let start = Instant::now(); - debug!("Proposing invalid block to signers"); + debug!("Proposing block to signers: {signer_signature_hash}"); while !accepted { let mut chunk = StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); @@ -952,7 +953,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); }, |config| { config.miner.tenure_cost_limit_per_block_percentage = None; @@ -2416,10 +2417,7 @@ fn retry_on_rejection() { .map(StacksPublicKey::from_private) .take(num_signers) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + 
TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); let proposals_before = signer_test .running_nodes @@ -2466,10 +2464,7 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(vec![]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); loop { let blocks_mined = signer_test .running_nodes @@ -5361,10 +5356,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .cloned() .take(num_signers / 2 + num_signers % 2) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // Make a new stacks transaction to create a different block signature, but make sure to propose it // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted @@ -5397,10 +5389,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); let transfer_tx = make_stacks_transfer( &sender_sk, @@ -5556,10 +5545,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .cloned() .take(num_signers * 3 / 10) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -5624,10 +5610,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let 
blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer( @@ -5783,10 +5766,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5864,10 +5844,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new()); wait_for(short_timeout, || { let info_after = signer_test .stacks_client @@ -6010,10 +5987,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -6232,7 +6206,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Forcing miner to ignore block responses for block N+1"); TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_PAUSE_BLOCK_BROADCAST.set(true); test_observer::clear(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -6295,7 +6269,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(signatures.len() == num_signers) + Ok(signatures.len() 
>= num_signers * 7 / 10) }) .expect("Test timed out while waiting for signers signatures for first block proposal"); let block = block.unwrap(); @@ -6359,7 +6333,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + TEST_PAUSE_BLOCK_BROADCAST.set(false); // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); @@ -6385,7 +6359,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(block_rejections.len() == num_signers) + Ok(block_rejections.len() >= num_signers * 7 / 10) }) .expect("FAIL: Timed out waiting for block proposal rejections"); @@ -6704,10 +6678,7 @@ fn continue_after_fast_block_no_sortition() { // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(ignoring_signers.clone()); info!("------------------------- Submit Miner 2 Block Commit -------------------------"); let rejections_before = signer_test @@ -6821,10 +6792,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); info!("------------------------- Wait for Miner B's Block N -------------------------"); // wait for the new block to be processed @@ -7483,13 +7451,12 @@ fn block_commit_delay() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let block_proposal_timeout = Duration::from_secs(20); 
let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], |config| { // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.block_proposal_timeout = Duration::from_secs(600); }, |config| { // Set the block commit delay to 10 minutes to ensure no block commit is sent @@ -7529,10 +7496,7 @@ fn block_commit_delay() { .iter() .map(StacksPublicKey::from_private) .collect::>(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(all_signers); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; @@ -7567,10 +7531,7 @@ fn block_commit_delay() { .load(Ordering::SeqCst); info!("------------------------- Resume Signing -------------------------"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // Wait for a block to be mined wait_for(60, || { @@ -9310,3 +9271,260 @@ fn block_proposal_max_age_rejections() { info!("------------------------- Test Shutdown-------------------------"); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that signers do not mark a block as globally accepted if it was not announced by the node. +/// This will simulate this case via testing flags, and ensure that a block can be reorged across tenure +/// boundaries now (as it is only marked locally accepted and no longer gets marked globally accepted +/// by simply seeing the threshold number of signatures). +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// 1. The node mines 1 stacks block N (all signers sign it). +/// 2. 
<30% of signers are configured to auto reject any block proposals, broadcast of new blocks are skipped, and miners are configured to ignore signers responses. +/// 3. The node mines 1 stacks block N+1 (all signers sign it, but one which rejects it) but eventually all mark the block as locally accepted. +/// 4. A new tenure starts and the miner attempts to mine a new sister block N+1' (as it does not see the threshold number of signatures or any block push from signers). +/// 5. The signers accept this sister block as a valid reorg and the node advances to block N+1'. +/// +/// Test Assertion: +/// - All signers accepted block N. +/// - Less than 30% of the signers rejected block N+1. +/// - All signers accept block N+1' as a valid reorg. +/// - The node advances to block N+1' +fn global_acceptance_depends_on_block_announcement() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 4; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + 
signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + test_observer::clear(); + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is accepted by the node, but not announced. 
+ let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(true); + TEST_IGNORE_SIGNERS.set(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+1"); + + let mut proposed_block = None; + let start_time = Instant::now(); + while proposed_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + proposed_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + + // Even though one of the signers rejected the block, it will eventually accept the block as it sees the 70% threshold of signatures + signer_test + .wait_for_block_acceptance( + short_timeout, + &proposed_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1 by all signers"); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + + 
TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_IGNORE_SIGNERS.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + test_observer::clear(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }, + ) + .unwrap(); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut sister_block = None; + let start_time = Instant::now(); + while sister_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + sister_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_after.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let sister_block = sister_block.expect("Failed to find proposed sister block within 30s"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &sister_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1' by all signers"); + + // Assert the block was mined and the tip has changed. 
+ assert_eq!( + info_after.stacks_tip_height, + sister_block.header.chain_length + ); + assert_eq!(info_after.stacks_tip, sister_block.header.block_hash()); + assert_eq!( + info_after.stacks_tip_consensus_hash, + sister_block.header.consensus_hash + ); + assert_eq!( + sister_block.header.chain_length, + proposed_block.header.chain_length + ); + assert_ne!(sister_block, proposed_block); +}