From bd3bcd60147e8ed6aa9c1769f2608ed4ebd17028 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 11 Sep 2025 11:40:44 -0700 Subject: [PATCH 01/56] WIP: just see if this breaks replay blocks Signed-off-by: Jacinta Ferrant --- clarity-serialization/src/errors/analysis.rs | 645 +++++++++++++++++ clarity/src/vm/analysis/mod.rs | 15 +- clarity/src/vm/ast/mod.rs | 178 +---- clarity/src/vm/clarity.rs | 6 +- clarity/src/vm/contexts.rs | 55 +- clarity/src/vm/database/clarity_db.rs | 7 - clarity/src/vm/docs/contracts.rs | 5 +- clarity/src/vm/docs/mod.rs | 22 +- clarity/src/vm/mod.rs | 27 +- clarity/src/vm/test_util/mod.rs | 20 +- clarity/src/vm/tests/assets.rs | 78 +- clarity/src/vm/tests/contracts.rs | 39 +- clarity/src/vm/tests/principals.rs | 55 -- clarity/src/vm/tests/simple_apply_eval.rs | 11 +- clarity/src/vm/tests/traits.rs | 82 --- clarity/src/vm/tests/variables.rs | 6 +- clarity/src/vm/tooling/mod.rs | 15 +- pox-locking/src/events.rs | 35 +- pox-locking/src/events_24.rs | 35 +- stacks-node/src/nakamoto_node.rs | 11 - stacks-node/src/neon_node.rs | 75 +- .../src/tests/nakamoto_integrations.rs | 2 - stacks-node/src/tests/neon_integrations.rs | 66 +- stackslib/src/chainstate/burn/db/sortdb.rs | 113 --- stackslib/src/chainstate/coordinator/tests.rs | 10 +- .../chainstate/nakamoto/coordinator/mod.rs | 2 - stackslib/src/chainstate/nakamoto/miner.rs | 5 - stackslib/src/chainstate/nakamoto/mod.rs | 33 +- stackslib/src/chainstate/nakamoto/shadow.rs | 3 - .../src/chainstate/nakamoto/tests/node.rs | 3 - .../chainstate/stacks/boot/contract_tests.rs | 24 +- stackslib/src/chainstate/stacks/boot/mod.rs | 8 +- stackslib/src/chainstate/stacks/db/blocks.rs | 24 +- stackslib/src/chainstate/stacks/db/mod.rs | 4 - .../src/chainstate/stacks/db/transactions.rs | 674 ++++-------------- .../src/chainstate/stacks/db/unconfirmed.rs | 3 - stackslib/src/chainstate/stacks/miner.rs | 99 +-- .../stacks/tests/chain_histories.rs | 78 +- stackslib/src/clarity_cli.rs | 43 +- 
stackslib/src/clarity_vm/clarity.rs | 65 +- stackslib/src/clarity_vm/database/mod.rs | 7 - .../src/clarity_vm/tests/analysis_costs.rs | 32 +- stackslib/src/clarity_vm/tests/contracts.rs | 143 +--- stackslib/src/clarity_vm/tests/costs.rs | 41 +- stackslib/src/clarity_vm/tests/events.rs | 3 +- stackslib/src/clarity_vm/tests/forking.rs | 17 +- .../src/clarity_vm/tests/large_contract.rs | 77 +- .../src/clarity_vm/tests/simple_tests.rs | 3 +- stackslib/src/net/api/postblock_proposal.rs | 5 - stackslib/src/net/api/postmicroblock.rs | 14 +- stackslib/src/net/api/posttransaction.rs | 6 +- .../src/net/api/tests/postblock_proposal.rs | 3 - stackslib/src/net/mod.rs | 2 - stackslib/src/net/p2p.rs | 6 - stackslib/src/net/relay.rs | 176 ++--- stackslib/src/net/tests/relay/epoch2x.rs | 54 +- .../src/util_lib/signed_structured_data.rs | 8 +- 57 files changed, 1101 insertions(+), 2177 deletions(-) create mode 100644 clarity-serialization/src/errors/analysis.rs diff --git a/clarity-serialization/src/errors/analysis.rs b/clarity-serialization/src/errors/analysis.rs new file mode 100644 index 00000000000..d639442177b --- /dev/null +++ b/clarity-serialization/src/errors/analysis.rs @@ -0,0 +1,645 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{error, fmt}; + +use crate::diagnostic::{DiagnosableError, Diagnostic}; +use crate::errors::CostErrors; +use crate::execution_cost::ExecutionCost; +use crate::representations::SymbolicExpression; +use crate::types::{TraitIdentifier, TupleTypeSignature, TypeSignature, Value}; + +pub type CheckResult = Result; + +/// What kind of syntax binding was found to be in error? +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum SyntaxBindingErrorType { + Let, + Eval, + TupleCons, +} + +impl fmt::Display for SyntaxBindingErrorType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", &self.message()) + } +} + +impl DiagnosableError for SyntaxBindingErrorType { + fn message(&self) -> String { + match &self { + Self::Let => "Let-binding".to_string(), + Self::Eval => "Function argument definition".to_string(), + Self::TupleCons => "Tuple constructor".to_string(), + } + } + + fn suggestion(&self) -> Option { + None + } +} + +/// Syntax binding error types +#[derive(Debug, PartialEq)] +pub enum SyntaxBindingError { + /// binding list item is not a list + NotList(SyntaxBindingErrorType, usize), + /// binding list item has an invalid length (e.g. 
not 2) + InvalidLength(SyntaxBindingErrorType, usize), + /// binding name is not an atom + NotAtom(SyntaxBindingErrorType, usize), +} + +impl fmt::Display for SyntaxBindingError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{self:?}") + } +} + +impl DiagnosableError for SyntaxBindingError { + fn message(&self) -> String { + match &self { + Self::NotList(err_type, item_index) => { + let item_no = item_index + 1; + format!("{err_type} item #{item_no} is not a list",) + } + Self::InvalidLength(err_type, item_index) => { + let item_no = item_index + 1; + format!("{err_type} item #{item_no} is not a two-element list",) + } + Self::NotAtom(err_type, item_index) => { + let item_no = item_index + 1; + format!("{err_type} item #{item_no}'s name is not an atom",) + } + } + } + + fn suggestion(&self) -> Option { + None + } +} + +impl SyntaxBindingError { + /// Helper constructor for NotList(SyntaxBindingErrorType::Let, item_no) + pub fn let_binding_not_list(item_no: usize) -> Self { + Self::NotList(SyntaxBindingErrorType::Let, item_no) + } + + /// Helper constructor for InvalidLength(SyntaxBindingErrorType::Let, item_no) + pub fn let_binding_invalid_length(item_no: usize) -> Self { + Self::InvalidLength(SyntaxBindingErrorType::Let, item_no) + } + + /// Helper constructor for NotAtom(SyntaxBindingErrorType::Let, item_no) + pub fn let_binding_not_atom(item_no: usize) -> Self { + Self::NotAtom(SyntaxBindingErrorType::Let, item_no) + } + + /// Helper constructor for NotList(SyntaxBindingErrorType::Eval, item_no) + pub fn eval_binding_not_list(item_no: usize) -> Self { + Self::NotList(SyntaxBindingErrorType::Eval, item_no) + } + + /// Helper constructor for InvalidLength(SyntaxBindingErrorType::Eval, item_no) + pub fn eval_binding_invalid_length(item_no: usize) -> Self { + Self::InvalidLength(SyntaxBindingErrorType::Eval, item_no) + } + + /// Helper constructor for NotAtom(SyntaxBindingErrorType::Eval, item_no) + pub fn eval_binding_not_atom(item_no: 
usize) -> Self { + Self::NotAtom(SyntaxBindingErrorType::Eval, item_no) + } + + /// Helper constructor for NotList(SyntaxBindingErrorType::TupleCons, item_no) + pub fn tuple_cons_not_list(item_no: usize) -> Self { + Self::NotList(SyntaxBindingErrorType::TupleCons, item_no) + } + + /// Helper constructor for InvalidLength(SyntaxBindingErrorType::TupleCons, item_no) + pub fn tuple_cons_invalid_length(item_no: usize) -> Self { + Self::InvalidLength(SyntaxBindingErrorType::TupleCons, item_no) + } + + /// Helper constructor for NotAtom(SyntaxBindingErrorType::TupleCons, item_no) + pub fn tuple_cons_not_atom(item_no: usize) -> Self { + Self::NotAtom(SyntaxBindingErrorType::TupleCons, item_no) + } +} + +impl From for CheckErrors { + fn from(e: SyntaxBindingError) -> Self { + Self::BadSyntaxBinding(e) + } +} + +#[derive(Debug, PartialEq)] +pub enum CheckErrors { + // cost checker errors + CostOverflow, + CostBalanceExceeded(ExecutionCost, ExecutionCost), + MemoryBalanceExceeded(u64, u64), + CostComputationFailed(String), + + ValueTooLarge, + ValueOutOfBounds, + TypeSignatureTooDeep, + ExpectedName, + SupertypeTooLarge, + + // unexpected interpreter behavior + Expects(String), + + // match errors + BadMatchOptionSyntax(Box), + BadMatchResponseSyntax(Box), + BadMatchInput(TypeSignature), + + // list typing errors + UnknownListConstructionFailure, + ListTypesMustMatch, + ConstructedListTooLarge, + + // simple type expectation mismatch + TypeError(TypeSignature, TypeSignature), + TypeLiteralError(TypeSignature, TypeSignature), + TypeValueError(TypeSignature, Value), + + NoSuperType(TypeSignature, TypeSignature), + InvalidTypeDescription, + UnknownTypeName(String), + + // union type mismatch + UnionTypeError(Vec, TypeSignature), + UnionTypeValueError(Vec, Value), + + ExpectedLiteral, + ExpectedOptionalType(TypeSignature), + ExpectedResponseType(TypeSignature), + ExpectedOptionalOrResponseType(TypeSignature), + ExpectedOptionalValue(Value), + ExpectedResponseValue(Value), + 
ExpectedOptionalOrResponseValue(Value), + CouldNotDetermineResponseOkType, + CouldNotDetermineResponseErrType, + CouldNotDetermineSerializationType, + UncheckedIntermediaryResponses, + + CouldNotDetermineMatchTypes, + CouldNotDetermineType, + + // Checker runtime failures + TypeAlreadyAnnotatedFailure, + TypeAnnotationExpectedFailure, + CheckerImplementationFailure, + + // Assets + BadTokenName, + DefineFTBadSignature, + DefineNFTBadSignature, + NoSuchNFT(String), + NoSuchFT(String), + + BadTransferSTXArguments, + BadTransferFTArguments, + BadTransferNFTArguments, + BadMintFTArguments, + BadBurnFTArguments, + + // tuples + BadTupleFieldName, + ExpectedTuple(TypeSignature), + NoSuchTupleField(String, TupleTypeSignature), + EmptyTuplesNotAllowed, + BadTupleConstruction(String), + + // variables + NoSuchDataVariable(String), + + // data map + BadMapName, + NoSuchMap(String), + + // defines + DefineFunctionBadSignature, + BadFunctionName, + BadMapTypeDefinition, + PublicFunctionMustReturnResponse(TypeSignature), + DefineVariableBadSignature, + ReturnTypesMustMatch(TypeSignature, TypeSignature), + + CircularReference(Vec), + + // contract-call errors + NoSuchContract(String), + NoSuchPublicFunction(String, String), + PublicFunctionNotReadOnly(String, String), + ContractAlreadyExists(String), + ContractCallExpectName, + ExpectedCallableType(TypeSignature), + + // get-block-info? 
errors + NoSuchBlockInfoProperty(String), + NoSuchBurnBlockInfoProperty(String), + NoSuchStacksBlockInfoProperty(String), + NoSuchTenureInfoProperty(String), + GetBlockInfoExpectPropertyName, + GetBurnBlockInfoExpectPropertyName, + GetStacksBlockInfoExpectPropertyName, + GetTenureInfoExpectPropertyName, + + NameAlreadyUsed(String), + ReservedWord(String), + + // expect a function, or applying a function to a list + NonFunctionApplication, + ExpectedListApplication, + ExpectedSequence(TypeSignature), + MaxLengthOverflow, + + // let syntax + BadLetSyntax, + + // generic binding syntax + BadSyntaxBinding(SyntaxBindingError), + + MaxContextDepthReached, + UndefinedFunction(String), + UndefinedVariable(String), + + // argument counts + RequiresAtLeastArguments(usize, usize), + RequiresAtMostArguments(usize, usize), + IncorrectArgumentCount(usize, usize), + IfArmsMustMatch(TypeSignature, TypeSignature), + MatchArmsMustMatch(TypeSignature, TypeSignature), + DefaultTypesMustMatch(TypeSignature, TypeSignature), + TooManyExpressions, + IllegalOrUnknownFunctionApplication(String), + UnknownFunction(String), + + // traits + NoSuchTrait(String, String), + TraitReferenceUnknown(String), + TraitMethodUnknown(String, String), + ExpectedTraitIdentifier, + ImportTraitBadSignature, + TraitReferenceNotAllowed, + BadTraitImplementation(String, String), + DefineTraitBadSignature, + DefineTraitDuplicateMethod(String), + UnexpectedTraitOrFieldReference, + TraitBasedContractCallInReadOnly, + ContractOfExpectsTrait, + IncompatibleTrait(TraitIdentifier, TraitIdentifier), + + // strings + InvalidCharactersDetected, + InvalidUTF8Encoding, + + // secp256k1 signature + InvalidSecp65k1Signature, + + WriteAttemptedInReadOnly, + AtBlockClosureMustBeReadOnly, + + // time checker errors + ExecutionTimeExpired, +} + +#[derive(Debug, PartialEq)] +pub struct CheckError { + pub err: CheckErrors, + pub expressions: Option>, + pub diagnostic: Diagnostic, +} + +impl CheckErrors { + /// Does this check error 
indicate that the transaction should be + /// rejected? + pub fn rejectable(&self) -> bool { + matches!( + self, + CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) + ) + } +} + +impl CheckError { + pub fn new(err: CheckErrors) -> CheckError { + let diagnostic = Diagnostic::err(&err); + CheckError { + err, + expressions: None, + diagnostic, + } + } + + pub fn has_expression(&self) -> bool { + self.expressions.is_some() + } + + pub fn set_expression(&mut self, expr: &SymbolicExpression) { + self.diagnostic.spans = vec![expr.span().clone()]; + self.expressions.replace(vec![expr.clone()]); + } + + pub fn set_expressions(&mut self, exprs: &[SymbolicExpression]) { + self.diagnostic.spans = exprs.iter().map(|e| e.span().clone()).collect(); + self.expressions.replace(exprs.to_vec()); + } + + pub fn with_expression(err: CheckErrors, expr: &SymbolicExpression) -> Self { + let mut r = Self::new(err); + r.set_expression(expr); + r + } +} + +impl From<(SyntaxBindingError, &SymbolicExpression)> for CheckError { + fn from(e: (SyntaxBindingError, &SymbolicExpression)) -> Self { + Self::with_expression(CheckErrors::BadSyntaxBinding(e.0), e.1) + } +} + +impl From<(CheckErrors, &SymbolicExpression)> for CheckError { + fn from(e: (CheckErrors, &SymbolicExpression)) -> Self { + let mut ce = Self::new(e.0); + ce.set_expression(e.1); + ce + } +} + +impl From<(CheckErrors, &SymbolicExpression)> for CheckErrors { + fn from(e: (CheckErrors, &SymbolicExpression)) -> Self { + e.0 + } +} + +impl fmt::Display for CheckErrors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{self:?}") + } +} + +impl fmt::Display for CheckError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.err)?; + + if let Some(ref e) = self.expressions { + write!(f, "\nNear:\n{e:?}")?; + } + + Ok(()) + } +} + +impl From for CheckError { + fn from(err: CostErrors) -> Self { + CheckError::from(CheckErrors::from(err)) + } +} + +impl From for CheckErrors { + fn 
from(err: CostErrors) -> Self { + match err { + CostErrors::CostOverflow => CheckErrors::CostOverflow, + CostErrors::CostBalanceExceeded(a, b) => CheckErrors::CostBalanceExceeded(a, b), + CostErrors::MemoryBalanceExceeded(a, b) => CheckErrors::MemoryBalanceExceeded(a, b), + CostErrors::CostComputationFailed(s) => CheckErrors::CostComputationFailed(s), + CostErrors::CostContractLoadFailure => { + CheckErrors::CostComputationFailed("Failed to load cost contract".into()) + } + CostErrors::InterpreterFailure => { + CheckErrors::Expects("Unexpected interpreter failure in cost computation".into()) + } + CostErrors::Expect(s) => CheckErrors::Expects(s), + CostErrors::ExecutionTimeExpired => CheckErrors::ExecutionTimeExpired, + } + } +} + +impl error::Error for CheckError { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + None + } +} + +impl error::Error for CheckErrors { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + None + } +} + +impl From for CheckError { + fn from(err: CheckErrors) -> Self { + CheckError::new(err) + } +} + +#[cfg(any(test, feature = "testing"))] +impl From for String { + fn from(o: CheckErrors) -> Self { + o.to_string() + } +} + +#[allow(clippy::result_large_err)] +pub fn check_argument_count(expected: usize, args: &[T]) -> Result<(), CheckErrors> { + if args.len() != expected { + Err(CheckErrors::IncorrectArgumentCount(expected, args.len())) + } else { + Ok(()) + } +} + +#[allow(clippy::result_large_err)] +pub fn check_arguments_at_least(expected: usize, args: &[T]) -> Result<(), CheckErrors> { + if args.len() < expected { + Err(CheckErrors::RequiresAtLeastArguments(expected, args.len())) + } else { + Ok(()) + } +} + +#[allow(clippy::result_large_err)] +pub fn check_arguments_at_most(expected: usize, args: &[T]) -> Result<(), CheckErrors> { + if args.len() > expected { + Err(CheckErrors::RequiresAtMostArguments(expected, args.len())) + } else { + Ok(()) + } +} + +fn formatted_expected_types(expected_types: 
&[TypeSignature]) -> String { + let mut expected_types_joined = format!("'{}'", expected_types[0]); + + if expected_types.len() > 2 { + for expected_type in expected_types[1..expected_types.len() - 1].iter() { + expected_types_joined.push_str(&format!(", '{expected_type}'")); + } + } + expected_types_joined.push_str(&format!( + " or '{}'", + expected_types[expected_types.len() - 1] + )); + expected_types_joined +} + +impl DiagnosableError for CheckErrors { + fn message(&self) -> String { + match &self { + CheckErrors::ExpectedLiteral => "expected a literal argument".into(), + CheckErrors::SupertypeTooLarge => "supertype of two types is too large".into(), + CheckErrors::Expects(s) => format!("unexpected interpreter behavior: {s}"), + CheckErrors::BadMatchOptionSyntax(source) => + format!("match on a optional type uses the following syntax: (match input some-name if-some-expression if-none-expression). Caused by: {}", + source.message()), + CheckErrors::BadMatchResponseSyntax(source) => + format!("match on a result type uses the following syntax: (match input ok-name if-ok-expression err-name if-err-expression). 
Caused by: {}", + source.message()), + CheckErrors::BadMatchInput(t) => + format!("match requires an input of either a response or optional, found input: '{t}'"), + CheckErrors::TypeAnnotationExpectedFailure => "analysis expected type to already be annotated for expression".into(), + CheckErrors::CostOverflow => "contract execution cost overflowed cost counter".into(), + CheckErrors::CostBalanceExceeded(a, b) => format!("contract execution cost exceeded budget: {a:?} > {b:?}"), + CheckErrors::MemoryBalanceExceeded(a, b) => format!("contract execution cost exceeded memory budget: {a:?} > {b:?}"), + CheckErrors::InvalidTypeDescription => "supplied type description is invalid".into(), + CheckErrors::EmptyTuplesNotAllowed => "tuple types may not be empty".into(), + CheckErrors::UnknownTypeName(name) => format!("failed to parse type: '{name}'"), + CheckErrors::ValueTooLarge => "created a type which was greater than maximum allowed value size".into(), + CheckErrors::ValueOutOfBounds => "created a type which value size was out of defined bounds".into(), + CheckErrors::TypeSignatureTooDeep => "created a type which was deeper than maximum allowed type depth".into(), + CheckErrors::ExpectedName => "expected a name argument to this function".into(), + CheckErrors::NoSuperType(a, b) => format!("unable to create a supertype for the two types: '{a}' and '{b}'"), + CheckErrors::UnknownListConstructionFailure => "invalid syntax for list definition".into(), + CheckErrors::ListTypesMustMatch => "expecting elements of same type in a list".into(), + CheckErrors::ConstructedListTooLarge => "reached limit of elements in a sequence".into(), + CheckErrors::TypeError(expected_type, found_type) => format!("expecting expression of type '{expected_type}', found '{found_type}'"), + CheckErrors::TypeLiteralError(expected_type, found_type) => format!("expecting a literal of type '{expected_type}', found '{found_type}'"), + CheckErrors::TypeValueError(expected_type, found_value) => 
format!("expecting expression of type '{expected_type}', found '{found_value}'"), + CheckErrors::UnionTypeError(expected_types, found_type) => format!("expecting expression of type {}, found '{}'", formatted_expected_types(expected_types), found_type), + CheckErrors::UnionTypeValueError(expected_types, found_type) => format!("expecting expression of type {}, found '{}'", formatted_expected_types(expected_types), found_type), + CheckErrors::ExpectedOptionalType(found_type) => format!("expecting expression of type 'optional', found '{found_type}'"), + CheckErrors::ExpectedOptionalOrResponseType(found_type) => format!("expecting expression of type 'optional' or 'response', found '{found_type}'"), + CheckErrors::ExpectedOptionalOrResponseValue(found_type) => format!("expecting expression of type 'optional' or 'response', found '{found_type}'"), + CheckErrors::ExpectedResponseType(found_type) => format!("expecting expression of type 'response', found '{found_type}'"), + CheckErrors::ExpectedOptionalValue(found_type) => format!("expecting expression of type 'optional', found '{found_type}'"), + CheckErrors::ExpectedResponseValue(found_type) => format!("expecting expression of type 'response', found '{found_type}'"), + CheckErrors::CouldNotDetermineResponseOkType => "attempted to obtain 'ok' value from response, but 'ok' type is indeterminate".into(), + CheckErrors::CouldNotDetermineResponseErrType => "attempted to obtain 'err' value from response, but 'err' type is indeterminate".into(), + CheckErrors::CouldNotDetermineMatchTypes => "attempted to match on an (optional) or (response) type where either the some, ok, or err type is indeterminate. 
you may wish to use unwrap-panic or unwrap-err-panic instead.".into(), + CheckErrors::CouldNotDetermineType => "type of expression cannot be determined".into(), + CheckErrors::BadTupleFieldName => "invalid tuple field name".into(), + CheckErrors::ExpectedTuple(type_signature) => format!("expecting tuple, found '{type_signature}'"), + CheckErrors::NoSuchTupleField(field_name, tuple_signature) => format!("cannot find field '{field_name}' in tuple '{tuple_signature}'"), + CheckErrors::BadTupleConstruction(message) => format!("invalid tuple syntax: {message}"), + CheckErrors::NoSuchDataVariable(var_name) => format!("use of unresolved persisted variable '{var_name}'"), + CheckErrors::BadTransferSTXArguments => "STX transfer expects an int amount, from principal, to principal".into(), + CheckErrors::BadTransferFTArguments => "transfer expects an int amount, from principal, to principal".into(), + CheckErrors::BadTransferNFTArguments => "transfer expects an asset, from principal, to principal".into(), + CheckErrors::BadMintFTArguments => "mint expects a uint amount and from principal".into(), + CheckErrors::BadBurnFTArguments => "burn expects a uint amount and from principal".into(), + CheckErrors::BadMapName => "invalid map name".into(), + CheckErrors::NoSuchMap(map_name) => format!("use of unresolved map '{map_name}'"), + CheckErrors::DefineFunctionBadSignature => "invalid function definition".into(), + CheckErrors::BadFunctionName => "invalid function name".into(), + CheckErrors::BadMapTypeDefinition => "invalid map definition".into(), + CheckErrors::PublicFunctionMustReturnResponse(found_type) => format!("public functions must return an expression of type 'response', found '{found_type}'"), + CheckErrors::DefineVariableBadSignature => "invalid variable definition".into(), + CheckErrors::ReturnTypesMustMatch(type_1, type_2) => format!("detected two execution paths, returning two different expression types (got '{type_1}' and '{type_2}')"), + 
CheckErrors::NoSuchContract(contract_identifier) => format!("use of unresolved contract '{contract_identifier}'"), + CheckErrors::NoSuchPublicFunction(contract_identifier, function_name) => format!("contract '{contract_identifier}' has no public function '{function_name}'"), + CheckErrors::PublicFunctionNotReadOnly(contract_identifier, function_name) => format!("function '{contract_identifier}' in '{function_name}' is not read-only"), + CheckErrors::ContractAlreadyExists(contract_identifier) => format!("contract name '{contract_identifier}' conflicts with existing contract"), + CheckErrors::ContractCallExpectName => "missing contract name for call".into(), + CheckErrors::ExpectedCallableType(found_type) => format!("expected a callable contract, found {found_type}"), + CheckErrors::NoSuchBlockInfoProperty(property_name) => format!("use of block unknown property '{property_name}'"), + CheckErrors::NoSuchBurnBlockInfoProperty(property_name) => format!("use of burn block unknown property '{property_name}'"), + CheckErrors::NoSuchStacksBlockInfoProperty(property_name) => format!("use of unknown stacks block property '{property_name}'"), + CheckErrors::NoSuchTenureInfoProperty(property_name) => format!("use of unknown tenure property '{property_name}'"), + CheckErrors::GetBlockInfoExpectPropertyName => "missing property name for block info introspection".into(), + CheckErrors::GetBurnBlockInfoExpectPropertyName => "missing property name for burn block info introspection".into(), + CheckErrors::GetStacksBlockInfoExpectPropertyName => "missing property name for stacks block info introspection".into(), + CheckErrors::GetTenureInfoExpectPropertyName => "missing property name for tenure info introspection".into(), + CheckErrors::NameAlreadyUsed(name) => format!("defining '{name}' conflicts with previous value"), + CheckErrors::ReservedWord(name) => format!("{name} is a reserved word"), + CheckErrors::NonFunctionApplication => "expecting expression of type function".into(), + 
CheckErrors::ExpectedListApplication => "expecting expression of type list".into(), + CheckErrors::ExpectedSequence(found_type) => format!("expecting expression of type 'list', 'buff', 'string-ascii' or 'string-utf8' - found '{found_type}'"), + CheckErrors::MaxLengthOverflow => format!("expecting a value <= {}", u32::MAX), + CheckErrors::BadLetSyntax => "invalid syntax of 'let'".into(), + CheckErrors::CircularReference(references) => format!("detected circular reference: ({})", references.join(", ")), + CheckErrors::BadSyntaxBinding(binding_error) => format!("invalid syntax binding: {}", &binding_error.message()), + CheckErrors::MaxContextDepthReached => "reached depth limit".into(), + CheckErrors::UndefinedVariable(var_name) => format!("use of unresolved variable '{var_name}'"), + CheckErrors::UndefinedFunction(var_name) => format!("use of unresolved function '{var_name}'"), + CheckErrors::RequiresAtLeastArguments(expected, found) => format!("expecting >= {expected} arguments, got {found}"), + CheckErrors::RequiresAtMostArguments(expected, found) => format!("expecting < {expected} arguments, got {found}"), + CheckErrors::IncorrectArgumentCount(expected_count, found_count) => format!("expecting {expected_count} arguments, got {found_count}"), + CheckErrors::IfArmsMustMatch(type_1, type_2) => format!("expression types returned by the arms of 'if' must match (got '{type_1}' and '{type_2}')"), + CheckErrors::MatchArmsMustMatch(type_1, type_2) => format!("expression types returned by the arms of 'match' must match (got '{type_1}' and '{type_2}')"), + CheckErrors::DefaultTypesMustMatch(type_1, type_2) => format!("expression types passed in 'default-to' must match (got '{type_1}' and '{type_2}')"), + CheckErrors::TooManyExpressions => "reached limit of expressions".into(), + CheckErrors::IllegalOrUnknownFunctionApplication(function_name) => format!("use of illegal / unresolved function '{function_name}"), + CheckErrors::UnknownFunction(function_name) => format!("use of 
unresolved function '{function_name}'"), + CheckErrors::TraitBasedContractCallInReadOnly => "use of trait based contract calls are not allowed in read-only context".into(), + CheckErrors::WriteAttemptedInReadOnly => "expecting read-only statements, detected a writing operation".into(), + CheckErrors::AtBlockClosureMustBeReadOnly => "(at-block ...) closures expect read-only statements, but detected a writing operation".into(), + CheckErrors::BadTokenName => "expecting an token name as an argument".into(), + CheckErrors::DefineFTBadSignature => "(define-token ...) expects a token name as an argument".into(), + CheckErrors::DefineNFTBadSignature => "(define-asset ...) expects an asset name and an asset identifier type signature as arguments".into(), + CheckErrors::NoSuchNFT(asset_name) => format!("tried to use asset function with a undefined asset ('{asset_name}')"), + CheckErrors::NoSuchFT(asset_name) => format!("tried to use token function with a undefined token ('{asset_name}')"), + CheckErrors::NoSuchTrait(contract_name, trait_name) => format!("use of unresolved trait {contract_name}.{trait_name}"), + CheckErrors::TraitReferenceUnknown(trait_name) => format!("use of undeclared trait <{trait_name}>"), + CheckErrors::TraitMethodUnknown(trait_name, func_name) => format!("method '{func_name}' unspecified in trait <{trait_name}>"), + CheckErrors::ImportTraitBadSignature => "(use-trait ...) 
expects a trait name and a trait identifier".into(), + CheckErrors::BadTraitImplementation(trait_name, func_name) => format!("invalid signature for method '{func_name}' regarding trait's specification <{trait_name}>"), + CheckErrors::ExpectedTraitIdentifier => "expecting expression of type trait identifier".into(), + CheckErrors::UnexpectedTraitOrFieldReference => "unexpected use of trait reference or field".into(), + CheckErrors::DefineTraitBadSignature => "invalid trait definition".into(), + CheckErrors::DefineTraitDuplicateMethod(method_name) => format!("duplicate method name '{method_name}' in trait definition"), + CheckErrors::TraitReferenceNotAllowed => "trait references can not be stored".into(), + CheckErrors::ContractOfExpectsTrait => "trait reference expected".into(), + CheckErrors::IncompatibleTrait(expected_trait, actual_trait) => format!("trait '{actual_trait}' is not a compatible with expected trait, '{expected_trait}'"), + CheckErrors::InvalidCharactersDetected => "invalid characters detected".into(), + CheckErrors::InvalidUTF8Encoding => "invalid UTF8 encoding".into(), + CheckErrors::InvalidSecp65k1Signature => "invalid seckp256k1 signature".into(), + CheckErrors::TypeAlreadyAnnotatedFailure | CheckErrors::CheckerImplementationFailure => { + "internal error - please file an issue on https://github.com/stacks-network/stacks-blockchain".into() + }, + CheckErrors::UncheckedIntermediaryResponses => "intermediary responses in consecutive statements must be checked".into(), + CheckErrors::CostComputationFailed(s) => format!("contract cost computation failed: {s}"), + CheckErrors::CouldNotDetermineSerializationType => "could not determine the input type for the serialization function".into(), + CheckErrors::ExecutionTimeExpired => "execution time expired".into(), + } + } + + fn suggestion(&self) -> Option { + match &self { + CheckErrors::BadLetSyntax => Some( + "'let' syntax example: (let ((supply 1000) (ttl 60)) )".into(), + ), + 
CheckErrors::TraitReferenceUnknown(_) => Some( + "traits should be either defined, with define-trait, or imported, with use-trait." + .into(), + ), + CheckErrors::NoSuchBlockInfoProperty(_) => Some( + "properties available: time, header-hash, burnchain-header-hash, vrf-seed".into(), + ), + _ => None, + } + } +} diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index ddbcadb0c3d..2878877e964 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -35,7 +35,7 @@ use self::type_checker::v2_05::TypeChecker as TypeChecker2_05; use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; #[cfg(feature = "rusqlite")] -use crate::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::vm::ast::build_ast; use crate::vm::costs::LimitedCostTracker; #[cfg(feature = "rusqlite")] use crate::vm::database::MemoryBackingStore; @@ -54,16 +54,9 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let contract = build_ast_with_rules( - &contract_identifier, - snippet, - &mut (), - version, - epoch, - ASTRules::PrecheckSize, - ) - .map_err(|_| CheckErrors::Expects("Failed to build AST".into()))? - .expressions; + let contract = build_ast(&contract_identifier, snippet, &mut (), version, epoch) + .map_err(|_| CheckErrors::Expects("Failed to build AST".into()))? 
+ .expressions; let mut marf = MemoryBackingStore::new(); let mut analysis_db = marf.as_analysis_db(); diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 50b27881475..a319b23b2f7 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -28,7 +28,7 @@ use stacks_common::types::StacksEpochId; use self::definition_sorter::DefinitionSorter; use self::errors::ParseResult; use self::expression_identifier::ExpressionIdentifier; -use self::parser::v1::{parse as parse_v1, parse_no_stack_limit as parse_v1_no_stack_limit}; +use self::parser::v1::parse as parse_v1; use self::parser::v2::parse as parse_v2; use self::stack_depth_checker::{StackDepthChecker, VaryStackDepthChecker}; use self::sugar_expander::SugarExpander; @@ -64,12 +64,9 @@ define_u8_enum!(ASTRules { fn parse_in_epoch( source_code: &str, epoch_id: StacksEpochId, - ast_rules: ASTRules, ) -> ParseResult> { if epoch_id >= StacksEpochId::Epoch21 { parse_v2(source_code) - } else if ast_rules == ASTRules::Typical { - parse_v1_no_stack_limit(source_code) } else { parse_v1(source_code) } @@ -84,61 +81,13 @@ pub fn ast_check_size( clarity_version: ClarityVersion, epoch_id: StacksEpochId, ) -> ParseResult { - let pre_expressions = parse_in_epoch(source_code, epoch_id, ASTRules::PrecheckSize)?; + let pre_expressions = parse_in_epoch(source_code, epoch_id)?; let mut contract_ast = ContractAST::new(contract_identifier.clone(), pre_expressions); StackDepthChecker::run_pass(&mut contract_ast, clarity_version)?; VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version)?; Ok(contract_ast) } -/// Build an AST according to a ruleset -pub fn build_ast_with_rules( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch: StacksEpochId, - ruleset: ASTRules, -) -> ParseResult { - match ruleset { - // After epoch 2.1, prechecking the size is required - ASTRules::Typical if epoch < StacksEpochId::Epoch21 => 
build_ast_typical( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ), - _ => build_ast_precheck_size( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ), - } -} - -/// Build an AST with the typical rules -fn build_ast_typical( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch: StacksEpochId, -) -> ParseResult { - let (contract, _, _) = inner_build_ast( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ASTRules::Typical, - true, - )?; - Ok(contract) -} - /// Used by developer tools only. Continues on through errors by inserting /// placeholders into the AST. Collects as many diagnostics as possible. /// Always returns a ContractAST, a vector of diagnostics, and a boolean @@ -157,7 +106,6 @@ pub fn build_ast_with_diagnostics( cost_track, clarity_version, epoch, - ASTRules::PrecheckSize, false, ) .unwrap() @@ -169,7 +117,6 @@ fn inner_build_ast( cost_track: &mut T, clarity_version: ClarityVersion, epoch: StacksEpochId, - ast_rules: ASTRules, error_early: bool, ) -> ParseResult<(ContractAST, Vec, bool)> { let cost_err = match runtime_cost( @@ -190,10 +137,7 @@ fn inner_build_ast( parser::v2::parse_collect_diagnostics(source_code) } } else { - let parse_result = match ast_rules { - ASTRules::Typical => parse_v1_no_stack_limit(source_code), - ASTRules::PrecheckSize => parse_v1(source_code), - }; + let parse_result = parse_v1(source_code); match parse_result { Ok(pre_expressions) => (pre_expressions, vec![], true), Err(error) if error_early => return Err(error), @@ -223,16 +167,14 @@ fn inner_build_ast( _ => (), } - if ast_rules != ASTRules::Typical { - // run extra stack-depth pass for tuples - match VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => 
(), + // run extra stack-depth pass for tuples + match VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; } + _ => (), } match ExpressionIdentifier::run_pre_expression_pass(&mut contract_ast, clarity_version) { @@ -278,8 +220,8 @@ fn inner_build_ast( Ok((contract_ast, diagnostics, success)) } -/// Built an AST, but pre-check the size of the AST before doing more work -fn build_ast_precheck_size( +/// Built an AST +pub fn build_ast( contract_identifier: &QualifiedContractIdentifier, source_code: &str, cost_track: &mut T, @@ -292,38 +234,19 @@ fn build_ast_precheck_size( cost_track, clarity_version, epoch, - ASTRules::PrecheckSize, true, )?; Ok(contract) } -/// Test compatibility -#[cfg(any(test, feature = "testing"))] -pub fn build_ast( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch_id: StacksEpochId, -) -> ParseResult { - build_ast_typical( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch_id, - ) -} - #[cfg(test)] mod test { use hashbrown::HashMap; use stacks_common::types::StacksEpochId; + use crate::vm::ast::build_ast; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; - use crate::vm::ast::{build_ast, build_ast_with_rules, ASTRules}; use crate::vm::costs::{LimitedCostTracker, *}; use crate::vm::representations::depth_traverse; use crate::vm::types::QualifiedContractIdentifier; @@ -398,13 +321,12 @@ mod test { // with old rules, this is just ExpressionStackDepthTooDeep let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( + let err = build_ast( &QualifiedContractIdentifier::transient(), &exceeds_stack_depth_list, &mut cost_track, clarity_version, StacksEpochId::Epoch2_05, - ASTRules::Typical, ) .expect_err("Contract should error in 
parsing"); @@ -420,13 +342,12 @@ mod test { // with new rules, this is now VaryExpressionStackDepthTooDeep let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( + let err = build_ast( &QualifiedContractIdentifier::transient(), &exceeds_stack_depth_list, &mut cost_track, clarity_version, StacksEpochId::Epoch2_05, - ASTRules::PrecheckSize, ) .expect_err("Contract should error in parsing"); @@ -441,30 +362,17 @@ mod test { assert_eq!(expected_list_cost_state, cost_track); // you cannot do the same for tuples! - // in ASTRules::Typical, this passes - let mut cost_track = UnitTestTracker::new(); - let _ = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_tuple, - &mut cost_track, - clarity_version, - StacksEpochId::Epoch2_05, - ASTRules::Typical, - ) - .expect("Contract should parse with ASTRules::Typical"); - // this actually won't even error without // the VaryStackDepthChecker changes. let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( + let err = build_ast( &QualifiedContractIdentifier::transient(), &exceeds_stack_depth_tuple, &mut cost_track, clarity_version, StacksEpochId::Epoch2_05, - ASTRules::PrecheckSize, ) - .expect_err("Contract should error in parsing with ASTRules::PrecheckSize"); + .expect_err("Contract should error in parsing"); let expected_err = ParseErrors::VaryExpressionStackDepthTooDeep; let expected_list_cost_state = UnitTestTracker { @@ -498,15 +406,13 @@ mod test { ")".repeat(stack_limit + 1) ); - // with old rules, this is just ExpressionStackDepthTooDeep let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( + let err = build_ast( &QualifiedContractIdentifier::transient(), &exceeds_stack_depth_list, &mut cost_track, *clarity_version, StacksEpochId::Epoch21, - ASTRules::Typical, ) .expect_err("Contract should error in parsing"); @@ -520,59 +426,13 @@ mod test { assert_eq!(&expected_err, &err.err); 
assert_eq!(expected_list_cost_state, cost_track); - // in 2.1, this is still ExpressionStackDepthTooDeep - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_list, - &mut cost_track, - *clarity_version, - StacksEpochId::Epoch21, - ASTRules::PrecheckSize, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // in 2.1, ASTRules::Typical is ignored -- this still fails to parse - let mut cost_track = UnitTestTracker::new(); - let _ = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_tuple, - &mut cost_track, - *clarity_version, - StacksEpochId::Epoch21, - ASTRules::Typical, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // in 2.1, ASTRules::PrecheckSize is still ignored -- this still fails to parse let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( + let err = build_ast( &QualifiedContractIdentifier::transient(), &exceeds_stack_depth_tuple, &mut cost_track, *clarity_version, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, ) .expect_err("Contract should error in parsing"); diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 17ba96f0eae..6287cc11bef 100644 --- a/clarity/src/vm/clarity.rs +++ 
b/clarity/src/vm/clarity.rs @@ -4,7 +4,7 @@ use stacks_common::types::StacksEpochId; use crate::vm::analysis::{AnalysisDatabase, CheckError, CheckErrors, ContractAnalysis}; use crate::vm::ast::errors::{ParseError, ParseErrors}; -use crate::vm::ast::{ASTRules, ContractAST}; +use crate::vm::ast::ContractAST; use crate::vm::contexts::{AssetMap, Environment, OwnedEnvironment}; use crate::vm::costs::{ExecutionCost, LimitedCostTracker}; use crate::vm::database::ClarityDatabase; @@ -209,18 +209,16 @@ pub trait TransactionConnection: ClarityConnection { identifier: &QualifiedContractIdentifier, clarity_version: ClarityVersion, contract_content: &str, - ast_rules: ASTRules, ) -> Result<(ContractAST, ContractAnalysis), Error> { let epoch_id = self.get_epoch(); self.with_analysis_db(|db, mut cost_track| { - let ast_result = ast::build_ast_with_rules( + let ast_result = ast::build_ast( identifier, contract_content, &mut cost_track, clarity_version, epoch_id, - ast_rules, ); let contract_ast = match ast_result { diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 1e167826acf..b07fd00363a 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -26,7 +26,7 @@ use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; use super::EvalHook; -use crate::vm::ast::{ASTRules, ContractAST}; +use crate::vm::ast::ContractAST; use crate::vm::callables::{DefinedFunction, FunctionIdentifier}; use crate::vm::contracts::Contract; use crate::vm::costs::cost_functions::ClarityCostFunction; @@ -641,15 +641,12 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { contract_identifier: QualifiedContractIdentifier, contract_content: &str, sponsor: Option, - ast_rules: ASTRules, ) -> Result<((), AssetMap, Vec)> { self.execute_in_env( contract_identifier.issuer.clone().into(), sponsor, None, - |exec_env| { - exec_env.initialize_contract(contract_identifier, contract_content, ast_rules) - }, + |exec_env| 
exec_env.initialize_contract(contract_identifier, contract_content), ) } @@ -659,7 +656,6 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { version: ClarityVersion, contract_content: &str, sponsor: Option, - ast_rules: ASTRules, ) -> Result<((), AssetMap, Vec)> { self.execute_in_env( contract_identifier.issuer.clone().into(), @@ -668,9 +664,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { QualifiedContractIdentifier::transient(), version, )), - |exec_env| { - exec_env.initialize_contract(contract_identifier, contract_content, ast_rules) - }, + |exec_env| exec_env.initialize_contract(contract_identifier, contract_content), ) } @@ -770,29 +764,19 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { ) } - pub fn eval_read_only_with_rules( + pub fn eval_read_only( &mut self, contract: &QualifiedContractIdentifier, program: &str, - ast_rules: ast::ASTRules, ) -> Result<(Value, AssetMap, Vec)> { self.execute_in_env( QualifiedContractIdentifier::transient().issuer.into(), None, None, - |exec_env| exec_env.eval_read_only_with_rules(contract, program, ast_rules), + |exec_env| exec_env.eval_read_only(contract, program), ) } - #[cfg(any(test, feature = "testing"))] - pub fn eval_read_only( - &mut self, - contract: &QualifiedContractIdentifier, - program: &str, - ) -> Result<(Value, AssetMap, Vec)> { - self.eval_read_only_with_rules(contract, program, ast::ASTRules::Typical) - } - pub fn begin(&mut self) { self.context.begin(); } @@ -950,21 +934,19 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { ) } - pub fn eval_read_only_with_rules( + pub fn eval_read_only( &mut self, contract_identifier: &QualifiedContractIdentifier, program: &str, - rules: ast::ASTRules, ) -> Result { let clarity_version = self.contract_context.clarity_version; - let parsed = ast::build_ast_with_rules( + let parsed = ast::build_ast( contract_identifier, program, self, clarity_version, self.global_context.epoch_id, - rules, )? 
.expressions; @@ -1004,26 +986,16 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { result } - #[cfg(any(test, feature = "testing"))] - pub fn eval_read_only( - &mut self, - contract_identifier: &QualifiedContractIdentifier, - program: &str, - ) -> Result { - self.eval_read_only_with_rules(contract_identifier, program, ast::ASTRules::Typical) - } - - pub fn eval_raw_with_rules(&mut self, program: &str, rules: ast::ASTRules) -> Result { + pub fn eval_raw(&mut self, program: &str) -> Result { let contract_id = QualifiedContractIdentifier::transient(); let clarity_version = self.contract_context.clarity_version; - let parsed = ast::build_ast_with_rules( + let parsed = ast::build_ast( &contract_id, program, self, clarity_version, self.global_context.epoch_id, - rules, )? .expressions; @@ -1037,11 +1009,6 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { eval(&parsed[0], self, &local_context) } - #[cfg(any(test, feature = "testing"))] - pub fn eval_raw(&mut self, program: &str) -> Result { - self.eval_raw_with_rules(program, ast::ASTRules::Typical) - } - /// Used only for contract-call! cost short-circuiting. Once the short-circuited cost /// has been evaluated and assessed, the contract-call! itself is executed "for free". 
pub fn run_free(&mut self, to_run: F) -> A @@ -1243,17 +1210,15 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { &mut self, contract_identifier: QualifiedContractIdentifier, contract_content: &str, - ast_rules: ASTRules, ) -> Result<()> { let clarity_version = self.contract_context.clarity_version; - let contract_ast = ast::build_ast_with_rules( + let contract_ast = ast::build_ast( &contract_identifier, contract_content, self, clarity_version, self.global_context.epoch_id, - ast_rules, )?; self.initialize_contract_from_ast( contract_identifier, diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 55b15ab75aa..7154794fd3c 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -29,7 +29,6 @@ use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use super::clarity_store::SpecialCaseHandler; use super::key_value_wrapper::ValueResult; use crate::vm::analysis::{AnalysisDatabase, ContractAnalysis}; -use crate::vm::ast::ASTRules; use crate::vm::contracts::Contract; use crate::vm::costs::{CostOverflowingMath, ExecutionCost}; use crate::vm::database::structures::{ @@ -236,8 +235,6 @@ pub trait BurnStateDB { fn get_stacks_epoch(&self, height: u32) -> Option; fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option; - fn get_ast_rules(&self, height: u32) -> ASTRules; - /// Get the PoX payout addresses for a given burnchain block fn get_pox_payout_addrs( &self, @@ -441,10 +438,6 @@ impl BurnStateDB for NullBurnStateDB { ) -> Option<(Vec, u128)> { None } - - fn get_ast_rules(&self, _height: u32) -> ASTRules { - ASTRules::Typical - } } impl<'a> ClarityDatabase<'a> { diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index 6d480640699..12bcd1c8121 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -5,7 +5,7 @@ use stacks_common::consts::CHAIN_ID_TESTNET; use 
stacks_common::types::StacksEpochId; use crate::vm::analysis::{mem_type_check, ContractAnalysis}; -use crate::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::vm::ast::build_ast; use crate::vm::contexts::GlobalContext; use crate::vm::costs::LimitedCostTracker; use crate::vm::database::MemoryBackingStore; @@ -81,13 +81,12 @@ fn doc_execute(program: &str) -> Result, vm::Error> { DOCS_GENERATION_EPOCH, ); global_context.execute(|g| { - let parsed = build_ast_with_rules( + let parsed = build_ast( &contract_id, program, &mut (), ClarityVersion::latest(), StacksEpochId::latest(), - ASTRules::PrecheckSize, )? .expressions; vm::eval_all(&parsed, &mut contract_context, g, None) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 9456884e7d4..293da0858d7 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2742,7 +2742,6 @@ mod test { use super::{get_input_type_string, make_all_api_reference, make_json_api_reference}; use crate::vm::analysis::type_check; - use crate::vm::ast::ASTRules; use crate::vm::contexts::OwnedEnvironment; use crate::vm::costs::ExecutionCost; use crate::vm::database::{ @@ -2939,9 +2938,6 @@ mod test { fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { self.get_stacks_epoch(0) } - fn get_ast_rules(&self, height: u32) -> ASTRules { - ASTRules::PrecheckSize - } fn get_pox_payout_addrs( &self, height: u32, @@ -3215,21 +3211,11 @@ mod test { ) .unwrap(); - env.initialize_contract( - contract_id, - token_contract_content, - None, - ASTRules::PrecheckSize, - ) - .unwrap(); + env.initialize_contract(contract_id, token_contract_content, None) + .unwrap(); - env.initialize_contract( - trait_def_id, - super::DEFINE_TRAIT_API.example, - None, - ASTRules::PrecheckSize, - ) - .unwrap(); + env.initialize_contract(trait_def_id, super::DEFINE_TRAIT_API.example, None) + .unwrap(); } let example = &func_api.example; diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 
69cb45c0136..fe0fc7f5454 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -500,14 +500,12 @@ pub fn execute_on_network(program: &str, use_mainnet: bool) -> Result( program: &str, clarity_version: ClarityVersion, epoch: StacksEpochId, - ast_rules: ast::ASTRules, use_mainnet: bool, mut global_context_function: F, ) -> Result> @@ -549,15 +546,8 @@ where ); global_context.execute(|g| { global_context_function(g)?; - let parsed = ast::build_ast_with_rules( - &contract_id, - program, - &mut (), - clarity_version, - epoch, - ast_rules, - )? - .expressions; + let parsed = + ast::build_ast(&contract_id, program, &mut (), clarity_version, epoch)?.expressions; eval_all(&parsed, &mut contract_context, g, None) }) } @@ -567,14 +557,12 @@ pub fn execute_with_parameters( program: &str, clarity_version: ClarityVersion, epoch: StacksEpochId, - ast_rules: ast::ASTRules, use_mainnet: bool, ) -> Result> { execute_with_parameters_and_call_in_global_context( program, clarity_version, epoch, - ast_rules, use_mainnet, |_| Ok(()), ) @@ -583,13 +571,7 @@ pub fn execute_with_parameters( /// Execute for test with `version`, Epoch20, testnet. #[cfg(any(test, feature = "testing"))] pub fn execute_against_version(program: &str, version: ClarityVersion) -> Result> { - execute_with_parameters( - program, - version, - StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, - false, - ) + execute_with_parameters(program, version, StacksEpochId::Epoch20, false) } /// Execute for test in Clarity1, Epoch20, testnet. 
@@ -599,7 +581,6 @@ pub fn execute(program: &str) -> Result> { program, ClarityVersion::Clarity1, StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, false, ) } @@ -614,7 +595,6 @@ pub fn execute_with_limited_execution_time( program, ClarityVersion::Clarity1, StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, false, |g| { g.set_max_execution_time(max_execution_time); @@ -630,7 +610,6 @@ pub fn execute_v2(program: &str) -> Result> { program, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ast::ASTRules::PrecheckSize, false, ) } diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 3fc21b7f215..2f72719f0ec 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -10,7 +10,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::StacksEpochId; -use crate::vm::ast::ASTRules; use crate::vm::costs::ExecutionCost; use crate::vm::database::{BurnStateDB, HeadersDB}; use crate::vm::representations::SymbolicExpression; @@ -19,22 +18,18 @@ use crate::vm::{execute as vm_execute, execute_on_network as vm_execute_on_netwo pub struct UnitTestBurnStateDB { pub epoch_id: StacksEpochId, - pub ast_rules: ASTRules, } pub struct UnitTestHeaderDB {} pub const TEST_HEADER_DB: UnitTestHeaderDB = UnitTestHeaderDB {}; pub const TEST_BURN_STATE_DB: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch20, - ast_rules: ASTRules::Typical, }; pub const TEST_BURN_STATE_DB_205: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch2_05, - ast_rules: ASTRules::PrecheckSize, }; pub const TEST_BURN_STATE_DB_21: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch21, - ast_rules: ASTRules::PrecheckSize, }; pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnStateDB { @@ -42,11 +37,8 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState StacksEpochId::Epoch10 => { panic!("Epoch 1.0 not testable"); } - 
StacksEpochId::Epoch20 => UnitTestBurnStateDB { - epoch_id, - ast_rules: ASTRules::Typical, - }, - StacksEpochId::Epoch2_05 + StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 @@ -54,10 +46,7 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 - | StacksEpochId::Epoch32 => UnitTestBurnStateDB { - epoch_id, - ast_rules: ASTRules::PrecheckSize, - }, + | StacksEpochId::Epoch32 => UnitTestBurnStateDB { epoch_id }, } } @@ -316,9 +305,6 @@ impl BurnStateDB for UnitTestBurnStateDB { ) -> Option { None } - fn get_ast_rules(&self, _height: u32) -> ASTRules { - self.ast_rules - } fn get_pox_payout_addrs( &self, _height: u32, diff --git a/clarity/src/vm/tests/assets.rs b/clarity/src/vm/tests/assets.rs index 0cfeebfb274..a6e953cd4c0 100644 --- a/clarity/src/vm/tests/assets.rs +++ b/clarity/src/vm/tests/assets.rs @@ -24,7 +24,6 @@ use crate::vm::tests::{test_clarity_versions, test_epochs}; use crate::vm::types::{PrincipalData, QualifiedContractIdentifier, Value}; #[cfg(test)] use crate::vm::{ - ast::ASTRules, contexts::AssetMapEntry, errors::{CheckErrors, RuntimeErrorType}, tests::{ @@ -184,20 +183,10 @@ fn test_native_stx_ops(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnvi QualifiedContractIdentifier::new(p1_std_principal_data, "second".into()); owned_env - .initialize_contract( - token_contract_id.clone(), - contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(token_contract_id.clone(), contract, None) .unwrap(); owned_env - .initialize_contract( - second_contract_id.clone(), - contract_second, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(second_contract_id.clone(), contract_second, None) .unwrap(); owned_env.stx_faucet(&(p1_principal), u128::MAX - 1500); @@ -551,12 +540,7 @@ fn test_simple_token_system( let contract_principal = 
PrincipalData::Contract(token_contract_id.clone()); owned_env - .initialize_contract( - token_contract_id.clone(), - tokens_contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(token_contract_id.clone(), tokens_contract, None) .unwrap(); let (result, asset_map, _events) = execute_transaction( @@ -847,12 +831,7 @@ fn test_total_supply(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnviro let token_contract_id = QualifiedContractIdentifier::new(p1_std_principal_data, "tokens".into()); let err = owned_env - .initialize_contract( - token_contract_id.clone(), - bad_0, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(token_contract_id.clone(), bad_0, None) .unwrap_err(); assert!(matches!( err, @@ -860,12 +839,7 @@ fn test_total_supply(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnviro )); let err = owned_env - .initialize_contract( - token_contract_id.clone(), - bad_1, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(token_contract_id.clone(), bad_1, None) .unwrap_err(); assert!(matches!( err, @@ -873,12 +847,7 @@ fn test_total_supply(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnviro )); owned_env - .initialize_contract( - token_contract_id.clone(), - contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(token_contract_id.clone(), contract, None) .unwrap(); let (result, _asset_map, _events) = execute_transaction( @@ -949,28 +918,13 @@ fn test_overlapping_nfts( QualifiedContractIdentifier::new(p1_std_principal_data, "names-2".into()); owned_env - .initialize_contract( - tokens_contract_id, - tokens_contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(tokens_contract_id, tokens_contract, None) .unwrap(); owned_env - .initialize_contract( - names_contract_id, - names_contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(names_contract_id, names_contract, None) .unwrap(); owned_env - .initialize_contract( - names_2_contract_id, - names_contract, - None, - 
ASTRules::PrecheckSize, - ) + .initialize_contract(names_2_contract_id, names_contract, None) .unwrap(); } @@ -1023,22 +977,12 @@ fn test_simple_naming_system( let name_hash_cheap_0 = execute("(hash160 100001)"); owned_env - .initialize_contract( - tokens_contract_id, - tokens_contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(tokens_contract_id, tokens_contract, None) .unwrap(); let names_contract_id = QualifiedContractIdentifier::new(p1_std_principal_data, "names".into()); owned_env - .initialize_contract( - names_contract_id.clone(), - names_contract, - None, - ASTRules::PrecheckSize, - ) + .initialize_contract(names_contract_id.clone(), names_contract, None) .unwrap(); let (result, _asset_map, _events) = execute_transaction( diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 5812830245e..07c2c4dc758 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -24,7 +24,7 @@ use crate::vm::tests::{test_clarity_versions, test_epochs}; use crate::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, Value}; #[cfg(test)] use crate::vm::{ - ast::{errors::ParseErrors, ASTRules}, + ast::errors::ParseErrors, errors::{CheckErrors, Error, RuntimeErrorType}, tests::{ env_factory, execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, @@ -134,7 +134,6 @@ fn test_get_block_info_eval( ClarityVersion::Clarity2, contracts[i], None, - ASTRules::PrecheckSize, ) .unwrap(); @@ -182,13 +181,11 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("contract-b").unwrap(), contract_b, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -329,13 +326,11 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener 
env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("contract-b").unwrap(), contract_b, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -385,13 +380,11 @@ fn test_fully_qualified_contract_call( env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("contract-b").unwrap(), contract_b, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -523,11 +516,11 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("tokens").unwrap(); - env.initialize_contract(contract_identifier, tokens_contract, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, tokens_contract) .unwrap(); let contract_identifier = QualifiedContractIdentifier::local("names").unwrap(); - env.initialize_contract(contract_identifier, names_contract, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, names_contract) .unwrap(); } @@ -696,11 +689,11 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro ); let contract_identifier = QualifiedContractIdentifier::local("factorial-contract").unwrap(); - env.initialize_contract(contract_identifier, contract_1, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_1) .unwrap(); let contract_identifier = QualifiedContractIdentifier::local("proxy-compute").unwrap(); - env.initialize_contract(contract_identifier, contract_2, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_2) .unwrap(); let args = symbols_from_values(vec![]); @@ -778,11 +771,11 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: 
MemoryEnvironmentGenerator let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("contract-1").unwrap(); - env.initialize_contract(contract_identifier, contract_1, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_1) .unwrap(); let contract_identifier = QualifiedContractIdentifier::local("contract-2").unwrap(); - env.initialize_contract(contract_identifier, contract_2, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_2) .unwrap(); env.sender = Some(get_principal_as_principal_data()); @@ -892,12 +885,8 @@ fn test_factorial_contract(epoch: StacksEpochId, mut env_factory: MemoryEnvironm let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("factorial").unwrap(); - env.initialize_contract( - contract_identifier, - FACTORIAL_CONTRACT, - ASTRules::PrecheckSize, - ) - .unwrap(); + env.initialize_contract(contract_identifier, FACTORIAL_CONTRACT) + .unwrap(); let tx_name = "compute"; let arguments_to_test = [ @@ -993,7 +982,6 @@ fn test_at_unknown_block( QualifiedContractIdentifier::local("contract").unwrap(), contract, None, - ASTRules::PrecheckSize, ) .unwrap_err(); eprintln!("{err}"); @@ -1020,7 +1008,6 @@ fn test_as_max_len(epoch: StacksEpochId, mut tl_env_factory: TopLevelMemoryEnvir QualifiedContractIdentifier::local("contract").unwrap(), contract, None, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1091,12 +1078,12 @@ fn test_cc_stack_depth( let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); - env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_one) .unwrap(); let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); 
assert_eq!( - env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_two) .unwrap_err(), RuntimeErrorType::MaxStackDepthReached.into() ); @@ -1132,12 +1119,12 @@ fn test_cc_trait_stack_depth( let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); - env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_one) .unwrap(); let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); assert_eq!( - env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, contract_two) .unwrap_err(), RuntimeErrorType::MaxStackDepthReached.into() ); diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index aec00b3173d..370377c2278 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -1,7 +1,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::hex_bytes; -use crate::vm::ast::ASTRules; use crate::vm::types::{ ASCIIData, BuffData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, SequenceData, StandardPrincipalData, TupleData, Value, @@ -23,7 +22,6 @@ fn test_simple_is_standard_check_inputs() { wrong_type_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap_err(), @@ -40,7 +38,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -52,7 +49,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -66,7 +62,6 @@ fn test_simple_is_standard_testnet_cases() { 
testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -78,7 +73,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -92,7 +86,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -104,7 +97,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -118,7 +110,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -130,7 +121,6 @@ fn test_simple_is_standard_testnet_cases() { testnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -146,7 +136,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -158,7 +147,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -172,7 +160,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -184,7 +171,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -198,7 +184,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -210,7 +195,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, 
StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -224,7 +208,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -236,7 +219,6 @@ fn test_simple_is_standard_mainnet_cases() { mainnet_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -254,7 +236,6 @@ fn test_simple_is_standard_undefined_cases() { invalid_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -266,7 +247,6 @@ fn test_simple_is_standard_undefined_cases() { invalid_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -280,7 +260,6 @@ fn test_simple_is_standard_undefined_cases() { invalid_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -292,7 +271,6 @@ fn test_simple_is_standard_undefined_cases() { invalid_addr_test, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -356,7 +334,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -378,7 +355,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -400,7 +376,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -422,7 +397,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -444,7 +418,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -466,7 +439,6 @@ fn test_principal_destruct_good() { input, 
ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -488,7 +460,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -510,7 +481,6 @@ fn test_principal_destruct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -537,7 +507,6 @@ fn test_principal_destruct_bad_version_byte() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -559,7 +528,6 @@ fn test_principal_destruct_bad_version_byte() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -581,7 +549,6 @@ fn test_principal_destruct_bad_version_byte() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -603,7 +570,6 @@ fn test_principal_destruct_bad_version_byte() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -625,7 +591,6 @@ fn test_principal_destruct_bad_version_byte() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -647,7 +612,6 @@ fn test_principal_destruct_bad_version_byte() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -677,7 +641,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -697,7 +660,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -721,7 +683,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -745,7 +706,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, 
StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -765,7 +725,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -785,7 +744,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -809,7 +767,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -833,7 +790,6 @@ fn test_principal_construct_good() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -899,7 +855,6 @@ fn test_principal_construct_version_byte_future() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -935,7 +890,6 @@ fn test_principal_construct_version_byte_future() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -962,7 +916,6 @@ fn test_principal_construct_check_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) ); @@ -976,7 +929,6 @@ fn test_principal_construct_check_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) ); @@ -989,7 +941,6 @@ fn test_principal_construct_check_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap_err(), @@ -1018,7 +969,6 @@ fn test_principal_construct_check_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) ); @@ -1035,7 +985,6 @@ fn test_principal_construct_response_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -1063,7 +1012,6 @@ fn test_principal_construct_response_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - 
ASTRules::PrecheckSize, false ) .unwrap() @@ -1103,7 +1051,6 @@ fn test_principal_construct_response_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -1130,7 +1077,6 @@ fn test_principal_construct_response_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() @@ -1157,7 +1103,6 @@ fn test_principal_construct_response_errors() { input, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 0c241c8f56b..0b781ed6295 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -26,7 +26,7 @@ use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPu use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, to_hex}; -use crate::vm::ast::{parse, ASTRules}; +use crate::vm::ast::parse; use crate::vm::callables::DefinedFunction; use crate::vm::contexts::OwnedEnvironment; use crate::vm::costs::LimitedCostTracker; @@ -513,7 +513,6 @@ fn test_principal_of_fix() { principal_of_program, ClarityVersion::Clarity2, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, true ) .unwrap() @@ -527,7 +526,6 @@ fn test_principal_of_fix() { principal_of_program, ClarityVersion::Clarity2, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, false ) .unwrap() @@ -541,7 +539,6 @@ fn test_principal_of_fix() { principal_of_program, ClarityVersion::Clarity1, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, true ) .unwrap() @@ -555,7 +552,6 @@ fn test_principal_of_fix() { principal_of_program, ClarityVersion::Clarity1, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, false ) .unwrap() @@ -1247,7 +1243,6 @@ fn test_stx_ops_errors() { program, ClarityVersion::Clarity2, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, false ) .unwrap_err() @@ 
-1690,7 +1685,6 @@ fn test_is_mainnet() { program, ClarityVersion::Clarity2, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, true ) .unwrap() @@ -1710,7 +1704,6 @@ fn test_is_mainnet() { program, ClarityVersion::Clarity2, StacksEpochId::Epoch20, - ASTRules::PrecheckSize, false ) .unwrap() @@ -1735,7 +1728,6 @@ fn test_chain_id() { program, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, true ) .unwrap() @@ -1755,7 +1747,6 @@ fn test_chain_id() { program, ClarityVersion::Clarity2, StacksEpochId::Epoch21, - ASTRules::PrecheckSize, false ) .unwrap() diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 09c95cdbcca..a01fa11bf0a 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -21,7 +21,6 @@ use super::MemoryEnvironmentGenerator; use crate::vm::tests::{test_clarity_versions, test_epochs}; #[cfg(test)] use crate::vm::{ - ast::ASTRules, errors::{CheckErrors, Error}, tests::{env_factory, execute, symbols_from_values}, types::{PrincipalData, QualifiedContractIdentifier, Value}, @@ -51,13 +50,11 @@ fn test_dynamic_dispatch_by_defining_trait( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -109,13 +106,11 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -166,13 +161,11 @@ fn test_dynamic_dispatch_pass_trait( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - 
ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -222,13 +215,11 @@ fn test_dynamic_dispatch_intra_contract_call( env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -281,19 +272,16 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -346,19 +334,16 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -408,19 +393,16 @@ fn test_dynamic_dispatch_by_importing_trait( env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), 
dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -477,31 +459,26 @@ fn test_dynamic_dispatch_including_nested_trait( env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-nested-trait").unwrap(), contract_defining_nested_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-nested-contract").unwrap(), target_nested_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -553,13 +530,11 @@ fn test_dynamic_dispatch_mismatched_args( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -610,13 +585,11 @@ fn test_dynamic_dispatch_mismatched_returned( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -670,13 +643,11 @@ fn test_reentrant_dynamic_dispatch( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - 
ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -727,13 +698,11 @@ fn test_readwrite_dynamic_dispatch( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -784,13 +753,11 @@ fn test_readwrite_violation_dynamic_dispatch( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -848,25 +815,21 @@ fn test_bad_call_with_trait( env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatch").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("implem").unwrap(), impl_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("call").unwrap(), caller_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -916,25 +879,21 @@ fn test_good_call_with_trait( env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatch").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("implem").unwrap(), impl_contract, - ASTRules::PrecheckSize, ) .unwrap(); 
env.initialize_contract( QualifiedContractIdentifier::local("call").unwrap(), caller_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -985,25 +944,21 @@ fn test_good_call_2_with_trait( env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatch").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("implem").unwrap(), impl_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("call").unwrap(), caller_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1056,19 +1011,16 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1119,19 +1071,16 @@ fn test_contract_of_value( env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("dispatch").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("implem").unwrap(), impl_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1186,19 +1135,16 @@ fn test_contract_of_no_impl( env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, - ASTRules::PrecheckSize, ) .unwrap(); 
env.initialize_contract( QualifiedContractIdentifier::local("dispatch").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("implem").unwrap(), impl_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1251,13 +1197,11 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1308,13 +1252,11 @@ fn test_return_trait_with_contract_of_wrapped_in_let( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1363,13 +1305,11 @@ fn test_return_trait_with_contract_of( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1427,14 +1367,12 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1489,14 +1427,12 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG env.initialize_contract( 
QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1562,14 +1498,12 @@ fn test_pass_embedded_trait_to_subtrait_optional( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1634,14 +1568,12 @@ fn test_pass_embedded_trait_to_subtrait_ok( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1706,14 +1638,12 @@ fn test_pass_embedded_trait_to_subtrait_err( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1778,14 +1708,12 @@ fn test_pass_embedded_trait_to_subtrait_list( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1853,14 +1781,12 @@ fn test_pass_embedded_trait_to_subtrait_list_option( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); 
env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1928,14 +1854,12 @@ fn test_pass_embedded_trait_to_subtrait_option_list( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1989,14 +1913,12 @@ fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -2054,14 +1976,12 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -2115,14 +2035,12 @@ fn test_pass_principal_literal_to_trait( env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, - ASTRules::PrecheckSize, ) .unwrap(); env.initialize_contract( QualifiedContractIdentifier::local("target-contract").unwrap(), target_contract, - ASTRules::PrecheckSize, ) .unwrap(); } diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index c3652d97f0c..1ad1e64dd6b 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -23,7 +23,7 @@ use crate::vm::tests::test_clarity_versions; #[cfg(test)] use 
crate::vm::{ analysis::type_checker::v2_1::tests::contracts::type_check_version, - ast::{parse, ASTRules}, + ast::parse, database::MemoryBackingStore, errors::{CheckErrors, Error}, tests::{tl_env_factory, TopLevelMemoryEnvironmentGenerator}, @@ -70,7 +70,6 @@ fn test_block_height( version, contract, None, - ASTRules::PrecheckSize, ); let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); @@ -128,7 +127,6 @@ fn test_stacks_block_height( version, contract, None, - ASTRules::PrecheckSize, ); let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); @@ -188,7 +186,6 @@ fn test_tenure_height( version, contract, None, - ASTRules::PrecheckSize, ); let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); @@ -267,7 +264,6 @@ fn expect_contract_error( version, contract, None, - ASTRules::PrecheckSize, ); for (when, err_condition, expected_error) in expected_errors { diff --git a/clarity/src/vm/tooling/mod.rs b/clarity/src/vm/tooling/mod.rs index 0713d4576f0..3702f6d3d99 100644 --- a/clarity/src/vm/tooling/mod.rs +++ b/clarity/src/vm/tooling/mod.rs @@ -4,7 +4,7 @@ use super::analysis::ContractAnalysis; use super::types::TypeSignature; use super::ClarityVersion; use crate::vm::analysis::{run_analysis, CheckResult}; -use crate::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::vm::ast::build_ast; use crate::vm::costs::LimitedCostTracker; use crate::vm::database::MemoryBackingStore; use crate::vm::types::QualifiedContractIdentifier; @@ -16,16 +16,9 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let contract = build_ast_with_rules( - &contract_identifier, - snippet, - &mut (), - version, - epoch, - ASTRules::PrecheckSize, - ) - .unwrap() - .expressions; + let contract = build_ast(&contract_identifier, snippet, &mut (), version, epoch) + .unwrap() + .expressions; let mut marf = 
MemoryBackingStore::new(); let mut analysis_db = marf.as_analysis_db(); diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 3a71083673e..3fc16954bf2 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use clarity::vm::ast::ASTRules; use clarity::vm::contexts::GlobalContext; use clarity::vm::costs::LimitedCostTracker; use clarity::vm::errors::Error as ClarityError; @@ -654,25 +653,23 @@ fn inner_synthesize_pox_event_info( None, pox_contract.contract_context, |env| { - let base_event_info = env - .eval_read_only_with_rules(contract_id, &code_snippet, ASTRules::PrecheckSize) - .map_err(|e| { - error!( - "Failed to run event-info code snippet for '{}': {:?}", - function_name, &e + let base_event_info = + env.eval_read_only(contract_id, &code_snippet) + .map_err(|e| { + error!( + "Failed to run event-info code snippet for '{function_name}': {e:?}" ); - e - })?; + e + })?; - let data_event_info = env - .eval_read_only_with_rules(contract_id, &data_snippet, ASTRules::PrecheckSize) - .map_err(|e| { - error!( - "Failed to run data-info code snippet for '{}': {:?}", - function_name, &e - ); - e - })?; + let data_event_info = + env.eval_read_only(contract_id, &data_snippet) + .map_err(|e| { + error!( + "Failed to run data-info code snippet for '{function_name}': {e:?}" + ); + e + })?; // merge them let base_event_tuple = base_event_info @@ -683,7 +680,7 @@ fn inner_synthesize_pox_event_info( .expect("FATAL: unexpected clarity value"); let event_tuple = TupleData::shallow_merge(base_event_tuple, data_tuple).map_err(|e| { - error!("Failed to merge data-info and event-info: {:?}", &e); + error!("Failed to merge data-info and event-info: {e:?}"); e })?; diff --git a/pox-locking/src/events_24.rs b/pox-locking/src/events_24.rs index 40c10de9051..a23726b46e7 100644 --- a/pox-locking/src/events_24.rs +++ 
b/pox-locking/src/events_24.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use clarity::vm::ast::ASTRules; use clarity::vm::contexts::GlobalContext; use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; @@ -386,25 +385,23 @@ pub fn synthesize_pox_2_or_3_event_info( None, pox_2_contract.contract_context, |env| { - let base_event_info = env - .eval_read_only_with_rules(contract_id, &code_snippet, ASTRules::PrecheckSize) - .map_err(|e| { - error!( - "Failed to run event-info code snippet for '{}': {:?}", - function_name, &e + let base_event_info = + env.eval_read_only(contract_id, &code_snippet) + .map_err(|e| { + error!( + "Failed to run event-info code snippet for '{function_name}': {e:?}" ); - e - })?; + e + })?; - let data_event_info = env - .eval_read_only_with_rules(contract_id, &data_snippet, ASTRules::PrecheckSize) - .map_err(|e| { - error!( - "Failed to run data-info code snippet for '{}': {:?}", - function_name, &e - ); - e - })?; + let data_event_info = + env.eval_read_only(contract_id, &data_snippet) + .map_err(|e| { + error!( + "Failed to run data-info code snippet for '{function_name}': {e:?}" + ); + e + })?; // merge them let base_event_tuple = base_event_info @@ -415,7 +412,7 @@ pub fn synthesize_pox_2_or_3_event_info( .expect("FATAL: unexpected clarity value"); let event_tuple = TupleData::shallow_merge(base_event_tuple, data_tuple).map_err(|e| { - error!("Failed to merge data-info and event-info: {:?}", &e); + error!("Failed to merge data-info and event-info: {e:?}"); e })?; diff --git a/stacks-node/src/nakamoto_node.rs b/stacks-node/src/nakamoto_node.rs index 3bfa8202708..83c66694cfa 100644 --- a/stacks-node/src/nakamoto_node.rs +++ b/stacks-node/src/nakamoto_node.rs @@ -189,17 +189,6 @@ impl StacksNode { keychain.set_nakamoto_sk(mining_key); } - // we can call _open_ here rather than 
_connect_, since connect is first called in - // make_genesis_block - let mut sortdb = SortitionDB::open( - &config.get_burn_db_file_path(), - true, - burnchain.pox_constants.clone(), - ) - .expect("Error while instantiating sortition db"); - - NeonNode::setup_ast_size_precheck(&config, &mut sortdb); - let _ = config .connect_mempool_db() .expect("FATAL: database failure opening mempool"); diff --git a/stacks-node/src/neon_node.rs b/stacks-node/src/neon_node.rs index 7c798750317..1f57c1e9309 100644 --- a/stacks-node/src/neon_node.rs +++ b/stacks-node/src/neon_node.rs @@ -154,7 +154,6 @@ use std::time::{Duration, Instant}; use std::{fs, mem, thread}; use clarity::boot_util::boot_code_id; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ @@ -721,11 +720,6 @@ impl MicroblockMinerThread { })?; let burn_height = block_snapshot.block_height; - let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { - error!("Failed to get AST rules for microblock: {e}"); - e - })?; - let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), burn_height) .map_err(|e| { error!("Failed to get epoch for microblock: {e}"); @@ -790,7 +784,6 @@ impl MicroblockMinerThread { chainstate.mainnet, epoch_id, &mined_microblock, - ASTRules::PrecheckSize, ) { // nope! warn!( @@ -830,19 +823,10 @@ impl MicroblockMinerThread { ); } } - if !Relayer::process_mined_problematic_blocks(ast_rules, ASTRules::PrecheckSize) { - // don't process it - warn!( - "Will NOT process our problematic mined microblock {}", - &mined_microblock.block_hash() - ); - return Err(ChainstateError::NoTransactionsToMine); - } else { - warn!( - "Will process our problematic mined microblock {}", - &mined_microblock.block_hash() - ) - } + warn!( + "Will process our problematic mined microblock {}", + &mined_microblock.block_hash() + ) } // cancelled? 
@@ -3072,7 +3056,6 @@ impl RelayerThread { })? .block_height; - let ast_rules = SortitionDB::get_ast_rules(self.sortdb_ref().conn(), burn_height)?; let epoch_id = SortitionDB::get_stacks_epoch(self.sortdb_ref().conn(), burn_height)? .expect("FATAL: no epoch defined") .epoch_id; @@ -3082,7 +3065,6 @@ impl RelayerThread { self.chainstate_ref().mainnet, epoch_id, anchored_block, - ASTRules::PrecheckSize, ) { // nope! warn!( @@ -3118,19 +3100,10 @@ impl RelayerThread { ); } } - if !Relayer::process_mined_problematic_blocks(ast_rules, ASTRules::PrecheckSize) { - // don't process it - warn!( - "Will NOT process our problematic mined block {}", - &anchored_block.block_hash() - ); - return Err(ChainstateError::NoTransactionsToMine); - } else { - warn!( - "Will process our problematic mined block {}", - &anchored_block.block_hash() - ) - } + warn!( + "Will process our problematic mined block {}", + &anchored_block.block_hash() + ) } // Preprocess the anchored block @@ -4653,27 +4626,6 @@ impl StacksNode { node_privkey } - /// Set up the AST size-precheck height, if configured - pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { - if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { - info!( - "Override burnchain height of {:?} to {ast_precheck_size_height}", - ASTRules::PrecheckSize - ); - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height( - &mut tx, - ASTRules::PrecheckSize, - ast_precheck_size_height, - ) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } - } - /// Set up the mempool DB by making sure it exists. /// Panics on failure. 
fn setup_mempool_db(config: &Config) -> MemPoolDB { @@ -5017,17 +4969,6 @@ impl StacksNode { let atlas_config = config.atlas.clone(); let keychain = Keychain::default(config.node.seed.clone()); - // we can call _open_ here rather than _connect_, since connect is first called in - // make_genesis_block - let mut sortdb = SortitionDB::open( - &config.get_burn_db_file_path(), - true, - burnchain.pox_constants.clone(), - ) - .expect("Error while instantiating sortition db"); - - Self::setup_ast_size_precheck(&config, &mut sortdb); - let _ = Self::setup_mempool_db(&config); let mut p2p_net = Self::setup_peer_network(&config, &atlas_config, burnchain); diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index 4bb6db6f1dc..3188ad2b6f5 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -24,7 +24,6 @@ use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::boot_util::boot_code_addr; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::representations::ContractName; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; @@ -3194,7 +3193,6 @@ fn block_proposal_api_endpoint() { &tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); assert!( diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index 8943a8d22ef..b24e37252ca 100644 --- a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -6,7 +6,6 @@ use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; @@ -8144,16 +8143,13 
@@ fn test_problematic_blocks_are_not_mined() { assert!(found); - let (tip, cur_ast_rules) = { + let tip = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) + tip }; - assert_eq!(cur_ast_rules, ASTRules::Typical); - // add another bad tx to the mempool debug!("Submit problematic tx_high transaction {tx_high_txid}"); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); @@ -8171,17 +8167,6 @@ fn test_problematic_blocks_are_not_mined() { }) .expect("Failed waiting for blocks to be processed"); - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // new rules took effect - assert_eq!(cur_ast_rules, ASTRules::PrecheckSize); - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; @@ -8479,16 +8464,13 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { assert!(found); - let (tip, cur_ast_rules) = { + let tip = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) + tip }; - assert_eq!(cur_ast_rules, ASTRules::Typical); - btc_regtest_controller.build_next_block(1); // wait for runloop to advance @@ -8500,35 +8482,6 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { break; } } - let cur_ast_rules = { - let sortdb = 
btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // new rules took effect - assert_eq!(cur_ast_rules, ASTRules::PrecheckSize); - - // the follower we will soon boot up will start applying the new AST rules at this height. - // Make it so the miner does *not* follow the rules - { - let sortdb = btc_regtest_controller.sortdb_mut(); - let mut tx = sortdb.tx_begin().unwrap(); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 10_000).unwrap(); - tx.commit().unwrap(); - } - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // we reverted to the old rules (but the follower won't) - assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
@@ -8551,17 +8504,6 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let cur_ast_rules = - SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // we reverted to the old rules (but the follower won't) - assert_eq!(cur_ast_rules, ASTRules::Typical); } let tip_info = get_chain_info(&conf); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 5a763acdf8f..04fc3035bdf 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -434,22 +434,6 @@ impl FromRow for VoteForAggregateKeyOp { } } -impl FromColumn for ASTRules { - fn from_column(row: &Row, column_name: &str) -> Result { - let x: u8 = row.get_unwrap(column_name); - let ast_rules = ASTRules::from_u8(x).ok_or(db_error::ParseError)?; - Ok(ast_rules) - } -} - -impl FromRow<(ASTRules, u64)> for (ASTRules, u64) { - fn from_row(row: &Row) -> Result<(ASTRules, u64), db_error> { - let ast_rules = ASTRules::from_column(row, "ast_rule_id")?; - let height = u64::from_column(row, "block_height")?; - Ok((ast_rules, height)) - } -} - struct AcceptedStacksBlockHeader { pub tip_consensus_hash: ConsensusHash, // PoX tip pub consensus_hash: ConsensusHash, // stacks block consensus hash @@ -3479,53 +3463,6 @@ impl SortitionDB { Ok(()) } - #[cfg(any(test, feature = "testing"))] - pub fn override_ast_rule_height( - tx: &mut DBTx<'_>, - ast_rules: ASTRules, - height: u64, - ) -> Result<(), db_error> { - let rules = params![u64_to_sql(height)?, (ast_rules as u8)]; - - tx.execute( - "UPDATE ast_rule_heights SET block_height = ?1 WHERE ast_rule_id = ?2", - rules, - )?; - Ok(()) - } 
- - #[cfg(not(any(test, feature = "testing")))] - pub fn override_ast_rule_height<'a>( - _tx: &mut DBTx<'a>, - _ast_rules: ASTRules, - _height: u64, - ) -> Result<(), db_error> { - Ok(()) - } - - /// What's the default AST rules at the given block height? - pub fn get_ast_rules(conn: &DBConn, height: u64) -> Result { - let ast_rule_sets: Vec<(ASTRules, u64)> = query_rows( - conn, - "SELECT * FROM ast_rule_heights ORDER BY block_height ASC", - NO_PARAMS, - )?; - - assert!(!ast_rule_sets.is_empty()); - let first_rules = ast_rule_sets.first().unwrap(); - let mut last_height = first_rules.1; - let mut last_rules = first_rules.0; - for (ast_rules, ast_rule_height) in ast_rule_sets.into_iter() { - if last_height <= height && height < ast_rule_height { - return Ok(last_rules); - } - last_height = ast_rule_height; - last_rules = ast_rules; - } - - return Ok(last_rules); - } - /// Store a pre-processed reward set. /// `sortition_id` is the first sortition ID of the prepare phase. /// No-op if the reward set has a selected-and-unknown anchor block. 
@@ -10591,56 +10528,6 @@ pub mod tests { assert_eq!(ancestors, vec![BurnchainHeaderHash([0xfe; 32])]); } - #[test] - fn test_get_set_ast_rules() { - let block_height = 123; - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(); - let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); - - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), 0).unwrap(), - ASTRules::Typical - ); - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), 1).unwrap(), - ASTRules::Typical - ); - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), AST_RULES_PRECHECK_SIZE - 1).unwrap(), - ASTRules::Typical - ); - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), AST_RULES_PRECHECK_SIZE).unwrap(), - ASTRules::PrecheckSize - ); - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), AST_RULES_PRECHECK_SIZE + 1).unwrap(), - ASTRules::PrecheckSize - ); - - { - let mut tx = db.tx_begin().unwrap(); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1).unwrap(); - tx.commit().unwrap(); - } - - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), 0).unwrap(), - ASTRules::Typical - ); - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), 1).unwrap(), - ASTRules::PrecheckSize - ); - assert_eq!( - SortitionDB::get_ast_rules(db.conn(), 2).unwrap(), - ASTRules::PrecheckSize - ); - } - #[test] fn test_get_chosen_pox_anchor() { let path_root = "/tmp/test_get_chosen_pox_anchor"; diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index c05009425a7..3a35c0ea9d6 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -634,14 +634,13 @@ fn make_genesis_block_with_recipients( let iconn = sort_db.index_handle_at_tip(); let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); - let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = 
builder .epoch_begin(&iconn, &mut miner_epoch_info) .unwrap() .0; builder - .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules, None) + .try_mine_tx(&mut epoch_tx, &coinbase_op, None) .unwrap(); let block = builder.mine_anchored_block(&mut epoch_tx); @@ -899,20 +898,17 @@ fn make_stacks_block_with_input( ) .unwrap(); let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); - let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = builder .epoch_begin(&iconn, &mut miner_epoch_info) .unwrap() .0; builder - .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules, None) + .try_mine_tx(&mut epoch_tx, &coinbase_op, None) .unwrap(); for tx in txs { - builder - .try_mine_tx(&mut epoch_tx, tx, ast_rules, None) - .unwrap(); + builder.try_mine_tx(&mut epoch_tx, tx, None).unwrap(); } let block = builder.mine_anchored_block(&mut epoch_tx); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 9f45f744303..da0980c63df 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -18,7 +18,6 @@ use std::collections::VecDeque; use std::sync::{Arc, Mutex}; use clarity::boot_util::boot_code_id; -use clarity::vm::ast::ASTRules; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId, }; @@ -154,7 +153,6 @@ impl OnChainRewardSetProvider<'_, T> { sort_handle, &boot_code_id(SIGNERS_NAME, chainstate.mainnet), &format!("(map-get? cycle-set-height u{})", cycle), - ASTRules::PrecheckSize, ) .map_err(ChainstateError::ClarityError)? 
.expect_optional() diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index e679a61c980..31f495605a2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksBlockId, @@ -593,7 +592,6 @@ impl NakamotoBlockBuilder { &initial_txs, settings, event_observer, - ASTRules::PrecheckSize, replay_transactions, ) { Ok(x) => x, @@ -664,7 +662,6 @@ impl BlockBuilder for NakamotoBlockBuilder { tx: &StacksTransaction, tx_len: u64, limit_behavior: &BlockLimitFunction, - ast_rules: ASTRules, max_execution_time: Option, ) -> TransactionResult { if self.bytes_so_far + tx_len >= u64::from(MAX_EPOCH_SIZE) { @@ -702,7 +699,6 @@ impl BlockBuilder for NakamotoBlockBuilder { clarity_tx.config.mainnet, clarity_tx.get_epoch(), tx, - ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", @@ -717,7 +713,6 @@ impl BlockBuilder for NakamotoBlockBuilder { clarity_tx, tx, quiet, - ast_rules, max_execution_time, ) { Ok(x) => x, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e66b0bae595..f30fc67ca45 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -18,7 +18,6 @@ use std::collections::{HashMap, HashSet}; use std::ops::{Deref, DerefMut, Range}; use clarity::util::secp256k1::Secp256k1PublicKey; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{STXEventType, STXMintEventData, StacksTransactionEvent}; use clarity::vm::types::PrincipalData; @@ -4387,7 +4386,6 @@ impl NakamotoChainState { block.txs.len() ); - let ast_rules = 
ASTRules::PrecheckSize; let next_block_height = block.header.chain_length; let first_block_height = burn_dbconn.context.first_block_height; @@ -4602,21 +4600,17 @@ impl NakamotoChainState { ); // process anchored block - let (block_fees, txs_receipts) = match StacksChainState::process_block_transactions( - &mut clarity_tx, - &block.txs, - 0, - ast_rules, - ) { - Err(e) => { - let msg = format!("Invalid Stacks block {}: {:?}", &block_hash, &e); - warn!("{}", &msg); + let (block_fees, txs_receipts) = + match StacksChainState::process_block_transactions(&mut clarity_tx, &block.txs, 0) { + Err(e) => { + let msg = format!("Invalid Stacks block {}: {:?}", &block_hash, &e); + warn!("{}", &msg); - clarity_tx.rollback_block(); - return Err(ChainstateError::InvalidStacksBlock(msg)); - } - Ok((block_fees, _block_burns, txs_receipts)) => (block_fees, txs_receipts), - }; + clarity_tx.rollback_block(); + return Err(ChainstateError::InvalidStacksBlock(msg)); + } + Ok((block_fees, _block_burns, txs_receipts)) => (block_fees, txs_receipts), + }; tx_receipts.extend(txs_receipts); @@ -5058,12 +5052,7 @@ impl NakamotoChainState { let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); clarity_tx.connection().as_transaction(|clarity| { let (ast, analysis) = clarity - .analyze_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &contract_content, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_id, ClarityVersion::Clarity2, &contract_content) .unwrap(); clarity .initialize_smart_contract( diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 59bcd2854a9..d4dd851b066 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -505,8 +505,6 @@ impl NakamotoBlockBuilder { tenure_id_consensus_hash: &ConsensusHash, txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> { - use clarity::vm::ast::ASTRules; - debug!( "Build shadow 
Nakamoto block from {} transactions", txs.len() @@ -537,7 +535,6 @@ impl NakamotoBlockBuilder { &tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ) { TransactionResult::Success(..) => { diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 16cacd35302..4afd5ab339d 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -990,8 +990,6 @@ impl TestStacksNode { burn_dbconn: &SortitionHandleConn, txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), ChainstateError> { - use clarity::vm::ast::ASTRules; - debug!("Build Nakamoto block from {} transactions", txs.len()); let (mut chainstate, _) = chainstate_handle.reopen()?; @@ -1015,7 +1013,6 @@ impl TestStacksNode { &tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ) { TransactionResult::Success(..) => { diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 900a560eac6..845c47a28e0 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -2,7 +2,6 @@ use std::ops::Deref; use clarity::vm::analysis::arithmetic_checker::ArithmeticOnlyChecker; use clarity::vm::analysis::mem_type_check; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::contexts::OwnedEnvironment; use clarity::vm::database::*; @@ -509,10 +508,6 @@ impl BurnStateDB for TestSimBurnStateDB { None } } - - fn get_ast_rules(&self, _block_height: u32) -> ASTRules { - ASTRules::PrecheckSize - } } #[cfg(test)] @@ -523,10 +518,9 @@ impl HeadersDB for TestSimHeadersDB { ) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { Some(BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap()) + } else if self.get_burn_block_height_for_block(id_bhh).is_none() { + None } else { - if 
self.get_burn_block_height_for_block(id_bhh).is_none() { - return None; - } Some(BurnchainHeaderHash(id_bhh.0.clone())) } } @@ -668,7 +662,6 @@ fn pox_2_contract_caller_units() { ClarityVersion::Clarity2, &POX_2_TESTNET_CODE, None, - ASTRules::PrecheckSize, ) .unwrap() }); @@ -682,8 +675,7 @@ fn pox_2_contract_caller_units() { (start-burn-ht uint) (lock-period uint)) (contract-call? .pox-2 stack-stx amount-ustx pox-addr start-burn-ht lock-period))", - None, - ASTRules::PrecheckSize) + None) .unwrap(); let burn_height = env.eval_raw("burn-block-height").unwrap().0; @@ -899,7 +891,6 @@ fn pox_2_lock_extend_units() { ClarityVersion::Clarity2, &POX_2_TESTNET_CODE, None, - ASTRules::PrecheckSize, ) .unwrap(); env.execute_in_env(boot_code_addr(false).into(), None, None, |env| { @@ -1768,8 +1759,7 @@ fn test_deploy_smart_contract( version: ClarityVersion, ) -> std::result::Result<(), ClarityError> { block.as_transaction(|tx| { - let (ast, analysis) = - tx.analyze_smart_contract(contract_id, version, content, ASTRules::PrecheckSize)?; + let (ast, analysis) = tx.analyze_smart_contract(contract_id, version, content)?; tx.initialize_smart_contract(contract_id, version, &ast, content, None, |_, _| None, None)?; tx.save_analysis(contract_id, &analysis)?; return Ok(()); @@ -1812,7 +1802,6 @@ fn recency_tests() { ClarityVersion::Clarity2, &BOOT_CODE_POX_TESTNET, None, - ASTRules::PrecheckSize, ) .unwrap() }); @@ -1890,7 +1879,6 @@ fn delegation_tests() { ClarityVersion::Clarity2, &BOOT_CODE_POX_TESTNET, None, - ASTRules::PrecheckSize, ) .unwrap() }); @@ -2468,7 +2456,6 @@ fn test_vote_withdrawal() { ClarityVersion::Clarity1, &BOOT_CODE_COST_VOTING, None, - ASTRules::PrecheckSize, ) .unwrap(); @@ -2660,7 +2647,6 @@ fn test_vote_fail() { COST_VOTING_CONTRACT_TESTNET.clone(), &BOOT_CODE_COST_VOTING, None, - ASTRules::PrecheckSize, ) .unwrap(); @@ -2876,7 +2862,6 @@ fn test_vote_confirm() { COST_VOTING_CONTRACT_TESTNET.clone(), &BOOT_CODE_COST_VOTING, None, - ASTRules::PrecheckSize, 
) .unwrap(); @@ -2998,7 +2983,6 @@ fn test_vote_too_many_confirms() { COST_VOTING_CONTRACT_TESTNET.clone(), &BOOT_CODE_COST_VOTING, None, - ASTRules::PrecheckSize, ) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index f1c43787192..bd68c15d6c3 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -20,7 +20,6 @@ use std::sync::LazyLock; use clarity::types::Address; use clarity::vm::analysis::CheckErrors; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::{Error as ClarityError, TransactionConnection}; use clarity::vm::costs::LimitedCostTracker; use clarity::vm::database::{ClarityDatabase, NULL_BURN_STATE_DB, NULL_HEADER_DB}; @@ -390,14 +389,13 @@ impl StacksChainState { None, LimitedCostTracker::new_free(), |vm_env| { - vm_env.eval_read_only_with_rules( + vm_env.eval_read_only( &pox_contract, &format!(r#" (unwrap-panic (map-get? stacking-state {{ stacker: '{unlocked_principal} }})) "#, unlocked_principal = Value::Principal(principal.clone()) ), - ASTRules::PrecheckSize, ) }) .expect("FATAL: failed to query unlocked principal"); @@ -440,7 +438,7 @@ impl StacksChainState { None, LimitedCostTracker::new_free(), |vm_env| { - vm_env.eval_read_only_with_rules( + vm_env.eval_read_only( &pox_contract, &format!( r#" @@ -468,7 +466,6 @@ impl StacksChainState { cycle_to_unlock = Value::UInt(cycle_number.into()), pox_addr = user_pox_addr ), - ASTRules::PrecheckSize, ) }, ) @@ -641,7 +638,6 @@ impl StacksChainState { &iconn, &boot::boot_code_id(boot_contract_name, self.mainnet), code, - ASTRules::PrecheckSize, ) .map_err(Error::ClarityError) } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 6469c863fae..501b8e0ee95 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -20,7 +20,6 @@ use std::path::PathBuf; use std::{cmp, fs, io}; 
pub use clarity::vm::analysis::errors::{CheckError, CheckErrors}; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::costs::LimitedCostTracker; use clarity::vm::database::BurnStateDB; @@ -3895,7 +3894,6 @@ impl StacksChainState { pub fn process_microblocks_transactions( clarity_tx: &mut ClarityTx, microblocks: &[StacksMicroblock], - ast_rules: ASTRules, ) -> Result<(u128, u128, Vec), (Error, BlockHeaderHash)> { let mut fees = 0u128; let mut burns = 0u128; @@ -3904,7 +3902,7 @@ impl StacksChainState { debug!("Process microblock {}", µblock.block_hash()); for (tx_index, tx) in microblock.txs.iter().enumerate() { let (tx_fee, mut tx_receipt) = - StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules, None) + StacksChainState::process_transaction(clarity_tx, tx, false, None) .map_err(|e| (e, microblock.block_hash()))?; tx_receipt.microblock_header = Some(microblock.header.clone()); @@ -4461,14 +4459,13 @@ impl StacksChainState { clarity_tx: &mut ClarityTx, block_txs: &[StacksTransaction], mut tx_index: u32, - ast_rules: ASTRules, ) -> Result<(u128, u128, Vec), Error> { let mut fees = 0u128; let mut burns = 0u128; let mut receipts = vec![]; for tx in block_txs.iter() { let (tx_fee, mut tx_receipt) = - StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules, None)?; + StacksChainState::process_transaction(clarity_tx, tx, false, None)?; fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow"); tx_receipt.tx_index = tx_index; burns = burns @@ -4964,12 +4961,6 @@ impl StacksChainState { .get_sortition_id_from_consensus_hash(&parent_consensus_hash) .expect("Failed to get parent SortitionID from ConsensusHash"); - let parent_burn_height = - SortitionDB::get_block_snapshot_consensus(conn, &parent_consensus_hash)? 
- .expect("Failed to get snapshot for parent's sortition") - .block_height; - let microblock_ast_rules = SortitionDB::get_ast_rules(conn, parent_burn_height)?; - // find matured miner rewards, so we can grant them within the Clarity DB tx. let (latest_matured_miners, matured_miner_parent) = { let latest_miners = StacksChainState::get_scheduled_block_rewards( @@ -5066,7 +5057,6 @@ impl StacksChainState { match StacksChainState::process_microblocks_transactions( &mut clarity_tx, parent_microblocks, - microblock_ast_rules, ) { Ok((fees, burns, events)) => (fees, burns, events), Err((e, mblock_header_hash)) => { @@ -5330,9 +5320,6 @@ impl StacksChainState { block.txs.len() ); - let ast_rules = - SortitionDB::get_ast_rules(burn_dbconn.tx(), chain_tip_burn_header_height.into())?; - let mainnet = chainstate_tx.get_config().mainnet; let next_block_height = block.header.total_work.work; @@ -5535,11 +5522,10 @@ impl StacksChainState { &block.txs, u32::try_from(microblock_txs_receipts.len()) .expect("more than 2^32 tx receipts"), - ast_rules, ) { Err(e) => { - let msg = format!("Invalid Stacks block {}: {:?}", block.block_hash(), &e); - warn!("{}", &msg); + let msg = format!("Invalid Stacks block {}: {e:?}", block.block_hash()); + warn!("{msg}"); clarity_tx.rollback_block(); return Err(Error::InvalidStacksBlock(msg)); @@ -6812,7 +6798,6 @@ impl StacksChainState { pub mod test { use std::fs; - use clarity::vm::ast::ASTRules; use clarity::vm::types::StacksAddressExtensions; use rand::{thread_rng, Rng}; use stacks_common::types::chainstate::{BlockHeaderHash, StacksWorkScore}; @@ -10177,7 +10162,6 @@ pub mod test { µblock_privkey, &anchored_block.0.block_hash(), microblocks.last().map(|mblock| &mblock.header), - ASTRules::PrecheckSize, ) .unwrap(); microblocks.push(microblock); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index b54fef7eab0..bba77d84f6d 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ 
b/stackslib/src/chainstate/stacks/db/mod.rs @@ -22,7 +22,6 @@ use std::path::PathBuf; use std::{fs, io}; use clarity::vm::analysis::analysis_db::AnalysisDatabase; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::{ @@ -1333,7 +1332,6 @@ impl StacksChainState { clarity, &boot_code_smart_contract, &boot_code_account, - ASTRules::PrecheckSize, None, ) })?; @@ -1960,7 +1958,6 @@ impl StacksChainState { burn_dbconn, contract, code, - ASTRules::PrecheckSize, ); result.unwrap() } @@ -1979,7 +1976,6 @@ impl StacksChainState { burn_dbconn, contract, code, - ASTRules::PrecheckSize, ) } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 0bccc12789b..cb02677236a 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -18,7 +18,6 @@ use std::collections::{HashMap, HashSet}; use clarity::vm::analysis::types::ContractAnalysis; use clarity::vm::ast::errors::ParseErrors; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::contexts::{AssetMap, AssetMapEntry, Environment}; use clarity::vm::costs::cost_functions::ClarityCostFunction; @@ -1045,7 +1044,6 @@ impl StacksChainState { clarity_tx: &mut ClarityTransactionConnection, tx: &StacksTransaction, origin_account: &StacksAccount, - ast_rules: ASTRules, max_execution_time: Option, ) -> Result { match tx.payload { @@ -1276,7 +1274,6 @@ impl StacksChainState { &contract_id, clarity_version, &contract_code_str, - ast_rules, ); let (contract_ast, contract_analysis) = match analysis_resp { Ok(x) => x, @@ -1291,19 +1288,17 @@ impl StacksChainState { )); } other_error => { - if ast_rules == ASTRules::PrecheckSize { - // a [Vary]ExpressionDepthTooDeep error in this situation - // invalidates the block, since this should have prevented the - 
// block from getting relayed in the first place - if let clarity_error::Parse(ref parse_error) = &other_error { - match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); - return Err(Error::ClarityError(other_error)); - } - _ => {} + // a [Vary]ExpressionDepthTooDeep error in this situation + // invalidates the block, since this should have prevented the + // block from getting relayed in the first place + if let clarity_error::Parse(ref parse_error) = &other_error { + match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); + return Err(Error::ClarityError(other_error)); } + _ => {} } } if let clarity_error::Parse(err) = &other_error { @@ -1325,10 +1320,8 @@ impl StacksChainState { .expect("BUG: total block cost decreased"); info!( - "Runtime error in contract analysis for {}: {:?}", - &contract_id, &other_error; + "Runtime error in contract analysis for {contract_id}: {other_error:?}"; "txid" => %tx.txid(), - "AST rules" => %format!("{:?}", &ast_rules) ); let receipt = StacksTransactionReceipt::from_analysis_failure( tx.clone(), @@ -1564,7 +1557,6 @@ impl StacksChainState { clarity_block: &mut ClarityTx, tx: &StacksTransaction, quiet: bool, - ast_rules: ASTRules, max_execution_time: Option, ) -> Result<(u64, StacksTransactionReceipt), Error> { debug!("Process transaction {} ({})", tx.txid(), tx.payload.name()); @@ -1603,7 +1595,6 @@ impl StacksChainState { &mut transaction, tx, &origin_account, - ast_rules, max_execution_time, )?; @@ -1632,7 +1623,6 @@ impl StacksChainState { &mut transaction, tx, &origin_account, - ast_rules, None, )?; @@ -1681,31 +1671,24 @@ pub mod test { pub const TestBurnStateDB_20: 
UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch20, - ast_rules: ASTRules::Typical, }; pub const TestBurnStateDB_2_05: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch2_05, - ast_rules: ASTRules::PrecheckSize, }; pub const TestBurnStateDB_21: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch21, - ast_rules: ASTRules::PrecheckSize, }; pub const TestBurnStateDB_25: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch25, - ast_rules: ASTRules::PrecheckSize, }; pub const TestBurnStateDB_30: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch30, - ast_rules: ASTRules::PrecheckSize, }; pub const TestBurnStateDB_31: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch31, - ast_rules: ASTRules::PrecheckSize, }; pub const TestBurnStateDB_32: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch32, - ast_rules: ASTRules::PrecheckSize, }; pub const ALL_BURN_DBS: &[&dyn BurnStateDB] = &[ @@ -1794,7 +1777,6 @@ pub mod test { nonce: 0, stx_balance: STXBalance::Unlocked { amount: 100 }, }, - ASTRules::PrecheckSize, None, ) .unwrap(); @@ -1855,14 +1837,8 @@ pub mod test { StacksChainState::account_credit(tx, &addr.to_account_principal(), 223) }); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let account_after = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); @@ -1907,14 +1883,8 @@ pub mod test { assert_eq!(recv_account.stx_balance.amount_unlocked(), 0); assert_eq!(recv_account.nonce, 0); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, 
&signed_tx, false, None).unwrap(); let account_after = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); @@ -2102,13 +2072,7 @@ pub mod test { assert_eq!(account.stx_balance.amount_unlocked(), 123); assert_eq!(account.nonce, 0); - let res = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ); + let res = StacksChainState::process_transaction(&mut conn, &signed_tx, false, None); if let Err(Error::InvalidStacksTransaction(msg, false)) = res { assert!(msg.contains(&err_frag), "{err_frag}"); } else { @@ -2193,14 +2157,8 @@ pub mod test { StacksChainState::account_credit(tx, &addr.to_account_principal(), 123) }); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let account_after = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); @@ -2274,14 +2232,8 @@ pub mod test { let account = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); assert_eq!(account.nonce, 0); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let account = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); assert_eq!(account.nonce, 1); @@ -2369,13 +2321,7 @@ pub mod test { StacksChainState::get_account(&mut conn, &addr.to_account_principal()); assert_eq!(account.nonce, next_nonce); - let res = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ); + let res = StacksChainState::process_transaction(&mut conn, &signed_tx, false, None); if expected_behavior[i] { assert!(res.is_ok()); @@ -2463,14 +2409,9 @@ pub mod test 
{ ContractName::from(contract_name), ); - let (fee, receipt) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, receipt) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None) + .unwrap(); // Verify that the syntax error is recorded in the receipt let expected_error = @@ -2568,14 +2509,9 @@ pub mod test { assert_eq!(account.nonce, i as u64); // runtime error should be handled - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None) + .unwrap(); // account nonce should increment let account = @@ -2657,14 +2593,8 @@ pub mod test { StacksChainState::get_account(&mut conn, &addr_sponsor.to_account_principal()); assert_eq!(account.nonce, 0); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let account = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); assert_eq!(account.nonce, 1); @@ -2771,27 +2701,16 @@ pub mod test { StacksChainState::get_data_var(&mut conn, &contract_id, "bar").unwrap(); assert!(var_before_res.is_none()); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let var_before_set_res = StacksChainState::get_data_var(&mut conn, &contract_id, "bar").unwrap(); assert_eq!(var_before_set_res, Some(Value::Int(0))); - let (fee_2, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_2, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let 
(fee_2, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx_2, false, None) + .unwrap(); let account = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); assert_eq!(account.nonce, 1); @@ -2907,14 +2826,8 @@ pub mod test { StacksChainState::get_data_var(&mut conn, &contract_id, "savedContract").unwrap(); assert!(var_before_res.is_none()); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let var_before_set_res = StacksChainState::get_data_var(&mut conn, &contract_id, "savedContract").unwrap(); @@ -2923,14 +2836,9 @@ pub mod test { Some(Value::Principal(PrincipalData::from(addr.clone()))) ); - let (fee_2, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_2, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee_2, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx_2, false, None) + .unwrap(); let account = StacksChainState::get_account(&mut conn, &addr.to_account_principal()); assert_eq!(account.nonce, 1); @@ -2999,14 +2907,8 @@ pub mod test { StandardPrincipalData::from(addr.clone()), ContractName::from("hello-world"), ); - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); // contract-calls that don't commit let contract_calls = vec![ @@ -3051,14 +2953,9 @@ pub mod test { StacksChainState::get_account(&mut conn, &addr_2.to_account_principal()); assert_eq!(account_2.nonce, next_nonce); - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_2, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, 
&signed_tx_2, false, None) + .unwrap(); // nonce should have incremented next_nonce += 1; @@ -3116,14 +3013,8 @@ pub mod test { &ConsensusHash([(dbi + 1) as u8; 20]), &BlockHeaderHash([(dbi + 1) as u8; 32]), ); - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); conn.commit_block(); } @@ -3222,14 +3113,8 @@ pub mod test { &ConsensusHash([(dbi + 1) as u8; 20]), &BlockHeaderHash([(dbi + 1) as u8; 32]), ); - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let next_nonce = 0; @@ -3261,13 +3146,8 @@ pub mod test { assert_eq!(account_2.nonce, next_nonce); // transaction is invalid, and won't be mined - let res = StacksChainState::process_transaction( - &mut conn, - &signed_tx_2, - false, - ASTRules::PrecheckSize, - None, - ); + let res = + StacksChainState::process_transaction(&mut conn, &signed_tx_2, false, None); assert!(res.is_err()); // nonce should NOT have incremented @@ -3293,14 +3173,8 @@ pub mod test { &ConsensusHash([3u8; 20]), &BlockHeaderHash([3u8; 32]), ); - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let mut next_nonce = 0; @@ -3334,13 +3208,7 @@ pub mod test { assert_eq!(account_2.nonce, next_nonce); // this is expected to be mined - let res = StacksChainState::process_transaction( - &mut conn, - &signed_tx_2, - false, - ASTRules::PrecheckSize, - None, - ); + let res = StacksChainState::process_transaction(&mut conn, &signed_tx_2, false, None); assert!(res.is_ok()); 
next_nonce += 1; @@ -3460,14 +3328,8 @@ pub mod test { StacksChainState::get_data_var(&mut conn, &contract_id, "bar").unwrap(); assert!(var_before_res.is_none()); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx, false, None).unwrap(); let account_publisher = StacksChainState::get_account(&mut conn, &addr_publisher.to_account_principal()); @@ -3477,14 +3339,9 @@ pub mod test { StacksChainState::get_data_var(&mut conn, &contract_id, "bar").unwrap(); assert_eq!(var_before_set_res, Some(Value::Int(0))); - let (fee_2, _) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_2, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee_2, _) = + StacksChainState::process_transaction(&mut conn, &signed_tx_2, false, None) + .unwrap(); let account_origin = StacksChainState::get_account(&mut conn, &addr_origin.to_account_principal()); @@ -3989,14 +3846,9 @@ pub mod test { .unwrap_err(); // publish contract - let _ = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let _ = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); // no initial stackaroos balance let account_stackaroos_balance = StacksChainState::get_account_ft( @@ -4015,14 +3867,8 @@ pub mod test { let mut expected_next_name: u64 = 0; for tx_pass in post_conditions_pass.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_pass, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_pass, false, None).unwrap(); expected_stackaroos_balance += 100; expected_nonce += 1; @@ -4046,14 +3892,8 @@ pub mod test { } for tx_pass in post_conditions_pass_payback.iter() { - let (_fee, _) = 
StacksChainState::process_transaction( - &mut conn, - tx_pass, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_pass, false, None).unwrap(); expected_stackaroos_balance -= 100; expected_payback_stackaroos_balance += 100; expected_recv_nonce += 1; @@ -4094,14 +3934,8 @@ pub mod test { } for tx_pass in post_conditions_pass_nft.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_pass, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_pass, false, None).unwrap(); expected_nonce += 1; let expected_value = @@ -4125,14 +3959,8 @@ pub mod test { } for tx_fail in post_conditions_fail.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_fail, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_fail, false, None).unwrap(); expected_nonce += 1; // no change in balance @@ -4169,14 +3997,8 @@ pub mod test { } for tx_fail in post_conditions_fail_payback.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_fail, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_fail, false, None).unwrap(); expected_recv_nonce += 1; // no change in balance @@ -4218,14 +4040,8 @@ pub mod test { } for tx_fail in post_conditions_fail_nft.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_fail, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_fail, false, None).unwrap(); expected_nonce += 1; // nft shouldn't exist -- the nft-mint! 
should have been rolled back @@ -4714,14 +4530,9 @@ pub mod test { .unwrap_err(); // publish contract - let _ = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let _ = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); // no initial stackaroos balance let account_stackaroos_balance = StacksChainState::get_account_ft( @@ -4739,14 +4550,8 @@ pub mod test { let mut expected_payback_stackaroos_balance = 0; for tx_pass in post_conditions_pass.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_pass, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_pass, false, None).unwrap(); expected_stackaroos_balance += 100; expected_nonce += 1; @@ -4787,14 +4592,8 @@ pub mod test { } for tx_pass in post_conditions_pass_payback.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_pass, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_pass, false, None).unwrap(); expected_stackaroos_balance -= 100; expected_payback_stackaroos_balance += 100; expected_recv_nonce += 1; @@ -4854,14 +4653,8 @@ pub mod test { } for tx_fail in post_conditions_fail.iter() { - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_fail, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + StacksChainState::process_transaction(&mut conn, tx_fail, false, None).unwrap(); expected_nonce += 1; // no change in balance @@ -4913,14 +4706,8 @@ pub mod test { for tx_fail in post_conditions_fail_payback.iter() { eprintln!("tx fail {tx_fail:?}"); - let (_fee, _) = StacksChainState::process_transaction( - &mut conn, - tx_fail, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, _) = + 
StacksChainState::process_transaction(&mut conn, tx_fail, false, None).unwrap(); expected_recv_nonce += 1; // no change in balance @@ -5080,23 +4867,13 @@ pub mod test { ); // publish contract - let _ = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let _ = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); - let (_fee, receipt) = StacksChainState::process_transaction( - &mut conn, - &contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (_fee, receipt) = + StacksChainState::process_transaction(&mut conn, &contract_call_tx, false, None) + .unwrap(); assert!(receipt.post_condition_aborted); assert_eq!(receipt.result.to_string(), "(ok (err u1))"); @@ -8189,19 +7966,13 @@ pub mod test { &ConsensusHash([(dbi + 1) as u8; 20]), &BlockHeaderHash([(dbi + 1) as u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); let err = StacksChainState::process_transaction( &mut conn, &signed_contract_call_tx, false, - ASTRules::PrecheckSize, None, ) .unwrap_err(); @@ -8222,22 +7993,12 @@ pub mod test { &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap(); assert_eq!(fee, 1); assert_eq!( 
@@ -8382,7 +8143,6 @@ pub mod test { &mut conn, &signed_tx_poison_microblock, false, - ASTRules::PrecheckSize, None, ) .unwrap(); @@ -8503,7 +8263,6 @@ pub mod test { &mut conn, &signed_tx_poison_microblock, false, - ASTRules::PrecheckSize, None, ) .unwrap_err(); @@ -8622,7 +8381,6 @@ pub mod test { &mut conn, &signed_tx_poison_microblock_1, false, - ASTRules::PrecheckSize, None, ) .unwrap(); @@ -8637,7 +8395,6 @@ pub mod test { &mut conn, &signed_tx_poison_microblock_2, false, - ASTRules::PrecheckSize, None, ) .unwrap(); @@ -8790,9 +8547,6 @@ pub mod test { ) -> Option<(Vec, u128)> { None } - fn get_ast_rules(&self, _block_height: u32) -> ASTRules { - ASTRules::PrecheckSize - } } let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); @@ -8900,13 +8654,9 @@ pub mod test { ); // verify that 2.1 gating is applied for clarity2 - if let Err(Error::InvalidStacksTransaction(msg, ..)) = StacksChainState::process_transaction( - &mut conn, - &smart_contract_v2, - false, - ASTRules::PrecheckSize, - None, - ) { + if let Err(Error::InvalidStacksTransaction(msg, ..)) = + StacksChainState::process_transaction(&mut conn, &smart_contract_v2, false, None) + { assert!(msg.find("not in Stacks epoch 2.1 or later").is_some()); } else { panic!("FATAL: did not recieve the appropriate error in processing a clarity2 tx in pre-2.1 epoch"); @@ -9002,9 +8752,6 @@ pub mod test { ) -> Option<(Vec, u128)> { None } - fn get_ast_rules(&self, _block_height: u32) -> ASTRules { - ASTRules::PrecheckSize - } } let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); @@ -9193,24 +8940,14 @@ pub mod test { &ConsensusHash([1u8; 20]), &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); assert_eq!(fee, 0); - let 
(fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap(); assert_eq!(fee, 1); conn.commit_block(); @@ -9223,24 +8960,14 @@ pub mod test { &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); assert_eq!(fee, 0); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap(); assert_eq!(fee, 1); conn.commit_block(); @@ -9253,24 +8980,14 @@ pub mod test { &ConsensusHash([3u8; 20]), &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); assert_eq!(fee, 0); - let err = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap_err(); + let err = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap_err(); conn.commit_block(); assert!(matches!(err, Error::InvalidFee), "{err:?}"); @@ -9366,24 +9083,14 @@ pub mod test { &ConsensusHash([1u8; 20]), &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); 
+ let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); assert_eq!(fee, 0); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap(); assert_eq!(fee, 1); conn.commit_block(); @@ -9396,24 +9103,14 @@ pub mod test { &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); assert_eq!(fee, 0); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap(); assert_eq!(fee, 1); conn.commit_block(); @@ -9426,24 +9123,14 @@ pub mod test { &ConsensusHash([3u8; 20]), &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( - &mut conn, - &signed_contract_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let (fee, _) = + StacksChainState::process_transaction(&mut conn, &signed_contract_tx, false, None) + .unwrap(); assert_eq!(fee, 0); - let err = StacksChainState::process_transaction( - &mut conn, - &signed_contract_call_tx, - false, - ASTRules::PrecheckSize, - None, - ) - .unwrap_err(); + let err = + StacksChainState::process_transaction(&mut conn, &signed_contract_call_tx, false, None) + .unwrap_err(); conn.commit_block(); assert!(matches!(err, Error::InvalidFee), "{err:?}"); @@ -9454,7 +9141,6 @@ pub mod test { clarity_block: &mut ClarityTx, tx: &StacksTransaction, quiet: bool, - 
ast_rules: ASTRules, ) -> Result<(u64, StacksTransactionReceipt), Error> { let epoch = clarity_block.get_epoch(); @@ -9467,7 +9153,7 @@ pub mod test { return Err(Error::InvalidStacksTransaction(msg, false)); } - StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules, None) + StacksChainState::process_transaction(clarity_block, tx, quiet, None) } #[test] @@ -9783,7 +9469,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_trait_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -9792,7 +9477,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_impl_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -9801,7 +9485,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -9810,7 +9493,6 @@ pub mod test { &mut conn, &signed_test_trait_checkerror_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -9825,7 +9507,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -9838,7 +9519,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -9851,7 +9531,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -9867,7 +9546,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -9895,7 +9573,6 @@ pub mod test { &mut conn, 
&signed_runtime_checkerror_trait_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -9904,7 +9581,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_impl_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -9913,7 +9589,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -9922,7 +9597,6 @@ pub mod test { &mut conn, &signed_test_trait_checkerror_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -9937,7 +9611,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -9950,7 +9623,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -9963,7 +9635,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -9978,7 +9649,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -10013,7 +9683,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10022,7 +9691,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10031,7 +9699,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_tx_clar1, false, - ASTRules::PrecheckSize, None, ) .unwrap(); @@ -10041,7 +9708,6 @@ pub mod test { &mut conn, 
&signed_test_trait_checkerror_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10065,7 +9731,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10100,7 +9765,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10109,7 +9773,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10118,7 +9781,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_tx_clar2, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10127,7 +9789,6 @@ pub mod test { &mut conn, &signed_test_trait_checkerror_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10147,7 +9808,6 @@ pub mod test { &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar2, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10384,7 +10044,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10393,7 +10052,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10402,7 +10060,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10418,7 +10075,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -10431,7 +10087,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -10444,7 +10099,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let 
Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -10468,7 +10122,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10477,7 +10130,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10486,7 +10138,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10502,7 +10153,6 @@ pub mod test { &mut conn, &signed_test_call_foo_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -10517,7 +10167,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -10530,7 +10179,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -10543,7 +10191,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -10567,7 +10214,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10576,7 +10222,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10585,7 +10230,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10612,7 +10256,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10621,7 +10264,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10630,7 +10272,6 @@ pub mod 
test { &mut conn, &signed_call_foo_tx_clar2, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10639,7 +10280,6 @@ pub mod test { &mut conn, &signed_test_call_foo_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10940,7 +10580,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10949,7 +10588,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar1_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10958,7 +10596,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10967,7 +10604,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -10976,7 +10612,6 @@ pub mod test { &mut conn, &signed_test_call_foo_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -10992,7 +10627,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11005,7 +10639,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar1_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11018,7 +10651,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11031,7 +10663,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11055,7 +10686,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11064,7 +10694,6 
@@ pub mod test { &mut conn, &signed_transitive_trait_clar1_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11073,7 +10702,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11082,7 +10710,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1_no_version, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11091,7 +10718,6 @@ pub mod test { &mut conn, &signed_test_call_foo_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Unchecked( @@ -11106,7 +10732,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11119,7 +10744,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar1_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11132,7 +10756,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11145,7 +10768,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap_err(); if let Error::InvalidStacksTransaction(msg, _ignored) = err { @@ -11169,7 +10791,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11178,7 +10799,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar1_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11187,7 +10807,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11196,7 +10815,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11205,7 +10823,6 @@ 
pub mod test { &mut conn, &signed_test_call_foo_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11236,7 +10853,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11245,7 +10861,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar1_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11254,7 +10869,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11263,7 +10877,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar2, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11272,7 +10885,6 @@ pub mod test { &mut conn, &signed_test_call_foo_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11303,7 +10915,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11312,7 +10923,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar2_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11321,7 +10931,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11330,7 +10939,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar2, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11357,7 +10965,6 @@ pub mod test { &mut conn, &signed_foo_trait_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11366,7 +10973,6 @@ pub mod test { &mut conn, &signed_transitive_trait_clar2_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11375,7 +10981,6 @@ pub mod test { &mut conn, &signed_foo_impl_tx, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); @@ -11384,7 +10989,6 @@ pub mod test { &mut conn, &signed_call_foo_tx_clar1, false, - ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); diff --git 
a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 7f273b7f833..8b9c31f229d 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -236,8 +236,6 @@ impl UnconfirmedState { .get_burn_block_time_for_block(&self.confirmed_chain_tip, None) .expect("BUG: unable to get burn block timestamp based on chain tip"); - let ast_rules = burn_dbconn.get_ast_rules(burn_block_height); - let mut last_mblock = self.last_mblock.take(); let mut last_mblock_seq = self.last_mblock_seq; let db_config = chainstate.config(); @@ -295,7 +293,6 @@ impl UnconfirmedState { match StacksChainState::process_microblocks_transactions( &mut clarity_tx, &[mblock.clone()], - ast_rules, ) { Ok(x) => x, Err((e, _)) => { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 26542dee191..cd8dd4bf87f 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -23,7 +23,6 @@ use std::thread::ThreadId; use std::time::Instant; use clarity::vm::ast::errors::ParseErrors; -use clarity::vm::ast::ASTRules; use clarity::vm::database::BurnStateDB; use clarity::vm::errors::Error as InterpreterError; use serde::Deserialize; @@ -307,7 +306,6 @@ pub struct MinerEpochInfo<'a> { pub burn_tip_height: u32, pub parent_microblocks: Vec, pub mainnet: bool, - pub ast_rules: ASTRules, } impl From<&UnconfirmedState> for MicroblockMinerRuntime { @@ -712,7 +710,6 @@ pub trait BlockBuilder { tx: &StacksTransaction, tx_len: u64, limit_behavior: &BlockLimitFunction, - ast_rules: ASTRules, max_execution_time: Option, ) -> TransactionResult; @@ -722,7 +719,6 @@ pub trait BlockBuilder { &mut self, clarity_tx: &mut ClarityTx, tx: &StacksTransaction, - ast_rules: ASTRules, max_execution_time: Option, ) -> Result { let tx_len = tx.tx_len(); @@ -731,7 +727,6 @@ pub trait BlockBuilder { tx, tx_len, 
&BlockLimitFunction::NO_LIMIT_HIT, - ast_rules, max_execution_time, ) { TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), @@ -762,7 +757,6 @@ pub struct StacksMicroblockBuilder<'a> { unconfirmed: bool, runtime: MicroblockMinerRuntime, settings: BlockBuilderSettings, - ast_rules: ASTRules, } impl<'a> StacksMicroblockBuilder<'a> { @@ -794,8 +788,6 @@ impl<'a> StacksMicroblockBuilder<'a> { Error::NoSuchBlockError })?; let anchor_block_height = anchor_block_header.stacks_block_height; - let burn_height = anchor_block_header.burn_header_height; - let ast_rules = burn_dbconn.get_ast_rules(burn_height); // when we drop the miner, the underlying clarity instance will be rolled back chainstate.set_unconfirmed_dirty(true); @@ -834,7 +826,6 @@ impl<'a> StacksMicroblockBuilder<'a> { header_reader, unconfirmed: false, settings, - ast_rules, }) } @@ -857,8 +848,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let ( anchored_consensus_hash, anchored_block_hash, - anchored_block_height, - anchored_burn_height, + anchored_block_height ) = if let Some(unconfirmed) = chainstate.unconfirmed_state.as_ref() { let header_info = StacksChainState::get_stacks_block_header_info_by_index_block_hash( chainstate.db(), @@ -874,8 +864,7 @@ impl<'a> StacksMicroblockBuilder<'a> { ( header_info.consensus_hash, header_info.anchored_header.block_hash(), - header_info.stacks_block_height, - header_info.burn_header_height, + header_info.stacks_block_height ) } else { // unconfirmed state needs to be initialized @@ -883,8 +872,6 @@ impl<'a> StacksMicroblockBuilder<'a> { return Err(Error::NoSuchBlockError)?; }; - let ast_rules = burn_dbconn.get_ast_rules(anchored_burn_height); - let mut clarity_tx = chainstate.begin_unconfirmed(burn_dbconn).ok_or_else(|| { warn!( "Failed to begin-unconfirmed on {}/{}", @@ -912,7 +899,6 @@ impl<'a> StacksMicroblockBuilder<'a> { header_reader, unconfirmed: true, settings, - ast_rules, }) } @@ -923,7 +909,6 @@ impl<'a> StacksMicroblockBuilder<'a> { miner_key: 
&Secp256k1PrivateKey, parent_anchor_block_hash: &BlockHeaderHash, prev_microblock_header: Option<&StacksMicroblockHeader>, - ast_rules: ASTRules, ) -> Result { let miner_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(miner_key)); @@ -943,12 +928,10 @@ impl<'a> StacksMicroblockBuilder<'a> { StacksMicroblockHeader::first_unsigned(parent_anchor_block_hash, &tx_merkle_root) }; - if ast_rules != ASTRules::Typical { - next_microblock_header.version = cmp::max( - STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, - next_microblock_header.version, - ); - } + next_microblock_header.version = cmp::max( + STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, + next_microblock_header.version, + ); next_microblock_header.sign(miner_key).unwrap(); next_microblock_header.verify(&miner_pubkey_hash).unwrap(); @@ -973,7 +956,6 @@ impl<'a> StacksMicroblockBuilder<'a> { miner_key, &self.anchor_block, self.runtime.prev_microblock_header.as_ref(), - self.ast_rules, )?; self.runtime.prev_microblock_header = Some(microblock.header.clone()); @@ -1020,7 +1002,6 @@ impl<'a> StacksMicroblockBuilder<'a> { tx_len: u64, bytes_so_far: u64, limit_behavior: &BlockLimitFunction, - ast_rules: ASTRules, ) -> TransactionResult { if tx.anchor_mode != TransactionAnchorMode::OffChainOnly && tx.anchor_mode != TransactionAnchorMode::Any @@ -1077,7 +1058,6 @@ impl<'a> StacksMicroblockBuilder<'a> { clarity_tx.config.mainnet, clarity_tx.get_epoch(), &tx, - ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", @@ -1087,7 +1067,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } let quiet = !cfg!(test); - match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules, None) { + match StacksChainState::process_transaction(clarity_tx, &tx, quiet, None) { Ok((_fee, receipt)) => TransactionResult::success(&tx, receipt), Err(e) => convert_clarity_error_to_transaction_result(clarity_tx, &tx, e), } @@ -1131,7 +1111,6 @@ impl<'a> StacksMicroblockBuilder<'a> { tx_len, 
bytes_so_far, &block_limit_hit, - self.ast_rules, ); tx_events.push(tx_result.convert_to_event()); match tx_result { @@ -1265,7 +1244,6 @@ impl<'a> StacksMicroblockBuilder<'a> { mempool_tx.metadata.len, bytes_so_far, &block_limit_hit, - self.ast_rules, ); let result_event = tx_result.convert_to_event(); match tx_result { @@ -1649,13 +1627,7 @@ impl StacksBlockBuilder { let quiet = !cfg!(test); if !self.anchored_done { // save - match StacksChainState::process_transaction( - clarity_tx, - tx, - quiet, - ASTRules::Typical, - None, - ) { + match StacksChainState::process_transaction(clarity_tx, tx, quiet, None) { Ok((fee, receipt)) => { self.total_anchored_fees += fee; } @@ -1666,13 +1638,7 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { - match StacksChainState::process_transaction( - clarity_tx, - tx, - quiet, - ASTRules::Typical, - None, - ) { + match StacksChainState::process_transaction(clarity_tx, tx, quiet, None) { Ok((fee, receipt)) => { self.total_streamed_fees += fee; } @@ -1886,10 +1852,7 @@ impl StacksBlockBuilder { self.miner_payouts { test_debug!( - "Miner payout to process: {:?}; user payouts: {:?}; parent payout: {:?}", - _miner_payout, - _user_payouts, - _parent_reward + "Miner payout to process: {_miner_payout:?}; user payouts: {_user_payouts:?}; parent payout: {_parent_reward:?}" ); } @@ -1954,9 +1917,6 @@ impl StacksBlockBuilder { // data won't be committed, so do a concurrent transaction let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin()?; - let ast_rules = - SortitionDB::get_ast_rules(burn_dbconn.conn(), (burn_tip_height + 1).into())?; - Ok(MinerEpochInfo { chainstate_tx, clarity_instance, @@ -1964,7 +1924,6 @@ impl StacksBlockBuilder { burn_tip_height: burn_tip_height + 1, parent_microblocks, mainnet, - ast_rules, }) } @@ -2067,10 +2026,9 @@ impl StacksBlockBuilder { debug!("Build anchored block from {} transactions", txs.len()); let (mut chainstate, _) = chainstate_handle.reopen()?; let mut 
miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn, true)?; - let ast_rules = miner_epoch_info.ast_rules; let (mut epoch_tx, _) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; for tx in txs.into_iter() { - match builder.try_mine_tx(&mut epoch_tx, &tx, ast_rules.clone(), None) { + match builder.try_mine_tx(&mut epoch_tx, &tx, None) { Ok(_) => { debug!("Included {}", &tx.txid()); } @@ -2225,7 +2183,6 @@ impl StacksBlockBuilder { initial_txs: &[StacksTransaction], settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - ast_rules: ASTRules, replay_transactions: &[StacksTransaction], ) -> Result<(bool, Vec), Error> { let mut tx_events = Vec::new(); @@ -2233,12 +2190,7 @@ impl StacksBlockBuilder { for initial_tx in initial_txs.iter() { tx_events.push( builder - .try_mine_tx( - epoch_tx, - initial_tx, - ast_rules.clone(), - settings.max_execution_time, - )? + .try_mine_tx(epoch_tx, initial_tx, settings.max_execution_time)? .convert_to_event(), ); } @@ -2271,7 +2223,6 @@ impl StacksBlockBuilder { tip_height, settings, event_observer, - ast_rules, ) } else { info!("Miner: constructing block with replay transactions"); @@ -2279,7 +2230,6 @@ impl StacksBlockBuilder { epoch_tx, builder, tip_height, - ast_rules, replay_transactions, ); Ok((txs, false)) @@ -2351,13 +2301,10 @@ impl StacksBlockBuilder { let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn, settings.confirm_microblocks)?; - let ast_rules = miner_epoch_info.ast_rules; - if ast_rules != ASTRules::Typical { - builder.header.version = cmp::max( - STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, - builder.header.version, - ); - } + builder.header.version = cmp::max( + STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, + builder.header.version, + ); let (mut epoch_tx, confirmed_mblock_cost) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; @@ -2375,7 +2322,6 @@ impl StacksBlockBuilder { &[coinbase_tx.clone()], settings, event_observer, - 
ast_rules, &vec![], ) { Ok(x) => x, @@ -2447,7 +2393,6 @@ impl BlockBuilder for StacksBlockBuilder { tx: &StacksTransaction, tx_len: u64, limit_behavior: &BlockLimitFunction, - ast_rules: ASTRules, _max_execution_time: Option, ) -> TransactionResult { if self.bytes_so_far + tx_len >= u64::from(MAX_EPOCH_SIZE) { @@ -2505,7 +2450,6 @@ impl BlockBuilder for StacksBlockBuilder { clarity_tx.config.mainnet, clarity_tx.get_epoch(), tx, - ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", @@ -2514,8 +2458,7 @@ impl BlockBuilder for StacksBlockBuilder { return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules, None) - { + match StacksChainState::process_transaction(clarity_tx, tx, quiet, None) { Ok((fee, receipt)) => (fee, receipt), Err(e) => { return convert_clarity_error_to_transaction_result(clarity_tx, tx, e); @@ -2550,7 +2493,6 @@ impl BlockBuilder for StacksBlockBuilder { clarity_tx.config.mainnet, clarity_tx.get_epoch(), tx, - ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", @@ -2559,8 +2501,7 @@ impl BlockBuilder for StacksBlockBuilder { return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules, None) - { + match StacksChainState::process_transaction(clarity_tx, tx, quiet, None) { Ok((fee, receipt)) => (fee, receipt), Err(e) => { return convert_clarity_error_to_transaction_result(clarity_tx, tx, e); @@ -2591,7 +2532,6 @@ fn select_and_apply_transactions_from_mempool( tip_height: u64, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - ast_rules: ASTRules, ) -> Result<(Vec, bool), Error> { let mut tx_events = vec![]; let max_miner_time_ms = settings.max_miner_time_ms; @@ -2700,7 +2640,6 @@ fn select_and_apply_transactions_from_mempool( &txinfo.tx, 
txinfo.metadata.len, &block_limit_hit, - ast_rules, settings.max_execution_time, ); @@ -2852,7 +2791,6 @@ fn select_and_apply_transactions_from_vec( epoch_tx: &mut ClarityTx, builder: &mut B, tip_height: u64, - ast_rules: ASTRules, replay_transactions: &[StacksTransaction], ) -> Vec { let mut tx_events = vec![]; @@ -2873,7 +2811,6 @@ fn select_and_apply_transactions_from_vec( replay_tx, replay_tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, - ast_rules, None, ); let tx_event = tx_result.convert_to_event(); diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index af00ebcff3b..20004c92d5b 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -22,7 +22,6 @@ /// test anything about block construction from mempool state. use std::collections::HashMap; -use clarity::vm::ast::ASTRules; use clarity::vm::types::*; use rand::seq::SliceRandom; use rand::thread_rng; @@ -2660,12 +2659,7 @@ pub fn mine_empty_anchored_block( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2700,12 +2694,7 @@ pub fn mine_empty_anchored_block_with_burn_height_pubkh( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2740,12 +2729,7 @@ pub fn mine_empty_anchored_block_with_stacks_height_pubkh( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + 
.try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2776,12 +2760,7 @@ pub fn mine_invalid_token_transfers_block( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); let recipient = @@ -2855,12 +2834,7 @@ pub fn mine_smart_contract_contract_call_block( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); // make a smart contract @@ -2870,12 +2844,7 @@ pub fn mine_smart_contract_contract_call_block( builder.header.total_work.work as usize, ); builder - .try_mine_tx( - clarity_tx, - &tx_contract_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_contract_signed, None) .unwrap(); // make a contract call @@ -2887,12 +2856,7 @@ pub fn mine_smart_contract_contract_call_block( 2, ); builder - .try_mine_tx( - clarity_tx, - &tx_contract_call_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_contract_call_signed, None) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2947,12 +2911,7 @@ pub fn mine_smart_contract_block_contract_call_microblock( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); // make a smart contract @@ -2962,12 +2921,7 @@ pub fn mine_smart_contract_block_contract_call_microblock( builder.header.total_work.work as usize, ); builder - .try_mine_tx( - clarity_tx, - &tx_contract_signed, - 
ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_contract_signed, None) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -3044,12 +2998,7 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx( - clarity_tx, - &tx_coinbase_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_coinbase_signed, None) .unwrap(); // make a smart contract @@ -3059,12 +3008,7 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( builder.header.total_work.work as usize, ); builder - .try_mine_tx( - clarity_tx, - &tx_contract_signed, - ASTRules::PrecheckSize, - None, - ) + .try_mine_tx(clarity_tx, &tx_contract_signed, None) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 6b714771b26..ade05a66e8a 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -45,7 +45,7 @@ use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::clarity::vm::analysis::contract_interface_builder::build_contract_interface; use crate::clarity::vm::analysis::errors::CheckError; use crate::clarity::vm::analysis::{AnalysisDatabase, ContractAnalysis}; -use crate::clarity::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::clarity::vm::ast::build_ast; use crate::clarity::vm::contexts::{AssetMap, GlobalContext, OwnedEnvironment}; use crate::clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use crate::clarity::vm::database::{ @@ -150,13 +150,12 @@ fn parse( source_code: &str, clarity_version: ClarityVersion, ) -> Result, Error> { - let ast = build_ast_with_rules( + let ast = build_ast( contract_identifier, source_code, &mut (), clarity_version, DEFAULT_CLI_EPOCH, - ASTRules::PrecheckSize, ) .map_err(RuntimeErrorType::ASTError)?; Ok(ast.expressions) @@ -455,13 
+454,12 @@ pub fn vm_execute(program: &str, clarity_version: ClarityVersion) -> Result(header_db: &CLIHeadersDB, marf: &mut C) ClarityVersion::Clarity2, contract_content, None, - ASTRules::PrecheckSize, ) .unwrap(); } @@ -1278,7 +1275,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option val, Err(error) => { - println!("Parse error:\n{}", error); + println!("Parse error:\n{error}"); continue; } }; @@ -1286,19 +1283,18 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (), Err((error, _)) => { - println!("Type check error:\n{}", error); + println!("Type check error:\n{error}"); continue; } } - let eval_result = - match exec_env.eval_raw_with_rules(&content, ASTRules::PrecheckSize) { - Ok(val) => val, - Err(error) => { - println!("Execution error:\n{}", error); - continue; - } - }; + let eval_result = match exec_env.eval_raw(&content) { + Ok(val) => val, + Err(error) => { + println!("Execution error:\n{error}"); + continue; + } + }; println!("{}", eval_result); } @@ -1336,7 +1332,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let result = vm_env .get_exec_environment(None, None, &placeholder_context) - .eval_raw_with_rules(&content, ASTRules::PrecheckSize); + .eval_raw(&content); match result { Ok(x) => ( 0, @@ -1388,10 +1384,9 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option Result { let mut read_only_conn = self.datastore.begin_read_only(Some(at_block)); let mut clarity_db = read_only_conn.as_clarity_db(header_db, burn_state_db); @@ -661,7 +653,7 @@ impl ClarityInstance { }?; let mut env = OwnedEnvironment::new_free(self.mainnet, self.chain_id, clarity_db, epoch_id); - env.eval_read_only_with_rules(contract, program, ast_rules) + env.eval_read_only(contract, program) .map(|(x, _, _)| x) .map_err(Error::from) } @@ -901,7 +893,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &costs_2_contract_tx, 
&boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process PoX 2 contract initialization"); @@ -1015,7 +1006,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &pox_2_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process PoX 2 contract initialization"); @@ -1088,7 +1078,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &costs_3_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process costs-3 contract initialization"); @@ -1259,7 +1248,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &pox_3_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process PoX 3 contract initialization"); @@ -1378,7 +1366,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &pox_4_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process PoX 4 contract initialization"); @@ -1438,7 +1425,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &signers_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process .signers contract initialization"); @@ -1485,7 +1471,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &signers_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process .signers DB contract initialization"); @@ -1526,7 +1511,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &signers_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process .signers-voting contract initialization"); @@ -1659,7 +1643,6 @@ impl<'a> ClarityBlockConnection<'a, '_> { tx_conn, &sip_031_contract_tx, &boot_code_account, - ASTRules::PrecheckSize, None, ) .expect("FATAL: Failed to process .sip-031 contract initialization"); @@ -2155,7 +2138,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) }) 
.unwrap_err(); @@ -2168,7 +2150,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) }) .unwrap_err(); @@ -2216,7 +2197,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); conn.initialize_smart_contract( @@ -2270,7 +2250,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); tx.initialize_smart_contract( @@ -2299,7 +2278,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); tx.initialize_smart_contract( @@ -2330,7 +2308,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); assert!(format!( @@ -2385,7 +2362,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); conn.initialize_smart_contract( @@ -2447,7 +2423,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); conn.initialize_smart_contract( @@ -2540,7 +2515,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); conn.initialize_smart_contract( @@ -2672,7 +2646,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); conn.initialize_smart_contract( @@ -2886,36 +2859,20 @@ mod tests { ); conn.as_transaction(|clarity_tx| { - let receipt = StacksChainState::process_transaction_payload( - clarity_tx, - &tx1, - &account, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let receipt = + StacksChainState::process_transaction_payload(clarity_tx, &tx1, &account, None) + .unwrap(); assert!(receipt.post_condition_aborted); }); conn.as_transaction(|clarity_tx| { - StacksChainState::process_transaction_payload( - clarity_tx, - &tx2, - &account, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + 
StacksChainState::process_transaction_payload(clarity_tx, &tx2, &account, None) + .unwrap(); }); conn.as_transaction(|clarity_tx| { - let receipt = StacksChainState::process_transaction_payload( - clarity_tx, - &tx3, - &account, - ASTRules::PrecheckSize, - None, - ) - .unwrap(); + let receipt = + StacksChainState::process_transaction_payload(clarity_tx, &tx3, &account, None) + .unwrap(); assert!(receipt.post_condition_aborted); }); @@ -3026,9 +2983,6 @@ mod tests { ) -> Option<(Vec, u128)> { return None; } - fn get_ast_rules(&self, height: u32) -> ASTRules { - ASTRules::Typical - } } let burn_state_db = BlockLimitBurnStateDB {}; @@ -3064,7 +3018,6 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, contract, - ASTRules::PrecheckSize, ) .unwrap(); conn.initialize_smart_contract( diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 966a422ba54..4702b0d53a0 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1044,9 +1044,6 @@ impl BurnStateDB for SortitionHandleTx<'_> { Some((addrs, payout)) } - fn get_ast_rules(&self, height: u32) -> clarity::vm::ast::ASTRules { - SortitionDB::get_ast_rules(self.tx(), height.into()).expect("BUG: failed to get AST rules") - } } impl BurnStateDB for SortitionHandleConn<'_> { @@ -1177,10 +1174,6 @@ impl BurnStateDB for SortitionHandleConn<'_> { Some((addrs, payout)) } - fn get_ast_rules(&self, height: u32) -> clarity::vm::ast::ASTRules { - SortitionDB::get_ast_rules(self.conn(), height.into()) - .expect("BUG: failed to get AST rules") - } } pub struct MemoryBackingStore { diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index cab9d7756a2..fb12f1ceee8 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along 
with this program. If not, see . -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::costs::ExecutionCost; use clarity::vm::functions::NativeFunctions; @@ -58,10 +57,7 @@ fn setup_tracked_cost_test( QualifiedContractIdentifier::new(p1_principal.clone(), "contract-other".into()); let trait_contract_id = QualifiedContractIdentifier::new(p1_principal, "contract-trait".into()); - let burn_state_db = UnitTestBurnStateDB { - epoch_id: epoch, - ast_rules: ASTRules::PrecheckSize, - }; + let burn_state_db = UnitTestBurnStateDB { epoch_id: epoch }; clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), @@ -104,12 +100,7 @@ fn setup_tracked_cost_test( conn.as_transaction(|conn| { let (ct_ast, ct_analysis) = conn - .analyze_smart_contract( - &trait_contract_id, - version, - contract_trait, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&trait_contract_id, version, contract_trait) .unwrap(); conn.initialize_smart_contract( &trait_contract_id, @@ -138,12 +129,7 @@ fn setup_tracked_cost_test( conn.as_transaction(|conn| { let (ct_ast, ct_analysis) = conn - .analyze_smart_contract( - &other_contract_id, - version, - contract_other, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&other_contract_id, version, contract_other) .unwrap(); conn.initialize_smart_contract( &other_contract_id, @@ -199,10 +185,7 @@ fn test_tracked_costs( ContractName::try_from(format!("self-{prog_id}")).unwrap(), ); - let burn_state_db = UnitTestBurnStateDB { - epoch_id: epoch, - ast_rules: ASTRules::PrecheckSize, - }; + let burn_state_db = UnitTestBurnStateDB { epoch_id: epoch }; { let mut conn = clarity_instance.begin_block( @@ -214,12 +197,7 @@ fn test_tracked_costs( conn.as_transaction(|conn| { let (ct_ast, ct_analysis) = conn - .analyze_smart_contract( - &self_contract_id, - version, - &contract_self, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&self_contract_id, version, &contract_self) .unwrap(); 
conn.initialize_smart_contract( &self_contract_id, diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 0b8af83c408..02bc42353fd 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . use clarity::types::StacksEpochId; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::Error as ClarityError; use clarity::vm::errors::{CheckErrors, Error}; use clarity::vm::types::SequenceData::Buffer; @@ -47,12 +46,8 @@ fn test_get_burn_block_info_eval() { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); - let res = clarity_db.analyze_smart_contract( - &contract_identifier, - clarity_version, - contract, - ASTRules::PrecheckSize, - ); + let res = + clarity_db.analyze_smart_contract(&contract_identifier, clarity_version, contract); if let Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::UnknownFunction(func_name) = check_error.err { assert_eq!(func_name, "get-burn-block-info?"); @@ -72,12 +67,8 @@ fn test_get_burn_block_info_eval() { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); - let res = clarity_db.analyze_smart_contract( - &contract_identifier, - clarity_version, - contract, - ASTRules::PrecheckSize, - ); + let res = + clarity_db.analyze_smart_contract(&contract_identifier, clarity_version, contract); if let Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::UnknownFunction(func_name) = check_error.err { assert_eq!(func_name, "get-burn-block-info?"); @@ -98,12 +89,7 @@ fn test_get_burn_block_info_eval() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract( - &contract_identifier, - clarity_version, - 
contract, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, contract) .unwrap(); clarity_db .initialize_smart_contract( @@ -173,12 +159,8 @@ fn test_get_block_info_eval_v210() { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); - let res = clarity_db.analyze_smart_contract( - &contract_identifier, - clarity_version, - contract, - ASTRules::PrecheckSize, - ); + let res = + clarity_db.analyze_smart_contract(&contract_identifier, clarity_version, contract); if let Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::NoSuchBlockInfoProperty(name) = check_error.err { assert_eq!(name, "block-reward"); @@ -198,12 +180,8 @@ fn test_get_block_info_eval_v210() { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); - let res = clarity_db.analyze_smart_contract( - &contract_identifier, - clarity_version, - contract, - ASTRules::PrecheckSize, - ); + let res = + clarity_db.analyze_smart_contract(&contract_identifier, clarity_version, contract); if let Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::NoSuchBlockInfoProperty(name) = check_error.err { assert_eq!(name, "block-reward"); @@ -226,7 +204,7 @@ fn test_get_block_info_eval_v210() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract(&contract_identifier, clarity_version, contract, ASTRules::PrecheckSize) + .analyze_smart_contract(&contract_identifier, clarity_version, contract) .unwrap(); clarity_db .initialize_smart_contract(&contract_identifier, clarity_version, &ast, contract, None, |_, _| None, None) @@ -306,8 +284,7 @@ fn publish_contract( version: ClarityVersion, ) -> Result<(), clarity::vm::clarity::Error> { bc.as_transaction(|tx| { - let (ast, analysis) = - 
tx.analyze_smart_contract(contract_id, version, contract, ASTRules::PrecheckSize)?; + let (ast, analysis) = tx.analyze_smart_contract(contract_id, version, contract)?; tx.initialize_smart_contract( contract_id, version, @@ -593,12 +570,7 @@ fn trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract( - &math_contract_id, - clarity_version, - math_trait, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&math_contract_id, clarity_version, math_trait) .unwrap(); clarity_db .initialize_smart_contract( @@ -618,12 +590,7 @@ fn trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract( - &compute_contract_id, - clarity_version, - compute_trait, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&compute_contract_id, clarity_version, compute_trait) .unwrap(); clarity_db .initialize_smart_contract( @@ -643,12 +610,7 @@ fn trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract( - &impl_compute_id, - clarity_version, - impl_compute, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&impl_compute_id, clarity_version, impl_compute) .unwrap(); clarity_db .initialize_smart_contract( @@ -668,12 +630,7 @@ fn trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract( - &impl_math_id, - clarity_version, - impl_math, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&impl_math_id, clarity_version, impl_math) .unwrap(); clarity_db .initialize_smart_contract( @@ -693,12 +650,7 @@ fn 
trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::default_for_epoch(epoch); let (ast, analysis) = clarity_db - .analyze_smart_contract( - &use_compute_20_id, - clarity_version, - use_compute, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&use_compute_20_id, clarity_version, use_compute) .unwrap(); clarity_db .initialize_smart_contract( @@ -725,12 +677,7 @@ fn trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::Clarity1; let (ast, analysis) = clarity_db - .analyze_smart_contract( - &use_compute_21_c1_id, - clarity_version, - use_compute, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&use_compute_21_c1_id, clarity_version, use_compute) .unwrap(); clarity_db .initialize_smart_contract( @@ -750,12 +697,7 @@ fn trait_with_trait_invocation_cross_epoch() { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::Clarity2; let (ast, analysis) = clarity_db - .analyze_smart_contract( - &use_compute_21_c2_id, - clarity_version, - use_compute, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&use_compute_21_c2_id, clarity_version, use_compute) .unwrap(); clarity_db .initialize_smart_contract( @@ -926,14 +868,12 @@ fn test_block_heights() { &contract_identifier1, ClarityVersion::Clarity1, contract_clarity1, - ASTRules::PrecheckSize, ).unwrap(); let res = clarity_db.analyze_smart_contract( &contract_identifier2, ClarityVersion::Clarity1, contract_clarity3, - ASTRules::PrecheckSize, ); if let Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::UndefinedVariable(var_name) = check_error.err { @@ -962,14 +902,12 @@ fn test_block_heights() { &contract_identifier1, ClarityVersion::Clarity2, contract_clarity1, - ASTRules::PrecheckSize, ).unwrap(); let res = clarity_db.analyze_smart_contract( &contract_identifier2, ClarityVersion::Clarity2, contract_clarity3, - ASTRules::PrecheckSize, ); if let 
Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::UndefinedVariable(var_name) = check_error.err { @@ -986,7 +924,6 @@ fn test_block_heights() { &contract_identifier1, ClarityVersion::Clarity3, contract_clarity1, - ASTRules::PrecheckSize, ); if let Err(ClarityError::Analysis(check_error)) = res { if let CheckErrors::UndefinedVariable(var_name) = check_error.err { @@ -1002,7 +939,6 @@ fn test_block_heights() { &contract_identifier2, ClarityVersion::Clarity3, contract_clarity3, - ASTRules::PrecheckSize, ).unwrap(); // Publish the Clarity 3 contract @@ -1218,7 +1154,6 @@ fn test_block_heights_across_versions() { &contract_id_e2c1, ClarityVersion::Clarity1, contract_e2c1_2, - ASTRules::PrecheckSize, ) .unwrap(); clarity_db @@ -1249,7 +1184,6 @@ fn test_block_heights_across_versions() { &contract_id_e2c2, ClarityVersion::Clarity2, contract_e2c1_2, - ASTRules::PrecheckSize, ) .unwrap(); clarity_db @@ -1281,12 +1215,7 @@ fn test_block_heights_across_versions() { conn.as_transaction(|clarity_db| { // Analyze the Clarity 3 contract let (ast, analysis) = clarity_db - .analyze_smart_contract( - &contract_id_e3c3, - ClarityVersion::Clarity3, - &contract_e3c3, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_id_e3c3, ClarityVersion::Clarity3, &contract_e3c3) .unwrap(); // Publish the Clarity 3 contract @@ -1353,7 +1282,6 @@ fn test_block_heights_across_versions_traits_3_from_2() { &contract_id_e2c1, ClarityVersion::Clarity1, contract_e2c1_2, - ASTRules::PrecheckSize, ) .unwrap(); @@ -1381,7 +1309,6 @@ fn test_block_heights_across_versions_traits_3_from_2() { &contract_id_e2c2, ClarityVersion::Clarity2, contract_e2c1_2, - ASTRules::PrecheckSize, ) .unwrap(); @@ -1410,12 +1337,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { conn.as_transaction(|clarity_db| { // Analyze the Clarity 3 contract let (ast, analysis) = clarity_db - .analyze_smart_contract( - &contract_id_e3c3, - ClarityVersion::Clarity3, - &contract_e3c3, - 
ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_id_e3c3, ClarityVersion::Clarity3, &contract_e3c3) .unwrap(); // Publish the Clarity 3 contract @@ -1501,7 +1423,6 @@ fn test_block_heights_across_versions_traits_2_from_3() { &contract_id_e2c1, ClarityVersion::Clarity1, contract_e2c1_2, - ASTRules::PrecheckSize, ) .unwrap(); @@ -1529,7 +1450,6 @@ fn test_block_heights_across_versions_traits_2_from_3() { &contract_id_e2c2, ClarityVersion::Clarity2, contract_e2c1_2, - ASTRules::PrecheckSize, ) .unwrap(); @@ -1558,12 +1478,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { conn.as_transaction(|clarity_db| { // Analyze the Clarity 3 contract let (ast, analysis) = clarity_db - .analyze_smart_contract( - &contract_id_e3c3, - ClarityVersion::Clarity3, - &contract_e3c3, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_id_e3c3, ClarityVersion::Clarity3, &contract_e3c3) .unwrap(); // Publish the Clarity 3 contract @@ -1640,7 +1555,6 @@ fn test_block_heights_at_block() { &contract_identifier, ClarityVersion::Clarity3, contract, - ASTRules::PrecheckSize, ).unwrap(); // Publish the contract @@ -1698,12 +1612,7 @@ fn test_get_block_info_time() { conn.as_transaction(|clarity_db| { // Analyze the contract as Clarity 2 let (ast, analysis) = clarity_db - .analyze_smart_contract( - &contract_identifier2, - ClarityVersion::Clarity2, - contract2, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier2, ClarityVersion::Clarity2, contract2) .unwrap(); // Publish the contract as Clarity 2 @@ -1721,12 +1630,7 @@ fn test_get_block_info_time() { // Analyze the contract as Clarity 3 let (ast, analysis) = clarity_db - .analyze_smart_contract( - &contract_identifier3, - ClarityVersion::Clarity3, - contract3, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier3, ClarityVersion::Clarity3, contract3) .unwrap(); // Publish the contract as Clarity 3 @@ -1748,7 +1652,6 @@ fn test_get_block_info_time() { 
&contract_identifier3_3, ClarityVersion::Clarity3, contract3_3, - ASTRules::PrecheckSize, ) .unwrap(); diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 6172141b191..5ee73be93ee 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::contexts::{AssetMap, OwnedEnvironment}; use clarity::vm::costs::cost_functions::ClarityCostFunction; @@ -234,7 +233,7 @@ fn exec_cost(contract: &str, use_mainnet: bool, epoch: StacksEpochId) -> Executi with_owned_env(epoch, use_mainnet, |mut owned_env| { owned_env - .initialize_contract(contract_id.clone(), contract, None, ASTRules::PrecheckSize) + .initialize_contract(contract_id.clone(), contract, None) .unwrap(); let cost_before = owned_env.get_cost_total(); @@ -847,22 +846,10 @@ fn setup_cost_tracked_test( let trait_contract_id = QualifiedContractIdentifier::new(p1_principal, "contract-trait".into()); owned_env - .initialize_versioned_contract( - trait_contract_id, - version, - contract_trait, - None, - ASTRules::PrecheckSize, - ) + .initialize_versioned_contract(trait_contract_id, version, contract_trait, None) .unwrap(); owned_env - .initialize_versioned_contract( - other_contract_id, - version, - contract_other, - None, - ASTRules::PrecheckSize, - ) + .initialize_versioned_contract(other_contract_id, version, contract_other, None) .unwrap(); } @@ -990,13 +977,7 @@ fn test_program_cost( let other_contract_id = QualifiedContractIdentifier::new(p1_principal, "contract-other".into()); owned_env - .initialize_versioned_contract( - self_contract_id.clone(), - version, - &contract_self, - None, - ASTRules::PrecheckSize, - ) + .initialize_versioned_contract(self_contract_id.clone(), version, &contract_self, None) 
.unwrap(); let start = owned_env.get_cost_total(); @@ -1159,12 +1140,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity { block_conn.as_transaction(|tx| { let (ast, analysis) = tx - .analyze_smart_contract( - contract_name, - clarity_version, - contract_src, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(contract_name, clarity_version, contract_src) .unwrap(); tx.initialize_smart_contract( contract_name, @@ -1444,12 +1420,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi { block_conn.as_transaction(|tx| { let (ast, analysis) = tx - .analyze_smart_contract( - contract_name, - clarity_version, - contract_src, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(contract_name, clarity_version, contract_src) .unwrap(); tx.initialize_smart_contract( contract_name, diff --git a/stackslib/src/clarity_vm/tests/events.rs b/stackslib/src/clarity_vm/tests/events.rs index 0dfc16b5aa0..218d718d7bf 100644 --- a/stackslib/src/clarity_vm/tests/events.rs +++ b/stackslib/src/clarity_vm/tests/events.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use clarity::vm::ast::ASTRules; use clarity::vm::contexts::OwnedEnvironment; use clarity::vm::events::*; use clarity::vm::tests::{ @@ -101,7 +100,7 @@ fn helper_execute_epoch( { let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); - env.initialize_contract(contract_id.clone(), contract, ASTRules::PrecheckSize) + env.initialize_contract(contract_id.clone(), contract) .unwrap(); } diff --git a/stackslib/src/clarity_vm/tests/forking.rs b/stackslib/src/clarity_vm/tests/forking.rs index 6dc33055629..4c056c7da0e 100644 --- a/stackslib/src/clarity_vm/tests/forking.rs +++ b/stackslib/src/clarity_vm/tests/forking.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . 
use clarity::vm::analysis::errors::CheckErrors; -use clarity::vm::ast::ASTRules; use clarity::vm::contexts::OwnedEnvironment; use clarity::vm::errors::{Error, InterpreterResult as Result, RuntimeErrorType}; use clarity::vm::test_util::{ @@ -67,9 +66,7 @@ fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: Stack (ok (at-block 0x0101010101010101010101010101010101010101010101010101010101010101 (var-get datum)))))"; eprintln!("Initializing contract..."); - owned_env - .initialize_contract(c, contract, None, ASTRules::PrecheckSize) - .unwrap(); + owned_env.initialize_contract(c, contract, None).unwrap(); } fn branch( @@ -146,9 +143,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc (ok (var-get datum))))"; eprintln!("Initializing contract..."); - owned_env - .initialize_contract(c, contract, None, ASTRules::PrecheckSize) - .unwrap(); + owned_env.initialize_contract(c, contract, None).unwrap(); } fn branch( @@ -220,9 +215,7 @@ fn test_at_block_missing_defines(#[case] version: ClarityVersion, #[case] epoch: (ok current)))"; eprintln!("Initializing contract..."); - owned_env - .initialize_contract(c_a, contract, None, ASTRules::PrecheckSize) - .unwrap(); + owned_env.initialize_contract(c_a, contract, None).unwrap(); } fn initialize_2(owned_env: &mut OwnedEnvironment) -> Error { @@ -236,7 +229,7 @@ fn test_at_block_missing_defines(#[case] version: ClarityVersion, #[case] epoch: eprintln!("Initializing contract..."); let e = owned_env - .initialize_contract(c_b, contract, None, ASTRules::PrecheckSize) + .initialize_contract(c_b, contract, None) .unwrap_err(); e } @@ -355,7 +348,7 @@ fn initialize_contract(owned_env: &mut OwnedEnvironment) { let contract_identifier = QualifiedContractIdentifier::new(p1_address, "tokens".into()); owned_env - .initialize_contract(contract_identifier, &contract, None, ASTRules::PrecheckSize) + .initialize_contract(contract_identifier, &contract, None) .unwrap(); } diff --git 
a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 1915d3db367..c767ad14e45 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; -use clarity::vm::ast::{self, ASTRules}; use clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use clarity::vm::contexts::OwnedEnvironment; use clarity::vm::database::HeadersDB; @@ -24,7 +23,7 @@ use clarity::vm::test_util::*; use clarity::vm::tests::{test_clarity_versions, BurnStateDB}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, Value}; use clarity::vm::version::ClarityVersion; -use clarity::vm::{ContractContext, MAX_CALL_STACK_DEPTH}; +use clarity::vm::{ast, ContractContext, MAX_CALL_STACK_DEPTH}; #[cfg(test)] use rstest::rstest; #[cfg(test)] @@ -46,7 +45,6 @@ fn test_block_headers(n: u8) -> StacksBlockId { pub const TEST_BURN_STATE_DB_AST_PRECHECK: UnitTestBurnStateDB = UnitTestBurnStateDB { epoch_id: StacksEpochId::Epoch20, - ast_rules: ast::ASTRules::PrecheckSize, }; const SIMPLE_TOKENS: &str = "(define-map tokens { account: principal } { balance: uint }) @@ -143,7 +141,6 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &boot_code_id("costs-2", false), ClarityVersion::Clarity1, BOOT_CODE_COSTS_2, - ASTRules::PrecheckSize, ) .unwrap(); tx.initialize_smart_contract( @@ -170,7 +167,6 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &boot_code_id("costs-3", false), ClarityVersion::Clarity2, BOOT_CODE_COSTS_3, - ASTRules::PrecheckSize, ) .unwrap(); tx.initialize_smart_contract( @@ -533,11 +529,11 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier 
= QualifiedContractIdentifier::local("tokens").unwrap(); - env.initialize_contract(contract_identifier, tokens_contract, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, tokens_contract) .unwrap(); let contract_identifier = QualifiedContractIdentifier::local("names").unwrap(); - env.initialize_contract(contract_identifier, names_contract, ASTRules::PrecheckSize) + env.initialize_contract(contract_identifier, names_contract) .unwrap(); } @@ -739,12 +735,7 @@ pub fn rollback_log_memory_test( conn.as_transaction(|conn| { let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract) .unwrap(); assert!(format!( "{:?}", @@ -816,12 +807,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id conn.as_transaction(|conn| { let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract) .unwrap(); assert!(format!( "{:?}", @@ -896,12 +882,7 @@ pub fn argument_memory_test( conn.as_transaction(|conn| { let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract) .unwrap(); assert!(format!( "{:?}", @@ -992,12 +973,7 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ conn.as_transaction(|conn| { let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract_ok, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract_ok) .unwrap(); assert!(match conn .initialize_smart_contract( @@ -1019,12 +995,7 @@ pub fn 
fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ conn.as_transaction(|conn| { let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract_err, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract_err) .unwrap(); assert!(format!( "{:?}", @@ -1108,12 +1079,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ if i < (CONTRACTS - 1) { conn.as_transaction(|conn| { let (ct_ast, ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract) .unwrap(); conn.initialize_smart_contract( &contract_identifier, @@ -1131,12 +1097,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ } else { conn.as_transaction(|conn| { let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract( - &contract_identifier, - clarity_version, - &contract, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&contract_identifier, clarity_version, &contract) .unwrap(); assert!(format!( "{:?}", @@ -1196,12 +1157,8 @@ fn test_deep_tuples() { let _res = block.as_transaction(|tx| { // basically, without the new stack depth checks in the lexer/parser, // and without the VaryStackDepthChecker, this next call will return a checkerror - let analysis_resp = tx.analyze_smart_contract( - &contract_identifier, - *version, - &meets_stack_depth_tuple, - ASTRules::PrecheckSize, - ); + let analysis_resp = + tx.analyze_smart_contract(&contract_identifier, *version, &meets_stack_depth_tuple); eprintln!( "analyze_smart_contract() with meets_stack_depth_tuple: {}", analysis_resp.is_ok() @@ -1222,7 +1179,6 @@ fn test_deep_tuples() { &contract_identifier, *version, &exceeds_stack_depth_tuple, - ASTRules::PrecheckSize, ); analysis_resp.unwrap_err() }); @@ -1291,7 +1247,6 @@ fn 
test_deep_tuples_ast_precheck() { &contract_identifier, *version, &exceeds_stack_depth_tuple, - ASTRules::PrecheckSize, ); analysis_resp.unwrap_err() }); @@ -1362,12 +1317,8 @@ fn test_deep_type_nesting() { } // basically, without the new stack depth checks in the lexer/parser, // and without the VaryStackDepthChecker, this next call will return a checkerror - let analysis_resp = tx.analyze_smart_contract( - &contract_identifier, - *version, - &exceeds_type_depth, - ASTRules::PrecheckSize, - ); + let analysis_resp = + tx.analyze_smart_contract(&contract_identifier, *version, &exceeds_type_depth); analysis_resp.unwrap_err() }); diff --git a/stackslib/src/clarity_vm/tests/simple_tests.rs b/stackslib/src/clarity_vm/tests/simple_tests.rs index 0fb38cdf9e4..114474be959 100644 --- a/stackslib/src/clarity_vm/tests/simple_tests.rs +++ b/stackslib/src/clarity_vm/tests/simple_tests.rs @@ -57,10 +57,9 @@ fn test_at_unknown_block() { QualifiedContractIdentifier::local("contract").unwrap(), contract, None, - clarity::vm::ast::ASTRules::PrecheckSize, ) .unwrap_err(); - eprintln!("{}", err); + eprintln!("{err}"); match err { Error::Runtime(x, _) => assert_eq!( x, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index d64cf6a9069..771b21ea3a7 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -23,7 +23,6 @@ use std::thread::{self, JoinHandle}; use std::time::Duration; use std::time::Instant; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use regex::{Captures, Regex}; use serde::Deserialize; @@ -594,7 +593,6 @@ impl NakamotoBlockProposal { tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); let err = match tx_result { @@ -773,7 +771,6 @@ impl NakamotoBlockProposal { &replay_tx, replay_tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); match tx_result { @@ -837,7 +834,6 @@ impl 
NakamotoBlockProposal { tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); } @@ -852,7 +848,6 @@ impl NakamotoBlockProposal { &tx, tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); match tx_result { diff --git a/stackslib/src/net/api/postmicroblock.rs b/stackslib/src/net/api/postmicroblock.rs index f5b8f0a7781..791d78ef925 100644 --- a/stackslib/src/net/api/postmicroblock.rs +++ b/stackslib/src/net/api/postmicroblock.rs @@ -162,16 +162,12 @@ impl RPCRequestHandler for RPCPostMicroblockRequestHandler { let parent_block_snapshot = Relayer::get_parent_stacks_block_snapshot(&sort_handle, consensus_hash, block_hash) .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load parent block for Stacks tip: {:?}", &e))))?; - let ast_rules = SortitionDB::get_ast_rules(&sort_handle, parent_block_snapshot.block_height) - .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load AST rules for Bitcoin block height {}: {:?}", parent_block_snapshot.block_height, &e))))?; - let epoch_id = self.get_stacks_epoch(&preamble, sortdb, parent_block_snapshot.block_height)?.epoch_id; if !Relayer::static_check_problematic_relayed_microblock( chainstate.mainnet, epoch_id, - µblock, - ast_rules, + µblock ) { info!("Microblock {} from {}/{} is problematic; will not store or relay it, nor its descendants", µblock.block_hash(), consensus_hash, &block_hash); @@ -181,17 +177,15 @@ impl RPCRequestHandler for RPCPostMicroblockRequestHandler { match chainstate.preprocess_streamed_microblock(consensus_hash, block_hash, µblock) { Ok(accepted) => { - debug!("{} uploaded microblock {}/{}-{}", + debug!("{} uploaded microblock {consensus_hash}/{block_hash}-{}", if accepted { "Accepted" } else { "Did not accept" }, - consensus_hash, - block_hash, µblock.block_hash() ); - return Ok((accepted, StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash))); + 
Ok((accepted, StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash))) }, Err(e) => { debug!("Failed to process microblock {}/{}-{}: {:?}", &consensus_hash, &block_hash, µblock.block_hash(), &e); - return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(e.into_json()))); + Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(e.into_json()))) } } }); diff --git a/stackslib/src/net/api/posttransaction.rs b/stackslib/src/net/api/posttransaction.rs index 84209f25999..3fd2c19945c 100644 --- a/stackslib/src/net/api/posttransaction.rs +++ b/stackslib/src/net/api/posttransaction.rs @@ -197,15 +197,13 @@ impl RPCRequestHandler for RPCPostTransactionRequestHandler { chainstate.mainnet, stacks_epoch.epoch_id, &tx, - network.ast_rules, ) .is_err() { // we statically check the tx for known problems, and it had some. Reject. debug!( - "Transaction {} is problematic in rules {:?}; will not store or relay", - &tx.txid(), - network.ast_rules + "Transaction {} is problematic; will not store or relay", + &tx.txid() ); return Ok(false); } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 1dcea1abcfb..d38bd7b734e 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -22,7 +22,6 @@ use std::sync::{Arc, Condvar, Mutex}; use clarity::codec::StacksMessageCodec; use clarity::consts::CHAIN_ID_TESTNET; use clarity::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksPrivateKey}; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StandardPrincipalData; use postblock_proposal::{NakamotoBlockProposal, ValidateRejectCode}; @@ -294,7 +293,6 @@ fn test_try_make_response() { &tx, tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); let block = builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); @@ -546,7 +544,6 @@ fn 
replay_validation_test( &tx, tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 6006581b83a..4d3587ff139 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2239,7 +2239,6 @@ pub mod test { use clarity::types::sqlite::NO_PARAMS; use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; - use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; use clarity::vm::types::*; @@ -3073,7 +3072,6 @@ pub mod test { clarity, &boot_code_smart_contract, &boot_code_account, - ASTRules::PrecheckSize, None, ) .unwrap() diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 1702af2e762..1efac82ce21 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -20,7 +20,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError, TrySendError}; use std::thread::JoinHandle; -use clarity::vm::ast::ASTRules; use clarity::vm::types::QualifiedContractIdentifier; use mio::net as mio_net; use rand::prelude::*; @@ -467,7 +466,6 @@ pub struct PeerNetwork { pub chain_view: BurnchainView, pub burnchain_tip: BlockSnapshot, pub chain_view_stable_consensus_hash: ConsensusHash, - pub ast_rules: ASTRules, /// Current Stacks tip -- the highest block's consensus hash, block hash, and height pub stacks_tip: StacksTipInfo, @@ -688,7 +686,6 @@ impl PeerNetwork { local_peer, chain_view, chain_view_stable_consensus_hash: ConsensusHash([0u8; 20]), - ast_rules: ASTRules::Typical, last_anchor_block_hash: BlockHeaderHash([0x00; 32]), last_anchor_block_txid: Txid([0x00; 32]), burnchain_tip: BlockSnapshot::initial( @@ -4887,9 +4884,6 @@ impl PeerNetwork { ibd, ); - // update tx validation information - self.ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), canonical_sn.block_height)?; - // update last anchor data let ih = 
sortdb.index_handle(&canonical_sn.sortition_id); self.last_anchor_block_hash = ih diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index be6b5302817..9197bf2cd71 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -17,8 +17,8 @@ use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::mem; +use clarity::vm::ast::ast_check_size; use clarity::vm::ast::errors::ParseErrors; -use clarity::vm::ast::{ast_check_size, ASTRules}; use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::ClarityVersion; use rand::prelude::*; @@ -818,31 +818,16 @@ impl Relayer { // don't relay this block if it's using the wrong AST rules (this would render at least one of its // txs problematic). - let ast_rules = SortitionDB::get_ast_rules(sort_ic, block_sn.block_height)?; let epoch_id = SortitionDB::get_stacks_epoch(sort_ic, block_sn.block_height)? .expect("FATAL: no epoch defined") .epoch_id; - debug!( - "Current AST rules for block {}/{} height {} sortitioned at {} is {:?}", - consensus_hash, - &block.block_hash(), - block.header.total_work.work, - &block_sn.block_height, - &ast_rules - ); - if !Relayer::static_check_problematic_relayed_block( - chainstate.mainnet, - epoch_id, - block, - ast_rules, - ) { + if !Relayer::static_check_problematic_relayed_block(chainstate.mainnet, epoch_id, block) { warn!( "Block is problematic; will not store or relay"; "stacks_block_hash" => %block.block_hash(), "consensus_hash" => %consensus_hash, "burn_height" => block.header.total_work.work, - "sortition_height" => block_sn.block_height, - "ast_rules" => ?ast_rules, + "sortition_height" => block_sn.block_height ); return Ok(BlockAcceptResponse::Rejected("Block is problematic".into())); } @@ -988,7 +973,6 @@ impl Relayer { chainstate.mainnet, epoch_id, block, - ASTRules::PrecheckSize, ) { warn!( "Nakamoto block is problematic; will not store or relay"; @@ -1444,13 +1428,6 @@ impl Relayer { } }; - let ast_rules 
= match SortitionDB::get_ast_rules(sort_ic, block_snapshot.block_height) { - Ok(rules) => rules, - Err(e) => { - error!("Failed to load current AST rules: {:?}", &e); - continue; - } - }; let epoch_id = match SortitionDB::get_stacks_epoch(sort_ic, block_snapshot.block_height) { Ok(Some(epoch)) => epoch.epoch_id, @@ -1458,7 +1435,7 @@ impl Relayer { panic!("FATAL: no epoch defined"); } Err(e) => { - error!("Failed to load epoch: {:?}", &e); + error!("Failed to load epoch: {e:?}"); continue; } }; @@ -1466,18 +1443,15 @@ impl Relayer { let mut stored = false; for mblock in microblock_stream.iter() { debug!( - "Preprocess downloaded microblock {}/{}-{}", - consensus_hash, - &anchored_block_hash, + "Preprocess downloaded microblock {consensus_hash}/{anchored_block_hash}-{}", &mblock.block_hash() ); if !Relayer::static_check_problematic_relayed_microblock( chainstate.mainnet, epoch_id, mblock, - ast_rules, ) { - info!("Microblock {} from {}/{} is problematic; will not store or relay it, nor its descendants", &mblock.block_hash(), consensus_hash, &anchored_block_hash); + info!("Microblock {} from {consensus_hash}/{anchored_block_hash} is problematic; will not store or relay it, nor its descendants", &mblock.block_hash()); break; } match chainstate.preprocess_streamed_microblock( @@ -1490,11 +1464,8 @@ impl Relayer { } Err(e) => { warn!( - "Invalid downloaded microblock {}/{}-{}: {:?}", - consensus_hash, - &anchored_block_hash, - mblock.block_hash(), - &e + "Invalid downloaded microblock {consensus_hash}/{anchored_block_hash}-{}: {e:?}", + mblock.block_hash() ); } } @@ -1548,25 +1519,21 @@ impl Relayer { let block_snapshot = SortitionDB::get_block_snapshot_consensus(sort_ic, &consensus_hash)? .ok_or(net_error::DBError(db_error::NotFoundError))?; - let ast_rules = SortitionDB::get_ast_rules(sort_ic, block_snapshot.block_height)?; let epoch_id = SortitionDB::get_stacks_epoch(sort_ic, block_snapshot.block_height)? 
.expect("FATAL: no epoch defined") .epoch_id; for mblock in mblock_data.microblocks.iter() { debug!( - "Preprocess downloaded microblock {}/{}-{}", - &consensus_hash, - &anchored_block_hash, + "Preprocess downloaded microblock {consensus_hash}/{anchored_block_hash}-{}", &mblock.block_hash() ); if !Relayer::static_check_problematic_relayed_microblock( chainstate.mainnet, epoch_id, mblock, - ast_rules, ) { - info!("Microblock {} from {}/{} is problematic; will not store or relay it, nor its descendants", &mblock.block_hash(), &consensus_hash, &anchored_block_hash); + info!("Microblock {} from {consensus_hash}/{anchored_block_hash} is problematic; will not store or relay it, nor its descendants", &mblock.block_hash()); continue; } let need_relay = !chainstate.has_descendant_microblock_indexed( @@ -1805,55 +1772,48 @@ impl Relayer { mainnet: bool, epoch_id: StacksEpochId, tx: &StacksTransaction, - ast_rules: ASTRules, ) -> Result<(), Error> { - debug!( - "Check {} to see if it is problematic in {:?}", - &tx.txid(), - &ast_rules - ); + debug!("Check {} to see if it is problematic", &tx.txid(),); if let TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) = tx.payload { let clarity_version = clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); - if ast_rules == ASTRules::PrecheckSize { - let origin = tx.get_origin(); - let issuer_principal = { - let addr = if mainnet { - origin.address_mainnet() - } else { - origin.address_testnet() - }; - addr.to_account_principal() - }; - let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { - data + let origin = tx.get_origin(); + let issuer_principal = { + let addr = if mainnet { + origin.address_mainnet() } else { - // not possible - panic!("Transaction had a contract principal origin"); + origin.address_testnet() }; + addr.to_account_principal() + }; + let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { + data + } else { + // 
not possible + panic!("Transaction had a contract principal origin"); + }; - let contract_id = - QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); - let contract_code_str = smart_contract.code_body.to_string(); - - // make sure that the AST isn't unreasonably big - let ast_res = - ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); - match ast_res { - Ok(_) => {} - Err(parse_error) => match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - // don't include this block - info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); - return Err(Error::ClarityError(parse_error.into())); - } - _ => {} - }, - } + let contract_id = + QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); + let contract_code_str = smart_contract.code_body.to_string(); + + // make sure that the AST isn't unreasonably big + let ast_res = + ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); + match ast_res { + Ok(_) => {} + Err(parse_error) => match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + // don't include this block + info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); + return Err(Error::ClarityError(parse_error.into())); + } + _ => {} + }, } } Ok(()) @@ -1868,12 +1828,9 @@ impl Relayer { mainnet: bool, epoch_id: StacksEpochId, block: &StacksBlock, - ast_rules: ASTRules, ) -> bool { for tx in block.txs.iter() { - if Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx, ast_rules) - .is_err() - { + if Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx).is_err() { info!( "Block {} with tx {} will not be stored or relayed", block.block_hash(), @@ -1894,12 +1851,9 @@ impl Relayer { mainnet: bool, epoch_id: StacksEpochId, block: 
&NakamotoBlock, - ast_rules: ASTRules, ) -> bool { for tx in block.txs.iter() { - if Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx, ast_rules) - .is_err() - { + if Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx).is_err() { info!( "Nakamoto block {} with tx {} will not be stored or relayed", block.header.block_hash(), @@ -1921,12 +1875,9 @@ impl Relayer { mainnet: bool, epoch_id: StacksEpochId, mblock: &StacksMicroblock, - ast_rules: ASTRules, ) -> bool { for tx in mblock.txs.iter() { - if Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx, ast_rules) - .is_err() - { + if Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx).is_err() { info!( "Microblock {} with tx {} will not be stored relayed", mblock.block_hash(), @@ -1950,27 +1901,6 @@ impl Relayer { true } - /// Should we store and process problematic blocks and microblocks to staging that we mined? - #[cfg(any(test, feature = "testing"))] - pub fn process_mined_problematic_blocks( - cur_ast_rules: ASTRules, - processed_ast_rules: ASTRules, - ) -> bool { - std::env::var("STACKS_PROCESS_PROBLEMATIC_BLOCKS") != Ok("1".into()) - || cur_ast_rules != processed_ast_rules - } - - /// Should we store and process problematic blocks and microblocks to staging that we mined? - /// We should do this only if we used a different ruleset than the active one. If it was - /// problematic with the currently-active rules, then obviously it shouldn't be processed. - #[cfg(not(any(test, feature = "testing")))] - pub fn process_mined_problematic_blocks( - cur_ast_rules: ASTRules, - processed_ast_rules: ASTRules, - ) -> bool { - cur_ast_rules != processed_ast_rules - } - /// Process blocks and microblocks that we recieved, both downloaded (confirmed) and streamed /// (unconfirmed). 
Returns: /// * set of consensus hashes that elected the newly-discovered blocks, and the blocks, so we can turn them into BlocksAvailable / BlocksData messages @@ -2206,13 +2136,7 @@ impl Relayer { let mut filtered_tx_data = vec![]; for (relayers, tx) in tx_data.into_iter() { if Relayer::do_static_problematic_checks() - && Relayer::static_check_problematic_relayed_tx( - mainnet, - epoch_id, - &tx, - ASTRules::PrecheckSize, - ) - .is_err() + && Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, &tx).is_err() { info!( "Pushed transaction {} is problematic; will not store or relay", @@ -2229,13 +2153,7 @@ impl Relayer { for tx in network_result.uploaded_transactions.drain(..) { if Relayer::do_static_problematic_checks() - && Relayer::static_check_problematic_relayed_tx( - mainnet, - epoch_id, - &tx, - ASTRules::PrecheckSize, - ) - .is_err() + && Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, &tx).is_err() { info!( "Uploaded transaction {} is problematic; will not store or relay", diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 520a72b203e..2f8c2e185f8 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -17,7 +17,6 @@ use std::cell::RefCell; use std::collections::HashMap; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; -use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; @@ -2620,8 +2619,7 @@ pub fn make_contract_tx( let mut tx_signer = StacksTransactionSigner::new(&tx_contract); tx_signer.sign_origin(sender).unwrap(); - let tx_contract_signed = tx_signer.get_tx().unwrap(); - tx_contract_signed + tx_signer.get_tx().unwrap() } #[test] @@ -2633,7 +2631,7 @@ fn test_static_problematic_tests() { let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + 
(MAX_CALL_STACK_DEPTH as u64) - 1; let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); + let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); let tx_edge = make_contract_tx( &spender_sk_1, @@ -2647,7 +2645,7 @@ fn test_static_problematic_tests() { let exceeds_repeat_factor = edge_repeat_factor + 1; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_tx( &spender_sk_2, @@ -2661,7 +2659,7 @@ fn test_static_problematic_tests() { let high_repeat_factor = 128 * 1024; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_tx( &spender_sk_3, @@ -2673,44 +2671,19 @@ fn test_static_problematic_tests() { assert!(Relayer::static_check_problematic_relayed_tx( false, StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::Typical + &tx_edge ) - .is_ok()); - - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); + .is_err()); assert!(Relayer::static_check_problematic_relayed_tx( false, 
StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::PrecheckSize + &tx_exceeds ) .is_err()); assert!(Relayer::static_check_problematic_relayed_tx( false, StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::PrecheckSize + &tx_high ) .is_err()); } @@ -2753,16 +2726,7 @@ fn process_new_blocks_rejects_problematic_asts() { // activate new AST rules right away let mut peer = TestPeer::new(peer_config); - let mut sortdb = peer.sortdb.take().unwrap(); - { - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } + let sortdb = peer.sortdb.take().unwrap(); peer.sortdb = Some(sortdb); let chainstate_path = peer.chainstate_path.clone(); diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index 5c19926fcb3..a119abbf3dd 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -164,7 +164,6 @@ pub mod pox4 { #[cfg(test)] mod tests { - use clarity::vm::ast::ASTRules; use clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use clarity::vm::costs::LimitedCostTracker; use clarity::vm::types::PrincipalData; @@ -236,12 +235,7 @@ pub mod pox4 { conn.as_transaction(|clarity_db| { let clarity_version = ClarityVersion::Clarity2; let (ast, analysis) = clarity_db - .analyze_smart_contract( - &pox_contract_id, - clarity_version, - body, - ASTRules::PrecheckSize, - ) + .analyze_smart_contract(&pox_contract_id, clarity_version, body) .unwrap(); clarity_db .initialize_smart_contract( From a85013466a693cd3b23d8aa969d682e106cd3391 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 11 Sep 2025 11:42:00 -0700 Subject: [PATCH 02/56] Cargo fmt Signed-off-by: Jacinta Ferrant --- 
stackslib/src/chainstate/stacks/miner.rs | 50 ++++++++++++------------ stackslib/src/clarity_cli.rs | 10 +---- 2 files changed, 26 insertions(+), 34 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index cd8dd4bf87f..a54a6ac5f9c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -845,32 +845,30 @@ impl<'a> StacksMicroblockBuilder<'a> { }; let (header_reader, _) = chainstate.reopen()?; - let ( - anchored_consensus_hash, - anchored_block_hash, - anchored_block_height - ) = if let Some(unconfirmed) = chainstate.unconfirmed_state.as_ref() { - let header_info = StacksChainState::get_stacks_block_header_info_by_index_block_hash( - chainstate.db(), - &unconfirmed.confirmed_chain_tip, - )? - .ok_or_else(|| { - warn!( - "No such confirmed block {}", - &unconfirmed.confirmed_chain_tip - ); - Error::NoSuchBlockError - })?; - ( - header_info.consensus_hash, - header_info.anchored_header.block_hash(), - header_info.stacks_block_height - ) - } else { - // unconfirmed state needs to be initialized - debug!("Unconfirmed chainstate not initialized"); - return Err(Error::NoSuchBlockError)?; - }; + let (anchored_consensus_hash, anchored_block_hash, anchored_block_height) = + if let Some(unconfirmed) = chainstate.unconfirmed_state.as_ref() { + let header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + chainstate.db(), + &unconfirmed.confirmed_chain_tip, + )? 
+ .ok_or_else(|| { + warn!( + "No such confirmed block {}", + &unconfirmed.confirmed_chain_tip + ); + Error::NoSuchBlockError + })?; + ( + header_info.consensus_hash, + header_info.anchored_header.block_hash(), + header_info.stacks_block_height, + ) + } else { + // unconfirmed state needs to be initialized + debug!("Unconfirmed chainstate not initialized"); + return Err(Error::NoSuchBlockError)?; + }; let mut clarity_tx = chainstate.begin_unconfirmed(burn_dbconn).ok_or_else(|| { warn!( diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index ade05a66e8a..35c7ec179ae 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -1384,10 +1384,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option Date: Thu, 11 Sep 2025 12:35:41 -0700 Subject: [PATCH 03/56] Cargo fmt Signed-off-by: Jacinta Ferrant --- stackslib/src/clarity_vm/clarity.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 076c5f59c3d..38f2d87bddd 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -1728,8 +1728,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { stx_balance: STXBalance::zero(), }; - let costs_4_contract_tx = - StacksTransaction::new(tx_version, boot_code_auth, payload); + let costs_4_contract_tx = StacksTransaction::new(tx_version, boot_code_auth, payload); let costs_4_initialization_receipt = self.as_transaction(|tx_conn| { // bump the epoch in the Clarity DB From 05d2e0a3acf821e74d5e922cad9db8d05f266373 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 11 Sep 2025 16:29:41 -0700 Subject: [PATCH 04/56] Fix some unit tests Signed-off-by: Jacinta Ferrant --- clarity/src/vm/ast/mod.rs | 29 ---------------------- stacks-node/src/tests/neon_integrations.rs | 22 ++++------------ stackslib/src/config/mod.rs | 13 ---------- stackslib/src/net/tests/relay/epoch2x.rs | 2 +- 4 files 
changed, 6 insertions(+), 60 deletions(-) diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 23551390411..cfdb7ca9f84 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -310,38 +310,12 @@ mod test { "} ".repeat(stack_limit + 1) ); - // for deep lists, a test like this works: - // it can assert a limit, that you can also verify - // by disabling `VaryStackDepthChecker` and arbitrarily bumping up the parser lexer limits - // and see that it produces the same result let exceeds_stack_depth_list = format!( "{}u1 {}", "(list ".repeat(stack_limit + 1), ")".repeat(stack_limit + 1) ); - // with old rules, this is just ExpressionStackDepthTooDeep - let mut cost_track = UnitTestTracker::new(); - let err = build_ast( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_list, - &mut cost_track, - clarity_version, - StacksEpochId::Epoch2_05, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // with new rules, this is now VaryExpressionStackDepthTooDeep let mut cost_track = UnitTestTracker::new(); let err = build_ast( &QualifiedContractIdentifier::transient(), @@ -362,9 +336,6 @@ mod test { assert_eq!(&expected_err, &err.err); assert_eq!(expected_list_cost_state, cost_track); - // you cannot do the same for tuples! - // this actually won't even error without - // the VaryStackDepthChecker changes. 
let mut cost_track = UnitTestTracker::new(); let err = build_ast( &QualifiedContractIdentifier::transient(), diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index 8e367defb70..0d20b5f2dd2 100644 --- a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -7778,9 +7778,6 @@ fn test_problematic_txs_are_not_stored() { ])); conf.burnchain.pox_2_activation = Some(10_003); - // take effect immediately - conf.burnchain.ast_precheck_size_height = Some(0); - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -7936,7 +7933,6 @@ fn spawn_follower_node( conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); - conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; conf.connection_options.inv_sync_interval = 3; @@ -8020,9 +8016,6 @@ fn test_problematic_blocks_are_not_mined() { ])); conf.burnchain.pox_2_activation = Some(10_003); - // AST precheck becomes default at burn height - conf.burnchain.ast_precheck_size_height = Some(210); - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -8342,9 +8335,6 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { ])); conf.burnchain.pox_2_activation = Some(10_003); - // AST precheck becomes default at burn height - conf.burnchain.ast_precheck_size_height = Some(210); - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -8474,17 +8464,15 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Runloop failed to 
advance"); // add another bad tx to the mempool. - // because the miner is now non-conformant, it should mine this tx. + // TODO: Forcibly mine a bad transaction to simulate a bad miner? debug!("Submit problematic tx_high transaction {tx_high_txid}"); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 34f0457bbdb..b7d1919752c 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1585,16 +1585,6 @@ pub struct BurnchainConfig { /// @notes: /// - Primarily relevant for miners interacting with multi-wallet Bitcoin nodes. pub wallet_name: String, - /// Override for the burnchain height activating stricter AST size checks - /// pre-epoch 3.0 for testing purposes. - /// - /// Used pre-epoch 3.0 to control activation before it became standard (at burn - /// height `752000`). Ignored in standard production builds as the underlying - /// mechanism is disabled unless the `testing` feature is active. - /// --- - /// @default: `None` - /// @deprecated: This setting is ignored in Epoch 3.0+. - pub ast_precheck_size_height: Option, /// Fault injection setting for testing. Introduces an artificial delay (in /// milliseconds) before processing each burnchain block download. Simulates a /// slow burnchain connection. 
@@ -1661,7 +1651,6 @@ impl BurnchainConfig { sunset_start: None, sunset_end: None, wallet_name: "".to_string(), - ast_precheck_size_height: None, fault_injection_burnchain_block_delay: 0, max_unspent_utxos: Some(1024), } @@ -1757,7 +1746,6 @@ pub struct BurnchainConfigFile { pub sunset_start: Option, pub sunset_end: Option, pub wallet_name: Option, - pub ast_precheck_size_height: Option, pub fault_injection_burnchain_block_delay: Option, pub max_unspent_utxos: Option, } @@ -1881,7 +1869,6 @@ impl BurnchainConfigFile { .or(default_burnchain_config.first_burn_block_hash.clone()), // will be overwritten below epochs: default_burnchain_config.epochs, - ast_precheck_size_height: self.ast_precheck_size_height, pox_2_activation: self .pox_2_activation .or(default_burnchain_config.pox_2_activation), diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 6d12f9061f4..e07a22d0dca 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2673,7 +2673,7 @@ fn test_static_problematic_tests() { StacksEpochId::Epoch2_05, &tx_edge ) - .is_err()); + .is_ok()); assert!(Relayer::static_check_problematic_relayed_tx( false, StacksEpochId::Epoch2_05, From 3f2347764562cabf4f89989af388f502597e15f1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 12 Sep 2025 11:24:39 -0700 Subject: [PATCH 05/56] Fix inversion of what block to process Signed-off-by: Jacinta Ferrant --- stacks-node/src/neon_node.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/stacks-node/src/neon_node.rs b/stacks-node/src/neon_node.rs index b2914782fa4..5f5fd44fdf3 100644 --- a/stacks-node/src/neon_node.rs +++ b/stacks-node/src/neon_node.rs @@ -787,7 +787,7 @@ impl MicroblockMinerThread { ) { // nope! warn!( - "Our mined microblock {} was problematic", + "Our mined microblock {} was problematic. 
Will NOT process.", &mined_microblock.block_hash() ); @@ -823,10 +823,7 @@ impl MicroblockMinerThread { ); } } - warn!( - "Will process our problematic mined microblock {}", - &mined_microblock.block_hash() - ) + return Err(ChainstateError::NoTransactionsToMine); } // cancelled? @@ -3067,7 +3064,7 @@ impl RelayerThread { ) { // nope! warn!( - "Our mined block {} was problematic", + "Our mined block {} was problematic. Will NOT process.", &anchored_block.block_hash() ); #[cfg(any(test, feature = "testing"))] @@ -3099,10 +3096,7 @@ impl RelayerThread { ); } } - warn!( - "Will process our problematic mined block {}", - &anchored_block.block_hash() - ) + return Err(ChainstateError::NoTransactionsToMine); } // Preprocess the anchored block From d5cb49a173414b1bc8bcbb8a450605bd75e00370 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 12 Sep 2025 13:07:05 -0700 Subject: [PATCH 06/56] Cleanup Signed-off-by: Jacinta Ferrant --- stacks-node/src/neon_node.rs | 2 +- stacks-node/src/tests/neon_integrations.rs | 23 +++++++++++----------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/stacks-node/src/neon_node.rs b/stacks-node/src/neon_node.rs index 5f5fd44fdf3..a21fb7a5ffe 100644 --- a/stacks-node/src/neon_node.rs +++ b/stacks-node/src/neon_node.rs @@ -2935,7 +2935,7 @@ impl RelayerThread { /// Handle a NetworkResult from the p2p/http state machine. 
Usually this is the act of /// * preprocessing and storing new blocks and microblocks - /// * relaying blocks, microblocks, and transacctions + /// * relaying blocks, microblocks, and transactions /// * updating unconfirmed state views pub fn process_network_result(&mut self, mut net_result: NetworkResult) { debug!( diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index 0d20b5f2dd2..7006293e2dd 100644 --- a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -8542,20 +8542,18 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height - || follower_tip_info.stacks_tip_height + 1 == bad_block_height - { - break; - } eprintln!( "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - sleep_ms(1000); - } + Ok( + follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height + || follower_tip_info.stacks_tip_height + 1 == bad_block_height, + ) + }) + .expect("Follower failed to advance"); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -8564,14 +8562,15 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { eprintln!( "\nFollower has performed {} download passes; wait for {}\n", pox_sync_comms.get_download_passes(), num_download_passes + 5 ); - } + Ok(pox_sync_comms.get_download_passes() >= num_download_passes + 5) + }) + 
.expect("Follower failed to perform download passes"); eprintln!( "\nFollower has performed {} download passes\n", From 62287cc506e2cbac4493ef6bb29cce4b2e052bfb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 15 Sep 2025 16:54:14 -0700 Subject: [PATCH 07/56] Drop ast_rule_heights from sortdb and fix bug in get_schema_version Signed-off-by: Jacinta Ferrant --- clarity/src/vm/ast/mod.rs | 6 - stackslib/src/chainstate/burn/db/sortdb.rs | 153 +++++++++++------- stackslib/src/chainstate/coordinator/mod.rs | 2 +- stackslib/src/chainstate/coordinator/tests.rs | 2 +- stackslib/src/core/mod.rs | 3 - 5 files changed, 94 insertions(+), 72 deletions(-) diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index cfdb7ca9f84..55d9461c17a 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -54,12 +54,6 @@ pub fn parse( Ok(ast.expressions) } -// AST parser rulesets to apply. -define_u8_enum!(ASTRules { - Typical = 0, - PrecheckSize = 1 -}); - /// Parse a program based on which epoch is active fn parse_in_epoch( source_code: &str, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e19fe18147a..d91c84934b8 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -21,7 +21,6 @@ use std::sync::{Arc, LazyLock, Mutex, MutexGuard}; use std::{cmp, fs}; use clarity::util::lru_cache::LruCache; -use clarity::vm::ast::ASTRules; use rusqlite::{params, Connection, OptionalExtension, Row, Transaction}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockId, @@ -56,9 +55,7 @@ use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::chainstate::ChainstateDB; -use crate::core::{ - EpochList, StacksEpoch, 
StacksEpochExtension, StacksEpochId, AST_RULES_PRECHECK_SIZE, -}; +use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, StacksEpochId}; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::util_lib::db::{ db_mkdirs, opt_u64_to_sql, query_row, query_row_panic, query_rows, sql_pragma, table_exists, @@ -484,7 +481,7 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &str = "9"; +pub const SORTITION_DB_VERSION: u32 = 10; const SORTITION_DB_INITIAL_SCHEMA: &[&str] = &[ r#" @@ -722,6 +719,7 @@ const SORTITION_DB_SCHEMA_8: &[&str] = &[ static SORTITION_DB_SCHEMA_9: &[&str] = &[r#"ALTER TABLE block_commits ADD punished TEXT DEFAULT NULL;"#]; +static SORTITION_DB_SCHEMA_10: &[&str] = &[r#"DROP TABLE IF EXISTS ast_rule_heights;"#]; const LAST_SORTITION_DB_INDEX: &str = "index_block_commits_by_sender"; const SORTITION_DB_INDEXES: &[&str] = &[ @@ -2837,7 +2835,7 @@ impl SortitionDB { let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; - + SortitionDB::apply_schema_10(&db_tx)?; db_tx.commit()?; self.add_indexes()?; @@ -3030,7 +3028,7 @@ impl SortitionDB { /// Returns the version string, if it exists. /// /// Does **not** migrate the database (like `open()` or `connect()` would) - pub fn get_db_version_from_path(path: &str) -> Result, db_error> { + pub fn get_db_version_from_path(path: &str) -> Result, db_error> { if fs::metadata(path).is_err() { return Err(db_error::NoDBError); } @@ -3055,33 +3053,31 @@ impl SortitionDB { } /// Is a particular database version supported by a given epoch? 
- pub fn is_db_version_supported_in_epoch(epoch: StacksEpochId, version: &str) -> bool { - let version_u32: u32 = version.parse().unwrap_or_else(|e| { - error!("Failed to parse sortdb version as u32: {e}"); - 0 - }); + pub fn is_db_version_supported_in_epoch(epoch: StacksEpochId, version: u32) -> bool { match epoch { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1, - StacksEpochId::Epoch2_05 => version_u32 >= 2, - StacksEpochId::Epoch21 => version_u32 >= 3, - StacksEpochId::Epoch22 => version_u32 >= 3, - StacksEpochId::Epoch23 => version_u32 >= 3, - StacksEpochId::Epoch24 => version_u32 >= 3, - StacksEpochId::Epoch25 => version_u32 >= 3, - StacksEpochId::Epoch30 => version_u32 >= 3, - StacksEpochId::Epoch31 => version_u32 >= 3, - StacksEpochId::Epoch32 => version_u32 >= 3, - StacksEpochId::Epoch33 => version_u32 >= 3, + StacksEpochId::Epoch20 => version >= 1, + StacksEpochId::Epoch2_05 => version >= 2, + StacksEpochId::Epoch21 => version >= 3, + StacksEpochId::Epoch22 => version >= 3, + StacksEpochId::Epoch23 => version >= 3, + StacksEpochId::Epoch24 => version >= 3, + StacksEpochId::Epoch25 => version >= 3, + StacksEpochId::Epoch30 => version >= 3, + StacksEpochId::Epoch31 => version >= 3, + StacksEpochId::Epoch32 => version >= 3, + StacksEpochId::Epoch33 => version >= 3, } } /// Get the database schema version, given a DB connection - fn get_schema_version(conn: &Connection) -> Result, db_error> { + fn get_schema_version(conn: &Connection) -> Result, db_error> { let version = conn - .query_row("SELECT MAX(version) from db_config", NO_PARAMS, |row| { - row.get(0) - }) + .query_row( + "SELECT MAX(CAST(version AS INTEGER)) FROM db_config", + [], + |row| row.get(0), + ) .optional()?; Ok(version) } @@ -3116,22 +3112,6 @@ impl SortitionDB { for sql_exec in SORTITION_DB_SCHEMA_4 { tx.execute_batch(sql_exec)?; } - - let typical_rules = params![(ASTRules::Typical as u8), 0i64]; - - let precheck_size_rules = params![ - (ASTRules::PrecheckSize 
as u8), - u64_to_sql(AST_RULES_PRECHECK_SIZE)?, - ]; - - tx.execute( - "INSERT INTO ast_rule_heights (ast_rule_id,block_height) VALUES (?1, ?2)", - typical_rules, - )?; - tx.execute( - "INSERT INTO ast_rule_heights (ast_rule_id,block_height) VALUES (?1, ?2)", - precheck_size_rules, - )?; tx.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", &["4"], @@ -3326,19 +3306,30 @@ impl SortitionDB { Ok(()) } + fn apply_schema_10(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_10 { + tx.execute_batch(sql_exec)?; + } + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["10"], + )?; + + Ok(()) + } + fn check_schema_version_or_error(&mut self) -> Result<(), db_error> { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { - let expected_version = SORTITION_DB_VERSION.to_string(); - if version == expected_version { + if version == SORTITION_DB_VERSION { Ok(()) } else { - let version_u64 = version.parse::().unwrap(); - Err(db_error::OldSchema(version_u64)) + Err(db_error::OldSchema(version.into())) } } Ok(None) => panic!("The schema version of the sortition DB is not recorded."), - Err(e) => panic!("Error obtaining the version of the sortition DB: {:?}", e), + Err(e) => panic!("Error obtaining the version of the sortition DB: {e:?}"), } } @@ -3351,46 +3342,49 @@ impl SortitionDB { epochs: &[StacksEpoch], mut migrator: Option, ) -> Result<(), db_error> { - let expected_version = SORTITION_DB_VERSION.to_string(); loop { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { - if version == "1" { + if version == 1 { let tx = self.tx_begin()?; SortitionDB::apply_schema_2(tx.deref(), epochs)?; tx.commit()?; - } else if version == "2" { + } else if version == 2 { // add the tables of schema 3, but do not populate them. 
let tx = self.tx_begin()?; SortitionDB::apply_schema_3(tx.deref())?; tx.commit()?; - } else if version == "3" { + } else if version == 3 { let tx = self.tx_begin()?; SortitionDB::apply_schema_4(tx.deref())?; tx.commit()?; - } else if version == "4" { + } else if version == 4 { let tx = self.tx_begin()?; SortitionDB::apply_schema_5(tx.deref(), epochs)?; tx.commit()?; - } else if version == "5" { + } else if version == 5 { let tx = self.tx_begin()?; SortitionDB::apply_schema_6(tx.deref(), epochs)?; tx.commit()?; - } else if version == "6" { + } else if version == 6 { let tx = self.tx_begin()?; SortitionDB::apply_schema_7(tx.deref(), epochs)?; tx.commit()?; - } else if version == "7" { + } else if version == 7 { let tx = self.tx_begin()?; SortitionDB::apply_schema_8_tables(tx.deref(), epochs)?; tx.commit()?; self.apply_schema_8_migration(migrator.take())?; - } else if version == "8" { + } else if version == 8 { let tx = self.tx_begin()?; SortitionDB::apply_schema_9(tx.deref(), epochs)?; tx.commit()?; - } else if version == expected_version { + } else if version == 9 { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_10(tx.deref())?; + tx.commit()?; + } else if version == SORTITION_DB_VERSION { // this transaction is almost never needed let validated_epochs = StacksEpoch::validate_epochs(epochs); let existing_epochs = Self::get_stacks_epochs(self.conn())?; @@ -3409,7 +3403,7 @@ impl SortitionDB { } } Ok(None) => panic!("The schema version of the sortition DB is not recorded."), - Err(e) => panic!("Error obtaining the version of the sortition DB: {:?}", e), + Err(e) => panic!("Error obtaining the version of the sortition DB: {e:?}"), } } } @@ -4757,7 +4751,7 @@ impl SortitionDB { let args = params![ConsensusHash::empty()]; let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; - while let Some(row) = rows.next()? { + if let Some(row) = rows.next()? 
{ let height_i64: i64 = row.get("block_height")?; let hash: BurnchainHeaderHash = row.get("burn_header_hash")?; let height = u64::try_from(height_i64).map_err(|_| { @@ -4767,7 +4761,7 @@ impl SortitionDB { return Ok((height, hash)); } // NOTE: shouldn't be reachable because we instantiate with a first snapshot - return Err(db_error::NotFoundError); + Err(db_error::NotFoundError) } pub fn is_pox_active( @@ -10958,4 +10952,41 @@ pub mod tests { .has_consensus_hash(&all_snapshots[4].consensus_hash) .unwrap()); } + + fn table_exists(path: &str, name: &str) -> bool { + let conn = + Connection::open_with_flags(path, rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap(); + conn.busy_timeout(std::time::Duration::from_secs(1)) + .unwrap(); + + conn.query_row( + "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?1", + [name], + |row| row.get::<_, i64>(0), + ) + .unwrap() + > 0 + } + + #[test] + fn schema_10_drops_ast_rules() { + let tmp = std::env::temp_dir().join(function_name!()); + + let first_block_header = { + let burnchain = Burnchain::regtest(tmp.to_str().unwrap()); + let burnchain_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); + + burnchain_db.get_canonical_chain_tip().unwrap() + }; + let epochs = (*STACKS_EPOCHS_TESTNET).clone(); + let db = SortitionDB::connect_test_with_epochs( + first_block_header.block_height, + &first_block_header.block_hash, + epochs, + ) + .unwrap(); + let path = db.conn().path().unwrap().to_string(); + assert!(!table_exists(&path, "ast_rule_heights")); + } } diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 7ac94f19091..e500b6bcd6c 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -1808,7 +1808,7 @@ pub fn check_chainstate_db_versions( let db_version = SortitionDB::get_db_version_from_path(sortdb_path)? 
.expect("FATAL: could not load sortition DB version"); - if !SortitionDB::is_db_version_supported_in_epoch(cur_epoch, &db_version) { + if !SortitionDB::is_db_version_supported_in_epoch(cur_epoch, db_version) { error!("Sortition DB at {sortdb_path} does not support epoch {cur_epoch}"); return Ok(false); } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 60ff31032b9..ac39b68aafc 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -6456,7 +6456,7 @@ fn test_check_chainstate_db_versions() { SortitionDB::get_db_version_from_path(&sortdb_path) .unwrap() .unwrap(), - "1" + 1 ); // should work just fine in epoch 2 diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index b561a6b38c9..479fd33a7e1 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -182,9 +182,6 @@ pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = // This is out of 10, so 7 means "70%". pub const NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD: u64 = 7; -/// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet -pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 - // Stacks 1.0 did not allow smart contracts so all limits are 0. 
pub const BLOCK_LIMIT_MAINNET_10: ExecutionCost = ExecutionCost { write_length: 0, From fe6de8b199b19a8351bb3d09b0c5c00b2f9bc11d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 16 Sep 2025 11:06:22 -0700 Subject: [PATCH 08/56] Drop test_problematic_blocks_are_not_relayed_or_stored and expand test_static_problematic_tests Signed-off-by: Jacinta Ferrant --- stacks-node/src/tests/neon_integrations.rs | 323 --------------------- stackslib/src/net/tests/relay/epoch2x.rs | 100 ++++--- 2 files changed, 58 insertions(+), 365 deletions(-) diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index 7006293e2dd..178e04ffc16 100644 --- a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -8268,329 +8268,6 @@ fn test_problematic_blocks_are_not_mined() { follower_channel.stop_chains_coordinator(); } -// TODO: test in epoch 2.1 with parser_v2 -#[test] -#[ignore] -fn test_problematic_blocks_are_not_relayed_or_stored() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(bad_blocks_dir).is_ok() { - fs::remove_dir_all(bad_blocks_dir).unwrap(); - } - fs::create_dir_all(bad_blocks_dir).unwrap(); - - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); - - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); - let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); - let spender_stacks_addr_1 = to_addr(&spender_sk_1); - let spender_stacks_addr_2 = to_addr(&spender_sk_2); - let spender_stacks_addr_3 = to_addr(&spender_sk_3); - let spender_addr_1: PrincipalData = spender_stacks_addr_1.into(); - let spender_addr_2: PrincipalData = spender_stacks_addr_2.into(); - let spender_addr_3: PrincipalData = spender_stacks_addr_3.into(); - - let (mut conf, _) = neon_integration_test_conf(); - 
- conf.initial_balances.push(InitialBalance { - address: spender_addr_1, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_2, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_3, - amount: 1_000_000_000_000, - }); - - // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(EpochList::new(&[ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: 10_002, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 10_002, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ])); - conf.burnchain.pox_2_activation = Some(10_003); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let mut btcd_controller = BitcoinCoreController::from_stx_config(&conf); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - // something just over the limit of the expression depth - let exceeds_repeat_factor = 32; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - - let tx_exceeds = make_contract_publish( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-exceeds", - &tx_exceeds_body, - ); - let tx_exceeds_txid = 
StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) - .unwrap() - .txid(); - - let high_repeat_factor = 70; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - - let tx_high = make_contract_publish( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-high", - &tx_high_body, - ); - let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) - .unwrap() - .txid(); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // Give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // First block wakes up the run loop. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Second block will hold our VRF registration. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Third block will be the first mined Stacks block. 
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_exceeds); - assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; - - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } - - let tip_info = get_chain_info(&conf); - - // blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); - // no blocks considered problematic - assert!(all_new_files.is_empty()); - - // one block contained tx_exceeds - let blocks = test_observer::get_blocks(); - let mut found = false; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) 
= &parsed.payload { - if parsed.txid() == tx_exceeds_txid { - found = true; - break; - } - } - } - } - - assert!(found); - - let tip = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - tip - }; - - btc_regtest_controller.build_next_block(1); - - // wait for runloop to advance - wait_for(30, || { - let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - Ok(new_tip.block_height > tip.block_height) - }) - .expect("Runloop failed to advance"); - - // add another bad tx to the mempool. - // TODO: Forcibly mine a bad transaction to simulate a bad miner? - debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_high); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; - - eprintln!("old_tip_info = {old_tip_info:?}"); - - // mine some blocks, and log problematic blocks - for _i in 0..6 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } - - let tip_info = get_chain_info(&conf); - - // at least one block was mined (hard to say how many due to the raciness between the burnchain - // downloader and this thread). 
- info!( - "tip_info.stacks_tip_height = {}, old_tip_info.stacks_tip_height = {}", - tip_info.stacks_tip_height, old_tip_info.stacks_tip_height - ); - assert!(tip_info.stacks_tip_height > old_tip_info.stacks_tip_height); - // one was problematic -- i.e. the one that included tx_high - assert_eq!(all_new_files.len(), 1); - - // tx_high got mined by the miner - let blocks = test_observer::get_blocks(); - let mut bad_block_height = None; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) = &parsed.payload { - if parsed.txid() == tx_high_txid { - bad_block_height = Some(block.get("block_height").unwrap().as_u64().unwrap()); - } - } - } - } - assert!(bad_block_height.is_some()); - let bad_block_height = bad_block_height.unwrap(); - - // follower should not process bad_block_height or higher - let new_tip_info = get_chain_info(&conf); - - eprintln!("\nBooting follower\n"); - - // verify that a follower node that boots up with this node as a bootstrap peer will process - // all of the blocks available, even if they are problematic, with the checks on. 
- let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); - - eprintln!( - "\nFollower booted on port {},{}\n", - follower_conf.node.p2p_bind, follower_conf.node.rpc_bind - ); - - wait_for(300, || { - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); - Ok( - follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height - || follower_tip_info.stacks_tip_height + 1 == bad_block_height, - ) - }) - .expect("Follower failed to advance"); - - // make sure we aren't just slow -- wait for the follower to do a few download passes - let num_download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {num_download_passes} download passes; wait for {}\n", - num_download_passes + 5 - ); - - wait_for(30, || { - eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), - num_download_passes + 5 - ); - Ok(pox_sync_comms.get_download_passes() >= num_download_passes + 5) - }) - .expect("Follower failed to perform download passes"); - - eprintln!( - "\nFollower has performed {} download passes\n", - pox_sync_comms.get_download_passes() - ); - - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); - - // follower rejects the bad block - assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height - 1); - - test_observer::clear(); - channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); -} - /// Verify that we push all boot receipts even before bootstrapping #[test] #[ignore] diff --git a/stackslib/src/net/tests/relay/epoch2x.rs 
b/stackslib/src/net/tests/relay/epoch2x.rs index e07a22d0dca..c293346e42f 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2622,8 +2622,13 @@ pub fn make_contract_tx( tx_signer.get_tx().unwrap() } -#[test] -fn test_static_problematic_tests() { +struct DeepTransactions { + pub tx_high: StacksTransaction, + pub tx_edge: StacksTransaction, + pub tx_exceeds: StacksTransaction, +} + +fn setup_deep_txs() -> DeepTransactions { let spender_sk_1 = StacksPrivateKey::random(); let spender_sk_2 = StacksPrivateKey::random(); let spender_sk_3 = StacksPrivateKey::random(); @@ -2668,24 +2673,46 @@ fn test_static_problematic_tests() { "test-high", &tx_high_body, ); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds - ) - .is_err()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high - ) - .is_err()); + DeepTransactions { + tx_high, + tx_edge, + tx_exceeds, + } +} + +#[rstest] +#[case::epoch20(StacksEpochId::Epoch20)] +#[case::epoch2_05(StacksEpochId::Epoch2_05)] +fn static_problematic_txs_pre_epoch21(#[case] epoch_id: StacksEpochId) { + let DeepTransactions { + tx_high, + tx_edge, + tx_exceeds, + } = setup_deep_txs(); + assert!(Relayer::static_check_problematic_relayed_tx(false, epoch_id, &tx_edge).is_ok()); + assert!(Relayer::static_check_problematic_relayed_tx(false, epoch_id, &tx_exceeds).is_err()); + assert!(Relayer::static_check_problematic_relayed_tx(false, epoch_id, &tx_high).is_err()); +} + +#[rstest] +#[case::epoch_21(StacksEpochId::Epoch21)] +#[case::epoch_22(StacksEpochId::Epoch22)] +#[case::epoch_23(StacksEpochId::Epoch23)] +#[case::epoch_24(StacksEpochId::Epoch24)] +#[case::epoch_25(StacksEpochId::Epoch25)] +#[case::epoch_30(StacksEpochId::Epoch30)] 
+#[case::epoch_31(StacksEpochId::Epoch31)] +#[case::epoch_32(StacksEpochId::Epoch32)] +#[case::epoch_33(StacksEpochId::Epoch33)] +fn static_problematic_txs_post_epoch21(#[case] epoch_id: StacksEpochId) { + let DeepTransactions { + tx_high, + tx_edge, + tx_exceeds, + } = setup_deep_txs(); + assert!(Relayer::static_check_problematic_relayed_tx(false, epoch_id, &tx_edge).is_err()); + assert!(Relayer::static_check_problematic_relayed_tx(false, epoch_id, &tx_exceeds).is_err()); + assert!(Relayer::static_check_problematic_relayed_tx(false, epoch_id, &tx_high).is_err()); } #[test] @@ -2740,22 +2767,11 @@ fn process_new_blocks_rejects_problematic_asts() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let bad_tx = make_contract_tx( - &privk, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - let bad_txid = bad_tx.txid(); - let bad_tx_len = { + let DeepTransactions { tx_high, .. 
} = setup_deep_txs(); + let tx_high_txid = tx_high.txid(); + let tx_high_len = { let mut bytes = vec![]; - bad_tx.consensus_serialize(&mut bytes).unwrap(); + tx_high.consensus_serialize(&mut bytes).unwrap(); bytes.len() as u64 }; @@ -2877,10 +2893,10 @@ fn process_new_blocks_rejects_problematic_asts() { block_builder, chainstate, &sortdb.index_handle(&tip.sortition_id), - vec![coinbase_tx.clone(), bad_tx.clone()], + vec![coinbase_tx.clone(), tx_high.clone()], ) { - assert_eq!(txid, bad_txid); + assert_eq!(txid, tx_high_txid); } else { panic!("Did not get Error::ProblematicTransaction"); } @@ -2904,7 +2920,7 @@ fn process_new_blocks_rejects_problematic_asts() { .unwrap(); let mut bad_block = bad_block.0; - bad_block.txs.push(bad_tx.clone()); + bad_block.txs.push(tx_high.clone()); let txid_vecs: Vec<_> = bad_block .txs @@ -2935,7 +2951,7 @@ fn process_new_blocks_rejects_problematic_asts() { // miner should fail with just the bad tx, since it's problematic let mblock_err = microblock_builder - .mine_next_microblock_from_txs(vec![(bad_tx.clone(), bad_tx_len)], &mblock_privk) + .mine_next_microblock_from_txs(vec![(tx_high.clone(), tx_high_len)], &mblock_privk) .unwrap_err(); if let ChainstateError::NoTransactionsToMine = mblock_err { } else { @@ -2952,14 +2968,14 @@ fn process_new_blocks_rejects_problematic_asts() { let mut bad_mblock = microblock_builder .mine_next_microblock_from_txs( - vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)], + vec![(token_transfer, tt_len), (tx_high.clone(), tx_high_len)], &mblock_privk, ) .unwrap(); // miner shouldn't include the bad tx, since it's problematic assert_eq!(bad_mblock.txs.len(), 1); - bad_mblock.txs.push(bad_tx.clone()); + bad_mblock.txs.push(tx_high.clone()); // force it in anyway let txid_vecs: Vec<_> = bad_mblock @@ -3022,7 +3038,7 @@ fn process_new_blocks_rejects_problematic_asts() { StacksMessage { preamble, relayers: vec![], - payload: StacksMessageType::Transaction(bad_tx.clone()), + payload: 
StacksMessageType::Transaction(tx_high.clone()), }, ]; let mut unsolicited = HashMap::new(); From a6428c43e3854aeb4e49944337a94483c621b6e5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 16 Sep 2025 11:51:01 -0700 Subject: [PATCH 09/56] Add ExpressionStackDepthTooDeep and VaryExpressionStackDepthTooDeep to rejectable() Signed-off-by: Jacinta Ferrant --- clarity-types/src/errors/ast.rs | 7 ++++++- stackslib/src/chainstate/stacks/db/transactions.rs | 14 -------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/clarity-types/src/errors/ast.rs b/clarity-types/src/errors/ast.rs index a18e6242aa5..f2b774bd1b7 100644 --- a/clarity-types/src/errors/ast.rs +++ b/clarity-types/src/errors/ast.rs @@ -114,7 +114,12 @@ impl ParseError { } pub fn rejectable(&self) -> bool { - matches!(*self.err, ParseErrors::InterpreterFailure) + matches!( + *self.err, + ParseErrors::InterpreterFailure + | ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep + ) } pub fn has_pre_expression(&self) -> bool { diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index bbaa917ede5..270f61c8425 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -17,7 +17,6 @@ use std::collections::{HashMap, HashSet}; use clarity::vm::analysis::types::ContractAnalysis; -use clarity::vm::ast::errors::ParseErrors; use clarity::vm::clarity::TransactionConnection; use clarity::vm::contexts::{AssetMap, AssetMapEntry, Environment}; use clarity::vm::costs::cost_functions::ClarityCostFunction; @@ -1288,19 +1287,6 @@ impl StacksChainState { )); } other_error => { - // a [Vary]ExpressionDepthTooDeep error in this situation - // invalidates the block, since this should have prevented the - // block from getting relayed in the first place - if let clarity_error::Parse(ref parse_error) = &other_error { - match *parse_error.err { - 
ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); - return Err(Error::ClarityError(other_error)); - } - _ => {} - } - } if let clarity_error::Parse(err) = &other_error { if err.rejectable() { info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); From 7ea4e06b8efe0c9355c9cda16faf6197705bb298 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 16 Sep 2025 13:44:29 -0700 Subject: [PATCH 10/56] Fix bad merge Signed-off-by: Jacinta Ferrant --- clarity/src/vm/tests/variables.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index ba9b7f878ce..475a9692f92 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -1119,7 +1119,6 @@ fn test_block_time( version, contract, None, - ASTRules::PrecheckSize, ); let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); @@ -1167,7 +1166,6 @@ fn test_block_time_in_expressions() { version, contract, None, - ASTRules::PrecheckSize, ); assert!(result.is_ok()); From 40802bb740374f375b2dc55803b72024e7afb2de Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 17 Sep 2025 13:22:21 +0200 Subject: [PATCH 11/56] added block simulate endpoint --- stackslib/src/net/api/blocksimulate.rs | 343 +++++++++++++++++++++++++ stackslib/src/net/api/mod.rs | 2 + 2 files changed, 345 insertions(+) create mode 100644 stackslib/src/net/api/blocksimulate.rs diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs new file mode 100644 index 00000000000..0ec646f77f5 --- /dev/null +++ b/stackslib/src/net/api/blocksimulate.rs @@ -0,0 +1,343 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under 
the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::Value; +use regex::{Captures, Regex}; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId, TrieHash}; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; + +use crate::burnchains::Txid; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::events::TransactionOrigin; +use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; +use crate::chainstate::stacks::{Error as ChainError, StacksTransaction, TransactionPayload}; +use crate::net::http::{ + parse_bytes, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttpResponse}; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Clone)] +pub struct RPCNakamotoBlockSimulateRequestHandler { + pub block_id: Option, +} + +impl RPCNakamotoBlockSimulateRequestHandler { + pub fn new() -> Self { + Self { block_id: None } + } +} + +#[derive(Debug, PartialEq, Clone, 
Serialize, Deserialize)] +pub struct RPCSimulatedBlockTransaction { + pub txid: Txid, + pub tx_index: u32, + pub data: Option, + pub hex: String, + pub result: Value, + pub stx_burned: u128, + pub execution_cost: ExecutionCost, + pub events: Vec, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct RPCSimulatedBlock { + pub block_id: StacksBlockId, + pub block_hash: BlockHeaderHash, + pub parent_block_id: StacksBlockId, + pub consensus_hash: ConsensusHash, + pub fees: u128, + pub tx_merkle_root: Sha512Trunc256Sum, + pub state_index_root: TrieHash, + pub timestamp: u64, + pub miner_signature: MessageSignature, + pub signer_signature: Vec, + pub transactions: Vec, + pub valid: bool, +} + +/// Decode the HTTP request +impl HttpRequest for RPCNakamotoBlockSimulateRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v3/blocks/simulate/(?P[0-9a-f]{64})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/blocks/simulate/:block_id" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_id_str = captures + .name("block_id") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to block ID group".to_string()) + })? 
+ .as_str(); + + let block_id = StacksBlockId::from_hex(block_id_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + self.block_id = Some(block_id); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .block_id + .take() + .ok_or(NetError::SendError("Missing `block_id`".into()))?; + + let simulated_block_res = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let block_id = block_id.clone(); + let Some((tenure_id, parent_block_id)) = chainstate + .nakamoto_blocks_db() + .get_tenure_and_parent_block_id(&block_id)? + else { + return Err(ChainError::NoSuchBlockError); + }; + + let staging_db_path = chainstate.get_nakamoto_staging_blocks_path()?; + let db_conn = + StacksChainState::open_nakamoto_staging_blocks(&staging_db_path, false)?; + let rowid = db_conn + .conn() + .get_nakamoto_block_rowid(&block_id)? 
+ .ok_or(ChainError::NoSuchBlockError)?; + + let mut blob_fd = db_conn + .open_nakamoto_block(rowid, false) + .map_err(|e| { + let msg = format!("Failed to open Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) + .unwrap(); + + let block = NakamotoBlock::consensus_deserialize(&mut blob_fd) + .map_err(|e| { + let msg = format!("Failed to read Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) + .unwrap(); + + let burn_dbconn = match sortdb.index_handle_at_block(chainstate, &parent_block_id) { + Ok(burn_dbconn) => burn_dbconn, + Err(_) => return Err(ChainError::NoSuchBlockError), + }; + + let tenure_change = block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::TenureChange(..))); + let coinbase = block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::Coinbase(..))); + let tenure_cause = tenure_change.and_then(|tx| match &tx.payload { + TransactionPayload::TenureChange(tc) => Some(tc.cause), + _ => None, + }); + + // let (block_fees, txs_receipts) = chainstate + // .with_simulated_clarity_tx(&burn_dbconn, &parent_block_id, &block_id, |_| { + let parent_stacks_header = + NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) + .unwrap() + .unwrap(); + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &block.header.consensus_hash, + block.header.burn_spent, + tenure_change, + coinbase, + block.header.pox_treatment.len(), + None, + None, + ) + .unwrap(); + + let mut miner_tenure_info = builder + .load_ephemeral_tenure_info(chainstate, &burn_dbconn, tenure_cause) + .unwrap(); + let burn_chain_height = miner_tenure_info.burn_tip_height; + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .unwrap(); + + let mut block_fees: u128 = 0; + let mut txs_receipts = vec![]; + + for (i, tx) in block.txs.iter().enumerate() { + let tx_len = tx.tx_len(); + + let tx_result = builder.try_mine_tx_with_len( + &mut tenure_tx, + 
tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + None, + ); + let err = match tx_result { + TransactionResult::Success(tx_result) => { + txs_receipts.push(tx_result.receipt); + Ok(()) + } + _ => Err(format!("Problematic tx {i}")), + }; + if let Err(reason) = err { + panic!("Rejected block tx: {reason}"); + } + + block_fees += tx.get_tx_fee() as u128; + } + + let simulated_block = + builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); + + tenure_tx.rollback_block(); + + let block_hash = block.header.block_hash(); + + let tx_merkle_root = block.header.tx_merkle_root.clone(); + + let mut simulated_block = RPCSimulatedBlock { + block_id, + block_hash, + parent_block_id, + consensus_hash: tenure_id, + fees: block_fees, + tx_merkle_root: block.header.tx_merkle_root, + state_index_root: block.header.state_index_root, + timestamp: block.header.timestamp, + miner_signature: block.header.miner_signature, + signer_signature: block.header.signer_signature, + transactions: vec![], + valid: block.header.state_index_root == simulated_block.header.state_index_root + && tx_merkle_root == simulated_block.header.tx_merkle_root, + }; + for receipt in txs_receipts { + let events = receipt + .events + .iter() + .enumerate() + .map(|(event_index, event)| { + event + .json_serialize(event_index, &receipt.transaction.txid(), true) + .unwrap() + }) + .collect(); + let transaction_data = match &receipt.transaction { + TransactionOrigin::Stacks(stacks) => Some(stacks.clone()), + TransactionOrigin::Burn(_) => None, + }; + let txid = receipt.transaction.txid(); + let transaction = RPCSimulatedBlockTransaction { + txid, + tx_index: receipt.tx_index, + data: transaction_data, + hex: receipt.transaction.serialize_to_dbstring(), + result: receipt.result, + stx_burned: receipt.stx_burned, + execution_cost: receipt.execution_cost, + events, + }; + simulated_block.transactions.push(transaction); + } + + Ok(simulated_block) + }); + + // start loading up the 
block + let simulated_block = match simulated_block_res { + Ok(simulated_block) => simulated_block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {block_id}\n")), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block {}: {:?}\n", &block_id, &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let body = HttpResponseContents::try_from_json(&simulated_block)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 2f46069c4e6..53f933080bd 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -17,6 +17,7 @@ use crate::net::http::Error; use crate::net::httpcore::StacksHttp; use crate::net::Error as NetError; +pub mod blocksimulate; pub mod callreadonly; pub mod fastcallreadonly; pub mod get_tenures_fork_info; @@ -74,6 +75,7 @@ impl StacksHttp { /// Register all RPC methods. /// Put your new RPC method handlers here. 
pub fn register_rpc_methods(&mut self) { + self.register_rpc_endpoint(blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new()); self.register_rpc_endpoint(callreadonly::RPCCallReadOnlyRequestHandler::new( self.maximum_call_argument_size, self.read_only_call_limit.clone(), From 71c78d641a41a6a6a20104b90d08a192c98a642b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 17 Sep 2025 15:57:49 +0200 Subject: [PATCH 12/56] added tests --- stackslib/src/net/api/blocksimulate.rs | 29 +++++++++++++++++++++++--- stackslib/src/net/api/tests/mod.rs | 1 + 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 0ec646f77f5..25c20ef4a4e 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -19,6 +19,7 @@ use clarity::vm::Value; use regex::{Captures, Regex}; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId, TrieHash}; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; @@ -34,7 +35,7 @@ use crate::net::http::{ HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, }; use crate::net::httpcore::{RPCRequestHandler, StacksHttpResponse}; -use crate::net::{Error as NetError, StacksNodeState}; +use crate::net::{Error as NetError, StacksHttpRequest, StacksNodeState}; #[derive(Clone)] pub struct RPCNakamotoBlockSimulateRequestHandler { @@ -191,8 +192,6 @@ impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { _ => None, }); - // let (block_fees, txs_receipts) = chainstate - // .with_simulated_clarity_tx(&burn_dbconn, &parent_block_id, &block_id, |_| { let parent_stacks_header = NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) .unwrap() @@ -269,6 +268,7 @@ impl 
RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { valid: block.header.state_index_root == simulated_block.header.state_index_root && tx_merkle_root == simulated_block.header.tx_merkle_root, }; + for receipt in txs_receipts { let events = receipt .events @@ -328,6 +328,19 @@ impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { } } +impl StacksHttpRequest { + /// Make a new block_simulate request to this endpoint + pub fn new_block_simulate(host: PeerHost, block_id: &StacksBlockId) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/blocks/simulate/{block_id}"), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + /// Decode the HTTP response impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { /// Decode this response from a byte stream. This is called by the client to decode this @@ -341,3 +354,13 @@ impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { Ok(HttpResponsePayload::Bytes(bytes)) } } + +impl StacksHttpResponse { + pub fn decode_simulated_block(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let simulated_block: RPCSimulatedBlock = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(simulated_block) + } +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index c7b6285665a..533422d86a5 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -59,6 +59,7 @@ use crate::net::{ UrlString, }; +mod blocksimulate; mod callreadonly; mod fastcallreadonly; mod get_tenures_fork_info; From 863bafaa9e75f8b47fd90734b47663c57274fc1b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 18 Sep 2025 08:00:39 +0200 Subject: [PATCH 13/56] added blocksimulate tests --- 
stackslib/src/net/api/tests/blocksimulate.rs | 107 +++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 stackslib/src/net/api/tests/blocksimulate.rs diff --git a/stackslib/src/net/api/tests/blocksimulate.rs b/stackslib/src/net/api/tests/blocksimulate.rs new file mode 100644 index 00000000000..134c2d5f20f --- /dev/null +++ b/stackslib/src/net/api/tests/blocksimulate.rs @@ -0,0 +1,107 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; + +use crate::chainstate::stacks::StacksBlock; +use crate::net::api::blocksimulate; +use crate::net::api::tests::TestRPC; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{StacksHttp, StacksHttpRequest}; +use crate::net::test::TestEventObserver; +use crate::net::ProtocolFamily; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_block_simulate(addr.into(), &StacksBlockId([0x01; 32])); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + + let mut handler = blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + assert_eq!(handler.block_id, Some(StacksBlockId([0x01; 32]))); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_observer = TestEventObserver::new(); + let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + + let nakamoto_consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing, non-empty Nakamoto block + let request = + StacksHttpRequest::new_block_simulate(addr.clone().into(), &rpc_test.canonical_tip); + requests.push(request); + + // query non-existent block + let 
request = + StacksHttpRequest::new_block_simulate(addr.clone().into(), &StacksBlockId([0x01; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the Nakamoto tip + let response = responses.remove(0); + println!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_simulated_block().unwrap(); + + let tip_block = test_observer.get_blocks().last().unwrap().clone(); + + assert_eq!(resp.consensus_hash, nakamoto_consensus_hash); + assert_eq!(resp.consensus_hash, tip_block.metadata.consensus_hash); + + assert_eq!(resp.transactions.len(), tip_block.receipts.len()); + + // got a failure + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 839aaaff958f6d6d70f5411f1df4c5e41f45c1a5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 18 Sep 2025 09:10:43 +0200 Subject: [PATCH 14/56] fixed api --- stackslib/src/net/api/blocksimulate.rs | 13 +++++----- stackslib/src/net/api/tests/blocksimulate.rs | 27 +++++++++++++++++--- 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 25c20ef4a4e..572117f665a 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -17,7 +17,7 @@ use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::Value; use regex::{Captures, Regex}; -use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId, TrieHash}; use stacks_common::types::net::PeerHost; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -31,7 +31,7 @@ use 
crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; use crate::chainstate::stacks::{Error as ChainError, StacksTransaction, TransactionPayload}; use crate::net::http::{ - parse_bytes, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, }; use crate::net::httpcore::{RPCRequestHandler, StacksHttpResponse}; @@ -73,7 +73,7 @@ pub struct RPCSimulatedBlock { pub miner_signature: MessageSignature, pub signer_signature: Vec, pub transactions: Vec, - pub valid: bool, + pub valid_merkle_root: bool, } /// Decode the HTTP request @@ -265,8 +265,7 @@ impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { miner_signature: block.header.miner_signature, signer_signature: block.header.signer_signature, transactions: vec![], - valid: block.header.state_index_root == simulated_block.header.state_index_root - && tx_merkle_root == simulated_block.header.tx_merkle_root, + valid_merkle_root: tx_merkle_root == simulated_block.header.tx_merkle_root, }; for receipt in txs_receipts { @@ -350,8 +349,8 @@ impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; - Ok(HttpResponsePayload::Bytes(bytes)) + let rpc_simulated_block: RPCSimulatedBlock = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(rpc_simulated_block)?) 
} } diff --git a/stackslib/src/net/api/tests/blocksimulate.rs b/stackslib/src/net/api/tests/blocksimulate.rs index 134c2d5f20f..b4fe4d360d4 100644 --- a/stackslib/src/net/api/tests/blocksimulate.rs +++ b/stackslib/src/net/api/tests/blocksimulate.rs @@ -16,9 +16,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::types::chainstate::StacksBlockId; -use crate::chainstate::stacks::StacksBlock; use crate::net::api::blocksimulate; use crate::net::api::tests::TestRPC; use crate::net::connection::ConnectionOptions; @@ -81,7 +80,8 @@ fn test_try_make_response() { // got the Nakamoto tip let response = responses.remove(0); - println!( + + debug!( "Response:\n{}\n", std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() ); @@ -93,8 +93,29 @@ fn test_try_make_response() { assert_eq!(resp.consensus_hash, nakamoto_consensus_hash); assert_eq!(resp.consensus_hash, tip_block.metadata.consensus_hash); + assert_eq!(resp.block_hash, tip_block.block.block_hash); + assert_eq!(resp.block_id, tip_block.metadata.index_block_hash()); + assert_eq!(resp.parent_block_id, tip_block.parent); + + assert!(resp.valid_merkle_root); + assert_eq!(resp.transactions.len(), tip_block.receipts.len()); + for tx_index in 0..resp.transactions.len() { + assert_eq!( + resp.transactions[tx_index].txid, + tip_block.receipts[tx_index].transaction.txid() + ); + assert_eq!( + resp.transactions[tx_index].events.len(), + tip_block.receipts[tx_index].events.len() + ); + assert_eq!( + resp.transactions[tx_index].result, + tip_block.receipts[tx_index].result + ); + } + // got a failure let response = responses.remove(0); debug!( From c87a6089dc8ff3c5ad9f79fe09c5698c746f3c01 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 18 Sep 2025 15:45:57 +0200 Subject: [PATCH 15/56] chore: remove mutation testing script from contrib folder, #6275 --- contrib/tools/local-mutation-testing.sh | 87 ------------------------- 1 
file changed, 87 deletions(-) delete mode 100755 contrib/tools/local-mutation-testing.sh diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh deleted file mode 100755 index 11da6810e54..00000000000 --- a/contrib/tools/local-mutation-testing.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Install cargo-mutants -cargo install --version 24.7.1 cargo-mutants --locked - -# Create diff file between current branch and develop branch -git diff origin/develop...HEAD > git.diff - -# Remove git diff files about removed/renamed files -awk ' - /^diff --git/ { - diff_line = $0 - getline - if ($0 !~ /^(deleted file mode|similarity index)/) { - print diff_line - print - } - } - !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} -' git.diff > processed.diff - -# Extract mutants based on the processed diff -cargo mutants --in-diff processed.diff --list > all_mutants.txt - -# Create a directory for organizing mutants -mkdir -p mutants_by_package - -# Organize mutants into files based on their main folder -while IFS= read -r line; do - package=$(echo "$line" | cut -d'/' -f1) - - case $package in - "stackslib") - echo "$line" >> "mutants_by_package/stackslib.txt" - ;; - "testnet") - echo "$line" >> "mutants_by_package/stacks-node.txt" - ;; - "stacks-signer") - echo "$line" >> "mutants_by_package/stacks-signer.txt" - ;; - *) - echo "$line" >> "mutants_by_package/small-packages.txt" - ;; - esac -done < all_mutants.txt - -# Function to run mutants for a package -run_mutants() { - local package=$1 - local threshold=$2 - local output_dir=$3 - local mutant_file="mutants_by_package/${package}.txt" - - if [ ! 
-f "$mutant_file" ]; then - echo "No mutants found for $package" - return 0 - fi - - local regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "$mutant_file" | paste -sd'|' -) - local mutant_count=$(cargo mutants -F "$regex_pattern" -E ": replace .{1,2} with .{1,2} in " --list | wc -l) - - if [ "$mutant_count" -gt "$threshold" ]; then - echo "Running mutants for $package ($mutant_count mutants)" - RUST_BACKTRACE=1 BITCOIND_TEST=1 \ - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ - -F "$regex_pattern" \ - -E ": replace .{1,2} with .{1,2} in " \ - --output "$output_dir" \ - --test-tool=nextest \ - --package "$package" \ - -- --all-targets --test-threads 1 || true - - echo $? > "${output_dir}/exit_code.txt" - else - echo "Skipping $package, only $mutant_count mutants (threshold: $threshold)" - fi - - return 0 -} - -# Run mutants for each wanted package -run_mutants "stacks-signer" 500 "./stacks-signer_mutants" || true -run_mutants "stacks-node" 540 "./stacks-node_mutants" || true -run_mutants "stackslib" 72 "./stackslib_mutants" || true From dfb07004a46ae292bd27da984799d94f3b836372 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 18 Sep 2025 15:46:25 +0200 Subject: [PATCH 16/56] chore: remove mutation testing doc from docs folder, #6275 --- docs/mutation-testing.md | 146 --------------------------------------- 1 file changed, 146 deletions(-) delete mode 100644 docs/mutation-testing.md diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md deleted file mode 100644 index 85fcd89a7f6..00000000000 --- a/docs/mutation-testing.md +++ /dev/null @@ -1,146 +0,0 @@ -# Mutation Testing - -This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation it provides by timing out after 6 hours. -[Here is the script](../contrib/tools/local-mutation-testing.sh) to run the tests locally by running the mutants created by the changes between `HEAD` and develop. 
-It does automatically all the steps explained below. - -From the root level of the stacks-core repository run -```sh -./contrib/tools/local-mutation-testing.sh -``` - -## Prerequirements - -Install the cargo mutants library -```sh -cargo install --version 24.7.1 cargo-mutants --locked -``` - - -## Steps -1. Be on source branch you would use for the PR. -2. Create diff file comparing this branch with the `develop` branch - ```sh - git diff origin/develop..HEAD > git.diff - ``` -3. Clean up the diff file and create auxiliary files - ```sh - awk ' - /^diff --git/ { - diff_line = $0 - getline - if ($0 !~ /^(deleted file mode|similarity index)/) { - print diff_line - print - } - } - !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} - ' git.diff > processed.diff - - # Extract mutants based on the processed diff - cargo mutants --in-diff processed.diff --list > all_mutants.txt - - # Create a directory for organizing mutants - mkdir -p mutants_by_package - - # Organize mutants into files based on their main folder - while IFS= read -r line; do - package=$(echo "$line" | cut -d'/' -f1) - - case $package in - "stackslib") - echo "$line" >> "mutants_by_package/stackslib.txt" - ;; - "testnet") - echo "$line" >> "mutants_by_package/stacks-node.txt" - ;; - "stacks-signer") - echo "$line" >> "mutants_by_package/stacks-signer.txt" - ;; - *) - echo "$line" >> "mutants_by_package/small-packages.txt" - ;; - esac - done < all_mutants.txt - ``` -4. Based on the package required to run the mutants for - a. Stackslib package - ```sh - regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) - - RUST_BACKTRACE=1 BITCOIND_TEST=1 \ - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ - -F "$regex_pattern" \ - -E ": replace .{1,2} with .{1,2} in " \ - --output "./stackslib_mutants" \ - --test-tool=nextest \ - -- --all-targets --test-threads 1 - ``` - b. 
Stacks-node (testnet) package - ```sh - regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) - - RUST_BACKTRACE=1 BITCOIND_TEST=1 \ - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ - -F "$regex_pattern" \ - -E ": replace .{1,2} with .{1,2} in " \ - --output "./testnet_mutants" \ - --test-tool=nextest \ - -- --all-targets --test-threads 1 - ``` - c. Stacks-signer - ```sh - regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) - - RUST_BACKTRACE=1 BITCOIND_TEST=1 \ - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ - -F "$regex_pattern" \ - -E ": replace .{1,2} with .{1,2} in " \ - --output "./stacks-signer_mutants" \ - --test-tool=nextest \ - -- --all-targets --test-threads 1 - ``` - d. All other packages combined - ```sh - regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/small-packages.txt" | paste -sd'|' -) - - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ - -F "$regex_pattern" \ - -E ": replace .{1,2} with .{1,2} in " \ - --output "./small-packages_mutants" \ - --test-tool=nextest \ - -- --all-targets --test-threads 1 - ``` - -## How to run one specific mutant to test it - -Example of output which had a missing mutant -```sh -MISSED stacks-signer/src/runloop.rs:424:9: replace >::run_one_pass -> Option> with None in 3.0s build + 9.3s test -``` - -Example of fix for it -```sh -RUST_BACKTRACE=1 BITCOIND_TEST=1 \ -cargo mutants -vV \ - -F "replace process_stackerdb_event" \ - -E ": replace >::run_one_pass -> Option> with None in " \ - --test-tool=nextest \ - -- \ - --run-ignored all \ - --fail-fast \ - --test-threads 1 -``` - -General command to run -```sh -RUST_BACKTRACE=1 BITCOIND_TEST=1 \ -cargo mutants -vV \ - -F "replace process_stackerdb_event" \ - -E ": replace [modify this] with [modify this] in " \ - --test-tool=nextest \ - -- \ - --run-ignored all \ - --fail-fast \ - --test-threads 1 -``` From 
b13a5853bdc5b9a4ff11c28f0f27133ce8cc12db Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 18 Sep 2025 15:49:27 +0200 Subject: [PATCH 17/56] chore: update ci-workflow doc removing mutation testing topic, #6275 --- docs/ci-workflow.md | 98 --------------------------------------------- 1 file changed, 98 deletions(-) diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md index 0b1ed2b170d..a94f0a74ddb 100644 --- a/docs/ci-workflow.md +++ b/docs/ci-workflow.md @@ -5,7 +5,6 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor - Verifying code is formatted correctly - Integration tests - Unit tests -- [Mutation tests](https://en.wikipedia.org/wiki/Mutation_testing) - Creating releases - Building binary archives and calculating checksums - Publishing Docker images @@ -128,100 +127,3 @@ check-tests: jobs: ${{ toJson(needs) }} summary_print: "true" ``` - -## Mutation Testing - -When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR. -It checks the new and altered functions through mutation testing. -Mutation testing involves making small changes (mutations) to the code to check if the tests can detect these changes. - -The mutations are run with or without a [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs). -The matrix is used when there is a large number of mutations to run ([check doc specific cases](https://github.com/stacks-network/actions/blob/main/stacks-core/mutation-testing/check-packages-and-shards/README.md#outputs)). -We utilize a matrix strategy with shards to enable parallel execution in GitHub Actions. -This approach allows for the concurrent execution of multiple jobs across various runners. 
-The total workload is divided across all shards, effectively reducing the overall duration of a workflow because the time taken is approximately the total time divided by the number of shards (+ initial build & test time). -This is particularly advantageous for large packages that have significant build and test times, as it enhances efficiency and speeds up the process. - -Since mutation testing is directly correlated to the written tests, there are slower packages (due to the quantity or time it takes to run the tests) like `stackslib` or `stacks-node`. -These mutations are run separately from the others, with one or more parallel jobs, depending on the amount of mutations found. - -Once all the jobs have finished testing mutants, the last job collects all the tested mutations from the previous jobs, combines them and outputs them to the `Summary` section of the workflow, at the bottom of the page. -There, you can find all mutants on categories, with links to the function they tested, and a short description on how to fix the issue. -The PR should only be approved/merged after all the mutants tested are in the `Caught` category. - -### Time required to run the workflow based on mutants outcome and packages' size - -- Small packages typically completed in under 30 minutes, aided by the use of shards. -- Large packages like stackslib and stacks-node initially required about 20-25 minutes for build and test processes. - - Each "missed" and "caught" mutant took approximately 15 minutes. Using shards, this meant about 50-55 minutes for processing around 32 mutants (10-16 functions modified). Every additional 8 mutants added another 15 minutes to the runtime. - - "Unviable" mutants, which are functions lacking a Default implementation for their returned struct type, took less than a minute each. - - "Timeout" mutants typically required more time. 
However, these should be marked to be skipped (by adding a skip flag to their header) since they indicate functions unable to proceed in their test workflow with mutated values, as opposed to the original implementations. - -File: - -- [PR Differences Mutants](../.github/workflows/pr-differences-mutants.yml) - -### Mutant Outcomes - -- caught — A test failed with this mutant applied. - This is a good sign about test coverage. - -- missed — No test failed with this mutation applied, which seems to indicate a gap in test coverage. - Or, it may be that the mutant is undistinguishable from the correct code. - In any case, you may wish to add a better test. - -- unviable — The attempted mutation doesn't compile. - This is inconclusive about test coverage, since the function's return structure may not implement `Default::default()` (one of the mutations applied), hence causing the compile to fail. - It is recommended to add `Default` implementation for the return structures of these functions, only mark that the function should be skipped as a last resort. - -- timeout — The mutation caused the test suite to run for a long time, until it was eventually killed. - You might want to investigate the cause and only mark the function to be skipped if necessary. - -### Skipping Mutations - -Some functions may be inherently hard to cover with tests, for example if: - -- Generated mutants cause tests to hang. -- You've chosen to test the functionality by human inspection or some higher-level integration tests. -- The function has side effects or performance characteristics that are hard to test. -- You've decided that the function is not important to test. 
- -To mark functions as skipped, so they are not mutated: - -- Add a Cargo dependency of the [mutants](https://crates.io/crates/mutants) crate, version `0.0.3` or later (this must be a regular `dependency`, not a `dev-dependency`, because the annotation will be on non-test code) and mark functions with `#[mutants::skip]`, or - -- You can avoid adding the dependency by using the slightly longer `#[cfg_attr(test, mutants::skip)]`. - -### Example - -```rust -use std::time::{Duration, Instant}; - -/// Returns true if the program should stop -#[cfg_attr(test, mutants::skip)] // Returning false would cause a hang -fn should_stop() -> bool { - true -} - -pub fn controlled_loop() { - let start = Instant::now(); - for i in 0.. { - println!("{}", i); - if should_stop() { - break; - } - if start.elapsed() > Duration::from_secs(60 * 5) { - panic!("timed out"); - } - } -} - -mod test { - #[test] - fn controlled_loop_terminates() { - super::controlled_loop() - } -} -``` - ---- From edb68ea22d7b2a9925850bd75681f2ed2ce1c39d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 18 Sep 2025 12:08:59 -0700 Subject: [PATCH 18/56] CRC: remove unnecessary table_exists and update inline formatting Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/burn/db/sortdb.rs | 18 +----------------- stackslib/src/chainstate/nakamoto/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index d91c84934b8..d6e74570521 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -10953,21 +10953,6 @@ pub mod tests { .unwrap()); } - fn table_exists(path: &str, name: &str) -> bool { - let conn = - Connection::open_with_flags(path, rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap(); - conn.busy_timeout(std::time::Duration::from_secs(1)) - .unwrap(); - - conn.query_row( - "SELECT count(*) FROM sqlite_master WHERE type='table' AND 
name=?1", - [name], - |row| row.get::<_, i64>(0), - ) - .unwrap() - > 0 - } - #[test] fn schema_10_drops_ast_rules() { let tmp = std::env::temp_dir().join(function_name!()); @@ -10986,7 +10971,6 @@ pub mod tests { epochs, ) .unwrap(); - let path = db.conn().path().unwrap().to_string(); - assert!(!table_exists(&path, "ast_rule_heights")); + assert!(!table_exists(db.conn(), "ast_rule_heights").unwrap()); } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b8a268dc6ef..a8bb3d87b02 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4740,8 +4740,8 @@ impl NakamotoChainState { let (block_fees, txs_receipts) = match StacksChainState::process_block_transactions(&mut clarity_tx, &block.txs, 0) { Err(e) => { - let msg = format!("Invalid Stacks block {}: {:?}", &block_hash, &e); - warn!("{}", &msg); + let msg = format!("Invalid Stacks block {block_hash}: {e:?}"); + warn!("{msg}"); clarity_tx.rollback_block(); return Err(ChainstateError::InvalidStacksBlock(msg)); From cff34c2bf98ac3ee15ab15da995b38c20e23ae8c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 18 Sep 2025 20:59:04 -0700 Subject: [PATCH 19/56] Fix bad merge Signed-off-by: Jacinta Ferrant --- clarity/src/vm/tests/variables.rs | 1 - stackslib/src/clarity_vm/tests/ephemeral.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 33f6f827152..7ce50958465 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -1270,7 +1270,6 @@ fn test_current_contract( version, contract, None, - ASTRules::PrecheckSize, ); let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); diff --git a/stackslib/src/clarity_vm/tests/ephemeral.rs b/stackslib/src/clarity_vm/tests/ephemeral.rs index 384656321d6..73b5897212c 100644 --- a/stackslib/src/clarity_vm/tests/ephemeral.rs +++ 
b/stackslib/src/clarity_vm/tests/ephemeral.rs @@ -15,7 +15,6 @@ use std::fs; -use clarity::vm::ast::ASTRules; use clarity::vm::types::StacksAddressExtensions; use clarity::vm::{ClarityName, ContractName}; use proptest::prelude::*; @@ -364,7 +363,6 @@ fn replay_block( tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); let err = match &tx_result { From 14d4a3175ef636445344c1b8120da5257c220e87 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 13:08:02 +0200 Subject: [PATCH 20/56] added authentication --- stackslib/src/net/api/blocksimulate.rs | 18 ++++++++-- stackslib/src/net/api/mod.rs | 4 ++- stackslib/src/net/api/tests/blocksimulate.rs | 37 +++++++++++++++++--- 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 572117f665a..0654173e83c 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -40,11 +40,15 @@ use crate::net::{Error as NetError, StacksHttpRequest, StacksNodeState}; #[derive(Clone)] pub struct RPCNakamotoBlockSimulateRequestHandler { pub block_id: Option, + pub auth: Option, } impl RPCNakamotoBlockSimulateRequestHandler { - pub fn new() -> Self { - Self { block_id: None } + pub fn new(auth: Option) -> Self { + Self { + block_id: None, + auth, + } } } @@ -99,6 +103,16 @@ impl HttpRequest for RPCNakamotoBlockSimulateRequestHandler { query: Option<&str>, _body: &[u8], ) -> Result { + // If no authorization is set, then the block simulation endpoint is not enabled + let Some(password) = &self.auth else { + return Err(Error::Http(400, "Bad Request.".into())); + }; + let Some(auth_header) = preamble.headers.get("authorization") else { + return Err(Error::Http(401, "Unauthorized".into())); + }; + if auth_header != password { + return Err(Error::Http(401, "Unauthorized".into())); + } if preamble.get_content_length() != 0 { return Err(Error::DecodeError( "Invalid Http 
request: expected 0-length body".to_string(), diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 53f933080bd..0223be3115f 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -75,7 +75,9 @@ impl StacksHttp { /// Register all RPC methods. /// Put your new RPC method handlers here. pub fn register_rpc_methods(&mut self) { - self.register_rpc_endpoint(blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new()); + self.register_rpc_endpoint(blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new( + self.auth_token.clone(), + )); self.register_rpc_endpoint(callreadonly::RPCCallReadOnlyRequestHandler::new( self.maximum_call_argument_size, self.read_only_call_limit.clone(), diff --git a/stackslib/src/net/api/tests/blocksimulate.rs b/stackslib/src/net/api/tests/blocksimulate.rs index b4fe4d360d4..6fb8eb2d671 100644 --- a/stackslib/src/net/api/tests/blocksimulate.rs +++ b/stackslib/src/net/api/tests/blocksimulate.rs @@ -30,7 +30,11 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let request = StacksHttpRequest::new_block_simulate(addr.into(), &StacksBlockId([0x01; 32])); + let mut request = + StacksHttpRequest::new_block_simulate(addr.into(), &StacksBlockId([0x01; 32])); + + // add the authorization header + request.add_header("authorization".into(), "password".into()); let bytes = request.try_serialize().unwrap(); @@ -38,7 +42,9 @@ fn test_try_parse_request() { let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new(); + let mut handler = + blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new(Some("password".into())); + let mut parsed_request = http .handle_try_parse_request( &mut handler, @@ -50,6 +56,8 @@ fn test_try_parse_request() { // parsed request consumes headers that 
would not be in a constructed request parsed_request.clear_headers(); + parsed_request.add_header("authorization".into(), "password".into()); + let (preamble, contents) = parsed_request.destruct(); assert_eq!(&preamble, request.preamble()); @@ -67,13 +75,22 @@ fn test_try_make_response() { let mut requests = vec![]; // query existing, non-empty Nakamoto block - let request = + let mut request = StacksHttpRequest::new_block_simulate(addr.clone().into(), &rpc_test.canonical_tip); + // add the authorization header + request.add_header("authorization".into(), "password".into()); requests.push(request); // query non-existent block - let request = + let mut request = StacksHttpRequest::new_block_simulate(addr.clone().into(), &StacksBlockId([0x01; 32])); + // add the authorization header + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // unauthenticated request + let request = + StacksHttpRequest::new_block_simulate(addr.clone().into(), &StacksBlockId([0x00; 32])); requests.push(request); let mut responses = rpc_test.run(requests); @@ -116,7 +133,7 @@ fn test_try_make_response() { ); } - // got a failure + // got a failure (404) let response = responses.remove(0); debug!( "Response:\n{}\n", @@ -125,4 +142,14 @@ fn test_try_make_response() { let (preamble, body) = response.destruct(); assert_eq!(preamble.status_code, 404); + + // got another failure (401 this time) + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 401); } From 6044cdaee3c18b71addd0a983d971d233cf3474a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 14:27:50 +0200 Subject: [PATCH 21/56] added block-simulate.example.json --- .../examples/block-simulate.example.json | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 
docs/rpc/components/examples/block-simulate.example.json diff --git a/docs/rpc/components/examples/block-simulate.example.json b/docs/rpc/components/examples/block-simulate.example.json new file mode 100644 index 00000000000..7a2a20782b2 --- /dev/null +++ b/docs/rpc/components/examples/block-simulate.example.json @@ -0,0 +1,107 @@ +{ + "block_hash": "732f57eefc4dbfb015c9988d9943c47273d25fbe039220d53f311b307609c83f", + "block_id": "856f6b08f338164df7422f66337c8ce916b6b0301fcaa09de06c61cfb79e2a45", + "consensus_hash": "33dffda027e2ca3aaf278855c59a8a0b2d2dd51f", + "fees": 1, + "miner_signature": "004b2878d7639060c4d183b1338447c06ceb0ad55424856c550a4c988401fdf8cf1fe6e8c05cc0e3ff8d4383590bf700cb5dd1a8bb3269f7125f6e0704b66eade8", + "parent_block_id": "3ac36fc1acfc86ba80ea27cd26017c675f75bc07fb042814b72e74cd7d331503", + "signer_signature": [ + "00d4e08331db614d18d7b5af53cf9bc03add9c7a2dcb6f0448721de7ea98f662cf7dc43ee73e14d18dfae3d3d349ff67e0fd773a446fb8c949c93ae4676f4d34bc", + "01619c6e69bad5b43e11bae3eb4d4626e5cf19d595923b0b5d2053e8883a266b41315fdaefd1eca03c5c0580b0f7fd28053c3f34eb0a12220b61392d342f5afb0d", + "0078fa352e0e4d2d94b25d4070ae24a819f268b2260a1e4d0d867415dbdc39e2cf75e57de69375794073e22a75873a5e1ca33ed96eadd8086415e934f697b78fdb", + "00b8d9b0d0cdfabe3c65237801e714185777f60507c773fcd2a72ed00b9d4c59cb5ab96e0e8d545bd562b5ca3de6db1d3b9fccd8f41c3bfa7de3528deb1acd30d6" + ], + "state_index_root": "839b826290027e5b92de415495be7bab2eab2ad4e2f8c371a1a773ae552fedba", + "timestamp": 1758284349, + "transactions": [ + { + "data": { + "anchor_mode": "OnChainOnly", + "auth": { + "Standard": { + "Singlesig": { + "hash_mode": "P2PKH", + "key_encoding": "Compressed", + "nonce": 99, + "signature": "01e29229b386e1f69ffd91e339c878246235ec1cd4771b42a7f45e1ed108643bc9417d43dd96a02c93314ef4cf5bcbcc5642df2e1f5a177333ff983c8719d80661", + "signer": "2965a4e6e4226868fa3ae88b2b9bb9e937d77fba", + "tx_fee": 1 + } + } + }, + "chain_id": 2147483648, + "payload": { + "TokenTransfer": [ + { + 
"Standard": [ + 26, + [ + 189, + 65, + 200, + 147, + 188, + 192, + 157, + 152, + 224, + 211, + 77, + 255, + 135, + 190, + 175, + 153, + 88, + 51, + 140, + 222 + ] + ] + }, + 1, + "00000000000000000000000000000000000000000000000000000000000000000000" + ] + }, + "post_condition_mode": "Deny", + "post_conditions": [], + "version": "Testnet" + }, + "events": [ + { + "committed": true, + "event_index": 0, + "stx_transfer_event": { + "amount": "1", + "memo": "00000000000000000000000000000000000000000000000000000000000000000000", + "recipient": "ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM", + "sender": "STMPB976WGH6GT7T7BM8PAWVQ7MKFNVZQAXS4BFS" + }, + "txid": "0xf14dd7dec56405fd7dac69c3080fb569fae4c49c591f9ad0e5cf5c797add9005", + "type": "stx_transfer_event" + } + ], + "execution_cost": { + "read_count": 0, + "read_length": 0, + "runtime": 0, + "write_count": 0, + "write_length": 0 + }, + "hex": "808000000004002965a4e6e4226868fa3ae88b2b9bb9e937d77fba000000000000006300000000000000010001e29229b386e1f69ffd91e339c878246235ec1cd4771b42a7f45e1ed108643bc9417d43dd96a02c93314ef4cf5bcbcc5642df2e1f5a177333ff983c8719d8066101020000000000051abd41c893bcc09d98e0d34dff87beaf9958338cde000000000000000100000000000000000000000000000000000000000000000000000000000000000000", + "result": { + "Response": { + "committed": true, + "data": { + "Bool": true + } + } + }, + "stx_burned": 0, + "tx_index": 0, + "txid": "f14dd7dec56405fd7dac69c3080fb569fae4c49c591f9ad0e5cf5c797add9005" + } + ], + "tx_merkle_root": "a68e3c76471d9e66b71a14165c4c9a2b980c51efb5b313425cffcef7172d6080", + "valid_merkle_root": true +} \ No newline at end of file From 6f3fd5844f98707aaef38edd8190dabd7b3c15c4 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 14:29:43 +0200 Subject: [PATCH 22/56] updated CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index da077c5051d..5a725d46f08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this 
project adheres to the versioning scheme outlined in the [README.md](README.md). - Add `stackerdb_timeout_secs` to miner config for limiting duration of StackerDB HTTP requests. - When determining a global transaction replay set, the state evaluator now uses a longest-common-prefix algorithm to find a replay set in the case where a single replay set has less than 70% of signer weight. - New endpoints /v3/tenures/blocks/, /v3/tenures/blocks/hash, /v3/tenures/blocks/height allowing retrieving the list of stacks blocks from a burn block +- New authenticated endpoint /v3/blocks/simulate/{block_id} to simulate the execution of any Nakamoto block in the chain (useful for validation, replay, getting events...) - Creates epoch 3.3 and costs-4 in preparation for a hardfork to activate Clarity 4 - Adds support for new Clarity 4 builtins (not activated until epoch 3.3): - `contract-hash?` From a49ba21b695fd76e2bbff55087ea2d2f043c3216 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Fri, 19 Sep 2025 14:44:18 +0200 Subject: [PATCH 23/56] move stacks-inspect to contrib --- Cargo.lock | 16 ++++++++- Cargo.toml | 1 + contrib/stacks-inspect/Cargo.toml | 19 ++++++++++ contrib/stacks-inspect/README.md | 36 +++++++++++++++++++ .../stacks-inspect}/src/main.rs | 3 +- stackslib/Cargo.toml | 7 ---- stackslib/fuzz/Cargo.lock | 21 ----------- 7 files changed, 72 insertions(+), 31 deletions(-) create mode 100644 contrib/stacks-inspect/Cargo.toml create mode 100644 contrib/stacks-inspect/README.md rename {stackslib => contrib/stacks-inspect}/src/main.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 6c012a98d8d..56301718081 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3163,6 +3163,21 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "stacks-inspect" +version = "0.1.0" +dependencies = [ + "clarity 0.0.1", + "libstackerdb 0.0.1", + "mutants", + "rusqlite", + "serde_json", + "slog", + "stacks-common 0.0.1", + "stackslib 0.0.1", + "tikv-jemallocator", +] + [[package]] name = "stacks-node" version = 
"0.1.0" @@ -3314,7 +3329,6 @@ dependencies = [ "stdext", "stx-genesis", "tempfile", - "tikv-jemallocator", "time 0.3.41", "toml", "url", diff --git a/Cargo.toml b/Cargo.toml index 9d68c25f29a..63e1e5d9ac1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "libsigner", "stacks-signer", "stacks-node", + "contrib/stacks-inspect" ] exclude = ["contrib/tools/config-docs-generator"] diff --git a/contrib/stacks-inspect/Cargo.toml b/contrib/stacks-inspect/Cargo.toml new file mode 100644 index 00000000000..7394b0c4dbf --- /dev/null +++ b/contrib/stacks-inspect/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "stacks-inspect" +version = "0.1.0" +edition = "2024" + +[dependencies] +blockstack_lib = { package = "stackslib", path = "../../stackslib", default-features = false } +clarity = { path = "../../clarity", default-features = false } +libstackerdb = { path = "../../libstackerdb", default-features = false } +stacks-common = { path = "../../stacks-common", default-features = false } +rusqlite = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } + +[target.'cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))'.dependencies] +tikv-jemallocator = { workspace = true } + +[dev-dependencies] +mutants = "0.0.3" diff --git a/contrib/stacks-inspect/README.md b/contrib/stacks-inspect/README.md new file mode 100644 index 00000000000..b3ea3b52b95 --- /dev/null +++ b/contrib/stacks-inspect/README.md @@ -0,0 +1,36 @@ +# stacks-inspect + +A multifunction inspection CLI for Stacks chain data and networking. 
+ +Highlights: +- Decode primitives: Bitcoin headers/txs/blocks, Stacks blocks/microblocks, P2P net messages +- Chain queries: ancestors, MARF lookups, tenure info, PoX anchor evaluation +- Mining helpers: `try-mine`, `tip-mine`, sortition (anti-MEV) analysis +- Shadow chain tools: build, patch, repair, and verify shadow chainstate +- Replay: re-execute blocks and microblocks for diagnostics + +Build: +```bash +cargo build -p stacks-inspect +``` + +Basic usage: +```bash +# Show version +./target/debug/stacks-inspect --version + +# Example: decode a bitcoin header from file +./target/debug/stacks-inspect decode-bitcoin-header + +# Example: analyze anti-MEV behavior over a height range +./target/debug/stacks-inspect analyze-sortition-mev [miner advantage ...] +``` + +For detailed commands and flags, run: +```bash +./target/debug/stacks-inspect --help +``` + +Notes: +- Some commands expect mainnet data paths by default and may require specific network contexts. +- Operations that write data (e.g., shadow chain tools) are destructive—use copies of data directories when experimenting. 
diff --git a/stackslib/src/main.rs b/contrib/stacks-inspect/src/main.rs similarity index 99% rename from stackslib/src/main.rs rename to contrib/stacks-inspect/src/main.rs index c6f667213e8..bcd4306563d 100644 --- a/stackslib/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index c24880a69fe..d4bf263f27f 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -18,10 +18,6 @@ rust-version = "1.80" name = "blockstack_lib" path = "src/lib.rs" -[[bin]] -name = "stacks-inspect" -path = "src/main.rs" - [[bin]] name = "clarity-cli" path = "src/clarity_cli_main.rs" @@ -54,9 +50,6 @@ rusqlite = { workspace = true } time = "0.3.41" toml = { workspace = true } -[target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] -tikv-jemallocator = {workspace = true} - [target.'cfg(unix)'.dependencies] nix = "0.23" diff --git a/stackslib/fuzz/Cargo.lock b/stackslib/fuzz/Cargo.lock index f3f29c79007..86e0347d47a 100644 --- a/stackslib/fuzz/Cargo.lock +++ b/stackslib/fuzz/Cargo.lock @@ -1278,7 +1278,6 @@ dependencies = [ "siphasher", "slog", "stacks-common", - "tikv-jemallocator", "time", "toml", "url", @@ -1340,26 +1339,6 @@ dependencies = [ "cfg-if 1.0.3", ] -[[package]] -name = "tikv-jemalloc-sys" -version = "0.5.4+5.3.0-patched" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "tikv-jemallocator" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" -dependencies = [ - "libc", - "tikv-jemalloc-sys", -] - [[package]] name = "time" version = "0.3.43" From 9bcf5cf7270c3a63045456a82566f4724402cc8e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 15:55:50 +0200 Subject: [PATCH 24/56] added schema prototype for block-simulate --- .../schemas/block-simulate.schema.yaml | 55 +++++++++++++++++++ docs/rpc/openapi.yaml | 35 ++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 docs/rpc/components/schemas/block-simulate.schema.yaml diff --git a/docs/rpc/components/schemas/block-simulate.schema.yaml b/docs/rpc/components/schemas/block-simulate.schema.yaml new file mode 100644 index 00000000000..7b9d321c544 --- /dev/null +++ b/docs/rpc/components/schemas/block-simulate.schema.yaml @@ -0,0 +1,55 @@ +type: object +properties: + block_hash: + type: string + description: Consensus hash of the tenure + pattern: "^[0-9a-f]{40}$" + block_id: + type: integer + description: Height of the Bitcoin block + format: uint64 + consensus_hash: + type: string + description: Consensus hash of the tenure + pattern: "^[0-9a-f]{40}$" + fees: + type: integer + miner_signature: + type: string + parent_block_id: + type: string + signer_signature: + type: array + items: + type: string + state_index_root: + type: string + timestamp: + type: integer + tx_merkle_root: + type: string + valid_merkle_root: + type: boolean + transactions: + type: array + items: + type: object + properties: + data: + type: object + events: + type: array + items: + type: object + execution_cost: + type: object + hex: + type: string + result: + type: object + stx_burned: + type: integer + tx_index: + type: integer + txid: + type: string \ No newline at end of file diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index d1a4abcc852..64a048f03a6 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -159,6 +159,8 @@ components: $ref: 
./components/schemas/get-stacker-set.schema.yaml TenureBlocks: $ref: ./components/schemas/tenure-blocks.schema.yaml + BlockSimulate: + $ref: ./components/schemas/block-simulate.schema.yaml paths: /v2/transactions: @@ -2197,3 +2199,36 @@ paths: $ref: "#/components/responses/Unauthorized" "500": $ref: "#/components/responses/InternalServerError" + + /v3/blocks/simulate/{block_id}: + get: + summary: Simulate mining of a block and return its content + tags: + - Blocks + security: [] + operationId: blockSimulate + description: | + Simulate the mining of a block (no data is written in the MARF) and return its content. Requires the node's configured auth token in the `Authorization` header. + parameters: + - name: block_id + in: path + description: The block ID hash + required: true + schema: + type: string + pattern: "^[0-9a-f]{64}$" + responses: + "200": + description: Content of the simulated block + content: + application/json: + schema: + $ref: "#/components/schemas/BlockSimulate" + example: + $ref: "./components/examples/block-simulate.example.json" + "400": + $ref: "#/components/responses/BadRequest" + "404": + $ref: "#/components/responses/NotFound" + "500": + $ref: "#/components/responses/InternalServerError" From 449f25b0817870d444e4e9d2fd4b560b98e0d3e5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 16:06:33 +0200 Subject: [PATCH 25/56] added height to block simulation output --- .../components/examples/block-simulate.example.json | 1 + .../components/schemas/block-simulate.schema.yaml | 12 ++++++++++-- stackslib/src/net/api/blocksimulate.rs | 3 +++ stackslib/src/net/api/tests/blocksimulate.rs | 2 ++ 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/docs/rpc/components/examples/block-simulate.example.json b/docs/rpc/components/examples/block-simulate.example.json index 7a2a20782b2..b6c79c99f2d 100644 --- a/docs/rpc/components/examples/block-simulate.example.json +++ b/docs/rpc/components/examples/block-simulate.example.json @@ -1,6 +1,7 @@ { "block_hash": 
"732f57eefc4dbfb015c9988d9943c47273d25fbe039220d53f311b307609c83f", "block_id": "856f6b08f338164df7422f66337c8ce916b6b0301fcaa09de06c61cfb79e2a45", + "block_height": 123, "consensus_hash": "33dffda027e2ca3aaf278855c59a8a0b2d2dd51f", "fees": 1, "miner_signature": "004b2878d7639060c4d183b1338447c06ceb0ad55424856c550a4c988401fdf8cf1fe6e8c05cc0e3ff8d4383590bf700cb5dd1a8bb3269f7125f6e0704b66eade8", diff --git a/docs/rpc/components/schemas/block-simulate.schema.yaml b/docs/rpc/components/schemas/block-simulate.schema.yaml index 7b9d321c544..c69206bf686 100644 --- a/docs/rpc/components/schemas/block-simulate.schema.yaml +++ b/docs/rpc/components/schemas/block-simulate.schema.yaml @@ -2,9 +2,13 @@ type: object properties: block_hash: type: string - description: Consensus hash of the tenure - pattern: "^[0-9a-f]{40}$" + description: Hash of the block + pattern: "^[0-9a-f]{64}$" block_id: + type: string + description: Block ID (index block hash) + pattern: "^[0-9a-f]{64}$" + block_height: type: integer description: Height of the Bitcoin block format: uint64 @@ -14,16 +18,20 @@ properties: pattern: "^[0-9a-f]{40}$" fees: type: integer + description: total fees for the block miner_signature: type: string parent_block_id: type: string + description: Parent Block ID (index block hash) + pattern: "^[0-9a-f]{64}$" signer_signature: type: array items: type: string state_index_root: type: string + pattern: "^[0-9a-f]{64}$" timestamp: type: integer tx_merkle_root: diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 0654173e83c..c8a5faf6afc 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -68,6 +68,7 @@ pub struct RPCSimulatedBlockTransaction { pub struct RPCSimulatedBlock { pub block_id: StacksBlockId, pub block_hash: BlockHeaderHash, + pub block_height: u64, pub parent_block_id: StacksBlockId, pub consensus_hash: ConsensusHash, pub fees: u128, @@ -264,12 +265,14 @@ impl 
RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { tenure_tx.rollback_block(); let block_hash = block.header.block_hash(); + let block_height = block.header.chain_length; let tx_merkle_root = block.header.tx_merkle_root.clone(); let mut simulated_block = RPCSimulatedBlock { block_id, block_hash, + block_height, parent_block_id, consensus_hash: tenure_id, fees: block_fees, diff --git a/stackslib/src/net/api/tests/blocksimulate.rs b/stackslib/src/net/api/tests/blocksimulate.rs index 6fb8eb2d671..1d5d153b1ae 100644 --- a/stackslib/src/net/api/tests/blocksimulate.rs +++ b/stackslib/src/net/api/tests/blocksimulate.rs @@ -114,6 +114,8 @@ fn test_try_make_response() { assert_eq!(resp.block_id, tip_block.metadata.index_block_hash()); assert_eq!(resp.parent_block_id, tip_block.parent); + assert_eq!(resp.block_height, tip_block.metadata.stacks_block_height); + assert!(resp.valid_merkle_root); assert_eq!(resp.transactions.len(), tip_block.receipts.len()); From 20bb1df7eb930b8fbbef3ea904c3322c8e8e472d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 16:22:36 +0200 Subject: [PATCH 26/56] updated block-simulate schema docs --- docs/rpc/components/schemas/block-simulate.schema.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/rpc/components/schemas/block-simulate.schema.yaml b/docs/rpc/components/schemas/block-simulate.schema.yaml index c69206bf686..9e0fc32de3b 100644 --- a/docs/rpc/components/schemas/block-simulate.schema.yaml +++ b/docs/rpc/components/schemas/block-simulate.schema.yaml @@ -10,7 +10,7 @@ properties: pattern: "^[0-9a-f]{64}$" block_height: type: integer - description: Height of the Bitcoin block + description: Height of the Stacks block format: uint64 consensus_hash: type: string @@ -21,6 +21,8 @@ properties: description: total fees for the block miner_signature: type: string + description: Uncompressed signature of the miner + pattern: "^[0-9a-f]{130}$" parent_block_id: type: string description: 
Parent Block ID (index block hash) @@ -29,15 +29,20 @@ properties: type: array items: type: string + description: Uncompressed signature of the signer + pattern: "^[0-9a-f]{130}$" state_index_root: type: string pattern: "^[0-9a-f]{64}$" + description: block state index root computed from the MARF (taken from the original block) timestamp: type: integer tx_merkle_root: type: string + description: merkle_root of the included transactions valid_merkle_root: type: boolean + description: whether the merkle_root of the simulated block matches the one in the chain block transactions: type: array items: From 55b0454d9a84cc8bb145b9427724d324000e53b1 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 19 Sep 2025 16:42:08 +0200 Subject: [PATCH 27/56] more docs for block-simulate --- docs/rpc/components/schemas/block-simulate.schema.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/rpc/components/schemas/block-simulate.schema.yaml b/docs/rpc/components/schemas/block-simulate.schema.yaml index 9e0fc32de3b..c8e9802bb21 100644 --- a/docs/rpc/components/schemas/block-simulate.schema.yaml +++ b/docs/rpc/components/schemas/block-simulate.schema.yaml @@ -52,19 +52,27 @@ properties: properties: data: type: object + description: JSON representation of the transaction payload events: type: array items: type: object + description: JSON representation of the transaction events execution_cost: type: object + description: costs accounting for the transaction hex: type: string + description: hexadecimal representation of the transaction body result: type: object + description: Clarity value representing the transaction result stx_burned: type: integer + description: number of burned stx tx_index: type: integer + description: index of the transaction in the array of transactions txid: - type: string \ No newline at end of file + type: string + description: transaction id \ No newline at end of file From 1d3cca1bcb2a993e299edbe25ee1d93e9584a3ee Mon Sep 17 00:00:00 
2001 From: Francesco Leacche Date: Fri, 19 Sep 2025 15:55:32 +0200 Subject: [PATCH 28/56] blockstack_lib -> stackslib --- contrib/stacks-inspect/Cargo.toml | 2 +- contrib/stacks-inspect/src/main.rs | 101 ++++++++++++++++------------- 2 files changed, 56 insertions(+), 47 deletions(-) diff --git a/contrib/stacks-inspect/Cargo.toml b/contrib/stacks-inspect/Cargo.toml index 7394b0c4dbf..354c1fdc549 100644 --- a/contrib/stacks-inspect/Cargo.toml +++ b/contrib/stacks-inspect/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2024" [dependencies] -blockstack_lib = { package = "stackslib", path = "../../stackslib", default-features = false } +stackslib = { package = "stackslib", path = "../../stackslib", default-features = false } clarity = { path = "../../clarity", default-features = false } libstackerdb = { path = "../../libstackerdb", default-features = false } stacks-common = { path = "../../stacks-common", default-features = false } diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index bcd4306563d..8af4ac8e042 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -21,6 +21,18 @@ #[macro_use] extern crate stacks_common; +use clarity::consts::CHAIN_ID_MAINNET; +use clarity::types::StacksEpochId; +use clarity::types::chainstate::StacksPrivateKey; +use stackslib::chainstate::stacks::miner::BlockBuilderSettings; +use stackslib::chainstate::stacks::{ + CoinbasePayload, StacksBlock, StacksBlockBuilder, StacksMicroblock, StacksTransaction, + StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionVersion, +}; +use stackslib::core::{ + BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, StacksEpoch, StacksEpochExtension as _, +}; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -30,64 +42,61 @@ static GLOBAL: Jemalloc = Jemalloc; use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::File; 
-use std::io::prelude::*; use std::io::BufReader; +use std::io::prelude::*; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::time::{Duration, Instant}; use std::{env, fs, io, process, thread}; -use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; -use blockstack_lib::burnchains::db::{BurnchainBlockData, BurnchainDB}; -use blockstack_lib::burnchains::{Address, Burnchain, PoxConstants}; -use blockstack_lib::chainstate::burn::db::sortdb::{ - get_block_commit_by_txid, SortitionDB, SortitionHandle, -}; -use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; -use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; -use blockstack_lib::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use blockstack_lib::chainstate::nakamoto::shadow::{ - process_shadow_block, shadow_chainstate_repair, -}; -use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; -use blockstack_lib::chainstate::stacks::db::{ - ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, -}; -use blockstack_lib::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; -use blockstack_lib::chainstate::stacks::index::ClarityMarfTrieId; -use blockstack_lib::chainstate::stacks::miner::*; -use blockstack_lib::chainstate::stacks::{StacksBlockHeader, *}; -use blockstack_lib::clarity::vm::costs::ExecutionCost; -use blockstack_lib::clarity::vm::types::StacksAddressExtensions; -use blockstack_lib::clarity::vm::ClarityVersion; -use blockstack_lib::core::{MemPoolDB, *}; -use blockstack_lib::cost_estimates::metrics::UnitMetric; -use blockstack_lib::cost_estimates::UnitEstimator; -use blockstack_lib::net::api::getinfo::RPCPeerInfoData; -use blockstack_lib::net::db::LocalPeer; -use blockstack_lib::net::httpcore::{send_http_request, 
StacksHttpRequest}; -use blockstack_lib::net::p2p::PeerNetwork; -use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::{GetNakamotoInvData, HandshakeData, StacksMessage, StacksMessageType}; -use blockstack_lib::util_lib::db::sqlite_open; -use blockstack_lib::util_lib::strings::UrlString; -use blockstack_lib::{clarity_cli, cli}; use libstackerdb::StackerDBChunkData; -use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; -use serde_json::{json, Value}; -use stacks_common::codec::{read_next, StacksMessageCodec}; +use rusqlite::{Connection, Error as SqliteError, OpenFlags, params}; +use serde_json::{Value, json}; +use stacks_common::codec::{StacksMessageCodec, read_next}; +use stacks_common::types::MempoolCollectionBehavior; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::sqlite::NO_PARAMS; -use stacks_common::types::MempoolCollectionBehavior; -use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; +use stacks_common::util::hash::{Hash160, hex_bytes, to_hex}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, sleep_ms}; +use stackslib::burnchains::bitcoin::{BitcoinNetworkType, spv}; +use stackslib::burnchains::db::{BurnchainBlockData, BurnchainDB}; +use stackslib::burnchains::{Address, Burnchain, PoxConstants}; +use stackslib::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionHandle, get_block_commit_by_txid, +}; +use stackslib::chainstate::burn::operations::BlockstackOperationType; +use stackslib::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stackslib::chainstate::coordinator::{OnChainRewardSetProvider, get_reward_cycle_info}; +use stackslib::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use 
stackslib::chainstate::nakamoto::shadow::{process_shadow_block, shadow_chainstate_repair}; +use stackslib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stackslib::chainstate::stacks::StacksBlockHeader; +use stackslib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; +use stackslib::chainstate::stacks::db::{ + ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, +}; +use stackslib::chainstate::stacks::index::ClarityMarfTrieId; +use stackslib::chainstate::stacks::index::marf::{MARF, MARFOpenOpts, MarfConnection}; +use stackslib::clarity::vm::ClarityVersion; +use stackslib::clarity::vm::costs::ExecutionCost; +use stackslib::clarity::vm::types::StacksAddressExtensions; +use stackslib::core::MemPoolDB; +use stackslib::cost_estimates::UnitEstimator; +use stackslib::cost_estimates::metrics::UnitMetric; +use stackslib::net::api::getinfo::RPCPeerInfoData; +use stackslib::net::db::LocalPeer; +use stackslib::net::httpcore::{StacksHttpRequest, send_http_request}; +use stackslib::net::p2p::PeerNetwork; +use stackslib::net::relay::Relayer; +use stackslib::net::{GetNakamotoInvData, HandshakeData, StacksMessage, StacksMessageType}; +use stackslib::util_lib::db::sqlite_open; +use stackslib::util_lib::strings::UrlString; +use stackslib::{clarity_cli, cli}; struct P2PSession { pub local_peer: LocalPeer, @@ -311,7 +320,7 @@ fn main() { if argv[1] == "--version" { println!( "{}", - &blockstack_lib::version_string( + &stackslib::version_string( option_env!("CARGO_PKG_NAME").unwrap_or(&argv[0]), option_env!("STACKS_NODE_VERSION") ) @@ -957,7 +966,7 @@ check if the associated microblocks can be downloaded if argv[1] == "docgen" { println!( "{}", - blockstack_lib::clarity::vm::docs::make_json_api_reference() + stackslib::clarity::vm::docs::make_json_api_reference() ); return; } @@ -965,7 +974,7 @@ check if the associated microblocks can be downloaded if argv[1] == "docgen_boot" { println!( "{}", - 
blockstack_lib::chainstate::stacks::boot::docs::make_json_boot_contracts_reference() + stackslib::chainstate::stacks::boot::docs::make_json_boot_contracts_reference() ); return; } From bacd4e6419d08c645b5e33afdca1ea2005127403 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Fri, 19 Sep 2025 16:54:58 +0200 Subject: [PATCH 29/56] fix clippy --- contrib/stacks-inspect/src/main.rs | 216 +++++++++++++---------------- 1 file changed, 96 insertions(+), 120 deletions(-) diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index 8af4ac8e042..9c43923920d 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -13,11 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(dead_code)] -#![allow(non_camel_case_types)] -#![allow(non_snake_case)] -#![allow(non_upper_case_globals)] - #[macro_use] extern crate stacks_common; @@ -121,7 +116,7 @@ impl P2PSession { ); msg.sign(self.seq, &self.local_peer.private_key) - .map_err(|e| format!("Failed to sign message {:?}: {:?}", &msg, &e))?; + .map_err(|e| format!("Failed to sign message {msg:?}: {e:?}"))?; self.seq = self.seq.wrapping_add(1); Ok(msg) @@ -131,14 +126,14 @@ impl P2PSession { /// Returns error text on failure. fn send_peer_message(&mut self, msg: StacksMessage) -> Result<(), String> { msg.consensus_serialize(&mut self.tcp_socket) - .map_err(|e| format!("Failed to send message {:?}: {:?}", &msg, &e)) + .map_err(|e| format!("Failed to send message {msg:?}: {e:?}")) } /// Receive a p2p message. /// Returns error text on failure. 
fn recv_peer_message(&mut self) -> Result { let msg: StacksMessage = read_next(&mut self.tcp_socket) - .map_err(|e| format!("Failed to receive message: {:?}", &e))?; + .map_err(|e| format!("Failed to receive message: {e:?}"))?; Ok(msg) } @@ -160,9 +155,9 @@ impl P2PSession { .with_header("Connection".to_string(), "close".to_string()), Duration::from_secs(60), ) - .map_err(|e| format!("Failed to query /v2/info: {:?}", &e))? + .map_err(|e| format!("Failed to query /v2/info: {e:?}"))? .decode_peer_info() - .map_err(|e| format!("Failed to decode response from /v2/info: {:?}", &e))?; + .map_err(|e| format!("Failed to decode response from /v2/info: {e:?}"))?; // convert `pox_consensus` and `stable_pox_consensus` into their respective burn block // hashes @@ -176,9 +171,9 @@ impl P2PSession { .with_header("Connection".to_string(), "close".to_string()), Duration::from_secs(60), ) - .map_err(|e| format!("Failed to query /v3/sortitions: {:?}", &e))? + .map_err(|e| format!("Failed to query /v3/sortitions: {e:?}"))? .decode_sortition_info() - .map_err(|e| format!("Failed to decode response from /v3/sortitions: {:?}", &e))? + .map_err(|e| format!("Failed to decode response from /v3/sortitions: {e:?}"))? .pop() .ok_or_else(|| format!("No sortition returned for {}", &peer_info.pox_consensus))?; @@ -192,14 +187,9 @@ impl P2PSession { .with_header("Connection".to_string(), "close".to_string()), Duration::from_secs(60), ) - .map_err(|e| format!("Failed to query stable /v3/sortitions: {:?}", &e))? + .map_err(|e| format!("Failed to query stable /v3/sortitions: {e:?}"))? .decode_sortition_info() - .map_err(|e| { - format!( - "Failed to decode response from stable /v3/sortitions: {:?}", - &e - ) - })? + .map_err(|e| format!("Failed to decode response from stable /v3/sortitions: {e:?}",))? 
.pop() .ok_or_else(|| { format!( @@ -218,12 +208,12 @@ impl P2PSession { peer_addr.port(), Some(StacksPrivateKey::random()), u64::MAX, - UrlString::try_from(format!("http://127.0.0.1:{}", data_port).as_str()).unwrap(), + UrlString::from(format!("http://127.0.0.1:{data_port}",).as_str()), vec![], ); - let tcp_socket = TcpStream::connect(&peer_addr) - .map_err(|e| format!("Failed to open {:?}: {:?}", &peer_addr, &e))?; + let tcp_socket = TcpStream::connect(peer_addr) + .map_err(|e| format!("Failed to open {peer_addr:?}: {e:?}"))?; let mut session = Self { local_peer, @@ -246,8 +236,7 @@ impl P2PSession { | StacksMessageType::StackerDBHandshakeAccept(..) => {} x => { return Err(format!( - "Peer returned unexpected message (expected HandshakeAccept variant): {:?}", - &x + "Peer returned unexpected message (expected HandshakeAccept variant): {x:?}", )); } } @@ -280,12 +269,12 @@ fn open_nakamoto_chainstate_dbs( "nakamoto-neon", ), _ => { - panic!("Unrecognized network name '{}'", network); + panic!("Unrecognized network name '{network}'"); } }; - let chain_state_path = format!("{}/{}/chainstate/", chainstate_dir, dirname); - let sort_db_path = format!("{}/{}/burnchain/sortition/", chainstate_dir, dirname); + let chain_state_path = format!("{chainstate_dir}/{dirname}/chainstate/"); + let sort_db_path = format!("{chainstate_dir}/{dirname}/burnchain/sortition/"); let sort_db = SortitionDB::open(&sort_db_path, true, pox_constants) .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); @@ -298,10 +287,7 @@ fn open_nakamoto_chainstate_dbs( fn check_shadow_network(network: &str) { if network != "mainnet" && network != "krypton" && network != "naka3" { - eprintln!( - "Unknown network '{}': only support 'mainnet', 'krypton', or 'naka3'", - &network - ); + eprintln!("Unknown network '{network}': only support 'mainnet', 'krypton', or 'naka3'"); process::exit(1); } } @@ -337,7 +323,7 @@ fn main() { let local_seed = hex_bytes(&argv[2]).expect("Failed to parse hex input 
local-peer-seed"); let node_privkey = Secp256k1PrivateKey::from_seed(&local_seed); let pubkey = Secp256k1PublicKey::from_private(&node_privkey).to_hex(); - println!("{}", pubkey); + println!("{pubkey}"); process::exit(0); } @@ -353,11 +339,11 @@ fn main() { let mut testnet = false; let mut regtest = false; let mut idx = 0; - for i in 0..argv.len() { - if argv[i] == "-t" { + for (i, item) in argv.iter().enumerate() { + if item == "-t" { testnet = true; idx = i; - } else if argv[i] == "-r" { + } else if item == "-r" { regtest = true; idx = i; } @@ -392,7 +378,7 @@ fn main() { .expect("FATAL: could not read block header database") { Some(header) => { - println!("{:#?}", header); + println!("{header:#?}"); process::exit(0); } None => { @@ -421,7 +407,7 @@ fn main() { let tx = StacksTransaction::consensus_deserialize(&mut debug_cursor) .map_err(|e| { - eprintln!("Failed to decode transaction: {:?}", &e); + eprintln!("Failed to decode transaction: {e:?}"); eprintln!("Bytes consumed:"); for buf in debug_cursor.log().iter() { eprintln!(" {}", to_hex(buf)); @@ -455,7 +441,7 @@ fn main() { }) .unwrap(); - println!("{:#?}", &block); + println!("{block:#?}"); process::exit(0); } @@ -474,7 +460,7 @@ fn main() { }) .unwrap(); - println!("{:#?}", &block); + println!("{block:#?}"); process::exit(0); } @@ -495,7 +481,7 @@ fn main() { }; match read_next::(&mut &buf[..]) { Ok(msg) => { - println!("{:#?}", &msg); + println!("{msg:#?}"); process::exit(0); } Err(_) => { @@ -584,7 +570,7 @@ fn main() { "microblocks": mblock_report }); - println!("{}", &report.to_string()); + println!("{report}"); process::exit(0); } @@ -626,7 +612,7 @@ Given a , obtain a 2100 header hash block inventory (with an empty let block_inv = chain_state.get_blocks_inventory(&header_hashes).unwrap(); println!("Fetched block inv in {}", start.elapsed().as_secs_f32()); - println!("{:?}", &block_inv); + println!("{block_inv:?}"); println!("Done!"); process::exit(0); @@ -664,7 +650,7 @@ check if the associated 
microblocks can be downloaded 0, None, 0, - UrlString::try_from("abc").unwrap(), + UrlString::from("abc"), vec![], ); @@ -696,7 +682,7 @@ check if the associated microblocks can be downloaded ) { Ok(Some(hdr)) => hdr, _ => { - debug!("No such block: {:?}", &index_block_hash); + debug!("No such block: {index_block_hash:?}"); continue; } }; @@ -711,7 +697,9 @@ check if the associated microblocks can be downloaded } Err(_) => { // we don't know about this parent block yet - debug!("{:?}: Do not have parent of anchored block {}/{} yet, so cannot ask for the microblocks it produced", &local_peer, &consensus_hash, &block_hash); + debug!( + "{local_peer:?}: Do not have parent of anchored block {consensus_hash}/{block_hash} yet, so cannot ask for the microblocks it produced" + ); continue; } } @@ -736,9 +724,8 @@ check if the associated microblocks can be downloaded } println!( - "Checked can_download in {} (headers load took {}ms)", + "Checked can_download in {} (headers load took {total_load_headers}ms)", start.elapsed().as_secs_f32(), - total_load_headers ); println!("Done!"); @@ -747,7 +734,10 @@ check if the associated microblocks can be downloaded if argv[1] == "evaluate-pox-anchor" { if argv.len() < 4 { - eprintln!("Usage: {} evaluate-pox-anchor (last-height)", argv[0]); + eprintln!( + "Usage: {} evaluate-pox-anchor (last-height)", + argv[0] + ); process::exit(1); } let start_height: u64 = argv[3].parse().expect("Failed to parse argument"); @@ -824,7 +814,7 @@ check if the associated microblocks can be downloaded let mut debug_cursor = LogReader::from_reader(&mut cursor); let mblocks: Vec = Vec::consensus_deserialize(&mut debug_cursor) .map_err(|e| { - eprintln!("Failed to decode microblocks: {:?}", &e); + eprintln!("Failed to decode microblocks: {e:?}"); eprintln!("Bytes consumed:"); for buf in debug_cursor.log().iter() { eprintln!(" {}", to_hex(buf)); @@ -833,7 +823,7 @@ check if the associated microblocks can be downloaded }) .unwrap(); - println!("{:#?}", 
&mblocks); + println!("{mblocks:#?}"); process::exit(0); } @@ -843,14 +833,16 @@ check if the associated microblocks can be downloaded "Usage: {} header-indexed-get STATE_DIR BLOCK_ID_HASH KEY", argv[0] ); - eprintln!(" STATE_DIR is either the chain state directory OR a marf index and data db file"); + eprintln!( + " STATE_DIR is either the chain state directory OR a marf index and data db file" + ); process::exit(1); } let (marf_path, db_path, arg_next) = if argv.len() == 5 { let headers_dir = &argv[2]; ( - format!("{}/vm/index.sqlite", &headers_dir), - format!("{}/vm/headers.sqlite", &headers_dir), + format!("{headers_dir}/vm/index.sqlite"), + format!("{headers_dir}/vm/headers.sqlite"), 3, ) } else { @@ -860,12 +852,12 @@ check if the associated microblocks can be downloaded let marf_key = &argv[arg_next + 1]; if fs::metadata(&marf_path).is_err() { - eprintln!("No such file or directory: {}", &marf_path); + eprintln!("No such file or directory: {marf_path}"); process::exit(1); } if fs::metadata(&db_path).is_err() { - eprintln!("No such file or directory: {}", &db_path); + eprintln!("No such file or directory: {db_path}"); process::exit(1); } @@ -889,7 +881,7 @@ check if the associated microblocks can be downloaded let row = res.unwrap_or_else(|_| panic!("Failed to query DB for MARF value hash {value}")); - println!("{}", row); + println!("{row}"); } else { println!("(undefined)"); } @@ -906,10 +898,10 @@ check if the associated microblocks can be downloaded .unwrap_or_else(|_| panic!("Error reading file: {}", argv[2])); let clarity_version = ClarityVersion::default_for_epoch(clarity_cli::DEFAULT_CLI_EPOCH); match clarity_cli::vm_execute(&program, clarity_version) { - Ok(Some(result)) => println!("{}", result), - Ok(None) => println!(""), + Ok(Some(result)) => println!("{result}"), + Ok(None) => println!(), Err(error) => { - panic!("Program Execution Error: \n{}", error); + panic!("Program Execution Error: \n{error}"); } } return; @@ -927,7 +919,7 @@ check if the 
associated microblocks can be downloaded let mut marf = MARF::from_path(path, marf_opts).unwrap(); let res = marf.get(&itip, key).expect("MARF error."); match res { - Some(x) => println!("{}", x), + Some(x) => println!("{x}"), None => println!("None"), }; return; @@ -942,7 +934,7 @@ check if the associated microblocks can be downloaded let mut cur_burn = burntip.clone(); let mut cur_tip = tip.clone(); loop { - println!("{}, {}", cur_burn, cur_tip); + println!("{cur_burn}, {cur_tip}"); let (next_burn, next_tip) = match conn.query_row("SELECT parent_burn_header_hash, parent_anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ? and burn_header_hash = ?", params![cur_tip, cur_burn], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) { @@ -951,7 +943,7 @@ check if the associated microblocks can be downloaded match e { SqliteError::QueryReturnedNoRows => {}, e => { - eprintln!("SQL Error: {}", e); + eprintln!("SQL Error: {e}"); }, } break @@ -992,10 +984,7 @@ check if the associated microblocks can be downloaded let db_path = &argv[2]; let byte_prefix = &argv[3]; let conn = Connection::open_with_flags(db_path, OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap(); - let query = format!( - "SELECT value FROM data_table WHERE key LIKE \"{}%\"", - byte_prefix - ); + let query = format!("SELECT value FROM data_table WHERE key LIKE \"{byte_prefix}%\""); let mut stmt = conn.prepare(&query).unwrap(); let mut rows = stmt.query(NO_PARAMS).unwrap(); while let Ok(Some(row)) = rows.next() { @@ -1004,7 +993,7 @@ check if the associated microblocks can be downloaded Ok(x) => x, Err(_e) => continue, }; - println!("{} => {}", val_string, clarity_value); + println!("{val_string} => {clarity_value}"); } process::exit(0); @@ -1020,7 +1009,7 @@ check if the associated microblocks can be downloaded let mut i = 1; for line in io::BufReader::new(check_file).lines() { if i % 100000 == 0 { - println!("{}...", i); + println!("{i}..."); } i += 1; let line = line.unwrap().trim().to_string(); @@ 
-1096,10 +1085,10 @@ check if the associated microblocks can be downloaded let resp = session.recv_peer_message().unwrap(); let StacksMessageType::NakamotoInv(inv) = &resp.payload else { - panic!("Got spurious message: {:?}", &resp); + panic!("Got spurious message: {resp:?}"); }; - println!("{:?}", inv); + println!("{inv:?}"); } if argv[1] == "get-nakamoto-tip" { @@ -1144,23 +1133,20 @@ check if the associated microblocks can be downloaded let chain_tip_header = chain_tip .map(|tip| { - let header = NakamotoChainState::get_block_header_nakamoto(chain_state.db(), &tip) + NakamotoChainState::get_block_header_nakamoto(chain_state.db(), &tip) + .unwrap() .unwrap() - .unwrap(); - header }) .unwrap_or_else(|| { - let header = - NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) - .unwrap() - .unwrap(); - header + NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap() }); let account = NakamotoBlockBuilder::get_account(&mut chain_state, &sort_db, &addr, &chain_tip_header) .unwrap(); - println!("{:#?}", &account); + println!("{account:#?}"); process::exit(0); } @@ -1179,8 +1165,8 @@ check if the associated microblocks can be downloaded .iter() .map(|tx_str| { let tx_bytes = hex_bytes(tx_str).unwrap(); - let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - tx + + StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap() }) .collect(); @@ -1302,7 +1288,10 @@ check if the associated microblocks can be downloaded if argv[1] == "replay-chainstate" { if argv.len() < 7 { - eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); + eprintln!( + "Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", + &argv[0] + ); process::exit(1); } @@ -1425,9 +1414,8 @@ check if the associated microblocks can be downloaded } eprintln!( - 
"\nWill replay {} stacks epochs out of {}\n", + "\nWill replay {} stacks epochs out of {num_staging_blocks}\n", &stacks_blocks_available.len(), - num_staging_blocks ); let mut known_stacks_blocks = HashSet::new(); @@ -1438,7 +1426,7 @@ check if the associated microblocks can be downloaded let (p2p_new_sortition_db, _) = burnchain .connect_db( true, - &first_burnchain_block_hash, + first_burnchain_block_hash, BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), epochs, ) @@ -1523,7 +1511,7 @@ check if the associated microblocks can be downloaded while next_arrival < stacks_blocks_arrival_order.len() && known_stacks_blocks.contains(&stacks_block_id) { - if stacks_blocks_available.get(&stacks_block_id).is_some() { + if stacks_blocks_available.contains_key(&stacks_block_id) { // load up the block let stacks_block_opt = StacksChainState::load_block( &old_chainstate.blocks_path, @@ -1587,8 +1575,7 @@ check if the associated microblocks can be downloaded } eprintln!( - "Final arrival index is {} out of {}", - next_arrival, + "Final arrival index is {next_arrival} out of {}", stacks_blocks_arrival_order.len() ); return; @@ -1735,10 +1722,7 @@ simulating a miner. header_tip.anchored_header.height() ); - info!( - "Submitting up to {} transactions to the mempool", - mine_max_txns - ); + info!("Submitting up to {mine_max_txns} transactions to the mempool"); let mut found_block_height = false; let mut parsed_tx_count = 0; let mut submit_tx_count = 0; @@ -1754,11 +1738,10 @@ simulating a miner. 
let block_height = payload["block_height"].as_u64().unwrap(); if !found_block_height && block_height >= mine_tip_height { found_block_height = true; - info!("Found target block height {}", block_height); + info!("Found target block height {block_height}"); } info!( - "Found new_block height {} parsed_tx_count {} submit_tx_count {}", - block_height, parsed_tx_count, submit_tx_count + "Found new_block height {block_height} parsed_tx_count {parsed_tx_count} submit_tx_count {submit_tx_count}" ); } "new_mempool_tx" => { @@ -1770,7 +1753,7 @@ simulating a miner. let raw_tx = StacksTransaction::consensus_deserialize(&mut cursor).unwrap(); if found_block_height { if submit_tx_count >= mine_max_txns { - info!("Reached mine_max_txns {}", submit_tx_count); + info!("Reached mine_max_txns {submit_tx_count}"); break 'outer; } let result = mempool_db.submit( @@ -1793,11 +1776,8 @@ simulating a miner. _ => {} }; } - info!("Parsed {} transactions", parsed_tx_count); - info!( - "Submitted {} transactions into the mempool", - submit_tx_count - ); + info!("Parsed {parsed_tx_count} transactions"); + info!("Submitted {submit_tx_count} transactions into the mempool"); info!("Mining a block"); @@ -1870,11 +1850,8 @@ simulating a miner. 
total_fees += tx.get_tx_fee(); } println!( - "Block {}: {} uSTX, {} bytes, cost {:?}", + "Block {}: {total_fees} uSTX, {size} bytes, cost {execution_cost:?}", block.block_hash(), - total_fees, - size, - &execution_cost ); } @@ -1923,7 +1900,7 @@ fn analyze_sortition_mev(argv: Vec) { let mut wins_epoch3 = BTreeMap::new(); for height in start_height..end_height { - debug!("Get ancestor snapshots for {}", height); + debug!("Get ancestor snapshots for {height}"); let (tip_sort_id, parent_ancestor_sn, ancestor_sn) = { let mut sort_tx = sortdb.tx_begin_at_tip(); let tip_sort_id = sort_tx.tip(); @@ -1959,20 +1936,19 @@ fn analyze_sortition_mev(argv: Vec) { let mut ops = burn_block.ops.clone(); for op in ops.iter_mut() { - if let BlockstackOperationType::LeaderBlockCommit(op) = op { - if let Some(extra_burn) = advantages.get(&op.apparent_sender.to_string()) { - debug!( - "Miner {} gets {} extra burn fee", - &op.apparent_sender.to_string(), - extra_burn - ); - op.burn_fee += *extra_burn; - } + if let BlockstackOperationType::LeaderBlockCommit(op) = op + && let Some(extra_burn) = advantages.get(&op.apparent_sender.to_string()) + { + debug!( + "Miner {} gets {extra_burn} extra burn fee", + &op.apparent_sender.to_string() + ); + op.burn_fee += *extra_burn; } } burn_block.ops = ops; - debug!("Re-evaluate sortition at height {}", height); + debug!("Re-evaluate sortition at height {height}"); let (next_sn, state_transition) = sortdb .evaluate_sortition( true, @@ -2045,7 +2021,7 @@ fn analyze_sortition_mev(argv: Vec) { println!("------------"); println!("height,burn_header_hash,winner"); for ((height, bhh), winner) in wins_epoch2.iter() { - println!("{},{},{}", height, bhh, winner); + println!("{height},{bhh},{winner}"); if let Some(cnt) = all_wins_epoch2.get_mut(winner) { *cnt += 1; } else { @@ -2058,7 +2034,7 @@ fn analyze_sortition_mev(argv: Vec) { println!("------------"); println!("height,burn_header_hash,winner"); for ((height, bhh), winner) in wins_epoch3.iter() { - 
println!("{},{},{}", height, bhh, winner); + println!("{height},{bhh},{winner}"); if let Some(cnt) = all_wins_epoch3.get_mut(winner) { *cnt += 1; } else { @@ -2075,7 +2051,7 @@ fn analyze_sortition_mev(argv: Vec) { continue; }; if epoch3_winner != winner { - println!("{},{},{},{}", height, bhh, winner, epoch3_winner); + println!("{height},{bhh},{winner},{epoch3_winner}"); } } @@ -2084,7 +2060,7 @@ fn analyze_sortition_mev(argv: Vec) { println!("---------------"); println!("miner,count"); for (winner, count) in all_wins_epoch2.iter() { - println!("{},{}", winner, count); + println!("{winner},{count}"); } println!("---------------"); From 5177466a8e532bd8134968593a316b05844c42cc Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Tue, 2 Sep 2025 16:19:44 +0200 Subject: [PATCH 30/56] Add --clarity_version to clarity-cli and pass it into analysis functions --- stackslib/src/clarity_cli.rs | 92 +++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 32 deletions(-) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index bc91d386438..57a908de638 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -204,6 +204,7 @@ fn run_analysis_free( expressions: &mut [SymbolicExpression], marf_kv: &mut C, save_contract: bool, + clarity_version: ClarityVersion, ) -> Result> { let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); analysis::run_analysis( @@ -225,9 +226,9 @@ fn run_analysis( header_db: &CLIHeadersDB, marf_kv: &mut C, save_contract: bool, + clarity_version: ClarityVersion, ) -> Result> { let mainnet = header_db.is_mainnet(); - let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); let cost_track = LimitedCostTracker::new( mainnet, default_chain_id(mainnet), @@ -820,6 +821,23 @@ struct InitialAllocation { amount: u64, } +/// Parse the --clarity_version flag from command line arguments. 
+/// Returns the specified ClarityVersion or the default for the CLI epoch if not provided. +fn parse_clarity_version_flag(argv: &mut Vec) -> ClarityVersion { + if let Ok(optarg) = consume_arg(argv, &["--clarity_version"], true) { + if let Some(version_str) = optarg { + friendly_expect( + version_str.parse::(), + &format!("Invalid clarity version: {}", version_str), + ) + } else { + ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH) + } + } else { + ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH) + } +} + fn consume_arg( args: &mut Vec, argnames: &[&str], @@ -858,8 +876,8 @@ fn consume_arg( } } -/// This function uses Clarity1 to parse the boot code. -fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) { +/// This function uses the specified Clarity version to parse the boot code. +fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C, clarity_version: ClarityVersion) { let mainnet = header_db.is_mainnet(); let boot_code = if mainnet { *STACKS_BOOT_CODE_MAINNET_2_1 @@ -906,12 +924,12 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) parse( &contract_identifier, contract_content, - ClarityVersion::Clarity2, + clarity_version ), "Failed to parse program.", ); - let analysis_result = run_analysis_free(&contract_identifier, &mut ast, marf, true); + let analysis_result = run_analysis_free(&contract_identifier, &mut ast, marf, true, clarity_version); match analysis_result { Ok(_) => { let db = marf.get_clarity_db(header_db, &NULL_BURN_STATE_DB); @@ -924,7 +942,7 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) vm_env .initialize_versioned_contract( contract_identifier, - ClarityVersion::Clarity2, + clarity_version, contract_content, None, ) @@ -998,7 +1016,6 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option { if args.len() < 2 { eprintln!( - "Usage: {} {} [program-file.clar] [--contract_id CONTRACT_ID] [--output_analysis] [--costs] [--testnet] 
(vm-state.db)", + "Usage: {} {} [program-file.clar] [--contract_id CONTRACT_ID] [--output_analysis] [--costs] [--testnet] [--clarity_version N] (vm-state.db)", invoked_by, args[0] ); panic_test!(); @@ -1164,9 +1182,9 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option { let mut argv = args.to_vec(); let mainnet = !matches!(consume_arg(&mut argv, &["--testnet"], false), Ok(Some(_))); + let mut marf = MemoryBackingStore::new(); let mut vm_env = OwnedEnvironment::new_free( mainnet, @@ -1248,9 +1268,10 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option val, Err(error) => { println!("Parse error:\n{error}"); @@ -1285,7 +1306,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (), Err(boxed) => { let (error, _) = *boxed; @@ -1325,16 +1346,18 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let result = vm_env .get_exec_environment(None, None, &placeholder_context) @@ -1383,9 +1406,10 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (header_db, marf, Err(e)), Ok(analysis) => { @@ -1652,7 +1679,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option = argv[5..] .iter() .map(|argument| { - let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); let argument_parsed = friendly_expect( vm_execute(argument, clarity_version), &format!("Error parsing argument \"{}\"", argument), From d45435af70876a0a3e241d4535a4b1c93fdf889a Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Thu, 4 Sep 2025 16:15:33 +0200 Subject: [PATCH 31/56] Revert "Add --clarity_version to clarity-cli and pass it into analysis functions" This reverts commit 45b0c37e564ed0084cfd2cfe38550e3e487e9017. 
--- stackslib/src/clarity_cli.rs | 90 +++++++++++++----------------------- 1 file changed, 32 insertions(+), 58 deletions(-) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 57a908de638..83490abdb23 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -229,6 +229,7 @@ fn run_analysis( clarity_version: ClarityVersion, ) -> Result> { let mainnet = header_db.is_mainnet(); + let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); let cost_track = LimitedCostTracker::new( mainnet, default_chain_id(mainnet), @@ -821,23 +822,6 @@ struct InitialAllocation { amount: u64, } -/// Parse the --clarity_version flag from command line arguments. -/// Returns the specified ClarityVersion or the default for the CLI epoch if not provided. -fn parse_clarity_version_flag(argv: &mut Vec) -> ClarityVersion { - if let Ok(optarg) = consume_arg(argv, &["--clarity_version"], true) { - if let Some(version_str) = optarg { - friendly_expect( - version_str.parse::(), - &format!("Invalid clarity version: {}", version_str), - ) - } else { - ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH) - } - } else { - ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH) - } -} - fn consume_arg( args: &mut Vec, argnames: &[&str], @@ -876,8 +860,8 @@ fn consume_arg( } } -/// This function uses the specified Clarity version to parse the boot code. -fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C, clarity_version: ClarityVersion) { +/// This function uses Clarity1 to parse the boot code. 
+fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) { let mainnet = header_db.is_mainnet(); let boot_code = if mainnet { *STACKS_BOOT_CODE_MAINNET_2_1 @@ -924,12 +908,12 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C, parse( &contract_identifier, contract_content, - clarity_version + ClarityVersion::Clarity2, ), "Failed to parse program.", ); - let analysis_result = run_analysis_free(&contract_identifier, &mut ast, marf, true, clarity_version); + let analysis_result = run_analysis_free(&contract_identifier, &mut ast, marf, true); match analysis_result { Ok(_) => { let db = marf.get_clarity_db(header_db, &NULL_BURN_STATE_DB); @@ -942,7 +926,7 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C, vm_env .initialize_versioned_contract( contract_identifier, - clarity_version, + ClarityVersion::Clarity2, contract_content, None, ) @@ -1016,6 +1000,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option { if args.len() < 2 { eprintln!( - "Usage: {} {} [program-file.clar] [--contract_id CONTRACT_ID] [--output_analysis] [--costs] [--testnet] [--clarity_version N] (vm-state.db)", + "Usage: {} {} [program-file.clar] [--contract_id CONTRACT_ID] [--output_analysis] [--costs] [--testnet] (vm-state.db)", invoked_by, args[0] ); panic_test!(); @@ -1182,9 +1166,9 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option { let mut argv = args.to_vec(); let mainnet = !matches!(consume_arg(&mut argv, &["--testnet"], false), Ok(Some(_))); - let mut marf = MemoryBackingStore::new(); let mut vm_env = OwnedEnvironment::new_free( mainnet, @@ -1268,10 +1250,9 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option val, Err(error) => { println!("Parse error:\n{error}"); @@ -1306,7 +1287,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (), Err(boxed) => { let (error, _) = *boxed; 
@@ -1346,18 +1327,16 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let result = vm_env .get_exec_environment(None, None, &placeholder_context) @@ -1406,10 +1385,9 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (header_db, marf, Err(e)), Ok(analysis) => { @@ -1679,7 +1654,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option = argv[5..] .iter() .map(|argument| { + let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); let argument_parsed = friendly_expect( vm_execute(argument, clarity_version), &format!("Error parsing argument \"{}\"", argument), From f205ccce3e5c08a5801ee6fa57691ee0343f1031 Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Fri, 5 Sep 2025 14:15:29 +0200 Subject: [PATCH 32/56] Apply patch by @jcnelson via https://github.com/stacks-network/stacks-core/pull/6434#discussion_r2317480646 Co-authored-by: Jude Nelson --- stackslib/src/clarity_cli.rs | 56 ++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 83490abdb23..d32a033851d 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -138,7 +138,7 @@ fn friendly_expect_opt(input: Option, msg: &str) -> A { }) } -pub const DEFAULT_CLI_EPOCH: StacksEpochId = StacksEpochId::Epoch25; +pub const DEFAULT_CLI_EPOCH: StacksEpochId = StacksEpochId::Epoch32; struct EvalInput { marf_kv: MarfedKV, @@ -229,7 +229,7 @@ fn run_analysis( clarity_version: ClarityVersion, ) -> Result> { let mainnet = header_db.is_mainnet(); - let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); + let clarity_version = ClarityVersion::Clarity3; let cost_track = LimitedCostTracker::new( mainnet, default_chain_id(mainnet), @@ -908,12 +908,18 @@ fn 
install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) parse( &contract_identifier, contract_content, - ClarityVersion::Clarity2, + ClarityVersion::Clarity1, ), "Failed to parse program.", ); - let analysis_result = run_analysis_free(&contract_identifier, &mut ast, marf, true); + let analysis_result = run_analysis_free( + &contract_identifier, + &mut ast, + marf, + true, + ClarityVersion::Clarity2, + ); match analysis_result { Ok(_) => { let db = marf.get_clarity_db(header_db, &NULL_BURN_STATE_DB); @@ -926,14 +932,14 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) vm_env .initialize_versioned_contract( contract_identifier, - ClarityVersion::Clarity2, + ClarityVersion::Clarity1, contract_content, None, ) .unwrap(); } - Err(_) => { - panic!("failed to instantiate boot contract"); + Err(e) => { + panic!("failed to instantiate boot contract: {:?}", &e); } }; } @@ -1168,7 +1174,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option val, Err(error) => { println!("Parse error:\n{error}"); @@ -1287,7 +1293,13 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (), Err(boxed) => { let (error, _) = *boxed; @@ -1329,14 +1341,20 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let result = vm_env .get_exec_environment(None, None, &placeholder_context) @@ -1387,7 +1405,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option = argv[5..] 
.iter() .map(|argument| { - let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); + let clarity_version = ClarityVersion::Clarity3; let argument_parsed = friendly_expect( vm_execute(argument, clarity_version), &format!("Error parsing argument \"{}\"", argument), From efb8225c2b44d5428d6841282b25c3d8a087c4fd Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Fri, 5 Sep 2025 15:02:59 +0200 Subject: [PATCH 33/56] Use CLI arg or default instead of hardcoded Clarity3 Pass user-specified Clarity version when provided, otherwise use default for CLI epoch. --- stackslib/src/clarity_cli.rs | 77 ++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 26 deletions(-) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index d32a033851d..6fabb27e240 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -206,7 +206,6 @@ fn run_analysis_free( save_contract: bool, clarity_version: ClarityVersion, ) -> Result> { - let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); analysis::run_analysis( contract_identifier, expressions, @@ -229,7 +228,6 @@ fn run_analysis( clarity_version: ClarityVersion, ) -> Result> { let mainnet = header_db.is_mainnet(); - let clarity_version = ClarityVersion::Clarity3; let cost_track = LimitedCostTracker::new( mainnet, default_chain_id(mainnet), @@ -994,6 +992,22 @@ pub fn add_serialized_output(result: &mut serde_json::Value, value: Value) { result["output_serialized"] = serde_json::to_value(result_raw.as_str()).unwrap(); } +/// Parse --clarity_version flag. Defaults to version for DEFAULT_CLI_EPOCH. 
+fn parse_clarity_version_flag(argv: &mut Vec) -> ClarityVersion { + if let Ok(optarg) = consume_arg(argv, &["--clarity_version"], true) { + if let Some(s) = optarg { + friendly_expect( + s.parse::(), + &format!("Invalid clarity version: {}", s), + ) + } else { + ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH) + } + } else { + ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH) + } +} + /// Returns (process-exit-code, Option) pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option) { if args.is_empty() { @@ -1118,13 +1132,14 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { if args.len() < 2 { eprintln!( - "Usage: {} {} [program-file.clar] [--contract_id CONTRACT_ID] [--output_analysis] [--costs] [--testnet] (vm-state.db)", + "Usage: {} {} [program-file.clar] [--contract_id CONTRACT_ID] [--output_analysis] [--costs] [--testnet] [--clarity_version N] (vm-state.db)", invoked_by, args[0] ); panic_test!(); } let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let contract_id = if let Ok(optarg) = consume_arg(&mut argv, &["--contract_id"], true) { optarg .map(|optarg_str| { @@ -1172,9 +1187,8 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option { let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let mainnet = !matches!(consume_arg(&mut argv, &["--testnet"], false), Ok(Some(_))); let mut marf = MemoryBackingStore::new(); let mut vm_env = OwnedEnvironment::new_free( @@ -1258,7 +1274,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option val, Err(error) => { println!("Parse error:\n{error}"); @@ -1298,7 +1314,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (), Err(boxed) => { @@ -1320,6 +1336,8 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { + let mut argv = args.to_vec(); + let 
clarity_version = parse_clarity_version_flag(&mut argv); let content: String = { let mut buffer = String::new(); friendly_expect( @@ -1341,11 +1359,11 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { let result = vm_env @@ -1391,6 +1409,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); @@ -1405,7 +1424,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); let coverage_folder = consume_arg(&mut argv, &["--c"], true).unwrap_or(None); let evalInput = get_eval_input(invoked_by, &argv); - let vm_filename = if argv.len() == 3 { &argv[2] } else { &argv[3] }; + let vm_filename = if argv.len() == 3 { + &argv[2].clone() + } else { + &argv[3].clone() + }; let header_db = friendly_expect(CLIHeadersDB::resume(vm_filename), "Failed to open CLI DB"); let marf_kv = friendly_expect( @@ -1462,7 +1486,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); if argv.len() != 4 { eprintln!( - "Usage: {} {} [--costs] [index-block-hash] [contract-identifier] [vm/clarity dir]", + "Usage: {} {} [--costs] [index-block-hash] [contract-identifier] [--clarity_version N] [vm/clarity dir]", invoked_by, &argv[0] ); panic_test!(); } - let chain_tip = &argv[1]; + let chain_tip = &argv[1].clone(); let contract_identifier = friendly_expect( QualifiedContractIdentifier::parse(&argv[2]), "Failed to parse contract identifier.", @@ -1550,7 +1575,7 @@ 
pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let coverage_folder = consume_arg(&mut argv, &["--c"], true).unwrap_or(None); let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); @@ -1601,13 +1627,13 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (header_db, marf, Err(e)), Ok(analysis) => { @@ -1672,7 +1697,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { let mut argv = args.to_vec(); + let clarity_version = parse_clarity_version_flag(&mut argv); let coverage_folder = consume_arg(&mut argv, &["--c"], true).unwrap_or(None); let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); let assets = matches!(consume_arg(&mut argv, &["--assets"], false), Ok(Some(_))); if argv.len() < 5 { - eprintln!("Usage: {} {} [--costs] [--assets] [vm-state.db] [contract-identifier] [public-function-name] [sender-address] [args...]", invoked_by, argv[0]); + eprintln!("Usage: {} {} [--costs] [--assets] [--clarity_version N] [vm-state.db] [contract-identifier] [public-function-name] [sender-address] [args...]", invoked_by, argv[0]); panic_test!(); } @@ -1771,7 +1797,6 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option = argv[5..] 
.iter() .map(|argument| { - let clarity_version = ClarityVersion::Clarity3; let argument_parsed = friendly_expect( vm_execute(argument, clarity_version), &format!("Error parsing argument \"{}\"", argument), From 0dc24b30560c68f998a08bff9dc4169f31c2acca Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Thu, 11 Sep 2025 08:12:43 +0200 Subject: [PATCH 34/56] Run `cargo fmt-stacks` --- stackslib/src/clarity_cli.rs | 56 +++++++++++++++++------------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 6fabb27e240..a54d1a4a7b9 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -1208,8 +1208,14 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (i32, Option (header_db, marf, Err(e)), Ok(analysis) => { From e9ba8d0a42bc0d1970a0740ecb0d54f9ee610d58 Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Thu, 11 Sep 2025 09:10:09 +0200 Subject: [PATCH 35/56] Verify `--clarity_version` flag takes effect for `clarity-cli check` --- stackslib/src/clarity_cli.rs | 89 ++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index a54d1a4a7b9..d52bb05397b 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -2394,4 +2394,93 @@ mod test { }) ); } + + #[test] + fn test_check_clarity3_contract_passes_with_clarity3_flag() { + // Arrange + let clar_path = format!( + "/tmp/version-flag-c3-allow-{}.clar", + rand::thread_rng().gen::() + ); + fs::write( + &clar_path, + // Valid only in Clarity 3. + r#" +(define-read-only (get-tenure-info (h uint)) + (ok + { + tenure-time: (get-tenure-info? time h), + tenure-miner-address: (get-tenure-info? 
miner-address h), + }) +) +"#, + ) + .unwrap(); + + // Act + let invoked = invoke_command( + "test", + &[ + "check".to_string(), + clar_path, + "--clarity_version".to_string(), + "clarity3".to_string(), + ], + ); + + // Assert + let exit_code = invoked.0; + let result_json = invoked.1.unwrap(); + assert_eq!( + exit_code, 0, + "expected check to pass under Clarity 3, got: {}", + result_json + ); + assert_eq!(result_json["message"], "Checks passed."); + } + + #[test] + fn test_check_clarity3_contract_fails_with_clarity2_flag() { + // Arrange + let clar_path = format!( + "/tmp/version-flag-c2-reject-{}.clar", + rand::thread_rng().gen::() + ); + fs::write( + &clar_path, + // Valid only in Clarity 3, should fail in 2. + r#" +(define-read-only (get-tenure-info (h uint)) + (ok + { + tenure-time: (get-tenure-info? time h), + tenure-miner-address: (get-tenure-info? miner-address h), + }) +) +"#, + ) + .unwrap(); + + // Act + let invoked = invoke_command( + "test", + &[ + "check".to_string(), + clar_path, + "--clarity_version".to_string(), + "clarity2".to_string(), + ], + ); + + // Assert + let exit_code = invoked.0; + let result_json = invoked.1.unwrap(); + assert_eq!( + exit_code, 1, + "expected check to fail under Clarity 2, got: {}", + result_json + ); + assert_eq!(result_json["message"], "Checks failed."); + assert!(result_json["error"]["analysis"] != json!(null)); + } } From 15b1ef599009809aa77e7813e4ae53512a2d64c4 Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Thu, 11 Sep 2025 09:10:26 +0200 Subject: [PATCH 36/56] Verify `--clarity_version` flag takes effect for `clarity-cli launch` --- stackslib/src/clarity_cli.rs | 98 ++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index d52bb05397b..96eab47f2ed 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -2483,4 +2483,102 @@ mod test { assert_eq!(result_json["message"], "Checks failed."); 
assert!(result_json["error"]["analysis"] != json!(null)); } + + #[test] + fn test_launch_clarity3_contract_passes_with_clarity3_flag() { + // Arrange + let db_name = format!("/tmp/db_{}", rand::thread_rng().gen::()); + invoke_command("test", &["initialize".to_string(), db_name.clone()]); + + let clar_path = format!( + "/tmp/version-flag-launch-c3-{}.clar", + rand::thread_rng().gen::() + ); + fs::write( + &clar_path, + // Valid only in Clarity 3. + r#" +(define-read-only (get-tenure-info (h uint)) + (ok + { + tenure-time: (get-tenure-info? time h), + tenure-miner-address: (get-tenure-info? miner-address h), + }) +) +"#, + ) + .unwrap(); + + // Act + let invoked = invoke_command( + "test", + &[ + "launch".to_string(), + "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), + clar_path, + db_name, + "--clarity_version".to_string(), + "clarity3".to_string(), + ], + ); + + // Assert + let exit_code = invoked.0; + let result_json = invoked.1.unwrap(); + assert_eq!( + exit_code, 0, + "expected launch to pass under Clarity 3, got: {}", + result_json + ); + assert_eq!(result_json["message"], "Contract initialized!"); + } + + #[test] + fn test_launch_clarity3_contract_fails_with_clarity2_flag() { + // Arrange + let db_name = format!("/tmp/db_{}", rand::thread_rng().gen::()); + invoke_command("test", &["initialize".to_string(), db_name.clone()]); + + let clar_path = format!( + "/tmp/version-flag-launch-c2-{}.clar", + rand::thread_rng().gen::() + ); + fs::write( + &clar_path, + // Valid only in Clarity 3, should fail in 2. + r#" +(define-read-only (get-tenure-info (h uint)) + (ok + { + tenure-time: (get-tenure-info? time h), + tenure-miner-address: (get-tenure-info? 
miner-address h), + }) +) +"#, + ) + .unwrap(); + + // Act + let invoked = invoke_command( + "test", + &[ + "launch".to_string(), + "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), + clar_path, + db_name, + "--clarity_version".to_string(), + "clarity2".to_string(), + ], + ); + + // Assert + let exit_code = invoked.0; + let result_json = invoked.1.unwrap(); + assert_eq!( + exit_code, 1, + "expected launch to fail under Clarity 2, got: {}", + result_json + ); + assert!(result_json["error"]["initialization"] != json!(null)); + } } From 34fad55704b7028721758dbfe2fc850be6c55369 Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Thu, 11 Sep 2025 09:49:04 +0200 Subject: [PATCH 37/56] Verify `--clarity_version` flag takes effect for `clarity-cli eval` NOTE: test_eval_clarity3_contract_fails_with_clarity2_flag fails. --- stackslib/src/clarity_cli.rs | 106 +++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 96eab47f2ed..e199a50efdf 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -2581,4 +2581,110 @@ mod test { ); assert!(result_json["error"]["initialization"] != json!(null)); } + + #[test] + fn test_eval_clarity3_contract_passes_with_clarity3_flag() { + // Arrange + let db_name = format!("/tmp/db_{}", rand::thread_rng().gen::()); + invoke_command("test", &["initialize".to_string(), db_name.clone()]); + + // Launch minimal contract at target for eval context. + let launch_src = format!( + "/tmp/version-flag-eval-launch-{}.clar", + rand::thread_rng().gen::() + ); + fs::write(&launch_src, "(define-read-only (dummy) true)").unwrap(); + let _ = invoke_command( + "test", + &[ + "launch".to_string(), + "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), + launch_src, + db_name.clone(), + ], + ); + + // Use a Clarity3-only native expression. 
+ let clar_path = format!( + "/tmp/version-flag-eval-c3-{}.clar", + rand::thread_rng().gen::() + ); + fs::write(&clar_path, "(get-tenure-info? time u1)").unwrap(); + + // Act + let invoked = invoke_command( + "test", + &[ + "eval".to_string(), + "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), + clar_path, + db_name, + "--clarity_version".to_string(), + "clarity3".to_string(), + ], + ); + + // Assert + let exit_code = invoked.0; + let result_json = invoked.1.unwrap(); + assert_eq!( + exit_code, 0, + "expected eval to pass under Clarity 3, got: {}", + result_json + ); + assert!(result_json["success"].as_bool().unwrap()); + } + + #[test] + fn test_eval_clarity3_contract_fails_with_clarity2_flag() { + // Arrange + let db_name = format!("/tmp/db_{}", rand::thread_rng().gen::()); + invoke_command("test", &["initialize".to_string(), db_name.clone()]); + + // Launch minimal contract at target for eval context. + let launch_src = format!( + "/tmp/version-flag-eval-launch-{}.clar", + rand::thread_rng().gen::() + ); + fs::write(&launch_src, "(define-read-only (dummy) true)").unwrap(); + let _ = invoke_command( + "test", + &[ + "launch".to_string(), + "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), + launch_src, + db_name.clone(), + ], + ); + + // Use a Clarity3-only native expression. + let clar_path = format!( + "/tmp/version-flag-eval-c2-{}.clar", + rand::thread_rng().gen::() + ); + fs::write(&clar_path, "(get-tenure-info? 
time u1)").unwrap(); + + // Act + let invoked = invoke_command( + "test", + &[ + "eval".to_string(), + "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), + clar_path, + db_name, + "--clarity_version".to_string(), + "clarity2".to_string(), + ], + ); + + // Assert + let exit_code = invoked.0; + let result_json = invoked.1.unwrap(); + assert_eq!( + exit_code, 1, + "expected eval to fail under Clarity 2, got: {}", + result_json + ); + assert!(result_json["error"]["runtime"] != json!(null)); + } } From 9dac73c1e4e0fa78233312a5f596647ae84bd95e Mon Sep 17 00:00:00 2001 From: Radu Bahmata <92028479+BowTiedRadone@users.noreply.github.com> Date: Fri, 19 Sep 2025 17:47:29 +0300 Subject: [PATCH 38/56] Fix `test_eval_clarity3_contract_fails_with_clarity2_flag` --- stackslib/src/clarity_cli.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index e199a50efdf..53dfd481cc9 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -2654,6 +2654,8 @@ mod test { "S1G2081040G2081040G2081040G208105NK8PE5.tenure".to_string(), launch_src, db_name.clone(), + "--clarity_version".to_string(), + "clarity2".to_string(), ], ); From 7fcdac95a7bd0421d4328d99d4c417f089975e6f Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Mon, 22 Sep 2025 13:02:10 +0200 Subject: [PATCH 39/56] merge with develop fixes --- stackslib/src/net/api/blocksimulate.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index c8a5faf6afc..49973300afe 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -13,7 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::Value; use regex::{Captures, Regex}; @@ -242,7 +241,6 @@ impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, - ASTRules::PrecheckSize, None, ); let err = match tx_result { From abf0edcd935fb491b5e0cddcbb81594ce5ba414a Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 22 Sep 2025 15:12:14 +0100 Subject: [PATCH 40/56] move blockstack-cli to contrib --- Cargo.lock | 11 +++++ Cargo.toml | 3 +- contrib/blockstack-cli/Cargo.toml | 14 ++++++ contrib/blockstack-cli/README.md | 33 ++++++++++++++ .../blockstack-cli/src/main.rs | 43 +++++++++---------- stackslib/Cargo.toml | 4 -- 6 files changed, 81 insertions(+), 27 deletions(-) create mode 100644 contrib/blockstack-cli/Cargo.toml create mode 100644 contrib/blockstack-cli/README.md rename stackslib/src/blockstack_cli.rs => contrib/blockstack-cli/src/main.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index 56301718081..7f4395b8de5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -480,6 +480,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "blockstack-cli" +version = "0.1.0" +dependencies = [ + "clarity 0.0.1", + "serde_json", + "stacks-common 0.0.1", + "stackslib 0.0.1", + "tempfile", +] + [[package]] name = "bumpalo" version = "3.14.0" diff --git a/Cargo.toml b/Cargo.toml index 63e1e5d9ac1..7c6214e8164 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,8 @@ members = [ "libsigner", "stacks-signer", "stacks-node", - "contrib/stacks-inspect" + "contrib/stacks-inspect", + "contrib/blockstack-cli" ] exclude = ["contrib/tools/config-docs-generator"] diff --git a/contrib/blockstack-cli/Cargo.toml b/contrib/blockstack-cli/Cargo.toml new file mode 100644 index 00000000000..8395246477d --- /dev/null +++ b/contrib/blockstack-cli/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "blockstack-cli" +version = "0.1.0" +edition = "2024" + +[dependencies] +stackslib = { package = 
"stackslib", path = "../../stackslib", default-features = false } +clarity = { path = "../../clarity", default-features = false } +stacks-common = { path = "../../stacks-common", default-features = false } +serde_json = { workspace = true } + +[dev-dependencies] +stacks-common = { path = "../../stacks-common", default-features = false, features = ["testing"] } +tempfile = { version = "3.3", default-features = false } diff --git a/contrib/blockstack-cli/README.md b/contrib/blockstack-cli/README.md new file mode 100644 index 00000000000..0c1cb66e828 --- /dev/null +++ b/contrib/blockstack-cli/README.md @@ -0,0 +1,33 @@ +# blockstack-cli + +A CLI for building and signing Stacks transactions and interacting with Clarity contracts. + +Features: +- `publish` — deploy Clarity smart contracts +- `contract-call` — call public functions on deployed contracts +- `token-transfer` — send STX between accounts +- Decoding helpers for transactions and payloads + +Build: +```bash +cargo build -p blockstack-cli +``` + +Basic usage: +```bash +cargo run -p blockstack-cli --help +``` + +Examples: +```bash +# Publish a contract +cargo run -p blockstack-cli publish --path --sender --network + +# Call a contract function +cargo run -p blockstack-cli contract-call --contract --function --args '[(int 1)]' --sender + +# Transfer STX +cargo run -p blockstack-cli token-transfer --amount 100000 --sender --recipient +``` + +See `--help` on each subcommand for complete options. 
diff --git a/stackslib/src/blockstack_cli.rs b/contrib/blockstack-cli/src/main.rs similarity index 98% rename from stackslib/src/blockstack_cli.rs rename to contrib/blockstack-cli/src/main.rs index bcf23757e7c..54514855a22 100644 --- a/stackslib/src/blockstack_cli.rs +++ b/contrib/blockstack-cli/src/main.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -18,38 +17,38 @@ #![allow(non_snake_case)] #![allow(non_upper_case_globals)] -extern crate blockstack_lib; extern crate clarity; extern crate stacks_common; +extern crate stackslib; +use std::io::Read; #[cfg(test)] use std::io::prelude::*; -use std::io::Read; use std::{env, fs, io}; -use blockstack_lib::burnchains::bitcoin::address::{ - ADDRESS_VERSION_MAINNET_SINGLESIG, ADDRESS_VERSION_TESTNET_SINGLESIG, -}; -use blockstack_lib::burnchains::Address; -use blockstack_lib::chainstate::stacks::{ - StacksBlock, StacksBlockHeader, StacksMicroblock, StacksPrivateKey, StacksPublicKey, - StacksTransaction, StacksTransactionSigner, TokenTransferMemo, TransactionAnchorMode, - TransactionAuth, TransactionContractCall, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionSpendingCondition, TransactionVersion, - C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, -}; -use blockstack_lib::clarity_cli::vm_execute; -use blockstack_lib::core::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; -use blockstack_lib::net::Error as NetError; -use blockstack_lib::util_lib::strings::StacksString; use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; -use 
stacks_common::address::{b58, AddressHashMode}; +use stacks_common::address::{AddressHashMode, b58}; use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::retry::LogReader; +use stackslib::burnchains::Address; +use stackslib::burnchains::bitcoin::address::{ + ADDRESS_VERSION_MAINNET_SINGLESIG, ADDRESS_VERSION_TESTNET_SINGLESIG, +}; +use stackslib::chainstate::stacks::{ + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, StacksBlock, + StacksBlockHeader, StacksMicroblock, StacksPrivateKey, StacksPublicKey, StacksTransaction, + StacksTransactionSigner, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionSpendingCondition, TransactionVersion, +}; +use stackslib::clarity_cli::vm_execute; +use stackslib::core::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; +use stackslib::net::Error as NetError; +use stackslib::util_lib::strings::StacksString; const USAGE: &str = "blockstack-cli (options) [method] [args...] 
@@ -1071,8 +1070,8 @@ fn main_handler(mut argv: Vec) -> Result { mod test { use std::panic; - use blockstack_lib::chainstate::stacks::TransactionPostCondition; use stacks_common::util::cargo_workspace; + use stackslib::chainstate::stacks::TransactionPostCondition; use tempfile::NamedTempFile; use super::*; @@ -1721,7 +1720,7 @@ mod test { fn simple_decode_tx() { let tx_args = [ "decode-tx", - "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe4000000000000000100000000000000000100c90ae0235365f3a73c595f8c6ab3c529807feb3cb269247329c9a24218d50d3f34c7eef5d28ba26831affa652a73ec32f098fec4bf1decd1ceb3fde4b8ce216b030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f" + "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe4000000000000000100000000000000000100c90ae0235365f3a73c595f8c6ab3c529807feb3cb269247329c9a24218d50d3f34c7eef5d28ba26831affa652a73ec32f098fec4bf1decd1ceb3fde4b8ce216b030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f", ]; let result = main_handler(to_string_vec(&tx_args)).unwrap(); diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index d4bf263f27f..7f8209b5824 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -22,10 +22,6 @@ path = "src/lib.rs" name = "clarity-cli" path = "src/clarity_cli_main.rs" -[[bin]] -name = "blockstack-cli" -path = "src/blockstack_cli.rs" - [dependencies] rand = { workspace = true } rand_core = { workspace = true } From cc6846c014571595e1ce8fc817949d17a4dd26e0 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 22 Sep 2025 17:29:24 +0100 Subject: [PATCH 41/56] rename blockstack-cli -> stacks-cli --- Cargo.lock | 22 ++++---- Cargo.toml | 2 +- contrib/blockstack-cli/README.md | 33 ------------ .../{blockstack-cli => stacks-cli}/Cargo.toml | 2 +- contrib/stacks-cli/README.md | 50 +++++++++++++++++++ .../src/main.rs | 0 6 files changed, 63 insertions(+), 46 deletions(-) delete mode 
100644 contrib/blockstack-cli/README.md rename contrib/{blockstack-cli => stacks-cli}/Cargo.toml (95%) create mode 100644 contrib/stacks-cli/README.md rename contrib/{blockstack-cli => stacks-cli}/src/main.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 7f4395b8de5..1ef9cc74d59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -480,17 +480,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "blockstack-cli" -version = "0.1.0" -dependencies = [ - "clarity 0.0.1", - "serde_json", - "stacks-common 0.0.1", - "stackslib 0.0.1", - "tempfile", -] - [[package]] name = "bumpalo" version = "3.14.0" @@ -3117,6 +3106,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "stacks-cli" +version = "0.1.0" +dependencies = [ + "clarity 0.0.1", + "serde_json", + "stacks-common 0.0.1", + "stackslib 0.0.1", + "tempfile", +] + [[package]] name = "stacks-common" version = "0.0.1" diff --git a/Cargo.toml b/Cargo.toml index 7c6214e8164..edbaa79db61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ members = [ "stacks-signer", "stacks-node", "contrib/stacks-inspect", - "contrib/blockstack-cli" + "contrib/stacks-cli" ] exclude = ["contrib/tools/config-docs-generator"] diff --git a/contrib/blockstack-cli/README.md b/contrib/blockstack-cli/README.md deleted file mode 100644 index 0c1cb66e828..00000000000 --- a/contrib/blockstack-cli/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# blockstack-cli - -A CLI for building and signing Stacks transactions and interacting with Clarity contracts. 
- -Features: -- `publish` — deploy Clarity smart contracts -- `contract-call` — call public functions on deployed contracts -- `token-transfer` — send STX between accounts -- Decoding helpers for transactions and payloads - -Build: -```bash -cargo build -p blockstack-cli -``` - -Basic usage: -```bash -cargo run -p blockstack-cli --help -``` - -Examples: -```bash -# Publish a contract -cargo run -p blockstack-cli publish --path --sender --network - -# Call a contract function -cargo run -p blockstack-cli contract-call --contract --function --args '[(int 1)]' --sender - -# Transfer STX -cargo run -p blockstack-cli token-transfer --amount 100000 --sender --recipient -``` - -See `--help` on each subcommand for complete options. diff --git a/contrib/blockstack-cli/Cargo.toml b/contrib/stacks-cli/Cargo.toml similarity index 95% rename from contrib/blockstack-cli/Cargo.toml rename to contrib/stacks-cli/Cargo.toml index 8395246477d..a3db6d6e0ff 100644 --- a/contrib/blockstack-cli/Cargo.toml +++ b/contrib/stacks-cli/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "blockstack-cli" +name = "stacks-cli" version = "0.1.0" edition = "2024" diff --git a/contrib/stacks-cli/README.md b/contrib/stacks-cli/README.md new file mode 100644 index 00000000000..9582832817a --- /dev/null +++ b/contrib/stacks-cli/README.md @@ -0,0 +1,50 @@ +# stacks-cli + +A CLI for building and signing Stacks transactions and interacting with Clarity contracts. + +### Features + +* **Transaction Building:** + * `publish`: Deploy Clarity smart contracts. + * `contract-call`: Call public functions on deployed contracts. + * `token-transfer`: Transfer STX between accounts. +* **Key Management:** + * `generate-sk`: Generate a new Stacks private key. + * `addresses`: Derive Stacks and Bitcoin addresses from a private key. +* **Decoding Helpers:** + * `decode-tx`, `decode-block`, `decode-header`, etc. - Decode raw, hex-encoded data structures. 
+ +### Build & Run + +```bash +# Build the CLI +cargo build -p stacks-cli + +# See top-level help +cargo run -p stacks-cli -- --help +``` +*Note the extra `--` to pass flags to the binary instead of cargo.* + +### Global Options +* `--testnet[=]` - Generate a transaction for the testnet. An optional custom chain ID can be provided in hex (e.g., `--testnet=0x12345678`). + +### Examples + +**Note:** All arguments are positional. + +```bash +# Publish a contract on testnet +cargo run -p stacks-cli -- --testnet publish + +# Call a contract function +# Function arguments are passed using flags: -e (evaluate), -x (hex), or --hex-file +cargo run -p stacks-cli -- contract-call -e "'ST1...'" -e "u100" + +# Transfer STX (amount is in micro-STX, memo is optional) +cargo run -p stacks-cli -- token-transfer "my memo" + +# Generate a new key and associated addresses +cargo run -p stacks-cli -- generate-sk +``` + +See `--help` on each subcommand for complete options (e.g., `cargo run -p stacks-cli -- publish -h`). diff --git a/contrib/blockstack-cli/src/main.rs b/contrib/stacks-cli/src/main.rs similarity index 100% rename from contrib/blockstack-cli/src/main.rs rename to contrib/stacks-cli/src/main.rs From df63978a84ce8db4800ce37252b0211cb5c058e8 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 22 Sep 2025 17:48:05 +0100 Subject: [PATCH 42/56] add decode example in readme --- contrib/stacks-cli/README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/contrib/stacks-cli/README.md b/contrib/stacks-cli/README.md index 9582832817a..4d355bed193 100644 --- a/contrib/stacks-cli/README.md +++ b/contrib/stacks-cli/README.md @@ -12,7 +12,11 @@ A CLI for building and signing Stacks transactions and interacting with Clarity * `generate-sk`: Generate a new Stacks private key. * `addresses`: Derive Stacks and Bitcoin addresses from a private key. * **Decoding Helpers:** - * `decode-tx`, `decode-block`, `decode-header`, etc. 
- Decode raw, hex-encoded data structures. + * `decode-tx`: Decode a hex-encoded transaction. + * `decode-header`: Decode a hex-encoded block header. + * `decode-block`: Decode a hex-encoded block. + * `decode-microblock`: Decode a hex-encoded microblock. + * `decode-microblocks`: Decode a hex-encoded stream of microblocks. ### Build & Run @@ -45,6 +49,9 @@ cargo run -p stacks-cli -- token-transfer ``` See `--help` on each subcommand for complete options (e.g., `cargo run -p stacks-cli -- publish -h`). From 0b1d2444e24a8fccf65553e8010d06707bce2e60 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 22 Sep 2025 18:07:35 +0100 Subject: [PATCH 43/56] fix clippy --- contrib/stacks-cli/src/main.rs | 141 +++++++++++++-------------------- 1 file changed, 53 insertions(+), 88 deletions(-) diff --git a/contrib/stacks-cli/src/main.rs b/contrib/stacks-cli/src/main.rs index 54514855a22..34561c0659b 100644 --- a/contrib/stacks-cli/src/main.rs +++ b/contrib/stacks-cli/src/main.rs @@ -12,10 +12,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-#![allow(dead_code)] -#![allow(non_camel_case_types)] -#![allow(non_snake_case)] -#![allow(non_upper_case_globals)] extern crate clarity; extern crate stacks_common; @@ -208,11 +204,11 @@ impl std::error::Error for CliError { impl std::fmt::Display for CliError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - CliError::ClarityRuntimeError(e) => write!(f, "Clarity error: {:?}", e), - CliError::ClarityGeneralError(e) => write!(f, "Clarity error: {}", e), - CliError::Message(e) => write!(f, "{}", e), - CliError::Usage => write!(f, "{}", USAGE), - CliError::InvalidChainId(e) => write!(f, "Invalid chain ID: {}", e), + CliError::ClarityRuntimeError(e) => write!(f, "Clarity error: {e:?}"), + CliError::ClarityGeneralError(e) => write!(f, "Clarity error: {e}"), + CliError::Message(e) => write!(f, "{e}"), + CliError::Usage => write!(f, "{USAGE}"), + CliError::InvalidChainId(e) => write!(f, "Invalid chain ID: {e}"), } } } @@ -237,37 +233,37 @@ impl From for CliError { impl From for CliError { fn from(value: NetError) -> Self { - CliError::Message(format!("Stacks NetError: {}", value)) + CliError::Message(format!("Stacks NetError: {value}")) } } impl From for CliError { fn from(value: CodecError) -> Self { - CliError::Message(format!("Stacks CodecError: {}", value)) + CliError::Message(format!("Stacks CodecError: {value}")) } } impl From for CliError { fn from(value: std::num::ParseIntError) -> Self { - CliError::Message(format!("Failed to parse integer: {}", value)) + CliError::Message(format!("Failed to parse integer: {value}")) } } impl From for CliError { fn from(value: io::Error) -> Self { - CliError::Message(format!("IO error reading CLI input: {}", value)) + CliError::Message(format!("IO error reading CLI input: {value}")) } } impl From for CliError { fn from(value: stacks_common::util::HexError) -> Self { - CliError::Message(format!("Bad hex string supplied: {}", value)) + CliError::Message(format!("Bad hex string supplied: 
{value}")) } } impl From for CliError { fn from(value: clarity::vm::types::serialization::SerializationError) -> Self { - CliError::Message(format!("Failed to deserialize: {}", value)) + CliError::Message(format!("Failed to deserialize: {value}")) } } @@ -304,12 +300,12 @@ fn make_standard_single_sig_tx( version: TransactionVersion, chain_id: u32, payload: TransactionPayload, - publicKey: &StacksPublicKey, + public_key: &StacksPublicKey, nonce: u64, tx_fee: u64, ) -> StacksTransaction { let mut spending_condition = - TransactionSpendingCondition::new_singlesig_p2pkh(publicKey.clone()) + TransactionSpendingCondition::new_singlesig_p2pkh(public_key.clone()) .expect("Failed to create p2pkh spending condition from public key."); spending_condition.set_nonce(nonce); spending_condition.set_tx_fee(tx_fee); @@ -344,7 +340,7 @@ fn sign_transaction_single_sig_standard( /// # Returns /// /// The number of times `flag` appears in `args`. -fn count_flag(args: &Vec, flag: &str) -> usize { +fn count_flag(args: &[String], flag: &str) -> usize { args.iter().filter(|&arg| arg == flag).count() } @@ -436,8 +432,7 @@ fn parse_anchor_mode( if count_micro > 1 || count_block > 1 { return Err(CliError::Message(format!( - "Duplicated anchor mode detected.\n\nUSAGE:\n{}", - usage, + "Duplicated anchor mode detected.\n\nUSAGE:\n{usage}" ))); } @@ -446,8 +441,7 @@ fn parse_anchor_mode( match (has_microblock, has_block) { (true, true) => Err(CliError::Message(format!( - "Both anchor modes detected.\n\nUSAGE:\n{}", - usage + "Both anchor modes detected.\n\nUSAGE:\n{usage}" ))), (true, false) => Ok(TransactionAnchorMode::OffChainOnly), (false, true) => Ok(TransactionAnchorMode::OnChainOnly), @@ -497,8 +491,7 @@ fn parse_postcondition_mode( 1 => { /* continue below */ } _ => { return Err(CliError::Message(format!( - "Duplicated `{}`.\n\nUSAGE:\n{}", - FLAG_POSTCONDITION, usage + "Duplicated `{FLAG_POSTCONDITION}`.\n\nUSAGE:\n{usage}" ))); } } @@ -528,12 +521,11 @@ fn handle_contract_publish( let 
mut args = args_slice.to_vec(); if !args.is_empty() && args[0] == "-h" { - return Err(CliError::Message(format!("USAGE:\n{}", PUBLISH_USAGE))); + return Err(CliError::Message(format!("USAGE:\n{PUBLISH_USAGE}"))); } if args.len() < 5 { return Err(CliError::Message(format!( - "Incorrect argument count supplied \n\nUSAGE:\n{}", - PUBLISH_USAGE + "Incorrect argument count supplied \n\nUSAGE:\n{PUBLISH_USAGE}", ))); } let anchor_mode = parse_anchor_mode(&mut args, PUBLISH_USAGE)?; @@ -589,12 +581,11 @@ fn handle_contract_call( ) -> Result { let mut args = args_slice.to_vec(); if !args.is_empty() && args[0] == "-h" { - return Err(CliError::Message(format!("USAGE:\n {}", CALL_USAGE))); + return Err(CliError::Message(format!("USAGE:\n {CALL_USAGE}"))); } if args.len() < 6 { return Err(CliError::Message(format!( - "Incorrect argument count supplied \n\nUSAGE:\n {}", - CALL_USAGE + "Incorrect argument count supplied \n\nUSAGE:\n {CALL_USAGE}", ))); } let anchor_mode = parse_anchor_mode(&mut args, CALL_USAGE)?; @@ -607,7 +598,7 @@ fn handle_contract_call( let val_args = &args[6..]; - if val_args.len() % 2 != 0 { + if !val_args.len().is_multiple_of(2) { return Err( "contract-call arguments must be supplied as a list of `-e ...` or `-x 0000...` or `--hex-file ` pairs" .into(), @@ -684,14 +675,12 @@ fn handle_token_transfer( let mut args = args_slice.to_vec(); if !args.is_empty() && args[0] == "-h" { return Err(CliError::Message(format!( - "USAGE:\n {}", - TOKEN_TRANSFER_USAGE + "USAGE:\n {TOKEN_TRANSFER_USAGE}" ))); } if args.len() < 5 { return Err(CliError::Message(format!( - "Incorrect argument count supplied \n\nUSAGE:\n {}", - TOKEN_TRANSFER_USAGE + "Incorrect argument count supplied \n\nUSAGE:\n {TOKEN_TRANSFER_USAGE}" ))); } @@ -741,7 +730,7 @@ fn handle_token_transfer( #[allow(clippy::indexing_slicing)] fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result { if !args.is_empty() && args[0] == "-h" { - return Err(CliError::Message(format!("USAGE:\n 
{}", GENERATE_USAGE))); + return Err(CliError::Message(format!("USAGE:\n {GENERATE_USAGE}"))); } let sk = StacksPrivateKey::random(); @@ -762,18 +751,17 @@ fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result Result { if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { - return Err(CliError::Message(format!("USAGE:\n {}", ADDRESSES_USAGE))); + return Err(CliError::Message(format!("USAGE:\n {ADDRESSES_USAGE}"))); } let sk = StacksPrivateKey::from_hex(&args[0]).expect("Failed to load private key"); @@ -803,10 +791,9 @@ fn get_addresses(args: &[String], version: TransactionVersion) -> Result Result Result { if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( - "Usage: {}\n", - DECODE_TRANSACTION_USAGE + "Usage: {DECODE_TRANSACTION_USAGE}\n" ))); } @@ -852,10 +838,7 @@ fn decode_transaction(args: &[String], _version: TransactionVersion) -> Result Result { if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { - return Err(CliError::Message(format!( - "Usage: {}\n", - DECODE_HEADER_USAGE - ))); + return Err(CliError::Message(format!("Usage: {DECODE_HEADER_USAGE}\n"))); } let header_data = if args[0] == "-" { // read from stdin @@ -892,10 +875,7 @@ fn decode_header(args: &[String], _version: TransactionVersion) -> Result Result { if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { - return Err(CliError::Message(format!( - "Usage: {}\n", - DECODE_BLOCK_USAGE - ))); + return Err(CliError::Message(format!("Usage: {DECODE_BLOCK_USAGE}\n"))); } let block_data = if args[0] == "-" { // read from stdin @@ -931,8 +911,7 @@ fn decode_block(args: &[String], _version: TransactionVersion) -> Result Result { if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( - "Usage: {}\n", - DECODE_MICROBLOCK_USAGE + "Usage: {DECODE_MICROBLOCK_USAGE}\n" ))); } let mblock_data = if args[0] == "-" { @@ -971,8 +950,7 @@ fn decode_microblock(args: 
&[String], _version: TransactionVersion) -> Result Result { if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( - "Usage: {}\n", - DECODE_MICROBLOCKS_USAGE + "Usage: {DECODE_MICROBLOCKS_USAGE}\n" ))); } let mblock_data = if args[0] == "-" { @@ -1014,10 +992,10 @@ fn main() { match main_handler(argv) { Ok(s) => { - println!("{}", s); + println!("{s}"); } Err(e) => { - eprintln!("{}", e); + eprintln!("{e}"); std::process::exit(1); } } @@ -1079,7 +1057,7 @@ mod test { mod utils { use super::*; pub fn tx_deserialize(hex_str: &str) -> StacksTransaction { - let tx_str = hex_bytes(&hex_str).expect("Failed to get hex byte from tx str!"); + let tx_str = hex_bytes(hex_str).expect("Failed to get hex byte from tx str!"); let mut cursor = io::Cursor::new(&tx_str); StacksTransaction::consensus_deserialize(&mut cursor).expect("Failed deserialize tx!") } @@ -1222,10 +1200,7 @@ mod test { let result = main_handler(to_string_vec(&publish_args)); assert!(result.is_err()); - let exp_err_msg = format!( - "{}\n\nUSAGE:\n{}", - "Both anchor modes detected.", PUBLISH_USAGE - ); + let exp_err_msg = format!("Both anchor modes detected.\n\nUSAGE:\n{PUBLISH_USAGE}",); assert_eq!(exp_err_msg, result.unwrap_err().to_string()); // Scenario FAIL using duplicated anchor mode @@ -1243,10 +1218,7 @@ mod test { let result = main_handler(to_string_vec(&publish_args)); assert!(result.is_err()); - let exp_err_msg = format!( - "{}\n\nUSAGE:\n{}", - "Duplicated anchor mode detected.", PUBLISH_USAGE - ); + let exp_err_msg = format!("Duplicated anchor mode detected.\n\nUSAGE:\n{PUBLISH_USAGE}"); assert_eq!(exp_err_msg, result.unwrap_err().to_string()); } @@ -1305,10 +1277,8 @@ mod test { let result = main_handler(to_string_vec(&publish_args)); assert!(result.is_err()); - let exp_err_msg = format!( - "{}\n\nUSAGE:\n{}", - "Invalid value for `--postcondition-mode`.", PUBLISH_USAGE - ); + let exp_err_msg = + format!("Invalid value for 
`--postcondition-mode`.\n\nUSAGE:\n{PUBLISH_USAGE}"); assert_eq!(exp_err_msg, result.unwrap_err().to_string()); // Scenario FAIL with missing post-condition value @@ -1325,10 +1295,8 @@ mod test { let result = main_handler(to_string_vec(&publish_args)); assert!(result.is_err()); - let exp_err_msg = format!( - "{}\n\nUSAGE:\n{}", - "Missing value for `--postcondition-mode`.", PUBLISH_USAGE - ); + let exp_err_msg = + format!("Missing value for `--postcondition-mode`.\n\nUSAGE:\n{PUBLISH_USAGE}"); assert_eq!(exp_err_msg, result.unwrap_err().to_string()); // Scenario FAIL with duplicated post-condition @@ -1347,10 +1315,7 @@ mod test { let result = main_handler(to_string_vec(&publish_args)); assert!(result.is_err()); - let exp_err_msg = format!( - "{}\n\nUSAGE:\n{}", - "Duplicated `--postcondition-mode`.", PUBLISH_USAGE - ); + let exp_err_msg = format!("Duplicated `--postcondition-mode`.\n\nUSAGE:\n{PUBLISH_USAGE}"); assert_eq!(exp_err_msg, result.unwrap_err().to_string()); } @@ -1573,7 +1538,7 @@ mod test { "{}", sign_transaction_single_sig_standard("01zz", &sk).unwrap_err() ); - println!("{}", s); + println!("{s}"); assert!(s.contains("Bad hex string")); let cc_args = [ @@ -1637,7 +1602,7 @@ mod test { let result = main_handler(to_string_vec(&cc_args)); assert!(result.is_err(), "Result should be err!"); - let expected_msg = format!("Cannot read file: {}. Reason: ", file_path); + let expected_msg = format!("Cannot read file: {file_path}. 
Reason: "); assert!(result.unwrap_err().to_string().starts_with(&expected_msg)); } @@ -1645,7 +1610,7 @@ mod test { fn test_contract_call_with_serialized_arg_from_file_fails_due_to_bad_hex() { let mut file = NamedTempFile::new().expect("Cannot create tempfile!"); // Bad hex string but (good except for the \n) - write!(file, "0000000000000000000000000000000001\n").expect("Cannot Write to temp file"); + writeln!(file, "0000000000000000000000000000000001").expect("Cannot Write to temp file"); let file_path = file.path().to_str().unwrap(); let cc_args = [ @@ -1657,7 +1622,7 @@ mod test { "foo-contract", "transfer-fookens", "--hex-file", - &file_path, + file_path, ]; let result = main_handler(to_string_vec(&cc_args)); @@ -1683,7 +1648,7 @@ mod test { "foo-contract", "transfer-fookens", "--hex-file", - &file_path, + file_path, ]; let result = main_handler(to_string_vec(&cc_args)); @@ -1724,7 +1689,7 @@ mod test { ]; let result = main_handler(to_string_vec(&tx_args)).unwrap(); - eprintln!("result:\n{}", result); + eprintln!("result:\n{result}"); } #[test] @@ -1735,7 +1700,7 @@ mod test { ]; let result = main_handler(to_string_vec(&block_args)).unwrap(); - eprintln!("result:\n{}", result); + eprintln!("result:\n{result}"); } #[test] @@ -1746,7 +1711,7 @@ mod test { ]; let result = main_handler(to_string_vec(&header_args)).unwrap(); - eprintln!("result:\n{}", result); + eprintln!("result:\n{result}"); } #[test] From a72c8647a26b37571805651f24fee66174581215 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 23 Sep 2025 14:43:03 +0200 Subject: [PATCH 44/56] refactored block_replay --- stackslib/src/net/api/blocksimulate.rs | 326 ++++++++++--------- stackslib/src/net/api/tests/blocksimulate.rs | 25 ++ 2 files changed, 193 insertions(+), 158 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 49973300afe..09eaa51764d 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ 
b/stackslib/src/net/api/blocksimulate.rs @@ -23,6 +23,7 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::StacksChainState; @@ -49,6 +50,172 @@ impl RPCNakamotoBlockSimulateRequestHandler { auth, } } + + pub fn block_replay( + &self, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + ) -> Result { + let Some(block_id) = &self.block_id else { + return Err(ChainError::InvalidStacksBlock("block_id is None".into())); + }; + + let Some((tenure_id, parent_block_id)) = chainstate + .nakamoto_blocks_db() + .get_tenure_and_parent_block_id(&block_id)? + else { + return Err(ChainError::NoSuchBlockError); + }; + + let staging_db_path = chainstate.get_nakamoto_staging_blocks_path()?; + let db_conn = StacksChainState::open_nakamoto_staging_blocks(&staging_db_path, false)?; + let rowid = db_conn + .conn() + .get_nakamoto_block_rowid(&block_id)? 
+ .ok_or(ChainError::NoSuchBlockError)?; + + let mut blob_fd = db_conn + .open_nakamoto_block(rowid, false) + .map_err(|e| { + let msg = format!("Failed to open Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) + .unwrap(); + + let block = NakamotoBlock::consensus_deserialize(&mut blob_fd) + .map_err(|e| { + let msg = format!("Failed to read Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) + .unwrap(); + + let burn_dbconn = match sortdb.index_handle_at_block(chainstate, &parent_block_id) { + Ok(burn_dbconn) => burn_dbconn, + Err(_) => return Err(ChainError::NoSuchBlockError), + }; + + let tenure_change = block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::TenureChange(..))); + let coinbase = block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::Coinbase(..))); + let tenure_cause = tenure_change.and_then(|tx| match &tx.payload { + TransactionPayload::TenureChange(tc) => Some(tc.cause), + _ => None, + }); + + let parent_stacks_header = + NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) + .unwrap() + .unwrap(); + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &block.header.consensus_hash, + block.header.burn_spent, + tenure_change, + coinbase, + block.header.pox_treatment.len(), + None, + None, + ) + .unwrap(); + + let mut miner_tenure_info = builder + .load_ephemeral_tenure_info(chainstate, &burn_dbconn, tenure_cause) + .unwrap(); + let burn_chain_height = miner_tenure_info.burn_tip_height; + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .unwrap(); + + let mut block_fees: u128 = 0; + let mut txs_receipts = vec![]; + + for (i, tx) in block.txs.iter().enumerate() { + let tx_len = tx.tx_len(); + + let tx_result = builder.try_mine_tx_with_len( + &mut tenure_tx, + tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + None, + ); + let err = match tx_result { + 
TransactionResult::Success(tx_result) => { + txs_receipts.push(tx_result.receipt); + Ok(()) + } + _ => Err(format!("Problematic tx {i}")), + }; + if let Err(reason) = err { + panic!("Rejected block tx: {reason}"); + } + + block_fees += tx.get_tx_fee() as u128; + } + + let simulated_block = builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); + + tenure_tx.rollback_block(); + + let block_hash = block.header.block_hash(); + let block_height = block.header.chain_length; + + let tx_merkle_root = block.header.tx_merkle_root.clone(); + + let mut simulated_block = RPCSimulatedBlock { + block_id: block_id.clone(), + block_hash, + block_height, + parent_block_id, + consensus_hash: tenure_id, + fees: block_fees, + tx_merkle_root: block.header.tx_merkle_root, + state_index_root: block.header.state_index_root, + timestamp: block.header.timestamp, + miner_signature: block.header.miner_signature, + signer_signature: block.header.signer_signature, + transactions: vec![], + valid_merkle_root: tx_merkle_root == simulated_block.header.tx_merkle_root, + }; + + for receipt in txs_receipts { + let events = receipt + .events + .iter() + .enumerate() + .map(|(event_index, event)| { + event + .json_serialize(event_index, &receipt.transaction.txid(), true) + .unwrap() + }) + .collect(); + let transaction_data = match &receipt.transaction { + TransactionOrigin::Stacks(stacks) => Some(stacks.clone()), + TransactionOrigin::Burn(_) => None, + }; + let txid = receipt.transaction.txid(); + let transaction = RPCSimulatedBlockTransaction { + txid, + tx_index: receipt.tx_index, + data: transaction_data, + hex: receipt.transaction.serialize_to_dbstring(), + result: receipt.result, + stx_burned: receipt.stx_burned, + execution_cost: receipt.execution_cost, + events, + }; + simulated_block.transactions.push(transaction); + } + + Ok(simulated_block) + } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] @@ -155,164 +322,7 @@ impl RPCRequestHandler for 
RPCNakamotoBlockSimulateRequestHandler { let simulated_block_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - let block_id = block_id.clone(); - let Some((tenure_id, parent_block_id)) = chainstate - .nakamoto_blocks_db() - .get_tenure_and_parent_block_id(&block_id)? - else { - return Err(ChainError::NoSuchBlockError); - }; - - let staging_db_path = chainstate.get_nakamoto_staging_blocks_path()?; - let db_conn = - StacksChainState::open_nakamoto_staging_blocks(&staging_db_path, false)?; - let rowid = db_conn - .conn() - .get_nakamoto_block_rowid(&block_id)? - .ok_or(ChainError::NoSuchBlockError)?; - - let mut blob_fd = db_conn - .open_nakamoto_block(rowid, false) - .map_err(|e| { - let msg = format!("Failed to open Nakamoto block {}: {:?}", &block_id, &e); - warn!("{}", &msg); - msg - }) - .unwrap(); - - let block = NakamotoBlock::consensus_deserialize(&mut blob_fd) - .map_err(|e| { - let msg = format!("Failed to read Nakamoto block {}: {:?}", &block_id, &e); - warn!("{}", &msg); - msg - }) - .unwrap(); - - let burn_dbconn = match sortdb.index_handle_at_block(chainstate, &parent_block_id) { - Ok(burn_dbconn) => burn_dbconn, - Err(_) => return Err(ChainError::NoSuchBlockError), - }; - - let tenure_change = block - .txs - .iter() - .find(|tx| matches!(tx.payload, TransactionPayload::TenureChange(..))); - let coinbase = block - .txs - .iter() - .find(|tx| matches!(tx.payload, TransactionPayload::Coinbase(..))); - let tenure_cause = tenure_change.and_then(|tx| match &tx.payload { - TransactionPayload::TenureChange(tc) => Some(tc.cause), - _ => None, - }); - - let parent_stacks_header = - NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) - .unwrap() - .unwrap(); - let mut builder = NakamotoBlockBuilder::new( - &parent_stacks_header, - &block.header.consensus_hash, - block.header.burn_spent, - tenure_change, - coinbase, - block.header.pox_treatment.len(), - None, - None, - ) - .unwrap(); - - let mut miner_tenure_info 
= builder - .load_ephemeral_tenure_info(chainstate, &burn_dbconn, tenure_cause) - .unwrap(); - let burn_chain_height = miner_tenure_info.burn_tip_height; - let mut tenure_tx = builder - .tenure_begin(&burn_dbconn, &mut miner_tenure_info) - .unwrap(); - - let mut block_fees: u128 = 0; - let mut txs_receipts = vec![]; - - for (i, tx) in block.txs.iter().enumerate() { - let tx_len = tx.tx_len(); - - let tx_result = builder.try_mine_tx_with_len( - &mut tenure_tx, - tx, - tx_len, - &BlockLimitFunction::NO_LIMIT_HIT, - None, - ); - let err = match tx_result { - TransactionResult::Success(tx_result) => { - txs_receipts.push(tx_result.receipt); - Ok(()) - } - _ => Err(format!("Problematic tx {i}")), - }; - if let Err(reason) = err { - panic!("Rejected block tx: {reason}"); - } - - block_fees += tx.get_tx_fee() as u128; - } - - let simulated_block = - builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); - - tenure_tx.rollback_block(); - - let block_hash = block.header.block_hash(); - let block_height = block.header.chain_length; - - let tx_merkle_root = block.header.tx_merkle_root.clone(); - - let mut simulated_block = RPCSimulatedBlock { - block_id, - block_hash, - block_height, - parent_block_id, - consensus_hash: tenure_id, - fees: block_fees, - tx_merkle_root: block.header.tx_merkle_root, - state_index_root: block.header.state_index_root, - timestamp: block.header.timestamp, - miner_signature: block.header.miner_signature, - signer_signature: block.header.signer_signature, - transactions: vec![], - valid_merkle_root: tx_merkle_root == simulated_block.header.tx_merkle_root, - }; - - for receipt in txs_receipts { - let events = receipt - .events - .iter() - .enumerate() - .map(|(event_index, event)| { - event - .json_serialize(event_index, &receipt.transaction.txid(), true) - .unwrap() - }) - .collect(); - let transaction_data = match &receipt.transaction { - TransactionOrigin::Stacks(stacks) => Some(stacks.clone()), - TransactionOrigin::Burn(_) => None, - }; - 
let txid = receipt.transaction.txid(); - let transaction = RPCSimulatedBlockTransaction { - txid, - tx_index: receipt.tx_index, - data: transaction_data, - hex: receipt.transaction.serialize_to_dbstring(), - result: receipt.result, - stx_burned: receipt.stx_burned, - execution_cost: receipt.execution_cost, - events, - }; - simulated_block.transactions.push(transaction); - } - - Ok(simulated_block) + self.block_replay(sortdb, chainstate) }); // start loading up the block diff --git a/stackslib/src/net/api/tests/blocksimulate.rs b/stackslib/src/net/api/tests/blocksimulate.rs index 1d5d153b1ae..0735aec3ad1 100644 --- a/stackslib/src/net/api/tests/blocksimulate.rs +++ b/stackslib/src/net/api/tests/blocksimulate.rs @@ -18,6 +18,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use stacks_common::types::chainstate::StacksBlockId; +use crate::chainstate::stacks::Error as ChainError; use crate::net::api::blocksimulate; use crate::net::api::tests::TestRPC; use crate::net::connection::ConnectionOptions; @@ -63,6 +64,30 @@ fn test_try_parse_request() { assert_eq!(&preamble, request.preamble()); } +#[test] +fn test_block_reply_errors() { + let mut handler = + blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new(Some("password".into())); + + let test_observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + + let sort_db = rpc_test.peer_1.sortdb.take().unwrap(); + let chainstate = rpc_test.peer_1.chainstate(); + + let err = handler.block_replay(&sort_db, chainstate).err().unwrap(); + + assert!(matches!(err, ChainError::InvalidStacksBlock(_))); + assert_eq!(err.to_string(), "block_id is None"); + + handler.block_id = Some(StacksBlockId([0x01; 32])); + + let err = handler.block_replay(&sort_db, chainstate).err().unwrap(); + + assert!(matches!(err, ChainError::NoSuchBlockError)); + assert_eq!(err.to_string(), "No such Stacks block"); +} + #[test] fn test_try_make_response() { let addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); From 61aa8192185146bf9be3cb5ec1bd86e3f40472e1 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 23 Sep 2025 14:48:36 +0200 Subject: [PATCH 45/56] got rid of unwrap() --- stackslib/src/net/api/blocksimulate.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 09eaa51764d..3608f7b4ec4 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -109,10 +109,18 @@ impl RPCNakamotoBlockSimulateRequestHandler { _ => None, }); - let parent_stacks_header = - NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) - .unwrap() - .unwrap(); + let parent_stacks_header_opt = + match NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) { + Ok(parent_stacks_header_opt) => parent_stacks_header_opt, + Err(e) => return Err(e), + }; + + let Some(parent_stacks_header) = parent_stacks_header_opt else { + return Err(ChainError::InvalidStacksBlock( + "Invalid Parent Block".into(), + )); + }; + let mut builder = NakamotoBlockBuilder::new( &parent_stacks_header, &block.header.consensus_hash, From 48327bba76dddce4b09a991db9b446039322c7fa Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 23 Sep 2025 14:55:43 +0200 Subject: [PATCH 46/56] better error management from unreplayable transactions --- stackslib/src/net/api/blocksimulate.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index 3608f7b4ec4..ccca4f5fcb3 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -162,7 +162,11 @@ impl RPCNakamotoBlockSimulateRequestHandler { _ => Err(format!("Problematic tx {i}")), }; if let Err(reason) = err { - panic!("Rejected block tx: {reason}"); + let txid = tx.txid(); + return 
Err(ChainError::InvalidStacksTransaction( + format!("Unable to replay transaction {txid}: {reason}").into(), + false, + )); } block_fees += tx.get_tx_fee() as u128; From c5a70d5f3196633d1f4b974c6cefae28b528a44d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 23 Sep 2025 17:45:00 +0200 Subject: [PATCH 47/56] got rid of unwrap --- stackslib/src/net/api/blocksimulate.rs | 55 +++++++++++++++----------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs index ccca4f5fcb3..09fe6bd0f78 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blocksimulate.rs @@ -74,22 +74,23 @@ impl RPCNakamotoBlockSimulateRequestHandler { .get_nakamoto_block_rowid(&block_id)? .ok_or(ChainError::NoSuchBlockError)?; - let mut blob_fd = db_conn - .open_nakamoto_block(rowid, false) - .map_err(|e| { - let msg = format!("Failed to open Nakamoto block {}: {:?}", &block_id, &e); - warn!("{}", &msg); - msg - }) - .unwrap(); + let mut blob_fd = match db_conn.open_nakamoto_block(rowid, false).map_err(|e| { + let msg = format!("Failed to open Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) { + Ok(blob_fd) => blob_fd, + Err(e) => return Err(ChainError::InvalidStacksBlock(e)), + }; - let block = NakamotoBlock::consensus_deserialize(&mut blob_fd) - .map_err(|e| { - let msg = format!("Failed to read Nakamoto block {}: {:?}", &block_id, &e); - warn!("{}", &msg); - msg - }) - .unwrap(); + let block = match NakamotoBlock::consensus_deserialize(&mut blob_fd).map_err(|e| { + let msg = format!("Failed to read Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) { + Ok(block) => block, + Err(e) => return Err(ChainError::InvalidStacksBlock(e)), + }; let burn_dbconn = match sortdb.index_handle_at_block(chainstate, &parent_block_id) { Ok(burn_dbconn) => burn_dbconn, @@ -121,7 +122,7 @@ impl RPCNakamotoBlockSimulateRequestHandler { )); }; - let 
mut builder = NakamotoBlockBuilder::new( + let mut builder = match NakamotoBlockBuilder::new( &parent_stacks_header, &block.header.consensus_hash, block.header.burn_spent, @@ -130,16 +131,22 @@ impl RPCNakamotoBlockSimulateRequestHandler { block.header.pox_treatment.len(), None, None, - ) - .unwrap(); + ) { + Ok(builder) => builder, + Err(e) => return Err(e), + }; + + let mut miner_tenure_info = + match builder.load_ephemeral_tenure_info(chainstate, &burn_dbconn, tenure_cause) { + Ok(miner_tenure_info) => miner_tenure_info, + Err(e) => return Err(e), + }; - let mut miner_tenure_info = builder - .load_ephemeral_tenure_info(chainstate, &burn_dbconn, tenure_cause) - .unwrap(); let burn_chain_height = miner_tenure_info.burn_tip_height; - let mut tenure_tx = builder - .tenure_begin(&burn_dbconn, &mut miner_tenure_info) - .unwrap(); + let mut tenure_tx = match builder.tenure_begin(&burn_dbconn, &mut miner_tenure_info) { + Ok(tenure_tx) => tenure_tx, + Err(e) => return Err(e), + }; let mut block_fees: u128 = 0; let mut txs_receipts = vec![]; From e3994aaf5d1223a05565fb488ebf8bba9c6bf152 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 23 Sep 2025 17:59:56 +0200 Subject: [PATCH 48/56] renamed simulate to replay --- CHANGELOG.md | 2 +- .../examples/block-simulate.example.json | 108 ------------------ .../schemas/block-simulate.schema.yaml | 78 ------------- docs/rpc/openapi.yaml | 18 +-- .../api/{blocksimulate.rs => blockreplay.rs} | 60 +++++----- stackslib/src/net/api/mod.rs | 4 +- .../{blocksimulate.rs => blockreplay.rs} | 16 +-- stackslib/src/net/api/tests/mod.rs | 2 +- 8 files changed, 51 insertions(+), 237 deletions(-) delete mode 100644 docs/rpc/components/examples/block-simulate.example.json delete mode 100644 docs/rpc/components/schemas/block-simulate.schema.yaml rename stackslib/src/net/api/{blocksimulate.rs => blockreplay.rs} (87%) rename stackslib/src/net/api/tests/{blocksimulate.rs => blockreplay.rs} (89%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
5a725d46f08..b7e14fb57ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Add `stackerdb_timeout_secs` to miner config for limiting duration of StackerDB HTTP requests. - When determining a global transaction replay set, the state evaluator now uses a longest-common-prefix algorithm to find a replay set in the case where a single replay set has less than 70% of signer weight. - New endpoints /v3/tenures/blocks/, /v3/tenures/blocks/hash, /v3/tenures/blocks/height allowing retrieving the list of stacks blocks from a burn block -- New authenticated endpoint /v3/block/simulate to simulate the execution of any Nakamoto block in the chain (useful for validation, replay, getting events...) +- New authenticated endpoint /v3/block/replay to replay the execution of any Nakamoto block in the chain (useful for validation, simulation, getting events...) - Creates epoch 3.3 and costs-4 in preparation for a hardfork to activate Clarity 4 - Adds support for new Clarity 4 builtins (not activated until epoch 3.3): - `contract-hash?` diff --git a/docs/rpc/components/examples/block-simulate.example.json b/docs/rpc/components/examples/block-simulate.example.json deleted file mode 100644 index b6c79c99f2d..00000000000 --- a/docs/rpc/components/examples/block-simulate.example.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "block_hash": "732f57eefc4dbfb015c9988d9943c47273d25fbe039220d53f311b307609c83f", - "block_id": "856f6b08f338164df7422f66337c8ce916b6b0301fcaa09de06c61cfb79e2a45", - "block_height": 123, - "consensus_hash": "33dffda027e2ca3aaf278855c59a8a0b2d2dd51f", - "fees": 1, - "miner_signature": "004b2878d7639060c4d183b1338447c06ceb0ad55424856c550a4c988401fdf8cf1fe6e8c05cc0e3ff8d4383590bf700cb5dd1a8bb3269f7125f6e0704b66eade8", - "parent_block_id": "3ac36fc1acfc86ba80ea27cd26017c675f75bc07fb042814b72e74cd7d331503", - "signer_signature": [ - 
"00d4e08331db614d18d7b5af53cf9bc03add9c7a2dcb6f0448721de7ea98f662cf7dc43ee73e14d18dfae3d3d349ff67e0fd773a446fb8c949c93ae4676f4d34bc", - "01619c6e69bad5b43e11bae3eb4d4626e5cf19d595923b0b5d2053e8883a266b41315fdaefd1eca03c5c0580b0f7fd28053c3f34eb0a12220b61392d342f5afb0d", - "0078fa352e0e4d2d94b25d4070ae24a819f268b2260a1e4d0d867415dbdc39e2cf75e57de69375794073e22a75873a5e1ca33ed96eadd8086415e934f697b78fdb", - "00b8d9b0d0cdfabe3c65237801e714185777f60507c773fcd2a72ed00b9d4c59cb5ab96e0e8d545bd562b5ca3de6db1d3b9fccd8f41c3bfa7de3528deb1acd30d6" - ], - "state_index_root": "839b826290027e5b92de415495be7bab2eab2ad4e2f8c371a1a773ae552fedba", - "timestamp": 1758284349, - "transactions": [ - { - "data": { - "anchor_mode": "OnChainOnly", - "auth": { - "Standard": { - "Singlesig": { - "hash_mode": "P2PKH", - "key_encoding": "Compressed", - "nonce": 99, - "signature": "01e29229b386e1f69ffd91e339c878246235ec1cd4771b42a7f45e1ed108643bc9417d43dd96a02c93314ef4cf5bcbcc5642df2e1f5a177333ff983c8719d80661", - "signer": "2965a4e6e4226868fa3ae88b2b9bb9e937d77fba", - "tx_fee": 1 - } - } - }, - "chain_id": 2147483648, - "payload": { - "TokenTransfer": [ - { - "Standard": [ - 26, - [ - 189, - 65, - 200, - 147, - 188, - 192, - 157, - 152, - 224, - 211, - 77, - 255, - 135, - 190, - 175, - 153, - 88, - 51, - 140, - 222 - ] - ] - }, - 1, - "00000000000000000000000000000000000000000000000000000000000000000000" - ] - }, - "post_condition_mode": "Deny", - "post_conditions": [], - "version": "Testnet" - }, - "events": [ - { - "committed": true, - "event_index": 0, - "stx_transfer_event": { - "amount": "1", - "memo": "00000000000000000000000000000000000000000000000000000000000000000000", - "recipient": "ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM", - "sender": "STMPB976WGH6GT7T7BM8PAWVQ7MKFNVZQAXS4BFS" - }, - "txid": "0xf14dd7dec56405fd7dac69c3080fb569fae4c49c591f9ad0e5cf5c797add9005", - "type": "stx_transfer_event" - } - ], - "execution_cost": { - "read_count": 0, - "read_length": 0, - "runtime": 0, - 
"write_count": 0, - "write_length": 0 - }, - "hex": "808000000004002965a4e6e4226868fa3ae88b2b9bb9e937d77fba000000000000006300000000000000010001e29229b386e1f69ffd91e339c878246235ec1cd4771b42a7f45e1ed108643bc9417d43dd96a02c93314ef4cf5bcbcc5642df2e1f5a177333ff983c8719d8066101020000000000051abd41c893bcc09d98e0d34dff87beaf9958338cde000000000000000100000000000000000000000000000000000000000000000000000000000000000000", - "result": { - "Response": { - "committed": true, - "data": { - "Bool": true - } - } - }, - "stx_burned": 0, - "tx_index": 0, - "txid": "f14dd7dec56405fd7dac69c3080fb569fae4c49c591f9ad0e5cf5c797add9005" - } - ], - "tx_merkle_root": "a68e3c76471d9e66b71a14165c4c9a2b980c51efb5b313425cffcef7172d6080", - "valid_merkle_root": true -} \ No newline at end of file diff --git a/docs/rpc/components/schemas/block-simulate.schema.yaml b/docs/rpc/components/schemas/block-simulate.schema.yaml deleted file mode 100644 index c8e9802bb21..00000000000 --- a/docs/rpc/components/schemas/block-simulate.schema.yaml +++ /dev/null @@ -1,78 +0,0 @@ -type: object -properties: - block_hash: - type: string - description: Hash of the block - pattern: "^[0-9a-f]{64}$" - block_id: - type: string - description: Block ID (index block hash) - pattern: "^[0-9a-f]{64}$" - block_height: - type: integer - description: Height of the Stacks block - format: uint64 - consensus_hash: - type: string - description: Consensus hash of the tenure - pattern: "^[0-9a-f]{40}$" - fees: - type: integer - description: total fees for the block - miner_signature: - type: string - description: Uncompressed signature of the miner - pattern: "^[0-9a-f]{130}$" - parent_block_id: - type: string - description: Parent Block ID (index block hash) - pattern: "^[0-9a-f]{64}$" - signer_signature: - type: array - items: - type: string - description: Uncompressed signature of the signer - pattern: "^[0-9a-f]{130}$" - state_index_root: - type: string - pattern: "^[0-9a-f]{64}$" - description: block state index root computed 
from the MARF (got from the original block) - timestamp: - type: integer - tx_merkle_root: - type: string - description: merkle_root of the included transactions - valid_merkle_root: - type: boolean - description: does the merkle_root matches the chain block and the simulated one? - transactions: - type: array - items: - type: object - properties: - data: - type: object - description: JSON representation of the transaction payload - events: - type: array - items: - type: object - description: JSON representation of the transaction events - execution_cost: - type: object - description: costs accounting for the transaction - hex: - type: string - description: hexadecimal representation of the transaction body - result: - type: object - description: Clarity value representing the transaction result - stx_burned: - type: integer - description: number of burned stx - tx_index: - type: integer - description: index of the transaction in the array of transactions - txid: - type: string - description: transaction id \ No newline at end of file diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 64a048f03a6..b24645c8de7 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -159,8 +159,8 @@ components: $ref: ./components/schemas/get-stacker-set.schema.yaml TenureBlocks: $ref: ./components/schemas/tenure-blocks.schema.yaml - BlockSimulate: - $ref: ./components/schemas/block-simulate.schema.yaml + BlockReplay: + $ref: ./components/schemas/block-replay.schema.yaml paths: /v2/transactions: @@ -2200,15 +2200,15 @@ paths: "500": $ref: "#/components/responses/InternalServerError" - /v3/blocks/simulate/{block_id}: + /v3/blocks/replay/{block_id}: get: - summary: Simulate mining of a block and returns its content + summary: Replay mining of a block and returns its content tags: - Blocks security: [] - operationId: blockSimulate + operationId: blockReplay description: | - Simulate the mining of a block (no data is written in the MARF) and returns its content. 
+ Replay the mining of a block (no data is written in the MARF) and returns its content. parameters: - name: block_id in: path @@ -2219,13 +2219,13 @@ paths: pattern: "^[0-9a-f]{64}$" responses: "200": - description: Content of the simulated block + description: Content of the replayed block content: application/json: schema: - $ref: "#/components/schemas/BlockSimulate" + $ref: "#/components/schemas/BlockReplay" example: - $ref: "./components/examples/block-simulate.example.json" + $ref: "./components/examples/block-replay.example.json" "400": $ref: "#/components/responses/BadRequest" "404": diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blockreplay.rs similarity index 87% rename from stackslib/src/net/api/blocksimulate.rs rename to stackslib/src/net/api/blockreplay.rs index 09fe6bd0f78..01d421270c6 100644 --- a/stackslib/src/net/api/blocksimulate.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -38,12 +38,12 @@ use crate::net::httpcore::{RPCRequestHandler, StacksHttpResponse}; use crate::net::{Error as NetError, StacksHttpRequest, StacksNodeState}; #[derive(Clone)] -pub struct RPCNakamotoBlockSimulateRequestHandler { +pub struct RPCNakamotoBlockReplayRequestHandler { pub block_id: Option, pub auth: Option, } -impl RPCNakamotoBlockSimulateRequestHandler { +impl RPCNakamotoBlockReplayRequestHandler { pub fn new(auth: Option) -> Self { Self { block_id: None, @@ -55,7 +55,7 @@ impl RPCNakamotoBlockSimulateRequestHandler { &self, sortdb: &SortitionDB, chainstate: &mut StacksChainState, - ) -> Result { + ) -> Result { let Some(block_id) = &self.block_id else { return Err(ChainError::InvalidStacksBlock("block_id is None".into())); }; @@ -179,7 +179,7 @@ impl RPCNakamotoBlockSimulateRequestHandler { block_fees += tx.get_tx_fee() as u128; } - let simulated_block = builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); + let replayed_block = builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); tenure_tx.rollback_block(); @@ 
-188,7 +188,7 @@ impl RPCNakamotoBlockSimulateRequestHandler { let tx_merkle_root = block.header.tx_merkle_root.clone(); - let mut simulated_block = RPCSimulatedBlock { + let mut replayed_block = RPCReplayedBlock { block_id: block_id.clone(), block_hash, block_height, @@ -201,7 +201,7 @@ impl RPCNakamotoBlockSimulateRequestHandler { miner_signature: block.header.miner_signature, signer_signature: block.header.signer_signature, transactions: vec![], - valid_merkle_root: tx_merkle_root == simulated_block.header.tx_merkle_root, + valid_merkle_root: tx_merkle_root == replayed_block.header.tx_merkle_root, }; for receipt in txs_receipts { @@ -220,7 +220,7 @@ impl RPCNakamotoBlockSimulateRequestHandler { TransactionOrigin::Burn(_) => None, }; let txid = receipt.transaction.txid(); - let transaction = RPCSimulatedBlockTransaction { + let transaction = RPCReplayedBlockTransaction { txid, tx_index: receipt.tx_index, data: transaction_data, @@ -230,15 +230,15 @@ impl RPCNakamotoBlockSimulateRequestHandler { execution_cost: receipt.execution_cost, events, }; - simulated_block.transactions.push(transaction); + replayed_block.transactions.push(transaction); } - Ok(simulated_block) + Ok(replayed_block) } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct RPCSimulatedBlockTransaction { +pub struct RPCReplayedBlockTransaction { pub txid: Txid, pub tx_index: u32, pub data: Option, @@ -250,7 +250,7 @@ pub struct RPCSimulatedBlockTransaction { } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct RPCSimulatedBlock { +pub struct RPCReplayedBlock { pub block_id: StacksBlockId, pub block_hash: BlockHeaderHash, pub block_height: u64, @@ -262,22 +262,22 @@ pub struct RPCSimulatedBlock { pub timestamp: u64, pub miner_signature: MessageSignature, pub signer_signature: Vec, - pub transactions: Vec, + pub transactions: Vec, pub valid_merkle_root: bool, } /// Decode the HTTP request -impl HttpRequest for RPCNakamotoBlockSimulateRequestHandler { +impl 
HttpRequest for RPCNakamotoBlockReplayRequestHandler { fn verb(&self) -> &'static str { "GET" } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/blocks/simulate/(?P[0-9a-f]{64})$"#).unwrap() + Regex::new(r#"^/v3/blocks/replay/(?P[0-9a-f]{64})$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v3/blocks/simulate/:block_id" + "/v3/blocks/replay/:block_id" } /// Try to decode this request. @@ -289,7 +289,7 @@ impl HttpRequest for RPCNakamotoBlockSimulateRequestHandler { query: Option<&str>, _body: &[u8], ) -> Result { - // If no authorization is set, then the block simulation endpoint is not enabled + // If no authorization is set, then the block replay endpoint is not enabled let Some(password) = &self.auth else { return Err(Error::Http(400, "Bad Request.".into())); }; @@ -321,7 +321,7 @@ impl HttpRequest for RPCNakamotoBlockSimulateRequestHandler { } } -impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { +impl RPCRequestHandler for RPCNakamotoBlockReplayRequestHandler { /// Reset internal state fn restart(&mut self) { self.block_id = None; @@ -339,14 +339,14 @@ impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { .take() .ok_or(NetError::SendError("Missing `block_id`".into()))?; - let simulated_block_res = + let replayed_block_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { self.block_replay(sortdb, chainstate) }); // start loading up the block - let simulated_block = match simulated_block_res { - Ok(simulated_block) => simulated_block, + let replayed_block = match replayed_block_res { + Ok(replayed_block) => replayed_block, Err(ChainError::NoSuchBlockError) => { return StacksHttpResponse::new_error( &preamble, @@ -366,18 +366,18 @@ impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { }; let preamble = HttpResponsePreamble::ok_json(&preamble); - let body = HttpResponseContents::try_from_json(&simulated_block)?; + let body = HttpResponseContents::try_from_json(&replayed_block)?; 
Ok((preamble, body)) } } impl StacksHttpRequest { - /// Make a new block_simulate request to this endpoint - pub fn new_block_simulate(host: PeerHost, block_id: &StacksBlockId) -> StacksHttpRequest { + /// Make a new block_replay request to this endpoint + pub fn new_block_replay(host: PeerHost, block_id: &StacksBlockId) -> StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v3/blocks/simulate/{block_id}"), + format!("/v3/blocks/replay/{block_id}"), HttpRequestContents::new(), ) .expect("FATAL: failed to construct request from infallible data") @@ -385,7 +385,7 @@ impl StacksHttpRequest { } /// Decode the HTTP response -impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { +impl HttpResponse for RPCNakamotoBlockReplayRequestHandler { /// Decode this response from a byte stream. This is called by the client to decode this /// message fn try_parse_response( @@ -393,17 +393,17 @@ impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let rpc_simulated_block: RPCSimulatedBlock = parse_json(preamble, body)?; - Ok(HttpResponsePayload::try_from_json(rpc_simulated_block)?) + let rpc_replayed_block: RPCReplayedBlock = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(rpc_replayed_block)?) 
} } impl StacksHttpResponse { - pub fn decode_simulated_block(self) -> Result { + pub fn decode_replayed_block(self) -> Result { let contents = self.get_http_payload_ok()?; let response_json: serde_json::Value = contents.try_into()?; - let simulated_block: RPCSimulatedBlock = serde_json::from_value(response_json) + let replayed_block: RPCReplayedBlock = serde_json::from_value(response_json) .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; - Ok(simulated_block) + Ok(replayed_block) } } diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 0223be3115f..a5777a751d9 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -17,7 +17,7 @@ use crate::net::http::Error; use crate::net::httpcore::StacksHttp; use crate::net::Error as NetError; -pub mod blocksimulate; +pub mod blockreplay; pub mod callreadonly; pub mod fastcallreadonly; pub mod get_tenures_fork_info; @@ -75,7 +75,7 @@ impl StacksHttp { /// Register all RPC methods. /// Put your new RPC method handlers here. 
pub fn register_rpc_methods(&mut self) { - self.register_rpc_endpoint(blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new( + self.register_rpc_endpoint(blockreplay::RPCNakamotoBlockReplayRequestHandler::new( self.auth_token.clone(), )); self.register_rpc_endpoint(callreadonly::RPCCallReadOnlyRequestHandler::new( diff --git a/stackslib/src/net/api/tests/blocksimulate.rs b/stackslib/src/net/api/tests/blockreplay.rs similarity index 89% rename from stackslib/src/net/api/tests/blocksimulate.rs rename to stackslib/src/net/api/tests/blockreplay.rs index 0735aec3ad1..3e16abc95a8 100644 --- a/stackslib/src/net/api/tests/blocksimulate.rs +++ b/stackslib/src/net/api/tests/blockreplay.rs @@ -19,7 +19,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use stacks_common::types::chainstate::StacksBlockId; use crate::chainstate::stacks::Error as ChainError; -use crate::net::api::blocksimulate; +use crate::net::api::blockreplay; use crate::net::api::tests::TestRPC; use crate::net::connection::ConnectionOptions; use crate::net::httpcore::{StacksHttp, StacksHttpRequest}; @@ -32,7 +32,7 @@ fn test_try_parse_request() { let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); let mut request = - StacksHttpRequest::new_block_simulate(addr.into(), &StacksBlockId([0x01; 32])); + StacksHttpRequest::new_block_replay(addr.into(), &StacksBlockId([0x01; 32])); // add the authorization header request.add_header("authorization".into(), "password".into()); @@ -44,7 +44,7 @@ fn test_try_parse_request() { let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); let mut handler = - blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new(Some("password".into())); + blockreplay::RPCNakamotoBlockReplayRequestHandler::new(Some("password".into())); let mut parsed_request = http .handle_try_parse_request( @@ -67,7 +67,7 @@ fn test_try_parse_request() { #[test] fn test_block_reply_errors() { let mut handler = - 
blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new(Some("password".into())); + blockreplay::RPCNakamotoBlockReplayRequestHandler::new(Some("password".into())); let test_observer = TestEventObserver::new(); let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); @@ -101,21 +101,21 @@ fn test_try_make_response() { // query existing, non-empty Nakamoto block let mut request = - StacksHttpRequest::new_block_simulate(addr.clone().into(), &rpc_test.canonical_tip); + StacksHttpRequest::new_block_replay(addr.clone().into(), &rpc_test.canonical_tip); // add the authorization header request.add_header("authorization".into(), "password".into()); requests.push(request); // query non-existent block let mut request = - StacksHttpRequest::new_block_simulate(addr.clone().into(), &StacksBlockId([0x01; 32])); + StacksHttpRequest::new_block_replay(addr.clone().into(), &StacksBlockId([0x01; 32])); // add the authorization header request.add_header("authorization".into(), "password".into()); requests.push(request); // unauthenticated request let request = - StacksHttpRequest::new_block_simulate(addr.clone().into(), &StacksBlockId([0x00; 32])); + StacksHttpRequest::new_block_replay(addr.clone().into(), &StacksBlockId([0x00; 32])); requests.push(request); let mut responses = rpc_test.run(requests); @@ -128,7 +128,7 @@ fn test_try_make_response() { std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() ); - let resp = response.decode_simulated_block().unwrap(); + let resp = response.decode_replayed_block().unwrap(); let tip_block = test_observer.get_blocks().last().unwrap().clone(); diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 533422d86a5..97a60a6b43a 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -59,7 +59,7 @@ use crate::net::{ UrlString, }; -mod blocksimulate; +mod blockreplay; mod callreadonly; mod fastcallreadonly; mod get_tenures_fork_info; From 
52f287a4012ddbddcc2f82a851ec2bc06cd606d3 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 24 Sep 2025 07:47:18 +0200 Subject: [PATCH 49/56] added field docs --- stackslib/src/net/api/blockreplay.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/stackslib/src/net/api/blockreplay.rs b/stackslib/src/net/api/blockreplay.rs index 01d421270c6..4df70e80e19 100644 --- a/stackslib/src/net/api/blockreplay.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -239,30 +239,51 @@ impl RPCNakamotoBlockReplayRequestHandler { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RPCReplayedBlockTransaction { + /// transaction id pub txid: Txid, + /// index of transaction in the block pub tx_index: u32, + /// body (headers + payload) of transaction pub data: Option, + /// hex representation of the transaction body pub hex: String, + /// result of transaction execution (clarity value) pub result: Value, + /// amount of burned stx pub stx_burned: u128, + /// execution cost infos pub execution_cost: ExecutionCost, + /// generated events pub events: Vec, } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RPCReplayedBlock { + /// block id (index_block_hash) pub block_id: StacksBlockId, + /// block hash pub block_hash: BlockHeaderHash, + /// height of the block pub block_height: u64, + /// index_block_hash of the parent pub parent_block_id: StacksBlockId, + /// consensus hash of the tenure containing the block pub consensus_hash: ConsensusHash, + /// total fees for the transactions in the block pub fees: u128, + /// merkle tree root hash of the included transactions pub tx_merkle_root: Sha512Trunc256Sum, + /// state index of the MARF pub state_index_root: TrieHash, + /// block timestamp pub timestamp: u64, + /// signature of the miner pub miner_signature: MessageSignature, + /// list of signers signatures pub signer_signature: Vec, + /// the list of block transactions pub transactions: Vec, + /// check if the computed 
merkle tree root hash matches the one from the original block pub valid_merkle_root: bool, } From 7d998b122bcb4f025cd172df2c1f23f19d05b13d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 24 Sep 2025 08:00:28 +0200 Subject: [PATCH 50/56] added from_receipt --- stackslib/src/net/api/blockreplay.rs | 63 ++++++++++++---------- stackslib/src/net/api/tests/blockreplay.rs | 3 +- 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/stackslib/src/net/api/blockreplay.rs b/stackslib/src/net/api/blockreplay.rs index 4df70e80e19..93f286fcc11 100644 --- a/stackslib/src/net/api/blockreplay.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -27,7 +27,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::events::TransactionOrigin; +use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; use crate::chainstate::stacks::{Error as ChainError, StacksTransaction, TransactionPayload}; use crate::net::http::{ @@ -204,32 +204,8 @@ impl RPCNakamotoBlockReplayRequestHandler { valid_merkle_root: tx_merkle_root == replayed_block.header.tx_merkle_root, }; - for receipt in txs_receipts { - let events = receipt - .events - .iter() - .enumerate() - .map(|(event_index, event)| { - event - .json_serialize(event_index, &receipt.transaction.txid(), true) - .unwrap() - }) - .collect(); - let transaction_data = match &receipt.transaction { - TransactionOrigin::Stacks(stacks) => Some(stacks.clone()), - TransactionOrigin::Burn(_) => None, - }; - let txid = receipt.transaction.txid(); - let transaction = RPCReplayedBlockTransaction { - txid, - tx_index: receipt.tx_index, - data: transaction_data, - hex: receipt.transaction.serialize_to_dbstring(), - 
result: receipt.result, - stx_burned: receipt.stx_burned, - execution_cost: receipt.execution_cost, - events, - }; + for receipt in &txs_receipts { + let transaction = RPCReplayedBlockTransaction::from_receipt(receipt); replayed_block.transactions.push(transaction); } @@ -257,6 +233,39 @@ pub struct RPCReplayedBlockTransaction { pub events: Vec, } +impl RPCReplayedBlockTransaction { + pub fn from_receipt(receipt: &StacksTransactionReceipt) -> Self { + let events = receipt + .events + .iter() + .enumerate() + .map(|(event_index, event)| { + event + .json_serialize(event_index, &receipt.transaction.txid(), true) + .unwrap() + }) + .collect(); + + let transaction_data = match &receipt.transaction { + TransactionOrigin::Stacks(stacks) => Some(stacks.clone()), + TransactionOrigin::Burn(_) => None, + }; + + let txid = receipt.transaction.txid(); + + Self { + txid, + tx_index: receipt.tx_index, + data: transaction_data, + hex: receipt.transaction.serialize_to_dbstring(), + result: receipt.result.clone(), + stx_burned: receipt.stx_burned, + execution_cost: receipt.execution_cost.clone(), + events, + } + } +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RPCReplayedBlock { /// block id (index_block_hash) diff --git a/stackslib/src/net/api/tests/blockreplay.rs b/stackslib/src/net/api/tests/blockreplay.rs index 3e16abc95a8..31e727727b1 100644 --- a/stackslib/src/net/api/tests/blockreplay.rs +++ b/stackslib/src/net/api/tests/blockreplay.rs @@ -31,8 +31,7 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let mut request = - StacksHttpRequest::new_block_replay(addr.into(), &StacksBlockId([0x01; 32])); + let mut request = StacksHttpRequest::new_block_replay(addr.into(), &StacksBlockId([0x01; 32])); // add the authorization header request.add_header("authorization".into(), "password".into()); From 
3e09e5a89e456ea9d5319e7cd646b9d46b03af8f Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 24 Sep 2025 08:18:01 +0200 Subject: [PATCH 51/56] added RPCReplayedBlock::from_block --- stackslib/src/net/api/blockreplay.rs | 55 ++++++++++++++++++---------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/stackslib/src/net/api/blockreplay.rs b/stackslib/src/net/api/blockreplay.rs index 93f286fcc11..c7607b108dd 100644 --- a/stackslib/src/net/api/blockreplay.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -183,33 +183,20 @@ impl RPCNakamotoBlockReplayRequestHandler { tenure_tx.rollback_block(); - let block_hash = block.header.block_hash(); - let block_height = block.header.chain_length; - let tx_merkle_root = block.header.tx_merkle_root.clone(); - let mut replayed_block = RPCReplayedBlock { - block_id: block_id.clone(), - block_hash, - block_height, - parent_block_id, - consensus_hash: tenure_id, - fees: block_fees, - tx_merkle_root: block.header.tx_merkle_root, - state_index_root: block.header.state_index_root, - timestamp: block.header.timestamp, - miner_signature: block.header.miner_signature, - signer_signature: block.header.signer_signature, - transactions: vec![], - valid_merkle_root: tx_merkle_root == replayed_block.header.tx_merkle_root, - }; + let mut rpc_replayed_block = + RPCReplayedBlock::from_block(block, block_fees, tenure_id, parent_block_id); for receipt in &txs_receipts { let transaction = RPCReplayedBlockTransaction::from_receipt(receipt); - replayed_block.transactions.push(transaction); + rpc_replayed_block.transactions.push(transaction); } - Ok(replayed_block) + rpc_replayed_block.valid_merkle_root = + tx_merkle_root == replayed_block.header.tx_merkle_root; + + Ok(rpc_replayed_block) } } @@ -296,6 +283,34 @@ pub struct RPCReplayedBlock { pub valid_merkle_root: bool, } +impl RPCReplayedBlock { + pub fn from_block( + block: NakamotoBlock, + block_fees: u128, + tenure_id: ConsensusHash, + parent_block_id: StacksBlockId, + ) -> Self { 
+ let block_id = block.block_id(); + let block_hash = block.header.block_hash(); + + Self { + block_id, + block_hash, + block_height: block.header.chain_length, + parent_block_id, + consensus_hash: tenure_id, + fees: block_fees, + tx_merkle_root: block.header.tx_merkle_root, + state_index_root: block.header.state_index_root, + timestamp: block.header.timestamp, + miner_signature: block.header.miner_signature, + signer_signature: block.header.signer_signature, + transactions: vec![], + valid_merkle_root: false, + } + } +} + /// Decode the HTTP request impl HttpRequest for RPCNakamotoBlockReplayRequestHandler { fn verb(&self) -> &'static str { From d2a6026c2ee7ca233485e8008126d38fb6c38b4b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 24 Sep 2025 08:27:50 +0200 Subject: [PATCH 52/56] fixed error message --- stackslib/src/net/api/blockreplay.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/blockreplay.rs b/stackslib/src/net/api/blockreplay.rs index c7607b108dd..b57bbff1976 100644 --- a/stackslib/src/net/api/blockreplay.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -357,9 +357,8 @@ impl HttpRequest for RPCNakamotoBlockReplayRequestHandler { })? 
.as_str(); - let block_id = StacksBlockId::from_hex(block_id_str).map_err(|_| { - Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) - })?; + let block_id = StacksBlockId::from_hex(block_id_str) + .map_err(|_| Error::DecodeError("Invalid path: unparseable block id".to_string()))?; self.block_id = Some(block_id); Ok(HttpRequestContents::new().query_string(query)) From 848e429b2c86db6889c84b6a0bf98914ee404170 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 24 Sep 2025 10:46:26 +0100 Subject: [PATCH 53/56] move stackslib::cli to stacks-inspect --- Cargo.lock | 2 + contrib/stacks-inspect/Cargo.toml | 1 + contrib/stacks-inspect/README.md | 8 +- .../stacks-inspect/src/lib.rs | 136 ++++++++++-------- contrib/stacks-inspect/src/main.rs | 21 +-- stacks-node/Cargo.toml | 1 + stacks-node/src/tests/neon_integrations.rs | 4 +- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/lib.rs | 3 - 9 files changed, 101 insertions(+), 77 deletions(-) rename stackslib/src/cli.rs => contrib/stacks-inspect/src/lib.rs (92%) diff --git a/Cargo.lock b/Cargo.lock index 1ef9cc74d59..31652204374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3181,6 +3181,7 @@ dependencies = [ "clarity 0.0.1", "libstackerdb 0.0.1", "mutants", + "regex", "rusqlite", "serde_json", "slog", @@ -3222,6 +3223,7 @@ dependencies = [ "slog", "stacks-common 0.0.1", "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "stacks-inspect", "stacks-signer 0.0.1", "stacks-signer 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", "stackslib 0.0.1", diff --git a/contrib/stacks-inspect/Cargo.toml b/contrib/stacks-inspect/Cargo.toml index 354c1fdc549..785b9993981 100644 --- a/contrib/stacks-inspect/Cargo.toml +++ b/contrib/stacks-inspect/Cargo.toml @@ -8,6 +8,7 @@ stackslib = { package = "stackslib", path = "../../stackslib", default-features clarity = { path = 
"../../clarity", default-features = false } libstackerdb = { path = "../../libstackerdb", default-features = false } stacks-common = { path = "../../stacks-common", default-features = false } +regex = { version = "1", default-features = false } rusqlite = { workspace = true } serde_json = { workspace = true } slog = { workspace = true } diff --git a/contrib/stacks-inspect/README.md b/contrib/stacks-inspect/README.md index b3ea3b52b95..3929c57b16c 100644 --- a/contrib/stacks-inspect/README.md +++ b/contrib/stacks-inspect/README.md @@ -17,18 +17,18 @@ cargo build -p stacks-inspect Basic usage: ```bash # Show version -./target/debug/stacks-inspect --version +cargo run -p stacks-inspect -- --version # Example: decode a bitcoin header from file -./target/debug/stacks-inspect decode-bitcoin-header +cargo run -p stacks-inspect -- decode-bitcoin-header # Example: analyze anti-MEV behavior over a height range -./target/debug/stacks-inspect analyze-sortition-mev [miner advantage ...] +cargo run -p stacks-inspect -- analyze-sortition-mev [miner advantage ...] ``` For detailed commands and flags, run: ```bash -./target/debug/stacks-inspect --help +cargo run -p stacks-inspect -- --help ``` Notes: diff --git a/stackslib/src/cli.rs b/contrib/stacks-inspect/src/lib.rs similarity index 92% rename from stackslib/src/cli.rs rename to contrib/stacks-inspect/src/lib.rs index ea4261c85c6..444a016dbd6 100644 --- a/stackslib/src/cli.rs +++ b/contrib/stacks-inspect/src/lib.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,41 +13,42 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! 
Subcommands used by `stacks-inspect` binary - use std::path::PathBuf; use std::time::Instant; use std::{fs, process}; use clarity::types::chainstate::SortitionId; -use clarity::util::hash::{to_hex, Sha512Trunc256Sum}; -use db::blocks::DummyEventDispatcher; -use db::ChainstateTx; +use clarity::util::hash::{Sha512Trunc256Sum, to_hex}; use regex::Regex; use rusqlite::{Connection, OpenFlags}; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; - -use crate::burnchains::Burnchain; -use crate::chainstate::burn::db::sortdb::{ - get_ancestor_sort_id, SortitionDB, SortitionHandleContext, +use stacks_common::{debug, info, warn}; +use stackslib::burnchains::Burnchain; +use stackslib::chainstate::burn::ConsensusHash; +use stackslib::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionHandleContext, get_ancestor_sort_id, +}; +use stackslib::chainstate::coordinator::OnChainRewardSetProvider; +use stackslib::chainstate::nakamoto::miner::{ + BlockMetadata, NakamotoBlockBuilder, NakamotoTenureInfo, +}; +use stackslib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stackslib::chainstate::stacks::db::blocks::DummyEventDispatcher; +use stackslib::chainstate::stacks::db::{ + ChainstateTx, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, }; -use crate::chainstate::burn::ConsensusHash; -use crate::chainstate::coordinator::OnChainRewardSetProvider; -use crate::chainstate::nakamoto::miner::{BlockMetadata, NakamotoBlockBuilder, NakamotoTenureInfo}; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; -use crate::chainstate::stacks::miner::*; -use crate::chainstate::stacks::{Error as ChainstateError, *}; -use crate::clarity_vm::clarity::ClarityInstance; -use crate::clarity_vm::database::GetTenureStartId; 
-use crate::config::{Config, ConfigFile, DEFAULT_MAINNET_CONFIG}; -use crate::core::*; -use crate::cost_estimates::metrics::UnitMetric; -use crate::cost_estimates::UnitEstimator; -use crate::util_lib::db::IndexDBTx; +use stackslib::chainstate::stacks::miner::*; +use stackslib::chainstate::stacks::{Error as ChainstateError, *}; +use stackslib::clarity_vm::clarity::ClarityInstance; +use stackslib::clarity_vm::database::GetTenureStartId; +use stackslib::config::{Config, ConfigFile, DEFAULT_MAINNET_CONFIG}; +use stackslib::core::*; +use stackslib::cost_estimates::UnitEstimator; +use stackslib::cost_estimates::metrics::UnitMetric; +use stackslib::util_lib::db::IndexDBTx; /// Options common to many `stacks-inspect` subcommands /// Returned by `process_common_opts()` @@ -141,13 +141,13 @@ pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { let query = match mode { Some("prefix") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", - argv[3] - ), + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", + argv[3] + ), Some("first") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", - argv[3] - ), + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", + argv[3] + ), Some("range") => { let arg4 = argv[3] .parse::() @@ -155,7 +155,9 @@ pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { let arg5 = argv[4].parse::().expect(" not a valid u64"); let start = arg4.saturating_sub(1); let blocks = arg5.saturating_sub(arg4); - format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") + format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}" + ) } Some("index-range") => { let start = argv[3] @@ -163,12 +165,14 @@ pub fn 
command_replay_block(argv: &[String], conf: Option<&Config>) { .expect(" not a valid u64"); let end = argv[4].parse::().expect(" not a valid u64"); let blocks = end.saturating_sub(start); - format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}" + ) } Some("last") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", - argv[3] - ), + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", + argv[3] + ), Some(_) => print_help_and_exit(), // Default to ALL blocks None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), @@ -229,13 +233,13 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { let query = match mode { Some("prefix") => format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", - argv[3] - ), + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", + argv[3] + ), Some("first") => format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", - argv[3] - ), + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", + argv[3] + ), Some("range") => { let arg4 = argv[3] .parse::() @@ -243,7 +247,9 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { let arg5 = argv[4].parse::().expect(" not a valid u64"); let start = arg4.saturating_sub(1); let blocks = arg5.saturating_sub(arg4); - format!("SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") + format!( + "SELECT index_block_hash FROM nakamoto_staging_blocks 
WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}" + ) } Some("index-range") => { let start = argv[3] @@ -251,12 +257,14 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { .expect(" not a valid u64"); let end = argv[4].parse::().expect(" not a valid u64"); let blocks = end.saturating_sub(start); - format!("SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + format!( + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}" + ) } Some("last") => format!( - "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", - argv[3] - ), + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", + argv[3] + ), Some(_) => print_help_and_exit(), // Default to ALL blocks None => "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0".into(), @@ -386,8 +394,12 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { let n = &argv[0]; eprintln!("Usage: {n} [min-fee [max-time]]"); eprintln!(""); - eprintln!("Given a , try to ''mine'' an anchored block. This invokes the miner block"); - eprintln!("assembly, but does not attempt to broadcast a block commit. This is useful for determining"); + eprintln!( + "Given a , try to ''mine'' an anchored block. This invokes the miner block" + ); + eprintln!( + "assembly, but does not attempt to broadcast a block commit. This is useful for determining" + ); eprintln!("what transactions a given chain state would include in an anchor block,"); eprintln!("or otherwise simulating a miner."); process::exit(1); @@ -523,11 +535,11 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { let elapsed = start.elapsed(); let summary = format!( "block @ height = {h} off of {pid} ({pch}/{pbh}) in {t}ms. 
Min-fee: {min_fee}, Max-time: {max_time}", - h=parent_stacks_header.stacks_block_height + 1, - pid=&parent_stacks_header.index_block_hash(), - pch=&parent_stacks_header.consensus_hash, - pbh=&parent_stacks_header.anchored_header.block_hash(), - t=elapsed.as_millis(), + h = parent_stacks_header.stacks_block_height + 1, + pid = &parent_stacks_header.index_block_hash(), + pch = &parent_stacks_header.consensus_hash, + pbh = &parent_stacks_header.anchored_header.block_hash(), + t = elapsed.as_millis(), ); let code = match result { @@ -770,7 +782,9 @@ fn replay_block( ), None => { // shouldn't happen - panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); + panic!( + "CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block" + ); } }; @@ -835,8 +849,10 @@ fn replay_block( ) { Ok((receipt, _, _)) => { if receipt.anchored_block_cost != cost { - println!("Failed processing block! block = {block_id}. Unexpected cost. expected = {cost}, evaluated = {}", - receipt.anchored_block_cost); + println!( + "Failed processing block! block = {block_id}. Unexpected cost. expected = {cost}, evaluated = {}", + receipt.anchored_block_cost + ); process::exit(1); } @@ -1153,7 +1169,9 @@ fn replay_block_nakamoto( // check the cost let evaluated_cost = receipt.anchored_block_cost.clone(); if evaluated_cost != expected_cost { - println!("Failed processing block! block = {block_id}. Unexpected cost. expected = {expected_cost}, evaluated = {evaluated_cost}"); + println!( + "Failed processing block! block = {block_id}. Unexpected cost. 
expected = {expected_cost}, evaluated = {evaluated_cost}" + ); process::exit(1); } } @@ -1197,7 +1215,7 @@ pub mod test { "stacks-inspect try-mine --config my_config.toml /tmp/chainstate/mainnet", ); let argv_init = argv.clone(); - let opts = drain_common_opts(&mut argv, 0); + let _opts = drain_common_opts(&mut argv, 0); let opts = drain_common_opts(&mut argv, 1); assert_eq!(argv, argv_init); diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index 9c43923920d..5743a8fc586 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -19,6 +19,11 @@ extern crate stacks_common; use clarity::consts::CHAIN_ID_MAINNET; use clarity::types::StacksEpochId; use clarity::types::chainstate::StacksPrivateKey; +use clarity_cli::DEFAULT_CLI_EPOCH; +use stacks_inspect::{ + command_contract_hash, command_replay_block, command_replay_block_nakamoto, + command_replay_mock_mining, command_try_mine, drain_common_opts, +}; use stackslib::chainstate::stacks::miner::BlockBuilderSettings; use stackslib::chainstate::stacks::{ CoinbasePayload, StacksBlock, StacksBlockBuilder, StacksMicroblock, StacksTransaction, @@ -80,6 +85,7 @@ use stackslib::chainstate::stacks::index::marf::{MARF, MARFOpenOpts, MarfConnect use stackslib::clarity::vm::ClarityVersion; use stackslib::clarity::vm::costs::ExecutionCost; use stackslib::clarity::vm::types::StacksAddressExtensions; +use stackslib::clarity_cli; use stackslib::core::MemPoolDB; use stackslib::cost_estimates::UnitEstimator; use stackslib::cost_estimates::metrics::UnitMetric; @@ -91,7 +97,6 @@ use stackslib::net::relay::Relayer; use stackslib::net::{GetNakamotoInvData, HandshakeData, StacksMessage, StacksMessageType}; use stackslib::util_lib::db::sqlite_open; use stackslib::util_lib::strings::UrlString; -use stackslib::{clarity_cli, cli}; struct P2PSession { pub local_peer: LocalPeer, @@ -301,7 +306,7 @@ fn main() { process::exit(1); } - let common_opts = cli::drain_common_opts(&mut 
argv, 1); + let common_opts = drain_common_opts(&mut argv, 1); if argv[1] == "--version" { println!( @@ -789,7 +794,7 @@ check if the associated microblocks can be downloaded } if argv[1] == "try-mine" { - cli::command_try_mine(&argv[1..], common_opts.config.as_ref()); + command_try_mine(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -896,7 +901,7 @@ check if the associated microblocks can be downloaded } let program: String = fs::read_to_string(&argv[2]) .unwrap_or_else(|_| panic!("Error reading file: {}", argv[2])); - let clarity_version = ClarityVersion::default_for_epoch(clarity_cli::DEFAULT_CLI_EPOCH); + let clarity_version = ClarityVersion::default_for_epoch(DEFAULT_CLI_EPOCH); match clarity_cli::vm_execute(&program, clarity_version) { Ok(Some(result)) => println!("{result}"), Ok(None) => println!(), @@ -1582,17 +1587,17 @@ check if the associated microblocks can be downloaded } if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..], common_opts.config.as_ref()); + command_replay_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-naka-block" { - cli::command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); + command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); + command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -1601,7 +1606,7 @@ check if the associated microblocks can be downloaded } if argv[1] == "contract-hash" { - cli::command_contract_hash(&argv[1..], common_opts.config.as_ref()); + command_contract_hash(&argv[1..], common_opts.config.as_ref()); process::exit(0); } diff --git a/stacks-node/Cargo.toml b/stacks-node/Cargo.toml index d2b8e916b99..5f53b93e6bb 100644 --- a/stacks-node/Cargo.toml +++ b/stacks-node/Cargo.toml @@ -48,6 +48,7 @@ clarity = { path = "../clarity", features 
= ["default", "testing"]} stacks-common = { path = "../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../stackslib", features = ["default", "testing"] } stacks-signer = { path = "../stacks-signer", features = ["testing"] } +stacks-inspect = { path = "../contrib/stacks-inspect", default-features = false } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } mutants = "0.0.3" diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index c2760a595fc..32a974f3096 100644 --- a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -37,7 +37,6 @@ use stacks::chainstate::stacks::{ StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::{MemPoolWalkStrategy, MemPoolWalkTxTypes}; @@ -80,6 +79,7 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{bytes_to_hex, hex_bytes, to_hex, Hash160}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; +use stacks_inspect; use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _}; use tokio::net::{TcpListener, TcpStream}; @@ -9600,7 +9600,7 @@ fn mock_miner_replay() { let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args, Some(&conf)); + stacks_inspect::command_replay_mock_mining(&args, Some(&conf)); // ---------- Test finished, clean up ---------- diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index de350275be4..52bc37d78a5 100644 --- 
a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4550,7 +4550,7 @@ impl NakamotoChainState { /// Append a Nakamoto Stacks block to the Stacks chain state. /// NOTE: This does _not_ set the block as processed! The caller must do this. - pub(crate) fn append_block<'a>( + pub fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, burn_dbconn: &mut SortitionHandleConn, diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 6561af78114..16e618da101 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -64,9 +64,6 @@ pub mod burnchains; pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; -/// Allow panics in CLI commands -#[allow(clippy::indexing_slicing)] -pub mod cli; pub mod config; pub mod core; pub mod cost_estimates; From ed0a0850bbea5ff382852f32bb9ca5a4249ded1e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 24 Sep 2025 12:51:01 +0200 Subject: [PATCH 54/56] added block-replay schemas --- .../examples/block-replay.example.json | 108 ++++++++++++++++++ .../schemas/block-replay.schema.yaml | 78 +++++++++++++ 2 files changed, 186 insertions(+) create mode 100644 docs/rpc/components/examples/block-replay.example.json create mode 100644 docs/rpc/components/schemas/block-replay.schema.yaml diff --git a/docs/rpc/components/examples/block-replay.example.json b/docs/rpc/components/examples/block-replay.example.json new file mode 100644 index 00000000000..b6c79c99f2d --- /dev/null +++ b/docs/rpc/components/examples/block-replay.example.json @@ -0,0 +1,108 @@ +{ + "block_hash": "732f57eefc4dbfb015c9988d9943c47273d25fbe039220d53f311b307609c83f", + "block_id": "856f6b08f338164df7422f66337c8ce916b6b0301fcaa09de06c61cfb79e2a45", + "block_height": 123, + "consensus_hash": "33dffda027e2ca3aaf278855c59a8a0b2d2dd51f", + "fees": 1, + "miner_signature": 
"004b2878d7639060c4d183b1338447c06ceb0ad55424856c550a4c988401fdf8cf1fe6e8c05cc0e3ff8d4383590bf700cb5dd1a8bb3269f7125f6e0704b66eade8", + "parent_block_id": "3ac36fc1acfc86ba80ea27cd26017c675f75bc07fb042814b72e74cd7d331503", + "signer_signature": [ + "00d4e08331db614d18d7b5af53cf9bc03add9c7a2dcb6f0448721de7ea98f662cf7dc43ee73e14d18dfae3d3d349ff67e0fd773a446fb8c949c93ae4676f4d34bc", + "01619c6e69bad5b43e11bae3eb4d4626e5cf19d595923b0b5d2053e8883a266b41315fdaefd1eca03c5c0580b0f7fd28053c3f34eb0a12220b61392d342f5afb0d", + "0078fa352e0e4d2d94b25d4070ae24a819f268b2260a1e4d0d867415dbdc39e2cf75e57de69375794073e22a75873a5e1ca33ed96eadd8086415e934f697b78fdb", + "00b8d9b0d0cdfabe3c65237801e714185777f60507c773fcd2a72ed00b9d4c59cb5ab96e0e8d545bd562b5ca3de6db1d3b9fccd8f41c3bfa7de3528deb1acd30d6" + ], + "state_index_root": "839b826290027e5b92de415495be7bab2eab2ad4e2f8c371a1a773ae552fedba", + "timestamp": 1758284349, + "transactions": [ + { + "data": { + "anchor_mode": "OnChainOnly", + "auth": { + "Standard": { + "Singlesig": { + "hash_mode": "P2PKH", + "key_encoding": "Compressed", + "nonce": 99, + "signature": "01e29229b386e1f69ffd91e339c878246235ec1cd4771b42a7f45e1ed108643bc9417d43dd96a02c93314ef4cf5bcbcc5642df2e1f5a177333ff983c8719d80661", + "signer": "2965a4e6e4226868fa3ae88b2b9bb9e937d77fba", + "tx_fee": 1 + } + } + }, + "chain_id": 2147483648, + "payload": { + "TokenTransfer": [ + { + "Standard": [ + 26, + [ + 189, + 65, + 200, + 147, + 188, + 192, + 157, + 152, + 224, + 211, + 77, + 255, + 135, + 190, + 175, + 153, + 88, + 51, + 140, + 222 + ] + ] + }, + 1, + "00000000000000000000000000000000000000000000000000000000000000000000" + ] + }, + "post_condition_mode": "Deny", + "post_conditions": [], + "version": "Testnet" + }, + "events": [ + { + "committed": true, + "event_index": 0, + "stx_transfer_event": { + "amount": "1", + "memo": "00000000000000000000000000000000000000000000000000000000000000000000", + "recipient": "ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM", + "sender": 
"STMPB976WGH6GT7T7BM8PAWVQ7MKFNVZQAXS4BFS" + }, + "txid": "0xf14dd7dec56405fd7dac69c3080fb569fae4c49c591f9ad0e5cf5c797add9005", + "type": "stx_transfer_event" + } + ], + "execution_cost": { + "read_count": 0, + "read_length": 0, + "runtime": 0, + "write_count": 0, + "write_length": 0 + }, + "hex": "808000000004002965a4e6e4226868fa3ae88b2b9bb9e937d77fba000000000000006300000000000000010001e29229b386e1f69ffd91e339c878246235ec1cd4771b42a7f45e1ed108643bc9417d43dd96a02c93314ef4cf5bcbcc5642df2e1f5a177333ff983c8719d8066101020000000000051abd41c893bcc09d98e0d34dff87beaf9958338cde000000000000000100000000000000000000000000000000000000000000000000000000000000000000", + "result": { + "Response": { + "committed": true, + "data": { + "Bool": true + } + } + }, + "stx_burned": 0, + "tx_index": 0, + "txid": "f14dd7dec56405fd7dac69c3080fb569fae4c49c591f9ad0e5cf5c797add9005" + } + ], + "tx_merkle_root": "a68e3c76471d9e66b71a14165c4c9a2b980c51efb5b313425cffcef7172d6080", + "valid_merkle_root": true +} \ No newline at end of file diff --git a/docs/rpc/components/schemas/block-replay.schema.yaml b/docs/rpc/components/schemas/block-replay.schema.yaml new file mode 100644 index 00000000000..c8e9802bb21 --- /dev/null +++ b/docs/rpc/components/schemas/block-replay.schema.yaml @@ -0,0 +1,78 @@ +type: object +properties: + block_hash: + type: string + description: Hash of the block + pattern: "^[0-9a-f]{64}$" + block_id: + type: string + description: Block ID (index block hash) + pattern: "^[0-9a-f]{64}$" + block_height: + type: integer + description: Height of the Stacks block + format: uint64 + consensus_hash: + type: string + description: Consensus hash of the tenure + pattern: "^[0-9a-f]{40}$" + fees: + type: integer + description: total fees for the block + miner_signature: + type: string + description: Uncompressed signature of the miner + pattern: "^[0-9a-f]{130}$" + parent_block_id: + type: string + description: Parent Block ID (index block hash) + pattern: "^[0-9a-f]{64}$" + 
signer_signature: + type: array + items: + type: string + description: Uncompressed signature of the signer + pattern: "^[0-9a-f]{130}$" + state_index_root: + type: string + pattern: "^[0-9a-f]{64}$" + description: block state index root computed from the MARF (got from the original block) + timestamp: + type: integer + tx_merkle_root: + type: string + description: merkle_root of the included transactions + valid_merkle_root: + type: boolean + description: does the merkle_root matches the chain block and the simulated one? + transactions: + type: array + items: + type: object + properties: + data: + type: object + description: JSON representation of the transaction payload + events: + type: array + items: + type: object + description: JSON representation of the transaction events + execution_cost: + type: object + description: costs accounting for the transaction + hex: + type: string + description: hexadecimal representation of the transaction body + result: + type: object + description: Clarity value representing the transaction result + stx_burned: + type: integer + description: number of burned stx + tx_index: + type: integer + description: index of the transaction in the array of transactions + txid: + type: string + description: transaction id \ No newline at end of file From 3022273c4ec94808b405ef34f21e9028b43eaed2 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 24 Sep 2025 12:59:22 +0100 Subject: [PATCH 55/56] remove stacks-events --- stacks-node/Cargo.toml | 8 --- stacks-node/src/stacks_events.rs | 94 -------------------------------- 2 files changed, 102 deletions(-) delete mode 100644 stacks-node/src/stacks_events.rs diff --git a/stacks-node/Cargo.toml b/stacks-node/Cargo.toml index d2b8e916b99..dd7bd8af3d7 100644 --- a/stacks-node/Cargo.toml +++ b/stacks-node/Cargo.toml @@ -61,14 +61,6 @@ madhouse = { git = "https://github.com/stacks-network/madhouse-rs.git", tag = "0 proptest = "1.6.*" stdext = "0.3.1" -[[bin]] -name = "stacks-node" -path = 
"src/main.rs" - -[[bin]] -name = "stacks-events" -path = "src/stacks_events.rs" - [features] monitoring_prom = ["stacks/monitoring_prom", "async-h1", "async-std", "http-types"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] diff --git a/stacks-node/src/stacks_events.rs b/stacks-node/src/stacks_events.rs deleted file mode 100644 index d7ec3494668..00000000000 --- a/stacks-node/src/stacks_events.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::io::prelude::*; -use std::io::BufReader; -use std::net::{TcpListener, TcpStream}; - -use chrono::{SecondsFormat, Utc}; -use lazy_static::lazy_static; -use regex::Regex; -use serde_json::{json, Value}; - -const DEFAULT_ADDR: &str = "127.0.0.1:3700"; - -fn main() { - let mut args = pico_args::Arguments::from_env(); - let addr: String = args - .opt_value_from_str("--addr") - .expect("Failed to parse --addr argument") - .unwrap_or(DEFAULT_ADDR.into()); - let help = args - .opt_value_from_str("--help") - .expect("Failed to parse --help argument") - .unwrap_or(false); - - if help { - println!("Usage: stacks-events [--addr=]"); - println!(" --addr= Address to listen on (default: {DEFAULT_ADDR})",); - return; - } - - serve_for_events(&addr); -} - -fn serve_for_events(addr: &String) { - let listener = TcpListener::bind(addr).unwrap(); - eprintln!("Listening on {addr}"); - for stream in listener.incoming() { - let stream = stream.unwrap(); - handle_connection(stream); - } -} - -lazy_static! { - static ref RE_POST: Regex = Regex::new(r"^POST /(.*?) 
HTTP/1.1\r\n$").unwrap(); - static ref RE_CONTENT_LENGTH: Regex = Regex::new(r"^content-length: (\d+)\r\n$").unwrap(); -} - -fn handle_connection(mut stream: TcpStream) { - let mut buf = String::with_capacity(10 * 1024); - let mut reader = BufReader::new(stream.try_clone().unwrap()); - - let mut path = None; - let mut content_length = None; - let payload; - - loop { - buf.clear(); - reader.read_line(&mut buf).unwrap(); - if path.is_none() { - let caps = RE_POST.captures(&buf).unwrap(); - path = Some(caps.get(1).unwrap().as_str().to_string()); - } else if content_length.is_none() { - let caps = RE_CONTENT_LENGTH.captures(&buf); - if let Some(caps) = caps { - content_length = Some(caps.get(1).unwrap().as_str().parse::().unwrap()); - } - } else if buf == "\r\n" { - buf.clear(); - reader - .take(content_length.unwrap()) - .read_to_string(&mut buf) - .unwrap(); - payload = Some(buf.to_owned()); - break; - } - } - - let payload_json: Value = serde_json::from_str(&payload.unwrap()).unwrap(); - let record = json!({ - "ts": Utc::now().to_rfc3339_opts(SecondsFormat::Millis, true), - "path": path.unwrap(), - "payload": payload_json, - }); - println!("{record}"); - - { - let contents = "Thanks!"; - let response = format!( - "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{contents}", - contents.len() - ); - - let _nmb_bytes = stream.write(response.as_bytes()).unwrap(); - stream.flush().unwrap(); - } -} From 3a009b6f9fef7704f2b9f8f971b3f8f38d9cb688 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 24 Sep 2025 14:03:06 +0200 Subject: [PATCH 56/56] fixed block_id parameter parsing --- stackslib/src/net/api/blockreplay.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/blockreplay.rs b/stackslib/src/net/api/blockreplay.rs index b57bbff1976..2d3fe50e44c 100644 --- a/stackslib/src/net/api/blockreplay.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -359,6 +359,7 @@ impl HttpRequest for RPCNakamotoBlockReplayRequestHandler { let 
block_id = StacksBlockId::from_hex(block_id_str) .map_err(|_| Error::DecodeError("Invalid path: unparseable block id".to_string()))?; + self.block_id = Some(block_id); Ok(HttpRequestContents::new().query_string(query)) @@ -378,10 +379,9 @@ impl RPCRequestHandler for RPCNakamotoBlockReplayRequestHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let block_id = self - .block_id - .take() - .ok_or(NetError::SendError("Missing `block_id`".into()))?; + let Some(block_id) = &self.block_id else { + return Err(NetError::SendError("Missing `block_id`".into())); + }; let replayed_block_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| {