diff --git a/CHANGELOG.md b/CHANGELOG.md index fc21d8eac5..abfcacf350 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,24 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## [2.4.0.0.5] +## [2.4.0.1.0] -This introduces a set of improvements to the Stacks miner behavior. In +### Added + +- When the Clarity library is built with feature flag `developer-mode`, comments + from the source code are now attached to the `SymbolicExpression` nodes. This + will be useful for tools that use the Clarity library to analyze and + manipulate Clarity source code, e.g. a formatter. +- New RPC endpoint at `/v2/constant_val` to fetch a constant from a contract. +- A new subsystem, called StackerDB, has been added, which allows a set of + Stacks nodes to store off-chain data on behalf of a specially-crafted smart + contract. This is an opt-in feature; Stacks nodes explicitly subscribe to + StackerDB replicas in their config files. +- Message definitions and codecs for StackerDB, a replicated off-chain DB + hosted by subscribed Stacks nodes and controlled by smart contracts. +- Added 3 new public and regionally diverse bootstrap nodes: est.stacksnodes.org, cet.stacksnodes.org, sgt.stacksnodes.org. + +In addition, this introduces a set of improvements to the Stacks miner behavior. In particular: * The VRF public key can be re-used across node restarts. * Settings that affect mining are hot-reloaded from the config file. They take @@ -26,6 +41,22 @@ contract-calls). * When configured, the node will optionally only RBF block-commits if it can produce a block with strictly more transactions. +### Changed + +- `developer-mode` is no longer enabled in the default feature set. This is the correct default, since `stacks-node` should not be built with `developer-mode` enabled. Tools that need `developer-mode` should enable it explicitly. + +### Fixed + +- The transaction receipts for smart contract publish transactions now indicate + a result of `(err none)` if the top-level code of the smart contract contained + a runtime error and include details about the error in the `vm_error` field of + the receipt. Fixes issues #3154, #3328. +- Added config setting `burnchain.wallet_name`, which addresses blank wallets no + longer being created by default in recent Bitcoin versions. Fixes issue #3596. +- Use the current burnchain tip to look up UTXOs (Issue #3733). +- The node now gracefully shuts down even if it is in the middle of a handshake with + bitcoind. Fixes issue #3734. + ## [2.4.0.0.4] This is a high-priority hotfix that addresses a bug in transaction processing which @@ -56,41 +87,6 @@ could impact miner availability. This is a hotfix that changes the logging failure behavior from panicking to dropping the log message (PR #3784). -## [Unreleased] - -### Added - -- When the Clarity library is built with feature flag `developer-mode`, comments - from the source code are now attached to the `SymbolicExpression` nodes. This - will be useful for tools that use the Clarity library to analyze and - manipulate Clarity source code, e.g. a formatter. -- New RPC endpoint at /v2/constant_val to fetch a constant from a contract. -- A new subsystem, called StackerDB, has been added, which allows a set of - Stacks nodes to store off-chain data on behalf of a specially-crafter smart - contract.
This is an opt-in feature; Stacks nodes explicitly subscribe to - StackerDB replicas in their config files. -- Message definitions and codecs for Stacker DB, a replicated off-chain DB - hosted by subscribed Stacks nodes and controlled by smart contracts -- Added 3 new public and regionally diverse bootstrap nodes: est.stacksnodes.org, cet.stacksnodes.org, sgt.stacksnodes.org -- satoshis_per_byte can be changed in the config file and miners will always use - the most up to date value - -### Changed - -- `developer-mode` is no longer enabled in the default feature set. This is the correct default behavior, since the stacks-node should NOT build with developer-mode enabled by default. Tools that need to use developer-mode should enable it explicitly. - -### Fixed - -- The transaction receipts for smart contract publish transactions now indicate - a result of `(err none)` if the top-level code of the smart contract contained - runtime error and include details about the error in the `vm_error` field of - the receipt. Fixes issues #3154, #3328. -- Added config setting `burnchain.wallet_name` which addresses blank wallets no - longer being created by default in recent bitcoin versions. Fixes issue #3596 -- Use the current burnchain tip to lookup UTXOs (Issue #3733) -- The node now gracefully shuts down even if it is in the middle of a handshake with - bitcoind. Fixes issue #3734. - ## [2.4.0.0.1] This is a minor change to add `txid` fields into the log messages from failing diff --git a/build-scripts/Dockerfile.linux-glibc-arm64 b/build-scripts/Dockerfile.linux-glibc-arm64 index 7ce50b6a68..11e38f8804 100644 --- a/build-scripts/Dockerfile.linux-glibc-arm64 +++ b/build-scripts/Dockerfile.linux-glibc-arm64 @@ -18,7 +18,7 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && CC=aarch64-linux-gnu-gcc \ CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \ CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ - cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out diff --git a/build-scripts/Dockerfile.linux-glibc-armv7 b/build-scripts/Dockerfile.linux-glibc-armv7 index eb893baeb6..cc05298dfe 100644 --- a/build-scripts/Dockerfile.linux-glibc-armv7 +++ b/build-scripts/Dockerfile.linux-glibc-armv7 @@ -18,9 +18,9 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && CC=arm-linux-gnueabihf-gcc \ CC_armv7_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \ CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ - cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-glibc-x64 b/build-scripts/Dockerfile.linux-glibc-x64 index 2db13cb51e..7d0591023d 100644 --- a/build-scripts/Dockerfile.linux-glibc-x64 +++ b/build-scripts/Dockerfile.linux-glibc-x64 @@ -15,7 +15,7 @@ RUN apt-get update && apt-get install -y git RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out diff --git a/build-scripts/Dockerfile.linux-musl-arm64 b/build-scripts/Dockerfile.linux-musl-arm64 index 135e6f9fc9..24a07f018a 100644 --- a/build-scripts/Dockerfile.linux-musl-arm64 +++ b/build-scripts/Dockerfile.linux-musl-arm64 @@ -13,9 +13,10 @@ COPY . . RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / + diff --git a/build-scripts/Dockerfile.linux-musl-armv7 b/build-scripts/Dockerfile.linux-musl-armv7 index 57b93b47ec..2ce5a99912 100644 --- a/build-scripts/Dockerfile.linux-musl-armv7 +++ b/build-scripts/Dockerfile.linux-musl-armv7 @@ -13,9 +13,9 @@ COPY . . RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index 73e64b4d67..e34c629d62 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -15,9 +15,10 @@ RUN apk update && apk add git musl-dev RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. 
${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / + diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index d6b80f267a..0fd8a1e4c3 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -21,9 +21,10 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ && . /opt/osxcross/env-macos-aarch64 \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / + diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index 5403b2fe87..d73aa35f98 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -21,9 +21,10 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ && . /opt/osxcross/env-macos-x86_64 \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / + diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index c3ffcd5d29..c1f1e87a7e 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -17,9 +17,9 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && rustup target add ${TARGET} \ && CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \ CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc \ - cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe / \ No newline at end of file +COPY --from=build /out/stacks-inspect.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe / diff --git a/clarity/src/libclarity.rs b/clarity/src/libclarity.rs index def9435dbb..9948bad3a4 100644 --- a/clarity/src/libclarity.rs +++ b/clarity/src/libclarity.rs @@ -70,11 +70,13 @@ pub mod boot_util { use crate::vm::representations::ContractName; use crate::vm::types::QualifiedContractIdentifier; + #[allow(clippy::expect_used)] pub fn boot_code_id(name: &str, mainnet: bool) -> QualifiedContractIdentifier { let addr = boot_code_addr(mainnet); QualifiedContractIdentifier::new( addr.into(), - ContractName::try_from(name.to_string()).unwrap(), + ContractName::try_from(name.to_string()) + .expect("FATAL: boot contract name is not a legal ContractName"), ) } diff --git a/clarity/src/vm/analysis/analysis_db.rs b/clarity/src/vm/analysis/analysis_db.rs index b278bc5ef7..0fe0a83d54 100644 --- a/clarity/src/vm/analysis/analysis_db.rs +++ b/clarity/src/vm/analysis/analysis_db.rs @@ -45,13 +45,16 @@ impl<'a> AnalysisDatabase<'a> { pub fn execute<F, T, E>(&mut self, f: F) -> Result<T, E> where F: FnOnce(&mut Self) -> Result<T, E>, + E: From<CheckErrors>, { self.begin(); - let result = f(self).map_err(|e| { - self.roll_back(); - e + let result = f(self).or_else(|e| { + self.roll_back() + .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + Err(e) })?; - self.commit(); + self.commit() + .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; Ok(result) } @@ -59,12 +62,16 @@ impl<'a> AnalysisDatabase<'a> { self.store.nest(); } - pub fn commit(&mut self) { - self.store.commit(); + pub fn commit(&mut self) -> CheckResult<()> { + self.store + .commit() + .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into()) } - pub fn roll_back(&mut self) { - self.store.rollback(); + pub fn roll_back(&mut self) -> CheckResult<()> { + self.store + .rollback() + .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into()) } pub fn storage_key() -> &'static str { @@ -78,7 +85,8 @@ impl<'a> AnalysisDatabase<'a> { pub fn test_insert_contract_hash(&mut self, contract_identifier: &QualifiedContractIdentifier) { use stacks_common::util::hash::Sha512Trunc256Sum; self.store - .prepare_for_contract_metadata(contract_identifier, Sha512Trunc256Sum([0; 32])); + .prepare_for_contract_metadata(contract_identifier, Sha512Trunc256Sum([0; 32])) + .unwrap(); }
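A note on the `execute` rework above: `commit` and `roll_back` are now fallible, so the closure's error type `E` must be able to absorb storage failures, which is what the new `E: From<CheckErrors>` bound provides. Below is a minimal, self-contained sketch of the same pattern; `Store` and the reduced `CheckErrors` here are simplified stand-ins, not the real clarity types:

```rust
// Sketch of the transactional `execute` pattern: commit/rollback failures
// are wrapped as fatal `Expects` errors and converted into the caller's
// error type via `E: From<CheckErrors>`.
#[derive(Debug)]
pub enum CheckErrors {
    Expects(String),
}

pub struct Store {
    depth: u32,
}

impl Store {
    fn nest(&mut self) {
        self.depth += 1;
    }
    // Fallible: committing with no open nesting level is a logic error.
    fn commit(&mut self) -> Result<(), String> {
        match self.depth.checked_sub(1) {
            Some(d) => {
                self.depth = d;
                Ok(())
            }
            None => Err("commit without nest".into()),
        }
    }
    fn rollback(&mut self) -> Result<(), String> {
        match self.depth.checked_sub(1) {
            Some(d) => {
                self.depth = d;
                Ok(())
            }
            None => Err("rollback without nest".into()),
        }
    }
}

pub fn execute<F, T, E>(store: &mut Store, f: F) -> Result<T, E>
where
    F: FnOnce(&mut Store) -> Result<T, E>,
    E: From<CheckErrors>,
{
    store.nest();
    let result = f(store).or_else(|e| {
        // A failed rollback is itself fatal; otherwise surface the
        // closure's original error unchanged.
        store.rollback().map_err(|re| E::from(CheckErrors::Expects(re)))?;
        Err(e)
    })?;
    store.commit().map_err(|ce| E::from(CheckErrors::Expects(ce)))?;
    Ok(result)
}
```

The design choice here is that a failed rollback or commit signals unrecoverable interpreter state, so it is surfaced as a fatal `Expects` error rather than silently ignored.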
pub fn has_contract(&mut self, contract_identifier: &QualifiedContractIdentifier) -> bool { @@ -90,30 +98,42 @@ impl<'a> AnalysisDatabase<'a> { pub fn load_contract_non_canonical( &mut self, contract_identifier: &QualifiedContractIdentifier, - ) -> Option<ContractAnalysis> { + ) -> CheckResult<Option<ContractAnalysis>> { self.store .get_metadata(contract_identifier, AnalysisDatabase::storage_key()) // treat NoSuchContract error thrown by get_metadata as an Option::None -- // the analysis will propagate that as a CheckError anyways. - .ok()? - .map(|x| ContractAnalysis::deserialize(&x)) + .ok() + .flatten() + .map(|x| { + ContractAnalysis::deserialize(&x).map_err(|_| { + CheckErrors::Expects("Bad data deserialized from DB".into()).into() + }) + }) + .transpose() } pub fn load_contract( &mut self, contract_identifier: &QualifiedContractIdentifier, epoch: &StacksEpochId, - ) -> Option<ContractAnalysis> { - self.store + ) -> CheckResult<Option<ContractAnalysis>> { + Ok(self + .store .get_metadata(contract_identifier, AnalysisDatabase::storage_key()) // treat NoSuchContract error thrown by get_metadata as an Option::None -- // the analysis will propagate that as a CheckError anyways. - .ok()? - .map(|x| ContractAnalysis::deserialize(&x)) - .map(|mut x| { - x.canonicalize_types(epoch); - x + .ok() + .flatten() + .map(|x| { + ContractAnalysis::deserialize(&x) + .map_err(|_| CheckErrors::Expects("Bad data deserialized from DB".into())) }) + .transpose()? + .and_then(|mut x| { + x.canonicalize_types(epoch); + Some(x) + })) }
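The `load_contract*` rework above relies on a single Option/Result inversion: the optional metadata row is mapped through a fallible deserialization, and `transpose()` converts the resulting `Option<Result<_, _>>` into `Result<Option<_>, _>`, so a missing contract stays `Ok(None)` while corrupt data becomes an error. A reduced sketch, with `deserialize` and the error type as stand-ins for the real ones:

```rust
// Stand-in for a deserializer that can fail on corrupt bytes.
fn deserialize(raw: &str) -> Result<u64, ()> {
    raw.parse::<u64>().map_err(|_| ())
}

// Option<&str> models "the metadata row may not exist"; the return type
// separates "not found" (Ok(None)) from "found but corrupt" (Err).
fn load(metadata: Option<&str>) -> Result<Option<u64>, String> {
    metadata
        .map(|raw| deserialize(raw).map_err(|_| "Bad data deserialized from DB".to_string()))
        // Option<Result<u64, String>> -> Result<Option<u64>, String>
        .transpose()
}

fn main() {
    assert_eq!(load(None), Ok(None)); // missing contract is not an error
    assert_eq!(load(Some("42")), Ok(Some(42)));
    assert!(load(Some("corrupt")).is_err()); // bad DB data is an error
}
```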
pub fn insert_contract( @@ -127,7 +147,8 @@ impl<'a> AnalysisDatabase<'a> { } self.store - .insert_metadata(contract_identifier, key, &contract.serialize()); + .insert_metadata(contract_identifier, key, &contract.serialize()) + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Ok(()) } @@ -140,7 +161,7 @@ impl<'a> AnalysisDatabase<'a> { // stored as its own entry. the analysis cost tracking currently only // charges based on the function type size. let contract = self - .load_contract_non_canonical(contract_identifier) + .load_contract_non_canonical(contract_identifier)? .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; Ok(contract.clarity_version) } @@ -156,7 +177,7 @@ impl<'a> AnalysisDatabase<'a> { // stored as its own entry. the analysis cost tracking currently only // charges based on the function type size. let contract = self - .load_contract_non_canonical(contract_identifier) + .load_contract_non_canonical(contract_identifier)? .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; Ok(contract .get_public_function_type(function_name) @@ -174,7 +195,7 @@ impl<'a> AnalysisDatabase<'a> { // stored as its own entry. the analysis cost tracking currently only // charges based on the function type size. let contract = self - .load_contract_non_canonical(contract_identifier) + .load_contract_non_canonical(contract_identifier)? .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; Ok(contract .get_read_only_function_type(function_name) @@ -192,7 +213,7 @@ impl<'a> AnalysisDatabase<'a> { // stored as its own entry. the analysis cost tracking currently only // charges based on the function type size. let contract = self - .load_contract_non_canonical(contract_identifier) + .load_contract_non_canonical(contract_identifier)? .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; Ok(contract.get_defined_trait(trait_name).map(|trait_map| { trait_map @@ -207,7 +228,7 @@ contract_identifier: &QualifiedContractIdentifier, ) -> CheckResult<BTreeSet<TraitIdentifier>> { let contract = self - .load_contract_non_canonical(contract_identifier)? .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; Ok(contract.implemented_traits) } diff --git a/clarity/src/vm/analysis/contract_interface_builder/mod.rs b/clarity/src/vm/analysis/contract_interface_builder/mod.rs index a418d9f66f..c9bc3c71c1 100644 --- a/clarity/src/vm/analysis/contract_interface_builder/mod.rs +++ b/clarity/src/vm/analysis/contract_interface_builder/mod.rs @@ -19,13 +19,16 @@ use std::collections::{BTreeMap, BTreeSet}; use stacks_common::types::StacksEpochId; use crate::vm::analysis::types::ContractAnalysis; +use crate::vm::analysis::CheckResult; use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::{ FixedFunction, FunctionArg, FunctionType, TupleTypeSignature, TypeSignature, }; -use crate::vm::{ClarityName, ClarityVersion}; +use crate::vm::{CheckErrors, ClarityName, ClarityVersion}; -pub fn build_contract_interface(contract_analysis: &ContractAnalysis) -> ContractInterface { +pub fn build_contract_interface( + contract_analysis: &ContractAnalysis, +) -> CheckResult<ContractInterface> { let mut contract_interface = ContractInterface::new(contract_analysis.epoch, contract_analysis.clarity_version); @@ -55,21 +58,21 @@ pub fn build_contract_interface(contract_analysis: &ContractAnalysis) -> Contrac .append(&mut ContractInterfaceFunction::from_map( private_function_types, ContractInterfaceFunctionAccess::private, - )); + )?); contract_interface .functions .append(&mut ContractInterfaceFunction::from_map( public_function_types, ContractInterfaceFunctionAccess::public, - )); + )?); contract_interface .functions .append(&mut ContractInterfaceFunction::from_map( read_only_function_types, ContractInterfaceFunctionAccess::read_only, - )); + )?); contract_interface .variables @@ -99,7 +102,7 @@ pub fn build_contract_interface(contract_analysis: &ContractAnalysis) -> Contrac fungible_tokens, )); - contract_interface + Ok(contract_interface) } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -260,30 +263,40 @@ pub struct ContractInterfaceFunction { } impl ContractInterfaceFunction { - pub fn from_map( + fn from_map( map: &BTreeMap<ClarityName, FunctionType>, access: ContractInterfaceFunctionAccess, - ) -> Vec<ContractInterfaceFunction> { + ) -> CheckResult<Vec<ContractInterfaceFunction>> { map.iter() - .map(|(name, function_type)| ContractInterfaceFunction { - name: name.clone().into(), - access: access.to_owned(), - outputs: ContractInterfaceFunctionOutput { - type_f: match function_type { - FunctionType::Fixed(FixedFunction { returns, .. }) => { - ContractInterfaceAtomType::from_type_signature(returns) + .map(|(name, function_type)| { + Ok(ContractInterfaceFunction { + name: name.clone().into(), + access: access.to_owned(), + outputs: ContractInterfaceFunctionOutput { + type_f: match function_type { + FunctionType::Fixed(FixedFunction { returns, .. }) => { + ContractInterfaceAtomType::from_type_signature(&returns) + } + _ => return Err(CheckErrors::Expects( + "Contract functions should only have fixed function return types!" + .into(), + ) + .into()), + }, + }, + args: match function_type { + FunctionType::Fixed(FixedFunction { args, .. }) => { + ContractInterfaceFunctionArg::from_function_args(&args) + } + _ => { + return Err(CheckErrors::Expects( + "Contract functions should only have fixed function arguments!" + .into(), + ) + .into()) } - _ => panic!( - "Contract functions should only have fixed function return types!" - ), }, - }, - args: match function_type { - FunctionType::Fixed(FixedFunction { args, ..
}) => { - ContractInterfaceFunctionArg::from_function_args(args) - } - _ => panic!("Contract functions should only have fixed function arguments!"), - }, + }) }) .collect() } @@ -315,7 +328,7 @@ impl ContractInterfaceFungibleTokens { } impl ContractInterfaceNonFungibleTokens { - pub fn from_map(assets: &BTreeMap<ClarityName, TypeSignature>) -> Vec<ContractInterfaceNonFungibleTokens> { + fn from_map(assets: &BTreeMap<ClarityName, TypeSignature>) -> Vec<ContractInterfaceNonFungibleTokens> { assets .iter() .map(|(name, type_sig)| Self { @@ -327,7 +340,7 @@ impl ContractInterfaceFungibleTokens { } impl ContractInterfaceVariable { - pub fn from_map( + fn from_map( map: &BTreeMap<ClarityName, TypeSignature>, access: ContractInterfaceVariableAccess, ) -> Vec<ContractInterfaceVariable> { @@ -349,7 +362,7 @@ pub struct ContractInterfaceMap { } impl ContractInterfaceMap { - pub fn from_map( + fn from_map( map: &BTreeMap<ClarityName, (TypeSignature, TypeSignature)>, ) -> Vec<ContractInterfaceMap> { map.iter() @@ -386,8 +399,10 @@ impl ContractInterface { } } - pub fn serialize(&self) -> String { - serde_json::to_string(self).expect("Failed to serialize contract interface") + pub fn serialize(&self) -> CheckResult<String> { + serde_json::to_string(self).map_err(|_| { + CheckErrors::Expects("Failed to serialize contract interface".into()).into() + }) } } diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 81ae521c95..71fefb6457 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -37,6 +37,9 @@ pub enum CheckErrors { ExpectedName, SupertypeTooLarge, + // unexpected interpreter behavior + Expects(String), + // match errors BadMatchOptionSyntax(Box<CheckError>), BadMatchResponseSyntax(Box<CheckError>), @@ -195,6 +198,17 @@ pub struct CheckError { pub diagnostic: Diagnostic, } +impl CheckErrors { + /// Does this check error indicate that the transaction should be + /// rejected? + pub fn rejectable(&self) -> bool { + match &self { + CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) => true, + _ => false, + } + } +} +
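The new `Expects(String)` variant and the `rejectable()` predicate above give callers a way to distinguish "the contract failed analysis" from "the interpreter itself misbehaved". A sketch of how a caller might branch on it, using a reduced stand-in enum rather than the real `CheckErrors`:

```rust
// Reduced stand-in for the real CheckErrors enum.
#[derive(Debug)]
enum CheckErrors {
    SupertypeTooLarge,
    Expects(String),
    NoSuchContract(String),
}

impl CheckErrors {
    // Mirrors the `rejectable()` predicate introduced in the diff above.
    fn rejectable(&self) -> bool {
        matches!(
            self,
            CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_)
        )
    }
}

fn handle(result: Result<(), CheckErrors>) {
    match result {
        Ok(()) => println!("analysis passed"),
        Err(e) if e.rejectable() => {
            // A rejectable error means the transaction should be dropped
            // rather than reported as an ordinary analysis failure.
            eprintln!("rejecting transaction: {e:?}");
        }
        Err(e) => eprintln!("analysis failed: {e:?}"),
    }
}

fn main() {
    handle(Err(CheckErrors::Expects("bad DB state".into())));
    handle(Err(CheckErrors::NoSuchContract("SP000.foo".into())));
}
```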
impl CheckError { pub fn new(err: CheckErrors) -> CheckError { let diagnostic = Diagnostic::err(&err); @@ -254,6 +268,10 @@ impl From<CostErrors> for CheckErrors { CostErrors::CostContractLoadFailure => { CheckErrors::CostComputationFailed("Failed to load cost contract".into()) } + CostErrors::InterpreterFailure => { + CheckErrors::Expects("Unexpected interpreter failure in cost computation".into()) + } + CostErrors::Expect(s) => CheckErrors::Expects(s), } } } @@ -320,6 +338,7 @@ impl DiagnosableError for CheckErrors { match &self { CheckErrors::ExpectedLiteral => "expected a literal argument".into(), CheckErrors::SupertypeTooLarge => "supertype of two types is too large".into(), + CheckErrors::Expects(s) => format!("unexpected interpreter behavior: {s}"), CheckErrors::BadMatchOptionSyntax(source) => format!("match on a optional type uses the following syntax: (match input some-name if-some-expression if-none-expression). Caused by: {}", source.message()), diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 50c5b1e7d5..27e056afe9 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -57,7 +57,7 @@ pub fn mem_type_check( epoch, ASTRules::PrecheckSize, ) - .unwrap() + .map_err(|_| CheckErrors::Expects("Failed to build AST".into()))? .expressions; let mut marf = MemoryBackingStore::new(); @@ -77,8 +77,12 @@ pub fn mem_type_check( let first_type = x .type_map .as_ref() - .unwrap() - .get_type(x.expressions.last().unwrap()) + .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))? + .get_type( + x.expressions + .last() + .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))?, + ) .cloned(); Ok((first_type, x)) } @@ -139,13 +143,18 @@ pub fn run_analysis( | StacksEpochId::Epoch24 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) } - StacksEpochId::Epoch10 => unreachable!("Epoch 1.0 is not a valid epoch for analysis"), + StacksEpochId::Epoch10 => { + return Err(CheckErrors::Expects( + "Epoch 1.0 is not a valid epoch for analysis".into(), + ) + .into()) + } }?; TraitChecker::run_pass(&epoch, &mut contract_analysis, db)?; ArithmeticOnlyChecker::check_contract_cost_eligible(&mut contract_analysis); if STORE_CONTRACT_SRC_INTERFACE { - let interface = build_contract_interface(&contract_analysis); + let interface = build_contract_interface(&contract_analysis)?; contract_analysis.contract_interface = Some(interface); } if save_contract { diff --git a/clarity/src/vm/analysis/trait_checker/mod.rs b/clarity/src/vm/analysis/trait_checker/mod.rs index 451d23a6af..811d436a1f 100644 --- a/clarity/src/vm/analysis/trait_checker/mod.rs +++ b/clarity/src/vm/analysis/trait_checker/mod.rs @@ -56,7 +56,7 @@ impl TraitChecker { for trait_identifier in &contract_analysis.implemented_traits { let trait_name = trait_identifier.name.to_string(); let contract_defining_trait = analysis_db - .load_contract(&trait_identifier.contract_identifier, &self.epoch) + .load_contract(&trait_identifier.contract_identifier, &self.epoch)? .ok_or(CheckErrors::TraitReferenceUnknown( trait_identifier.name.to_string(), ))?; diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index 862d4d7b33..34bfc53c4b 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -51,7 +51,9 @@ impl FunctionType { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => self.check_args_2_1(accounting, args, clarity_version), - StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), + StacksEpochId::Epoch10 => { + return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) + } } } @@ -72,7 +74,9 @@ impl FunctionType { | StacksEpochId::Epoch24 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } - StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), + StacksEpochId::Epoch10 => { + return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) + } } } } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index ad80733f80..9ea729d475 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -37,6 +37,7 @@ use crate::vm::costs::{ analysis_typecheck_cost, cost_functions, runtime_cost, ClarityCostFunctionReference, CostErrors, CostOverflowingMath, CostTracker, ExecutionCost, LimitedCostTracker, }; +use crate::vm::errors::InterpreterError; use crate::vm::functions::define::DefineFunctionsParsed; use crate::vm::functions::NativeFunctions; use crate::vm::representations::SymbolicExpressionType::{ @@ -93,7 +94,7 @@ impl CostTracker for TypeChecker<'_, '_> { fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.cost_track.add_memory(memory) } - fn drop_memory(&mut self, memory: u64) { + fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.cost_track.drop_memory(memory) } fn reset_memory(&mut self) { @@ -236,7 +237,12 @@ impl
FunctionType { Ok(TypeSignature::BoolType) } - FunctionType::Binary(_, _, _) => unreachable!("Binary type should be reached in 2.05"), + FunctionType::Binary(_, _, _) => { + return Err(CheckErrors::Expects( + "Binary type should not be reached in 2.05".into(), + ) + .into()) + } } } @@ -247,7 +253,7 @@ impl FunctionType { ) -> CheckResult<TypeSignature> { let (expected_args, returns) = match self { FunctionType::Fixed(FixedFunction { args, returns }) => (args, returns), - _ => panic!("Unexpected function type"), + _ => return Err(CheckErrors::Expects("Unexpected function type".into()).into()), }; check_argument_count(expected_args.len(), func_args)?; @@ -258,10 +264,8 @@ impl FunctionType { Value::Principal(PrincipalData::Contract(contract)), ) => { let contract_to_check = db - .load_contract(contract, &StacksEpochId::Epoch2_05) - .ok_or_else(|| { - CheckErrors::NoSuchContract(contract.name.to_string()) - })?; + .load_contract(contract, &StacksEpochId::Epoch2_05)? + .ok_or_else(|| CheckErrors::NoSuchContract(contract.name.to_string()))?; let trait_definition = db .get_defined_trait( &trait_id.contract_identifier, @@ -281,8 +285,8 @@ impl FunctionType { )?; } (expected_type, value) => { - if !expected_type.admits(&StacksEpochId::Epoch2_05, value)? { - let actual_type = TypeSignature::type_of(value); + if !expected_type.admits(&StacksEpochId::Epoch2_05, &value)? { + let actual_type = TypeSignature::type_of(&value)?; return Err( CheckErrors::TypeError(expected_type.clone(), actual_type).into() ); @@ -302,7 +306,7 @@ fn trait_type_size(trait_sig: &BTreeMap<ClarityName, FunctionSignature>) -> Chec Ok(total_size) } -fn type_reserved_variable(variable_name: &str) -> Option<TypeSignature> { +fn type_reserved_variable(variable_name: &str) -> CheckResult<Option<TypeSignature>> { if let Some(variable) = NativeVariables::lookup_by_name_at_version(variable_name, &ClarityVersion::Clarity1) { @@ -312,18 +316,22 @@ fn type_reserved_variable(variable_name: &str) -> Option<TypeSignature> { ContractCaller => TypeSignature::PrincipalType, BlockHeight => TypeSignature::UIntType, BurnBlockHeight => TypeSignature::UIntType, - NativeNone => TypeSignature::new_option(no_type()).unwrap(), + NativeNone => TypeSignature::new_option(no_type()) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, NativeTrue => TypeSignature::BoolType, NativeFalse => TypeSignature::BoolType, TotalLiquidMicroSTX => TypeSignature::UIntType, Regtest => TypeSignature::BoolType, TxSponsor | Mainnet | ChainId => { - unreachable!("tx-sponsor, mainnet, and chain-id should not reach here in 2.05") + return Err(CheckErrors::Expects( + "tx-sponsor, mainnet, and chain-id should not reach here in 2.05".into(), + ) + .into()) } }; - Some(var_type) + Ok(Some(var_type)) } else { - None + Ok(None) } }
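These hunks show the transformation applied throughout the diff: `unreachable!`/`panic!` arms become typed `Expects` errors, so a state that "cannot happen" fails the analysis instead of aborting the node. A minimal sketch with stand-in types (`Epoch` and this `CheckErrors` are not the real ones):

```rust
#[derive(Debug)]
enum Epoch {
    Epoch10,
    Epoch21,
}

#[derive(Debug)]
enum CheckErrors {
    Expects(String),
}

fn check_args(epoch: &Epoch) -> Result<(), CheckErrors> {
    match epoch {
        Epoch::Epoch21 => Ok(()),
        // Previously: unreachable!("Epoch10 is not supported")
        Epoch::Epoch10 => Err(CheckErrors::Expects("Epoch10 is not supported".into())),
    }
}

fn main() {
    // The caller now sees a recoverable error instead of a process abort.
    assert!(check_args(&Epoch::Epoch10).is_err());
    assert!(check_args(&Epoch::Epoch21).is_ok());
}
```

Callers can then use `rejectable()` to decide whether such an error should also drop the offending transaction.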
@@ -396,7 +404,8 @@ impl<'a, 'b> TypeChecker<'a, 'b> { Ok(()) } Err(e) => Err(e), - })? + })? + .ok_or_else(|| CheckErrors::Expects("Expected a depth result".into()))?; } runtime_cost(ClarityCostFunction::AnalysisStorage, self, size)?; @@ -427,39 +436,41 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, expected_type: &TypeSignature, ) -> TypeResult { - if let ( - LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), - TypeSignature::TraitReferenceType(trait_identifier), - ) = (&expr.expr, expected_type) - { - let contract_to_check = self - .db - .load_contract(contract_identifier, &StacksEpochId::Epoch2_05) - .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; - - let contract_defining_trait = self - .db - .load_contract( - &trait_identifier.contract_identifier, + match (&expr.expr, expected_type) { + ( + LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), + TypeSignature::TraitReferenceType(trait_identifier), + ) => { + let contract_to_check = self + .db + .load_contract(&contract_identifier, &StacksEpochId::Epoch2_05)? + .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; + + let contract_defining_trait = self + .db + .load_contract( + &trait_identifier.contract_identifier, + &StacksEpochId::Epoch2_05, + )? + .ok_or(CheckErrors::NoSuchContract( + trait_identifier.contract_identifier.to_string(), + ))?; + + let trait_definition = contract_defining_trait + .get_defined_trait(&trait_identifier.name) + .ok_or(CheckErrors::NoSuchTrait( + trait_identifier.contract_identifier.to_string(), + trait_identifier.name.to_string(), + ))?; + + contract_to_check.check_trait_compliance( &StacksEpochId::Epoch2_05, - ) - .ok_or(CheckErrors::NoSuchContract( - trait_identifier.contract_identifier.to_string(), - ))?; - - let trait_definition = contract_defining_trait - .get_defined_trait(&trait_identifier.name) - .ok_or(CheckErrors::NoSuchTrait( - trait_identifier.contract_identifier.to_string(), - trait_identifier.name.to_string(), - ))?; - - contract_to_check.check_trait_compliance( - &StacksEpochId::Epoch2_05, - trait_identifier, - trait_definition, - )?; - return Ok(expected_type.clone()); + trait_identifier, + trait_definition, + )?; + return Ok(expected_type.clone()); + } + (_, _) => {} } let actual_type = self.type_check(expr, context)?; @@ -554,7 +565,10 @@ impl<'a, 'b> TypeChecker<'a, 'b> { .map_err(|_| CheckErrors::BadSyntaxBinding)?; if self.function_return_tracker.is_some() { - panic!("Interpreter error: Previous function define left dirty typecheck state."); + return Err(CheckErrors::Expects( + "Interpreter error: Previous function define left dirty typecheck state.".into(), + ) + .into()); } let mut function_context = context.extend()?; @@ -647,7 +661,10 @@ impl<'a, 'b> TypeChecker<'a, 'b> { if let Some(ref native_function) = NativeFunctions::lookup_by_name_at_version(function, &ClarityVersion::Clarity1) { - let typed_function = TypedNativeFunction::type_native_function(native_function); + let typed_function = match TypedNativeFunction::type_native_function(native_function) { + Ok(f) => f, + Err(e) => return Some(Err(e.into())), + }; Some(typed_function.type_check_application(self, args, context)) } else { None @@ -688,7 +705,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { fn lookup_variable(&mut self, name: &str, context: &TypingContext) -> TypeResult { runtime_cost(ClarityCostFunction::AnalysisLookupVariableConst, self, 0)?; - if let Some(type_result) = type_reserved_variable(name) { + if let Some(type_result) = type_reserved_variable(name)?
{ Ok(type_result) } else if let Some(type_result) = self.contract_context.get_variable_type(name) { Ok(type_result.clone()) @@ -715,7 +732,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, ) -> TypeResult { let type_sig = match expr.expr { - AtomValue(ref value) | LiteralValue(ref value) => TypeSignature::type_of(value), + AtomValue(ref value) | LiteralValue(ref value) => TypeSignature::type_of(value)?, Atom(ref name) => self.lookup_variable(name, context)?, List(ref expression) => self.type_check_function_application(expression, context)?, TraitReference(_, _) | Field(_) => { diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 67a90ae5a5..21676d6102 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -26,7 +26,7 @@ use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::{ analysis_typecheck_cost, cost_functions, runtime_cost, CostOverflowingMath, }; -use crate::vm::errors::{Error as InterpError, RuntimeErrorType}; +use crate::vm::errors::{Error as InterpError, InterpreterError, RuntimeErrorType}; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, @@ -479,7 +479,10 @@ fn check_principal_of( ) -> TypeResult { check_argument_count(1, args)?; checker.type_check_expects(&args[0], context, &BUFF_33)?; - Ok(TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType).unwrap()) + Ok( + TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, + ) } fn check_secp256k1_recover( @@ -490,7 +493,10 @@ fn check_secp256k1_recover( check_argument_count(2, args)?; checker.type_check_expects(&args[0], context, &BUFF_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; - Ok(TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType).unwrap()) + Ok( + TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, + ) } fn check_secp256k1_verify( @@ -545,10 +551,12 @@ impl TypedNativeFunction { } } - pub fn type_native_function(function: &NativeFunctions) -> TypedNativeFunction { + pub fn type_native_function( + function: &NativeFunctions, + ) -> Result { use self::TypedNativeFunction::{Simple, Special}; use crate::vm::functions::NativeFunctions::*; - match function { + let out = match function { Add | Subtract | Divide | Multiply => { Simple(SimpleNativeFunction(FunctionType::ArithmeticVariadic)) } @@ -566,30 +574,39 @@ impl TypedNativeFunction { ToUInt => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::IntType, - ClarityName::try_from("value".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("value".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::UIntType, }))), ToInt => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::UIntType, - ClarityName::try_from("value".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("value".to_owned()).map_err(|_| { + 
CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::IntType, }))), Not => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::BoolType, - ClarityName::try_from("value".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("value".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::BoolType, }))), Hash160 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -597,7 +614,7 @@ impl TypedNativeFunction { ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -605,7 +622,7 @@ impl TypedNativeFunction { ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -613,7 +630,7 @@ impl TypedNativeFunction { ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -621,7 +638,7 @@ impl TypedNativeFunction { ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -632,8 +649,11 @@ impl TypedNativeFunction { GetStxBalance => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("owner".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("owner".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::UIntType, }))), @@ -641,44 +661,59 @@ impl TypedNativeFunction { args: vec![ FunctionArg::new( TypeSignature::UIntType, - ClarityName::try_from("amount".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("amount".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("sender".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("sender".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("recipient".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("recipient".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), ], returns: TypeSignature::new_response( TypeSignature::BoolType, TypeSignature::UIntType, ) - .unwrap(), + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, }))), StxBurn => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![ FunctionArg::new( TypeSignature::UIntType, - 
ClarityName::try_from("amount".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("amount".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("sender".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("sender".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), ], returns: TypeSignature::new_response( TypeSignature::BoolType, TypeSignature::UIntType, ) - .unwrap(), + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, }))), GetTokenBalance => Special(SpecialNativeFunction(&assets::check_special_get_balance)), GetAssetOwner => Special(SpecialNativeFunction(&assets::check_special_get_owner)), @@ -743,7 +778,14 @@ impl TypedNativeFunction { | StringToUInt | IntToAscii | IntToUtf8 | GetBurnBlockInfo | StxTransferMemo | StxGetAccount | BitwiseAnd | BitwiseOr | BitwiseNot | BitwiseLShift | BitwiseRShift | BitwiseXor2 | Slice | ToConsensusBuff | FromConsensusBuff - | ReplaceAt => unreachable!("Clarity 2 keywords should not show up in 2.05"), - } + | ReplaceAt => { + return Err(CheckErrors::Expects( + "Clarity 2 keywords should not show up in 2.05".into(), + ) + .into()) + } + }; + + Ok(out) } } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index 48978dcef1..49a29e8d38 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -42,7 +42,7 @@ fn get_simple_native_or_user_define( NativeFunctions::lookup_by_name_at_version(function_name, &ClarityVersion::Clarity1) { if let TypedNativeFunction::Simple(SimpleNativeFunction(function_type)) = - TypedNativeFunction::type_native_function(native_function) + TypedNativeFunction::type_native_function(native_function)? 
{ Ok(function_type) } else { @@ -82,12 +82,12 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer(), buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::min_buffer()?, buffer_data.into()), StringType(ASCII(ascii_data)) => { - (TypeSignature::min_string_ascii(), ascii_data.into()) + (TypeSignature::min_string_ascii()?, ascii_data.into()) } StringType(UTF8(utf8_data)) => { - (TypeSignature::min_string_utf8(), utf8_data.into()) + (TypeSignature::min_string_utf8()?, utf8_data.into()) } }; min_args = min_args.min(len); @@ -130,7 +130,7 @@ pub fn check_special_filter( { let input_type = match argument_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), _ => Err(CheckErrors::ExpectedSequence(argument_type.clone())), }?; @@ -167,7 +167,7 @@ pub fn check_special_fold( let argument_type = checker.type_check(&args[1], context)?; let input_type = match argument_type { - TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()), + TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()?), _ => Err(CheckErrors::ExpectedSequence(argument_type)), }?; @@ -382,12 +382,14 @@ pub fn check_special_element_at( } TypeSignature::SequenceType(StringType(ASCII(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(ASCII( - BufferLength::try_from(1u32).unwrap(), + BufferLength::try_from(1u32) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, )))), )), TypeSignature::SequenceType(StringType(UTF8(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(UTF8( - StringUTF8Length::try_from(1u32).unwrap(), + StringUTF8Length::try_from(1u32) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, )))), )), _ => Err(CheckErrors::ExpectedSequence(collection_type).into()), @@ -405,7 +407,7 @@ pub fn check_special_index_of( let list_type = checker.type_check(&args[0], context)?; let expected_input_type = match list_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), _ => Err(CheckErrors::ExpectedSequence(list_type)), }?; diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/contracts.rs index edab6db397..f24e6eb348 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/contracts.rs @@ -162,7 +162,10 @@ fn test_names_tokens_contracts_interface() { ) .unwrap() .1; - let test_contract_json_str = build_contract_interface(&contract_analysis).serialize(); + let test_contract_json_str = build_contract_interface(&contract_analysis) + .unwrap() + .serialize() + .unwrap(); let test_contract_json: serde_json::Value = serde_json::from_str(&test_contract_json_str).unwrap(); diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index 59488e8056..6529b859f5 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -711,16 +711,16 @@ fn test_index_of() { CheckErrors::ExpectedSequence(TypeSignature::IntType), 
CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer(), - TypeSignature::min_string_ascii(), + TypeSignature::min_buffer().unwrap(), + TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( - TypeSignature::min_string_utf8(), - TypeSignature::min_string_ascii(), + TypeSignature::min_string_utf8().unwrap(), + TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( - TypeSignature::min_string_ascii(), - TypeSignature::min_string_utf8(), + TypeSignature::min_string_ascii().unwrap(), + TypeSignature::min_string_utf8().unwrap(), ), CheckErrors::CouldNotDetermineType, CheckErrors::CouldNotDetermineType, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 1686c5c2a2..e87db175aa 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -98,7 +98,7 @@ impl CostTracker for TypeChecker<'_, '_> { fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.cost_track.add_memory(memory) } - fn drop_memory(&mut self, memory: u64) { + fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.cost_track.drop_memory(memory) } fn reset_memory(&mut self) { @@ -270,9 +270,9 @@ impl FunctionType { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), - TypeSignature::max_buffer(), + TypeSignature::max_string_ascii()?, + TypeSignature::max_string_utf8()?, + TypeSignature::max_buffer()?, ], first.clone(), ) @@ -337,7 +337,7 @@ impl FunctionType { contract_identifier.clone(), )) } - _ => TypeSignature::type_of(value), + _ => TypeSignature::type_of(value)?, }) } } @@ -399,7 +399,7 @@ impl FunctionType { } TypeSignature::TupleType(TupleTypeSignature::try_from(type_map)?) } - _ => TypeSignature::type_of(value), + _ => TypeSignature::type_of(value)?, }) } @@ -415,7 +415,7 @@ impl FunctionType { ) -> CheckResult { let (expected_args, returns) = match self { FunctionType::Fixed(FixedFunction { args, returns }) => (args, returns), - _ => panic!("Unexpected function type"), + _ => return Err(CheckErrors::Expects("Unexpected function type".into()).into()), }; check_argument_count(expected_args.len(), func_args)?; @@ -427,7 +427,7 @@ impl FunctionType { Value::Principal(PrincipalData::Contract(contract)), ) => { let contract_to_check = db - .load_contract(contract, &StacksEpochId::Epoch21) + .load_contract(contract, &StacksEpochId::Epoch21)? .ok_or_else(|| { CheckErrors::NoSuchContract(contract.name.to_string()) })?; @@ -437,7 +437,7 @@ impl FunctionType { &trait_id.name, &StacksEpochId::Epoch21, ) - .unwrap() + .map_err(|_| CheckErrors::Expects("Failed to get trait".into()))? .ok_or(CheckErrors::NoSuchContract( trait_id.contract_identifier.to_string(), ))?; @@ -448,8 +448,8 @@ impl FunctionType { )?; } (expected_type, value) => { - if !expected_type.admits(&StacksEpochId::Epoch21, value)? { - let actual_type = TypeSignature::type_of(value); + if !expected_type.admits(&StacksEpochId::Epoch21, &value)? 
{ + let actual_type = TypeSignature::type_of(&value)?; return Err( CheckErrors::TypeError(expected_type.clone(), actual_type).into() ); @@ -726,7 +726,7 @@ fn clarity2_inner_type_check_type( TypeSignature::CallableType(CallableSubtype::Trait(expected_trait_id)), ) => { let contract_to_check = match db - .load_contract(contract_identifier, &StacksEpochId::Epoch21) + .load_contract(&contract_identifier, &StacksEpochId::Epoch21)? { Some(contract) => { runtime_cost( @@ -842,16 +842,21 @@ fn contract_analysis_size(contract: &ContractAnalysis) -> CheckResult { Ok(total_size) } -fn type_reserved_variable(variable_name: &str, version: &ClarityVersion) -> Option { +fn type_reserved_variable( + variable_name: &str, + version: &ClarityVersion, +) -> CheckResult> { if let Some(variable) = NativeVariables::lookup_by_name_at_version(variable_name, version) { use crate::vm::variables::NativeVariables::*; let var_type = match variable { TxSender => TypeSignature::PrincipalType, - TxSponsor => TypeSignature::new_option(TypeSignature::PrincipalType).unwrap(), + TxSponsor => TypeSignature::new_option(TypeSignature::PrincipalType) + .map_err(|_| CheckErrors::Expects("Bad construction".into()))?, ContractCaller => TypeSignature::PrincipalType, BlockHeight => TypeSignature::UIntType, BurnBlockHeight => TypeSignature::UIntType, - NativeNone => TypeSignature::new_option(no_type()).unwrap(), + NativeNone => TypeSignature::new_option(no_type()) + .map_err(|_| CheckErrors::Expects("Bad construction".into()))?, NativeTrue => TypeSignature::BoolType, NativeFalse => TypeSignature::BoolType, TotalLiquidMicroSTX => TypeSignature::UIntType, @@ -859,9 +864,9 @@ fn type_reserved_variable(variable_name: &str, version: &ClarityVersion) -> Opti Mainnet => TypeSignature::BoolType, ChainId => TypeSignature::UIntType, }; - Some(var_type) + Ok(Some(var_type)) } else { - None + Ok(None) } } @@ -937,7 +942,8 @@ impl<'a, 'b> TypeChecker<'a, 'b> { Ok(()) } Err(e) => Err(e), - })?; + })? + .ok_or_else(|| CheckErrors::Expects("Expected a depth result".into()))?; } runtime_cost(ClarityCostFunction::AnalysisStorage, self, size)?; @@ -1063,7 +1069,10 @@ impl<'a, 'b> TypeChecker<'a, 'b> { .map_err(|_| CheckErrors::BadSyntaxBinding)?; if self.function_return_tracker.is_some() { - panic!("Interpreter error: Previous function define left dirty typecheck state."); + return Err(CheckErrors::Expects( + "Interpreter error: Previous function define left dirty typecheck state.".into(), + ) + .into()); } let mut function_context = context.extend()?; @@ -1157,7 +1166,10 @@ impl<'a, 'b> TypeChecker<'a, 'b> { if let Some(ref native_function) = NativeFunctions::lookup_by_name_at_version(function, &self.clarity_version) { - let typed_function = TypedNativeFunction::type_native_function(native_function); + let typed_function = match TypedNativeFunction::type_native_function(native_function) { + Ok(f) => f, + Err(e) => return Some(Err(e.into())), + }; Some(typed_function.type_check_application(self, args, context)) } else { None @@ -1198,7 +1210,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { fn lookup_variable(&mut self, name: &str, context: &TypingContext) -> TypeResult { runtime_cost(ClarityCostFunction::AnalysisLookupVariableConst, self, 0)?; - if let Some(type_result) = type_reserved_variable(name, &self.clarity_version) { + if let Some(type_result) = type_reserved_variable(name, &self.clarity_version)? 
{ Ok(type_result) } else if let Some(type_result) = self.contract_context.get_variable_type(name) { Ok(type_result.clone()) @@ -1227,39 +1239,41 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, expected_type: &TypeSignature, ) -> TypeResult { - if let ( - LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), - TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)), - ) = (&expr.expr, expected_type) - { - let contract_to_check = self - .db - .load_contract(contract_identifier, &StacksEpochId::Epoch21) - .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; - - let contract_defining_trait = self - .db - .load_contract( - &trait_identifier.contract_identifier, + match (&expr.expr, expected_type) { + ( + LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), + TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)), + ) => { + let contract_to_check = self + .db + .load_contract(&contract_identifier, &StacksEpochId::Epoch21)? + .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; + + let contract_defining_trait = self + .db + .load_contract( + &trait_identifier.contract_identifier, + &StacksEpochId::Epoch21, + )? + .ok_or(CheckErrors::NoSuchContract( + trait_identifier.contract_identifier.to_string(), + ))?; + + let trait_definition = contract_defining_trait + .get_defined_trait(&trait_identifier.name) + .ok_or(CheckErrors::NoSuchTrait( + trait_identifier.contract_identifier.to_string(), + trait_identifier.name.to_string(), + ))?; + + contract_to_check.check_trait_compliance( &StacksEpochId::Epoch21, - ) - .ok_or(CheckErrors::NoSuchContract( - trait_identifier.contract_identifier.to_string(), - ))?; - - let trait_definition = contract_defining_trait - .get_defined_trait(&trait_identifier.name) - .ok_or(CheckErrors::NoSuchTrait( - trait_identifier.contract_identifier.to_string(), - trait_identifier.name.to_string(), - ))?; - - contract_to_check.check_trait_compliance( - &StacksEpochId::Epoch21, - trait_identifier, - trait_definition, - )?; - return Ok(expected_type.clone()); + trait_identifier, + &trait_definition, + )?; + return Ok(expected_type.clone()); + } + (_, _) => {} } let actual_type = self.type_check(expr, context)?; @@ -1282,8 +1296,8 @@ impl<'a, 'b> TypeChecker<'a, 'b> { expected_type: &TypeSignature, ) -> TypeResult { let mut expr_type = match expr.expr { - AtomValue(ref value) => TypeSignature::type_of(value), - LiteralValue(ref value) => TypeSignature::literal_type_of(value), + AtomValue(ref value) => TypeSignature::type_of(value)?, + LiteralValue(ref value) => TypeSignature::literal_type_of(value)?, Atom(ref name) => self.lookup_variable(name, context)?, List(ref expression) => self.type_check_function_application(expression, context)?, TraitReference(_, _) | Field(_) => { @@ -1320,8 +1334,8 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, ) -> TypeResult { let expr_type = match expr.expr { - AtomValue(ref value) => TypeSignature::type_of(value), - LiteralValue(ref value) => TypeSignature::literal_type_of(value), + AtomValue(ref value) => TypeSignature::type_of(value)?, + LiteralValue(ref value) => TypeSignature::literal_type_of(value)?, Atom(ref name) => self.lookup_variable(name, context)?, List(ref expression) => self.type_check_function_application(expression, context)?, TraitReference(_, _) | Field(_) => { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/assets.rs 
b/clarity/src/vm/analysis/type_checker/v2_1/natives/assets.rs index 65ad452332..d94e0fad56 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/assets.rs @@ -227,7 +227,8 @@ pub fn check_special_stx_transfer_memo( let from_type: TypeSignature = TypeSignature::PrincipalType; let to_type: TypeSignature = TypeSignature::PrincipalType; let memo_type: TypeSignature = TypeSignature::SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(TOKEN_TRANSFER_MEMO_LENGTH as u32).unwrap(), + BufferLength::try_from(TOKEN_TRANSFER_MEMO_LENGTH as u32) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, )); runtime_cost(ClarityCostFunction::AnalysisTypeLookup, checker, 0)?; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs index c0dbd8307e..9876062241 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs @@ -36,6 +36,6 @@ pub fn check_special_from_consensus_buff( ) -> TypeResult { check_argument_count(2, args)?; let result_type = TypeSignature::parse_type_repr(StacksEpochId::Epoch21, &args[0], checker)?; - checker.type_check_expects(&args[1], context, &TypeSignature::max_buffer())?; + checker.type_check_expects(&args[1], context, &TypeSignature::max_buffer()?)?; TypeSignature::new_option(result_type).map_err(CheckError::from) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 55401f1817..0dee77465c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -566,7 +566,10 @@ fn check_principal_of( ) -> TypeResult { check_argument_count(1, args)?; checker.type_check_expects(&args[0], context, &BUFF_33)?; - Ok(TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType).unwrap()) + Ok( + TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, + ) } /// Forms: @@ -588,7 +591,7 @@ fn check_principal_construct( checker.type_check_expects( &args[2], context, - &TypeSignature::contract_name_string_ascii_type(), + &TypeSignature::contract_name_string_ascii_type()?, )?; } Ok(TypeSignature::new_response( @@ -597,13 +600,13 @@ fn check_principal_construct( ("error_code".into(), TypeSignature::UIntType), ( "value".into(), - TypeSignature::new_option(TypeSignature::PrincipalType).expect("FATAL: failed to create (optional principal) type signature"), + TypeSignature::new_option(TypeSignature::PrincipalType).map_err(|_| CheckErrors::Expects("FATAL: failed to create (optional principal) type signature".into()))?, ), ]) - .expect("FAIL: PrincipalConstruct failed to initialize type signature") + .map_err(|_| CheckErrors::Expects("FAIL: PrincipalConstruct failed to initialize type signature".into()))? .into() ) - .expect("FATAL: failed to create `(response principal { error_code: uint, principal: (optional principal) })` type signature") + .map_err(|_| CheckErrors::Expects("FATAL: failed to create `(response principal { error_code: uint, principal: (optional principal) })` type signature".into()))? 
)
 }

@@ -615,7 +618,10 @@ fn check_secp256k1_recover(
     check_argument_count(2, args)?;
     checker.type_check_expects(&args[0], context, &BUFF_32)?;
     checker.type_check_expects(&args[1], context, &BUFF_65)?;
-    Ok(TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType).unwrap())
+    Ok(
+        TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType)
+            .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?,
+    )
 }

 fn check_secp256k1_verify(
@@ -673,7 +679,11 @@ fn check_get_burn_block_info(

     checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?;

-    Ok(TypeSignature::new_option(block_info_prop.type_result())?)
+    Ok(TypeSignature::new_option(
+        block_info_prop
+            .type_result()
+            .map_err(|_| CheckErrors::Expects("FAILED to type valid burn info property".into()))?,
+    )?)
 }

 impl TypedNativeFunction {
@@ -696,10 +706,12 @@ impl TypedNativeFunction {
         }
     }

-    pub fn type_native_function(function: &NativeFunctions) -> TypedNativeFunction {
+    pub fn type_native_function(
+        function: &NativeFunctions,
+    ) -> Result<TypedNativeFunction, CheckErrors> {
         use self::TypedNativeFunction::{Simple, Special};
         use crate::vm::functions::NativeFunctions::*;
-        match function {
+        let out = match function {
             Add | Subtract | Divide | Multiply | BitwiseOr | BitwiseAnd | BitwiseXor2 => {
                 Simple(SimpleNativeFunction(FunctionType::ArithmeticVariadic))
             }
@@ -724,24 +736,33 @@ impl TypedNativeFunction {
             ToUInt => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction {
                 args: vec![FunctionArg::new(
                     TypeSignature::IntType,
-                    ClarityName::try_from("value".to_owned())
-                        .expect("FAIL: ClarityName failed to accept default arg name"),
+                    ClarityName::try_from("value".to_owned()).map_err(|_| {
+                        CheckErrors::Expects(
+                            "FAIL: ClarityName failed to accept default arg name".into(),
+                        )
+                    })?,
                 )],
                 returns: TypeSignature::UIntType,
             }))),
             ToInt => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction {
                 args: vec![FunctionArg::new(
                     TypeSignature::UIntType,
-                    ClarityName::try_from("value".to_owned())
-                        .expect("FAIL: ClarityName failed to accept default arg name"),
+                    ClarityName::try_from("value".to_owned()).map_err(|_| {
+                        CheckErrors::Expects(
+                            "FAIL: ClarityName failed to accept default arg name".into(),
+                        )
+                    })?,
                 )],
                 returns: TypeSignature::IntType,
             }))),
             IsStandard => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction {
                 args: vec![FunctionArg::new(
                     TypeSignature::PrincipalType,
-                    ClarityName::try_from("value".to_owned())
-                        .expect("FAIL: ClarityName failed to accept default arg name"),
+                    ClarityName::try_from("value".to_owned()).map_err(|_| {
+                        CheckErrors::Expects(
+                            "FAIL: ClarityName failed to accept default arg name".into(),
+                        )
+                    })?,
                 )],
                 returns: TypeSignature::BoolType,
             }))),
@@ -749,10 +770,14 @@ impl TypedNativeFunction {
                 Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction {
                     args: vec![FunctionArg::new(
                         TypeSignature::SequenceType(SequenceSubtype::BufferType(
-                            BufferLength::try_from(16_u32).unwrap(),
+                            BufferLength::try_from(16_u32)
+                                .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?,
                         )),
-                        ClarityName::try_from("value".to_owned())
-                            .expect("FAIL: ClarityName failed to accept default arg name"),
+                        ClarityName::try_from("value".to_owned()).map_err(|_| {
+                            CheckErrors::Expects(
+                                "FAIL: ClarityName failed to accept default arg name".into(),
+                            )
+                        })?,
                     )],
                     returns: TypeSignature::IntType,
                 })))
@@ -761,25 +786,29 @@ impl TypedNativeFunction {
                 Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction {
                     args: vec![FunctionArg::new(
TypeSignature::SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(16_u32).unwrap(), + BufferLength::try_from(16_u32) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, )), - ClarityName::try_from("value".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("value".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::UIntType, }))) } StringToInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), + TypeSignature::max_string_ascii()?, + TypeSignature::max_string_utf8()?, ], TypeSignature::OptionalType(Box::new(TypeSignature::IntType)), ))), StringToUInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), + TypeSignature::max_string_ascii()?, + TypeSignature::max_string_utf8()?, ], TypeSignature::OptionalType(Box::new(TypeSignature::UIntType)), ))), @@ -796,14 +825,17 @@ impl TypedNativeFunction { Not => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::BoolType, - ClarityName::try_from("value".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("value".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::BoolType, }))), Hash160 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -811,7 +843,7 @@ impl TypedNativeFunction { ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -819,7 +851,7 @@ impl TypedNativeFunction { ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -827,7 +859,7 @@ impl TypedNativeFunction { ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -835,7 +867,7 @@ impl TypedNativeFunction { ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer(), + TypeSignature::max_buffer()?, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -846,8 +878,11 @@ impl TypedNativeFunction { GetStxBalance => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("owner".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("owner".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TypeSignature::UIntType, }))), @@ -855,65 +890,85 @@ impl TypedNativeFunction { PrincipalDestruct => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("principal".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + 
ClarityName::try_from("principal".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: { /// The return type of `principal-destruct` is a Response, in which the success /// and error types are the same. - fn parse_principal_basic_type() -> TypeSignature { - TupleTypeSignature::try_from(vec![ + fn parse_principal_basic_type() -> Result { + Ok(TupleTypeSignature::try_from(vec![ ("version".into(), BUFF_1.clone()), ("hash-bytes".into(), BUFF_20.clone()), ( "name".into(), TypeSignature::new_option( - TypeSignature::contract_name_string_ascii_type(), + TypeSignature::contract_name_string_ascii_type()?, ) - .unwrap(), + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, ), ]) - .expect("FAIL: PrincipalDestruct failed to initialize type signature") - .into() + .map_err(|_| { + CheckErrors::Expects( + "FAIL: PrincipalDestruct failed to initialize type signature" + .into(), + ) + })?) } TypeSignature::ResponseType(Box::new(( - parse_principal_basic_type(), - parse_principal_basic_type(), + parse_principal_basic_type()?.into(), + parse_principal_basic_type()?.into(), ))) }, }))), StxGetAccount => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("owner".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("owner".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, )], returns: TupleTypeSignature::try_from(vec![ ("unlocked".into(), TypeSignature::UIntType), ("locked".into(), TypeSignature::UIntType), ("unlock-height".into(), TypeSignature::UIntType), ]) - .expect("FAIL: StxGetAccount failed to initialize type signature") + .map_err(|_| { + CheckErrors::Expects( + "FAIL: StxGetAccount failed to initialize type signature".into(), + ) + })? 
.into(), }))), StxBurn => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![ FunctionArg::new( TypeSignature::UIntType, - ClarityName::try_from("amount".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("amount".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), FunctionArg::new( TypeSignature::PrincipalType, - ClarityName::try_from("sender".to_owned()) - .expect("FAIL: ClarityName failed to accept default arg name"), + ClarityName::try_from("sender".to_owned()).map_err(|_| { + CheckErrors::Expects( + "FAIL: ClarityName failed to accept default arg name".into(), + ) + })?, ), ], returns: TypeSignature::new_response( TypeSignature::BoolType, TypeSignature::UIntType, ) - .unwrap(), + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, }))), StxTransfer => Special(SpecialNativeFunction(&assets::check_special_stx_transfer)), StxTransferMemo => Special(SpecialNativeFunction( @@ -990,6 +1045,8 @@ impl TypedNativeFunction { FromConsensusBuff => Special(SpecialNativeFunction( &conversions::check_special_from_consensus_buff, )), - } + }; + + Ok(out) } } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 5317487d73..9eb2ae17c9 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -42,7 +42,7 @@ fn get_simple_native_or_user_define( NativeFunctions::lookup_by_name_at_version(function_name, &checker.clarity_version) { if let TypedNativeFunction::Simple(SimpleNativeFunction(function_type)) = - TypedNativeFunction::type_native_function(native_function) + TypedNativeFunction::type_native_function(native_function)? 
{ Ok(function_type) } else { @@ -82,12 +82,12 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer(), buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::min_buffer()?, buffer_data.into()), StringType(ASCII(ascii_data)) => { - (TypeSignature::min_string_ascii(), ascii_data.into()) + (TypeSignature::min_string_ascii()?, ascii_data.into()) } StringType(UTF8(utf8_data)) => { - (TypeSignature::min_string_utf8(), utf8_data.into()) + (TypeSignature::min_string_utf8()?, utf8_data.into()) } }; min_args = min_args.min(len); @@ -130,7 +130,7 @@ pub fn check_special_filter( { let input_type = match argument_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), _ => Err(CheckErrors::ExpectedSequence(argument_type.clone())), }?; @@ -167,7 +167,7 @@ pub fn check_special_fold( let argument_type = checker.type_check(&args[1], context)?; let input_type = match argument_type { - TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()), + TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()?), _ => Err(CheckErrors::ExpectedSequence(argument_type)), }?; @@ -382,12 +382,14 @@ pub fn check_special_element_at( } TypeSignature::SequenceType(StringType(ASCII(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(ASCII( - BufferLength::try_from(1u32).unwrap(), + BufferLength::try_from(1u32) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, )))), )), TypeSignature::SequenceType(StringType(UTF8(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(UTF8( - StringUTF8Length::try_from(1u32).unwrap(), + StringUTF8Length::try_from(1u32) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, )))), )), _ => Err(CheckErrors::ExpectedSequence(collection_type).into()), @@ -405,7 +407,7 @@ pub fn check_special_index_of( let list_type = checker.type_check(&args[0], context)?; let expected_input_type = match list_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), _ => Err(CheckErrors::ExpectedSequence(list_type)), }?; @@ -455,7 +457,7 @@ pub fn check_special_replace_at( TypeSignature::SequenceType(seq) => seq, _ => return Err(CheckErrors::ExpectedSequence(input_type).into()), }; - let unit_seq = seq_type.unit_type(); + let unit_seq = seq_type.unit_type()?; // Check index argument checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; // Check element argument diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 6f700df9a3..32384e76c2 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -213,7 +213,10 @@ fn test_names_tokens_contracts_interface() { "; let contract_analysis = mem_type_check(INTERFACE_TEST_CONTRACT).unwrap().1; - let test_contract_json_str = build_contract_interface(&contract_analysis).serialize(); + let test_contract_json_str = build_contract_interface(&contract_analysis) + .unwrap() + .serialize() + .unwrap(); let test_contract_json: serde_json::Value = 
serde_json::from_str(&test_contract_json_str).unwrap();
@@ -3482,6 +3485,13 @@ fn clarity_trait_experiments_double_trait_method2_v1_v2(
     };
 }

+#[cfg(test)]
+impl From<CheckErrors> for String {
+    fn from(o: CheckErrors) -> Self {
+        o.to_string()
+    }
+}
+
 #[apply(test_clarity_versions)]
 fn clarity_trait_experiments_cross_epochs(
     #[case] version: ClarityVersion,
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs
index fd9b4df5fa..a6feabefa0 100644
--- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs
@@ -101,7 +101,10 @@ fn test_from_consensus_buff() {
         ),
         (
             "(from-consensus-buff? int u6)",
-            CheckErrors::TypeError(TypeSignature::max_buffer(), TypeSignature::UIntType),
+            CheckErrors::TypeError(
+                TypeSignature::max_buffer().unwrap(),
+                TypeSignature::UIntType,
+            ),
         ),
         (
             "(from-consensus-buff? (buff 1048576) 0x00)",
@@ -1043,16 +1046,16 @@ fn test_index_of() {
         CheckErrors::ExpectedSequence(TypeSignature::IntType),
         CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType),
         CheckErrors::TypeError(
-            TypeSignature::min_buffer(),
-            TypeSignature::min_string_ascii(),
+            TypeSignature::min_buffer().unwrap(),
+            TypeSignature::min_string_ascii().unwrap(),
         ),
         CheckErrors::TypeError(
-            TypeSignature::min_string_utf8(),
-            TypeSignature::min_string_ascii(),
+            TypeSignature::min_string_utf8().unwrap(),
+            TypeSignature::min_string_ascii().unwrap(),
         ),
         CheckErrors::TypeError(
-            TypeSignature::min_string_ascii(),
-            TypeSignature::min_string_utf8(),
+            TypeSignature::min_string_ascii().unwrap(),
+            TypeSignature::min_string_utf8().unwrap(),
         ),
         CheckErrors::TypeError(
             TypeSignature::list_of(TypeSignature::IntType, 1).unwrap(),
@@ -1061,16 +1064,16 @@ fn test_index_of() {
         CheckErrors::ExpectedSequence(TypeSignature::IntType),
         CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType),
         CheckErrors::TypeError(
-            TypeSignature::min_buffer(),
-            TypeSignature::min_string_ascii(),
+            TypeSignature::min_buffer().unwrap(),
+            TypeSignature::min_string_ascii().unwrap(),
         ),
         CheckErrors::TypeError(
-            TypeSignature::min_string_utf8(),
-            TypeSignature::min_string_ascii(),
+            TypeSignature::min_string_utf8().unwrap(),
+            TypeSignature::min_string_ascii().unwrap(),
         ),
         CheckErrors::TypeError(
-            TypeSignature::min_string_ascii(),
-            TypeSignature::min_string_utf8(),
+            TypeSignature::min_string_ascii().unwrap(),
+            TypeSignature::min_string_utf8().unwrap(),
         ),
         CheckErrors::CouldNotDetermineType,
         CheckErrors::CouldNotDetermineType,
@@ -2141,15 +2144,15 @@ fn test_string_to_ints() {
         CheckErrors::IncorrectArgumentCount(1, 0),
         CheckErrors::UnionTypeError(
             vec![
-                TypeSignature::max_string_ascii(),
-                TypeSignature::max_string_utf8(),
+                TypeSignature::max_string_ascii().unwrap(),
+                TypeSignature::max_string_utf8().unwrap(),
             ],
             SequenceType(BufferType(BufferLength::try_from(17_u32).unwrap())),
         ),
         CheckErrors::UnionTypeError(
             vec![
-                TypeSignature::max_string_ascii(),
-                TypeSignature::max_string_utf8(),
+                TypeSignature::max_string_ascii().unwrap(),
+                TypeSignature::max_string_utf8().unwrap(),
             ],
             IntType,
         ),
@@ -2157,15 +2160,15 @@ fn test_string_to_ints() {
         CheckErrors::IncorrectArgumentCount(1, 0),
         CheckErrors::UnionTypeError(
             vec![
-                TypeSignature::max_string_ascii(),
-                TypeSignature::max_string_utf8(),
+                TypeSignature::max_string_ascii().unwrap(),
+                TypeSignature::max_string_utf8().unwrap(),
             ],
             SequenceType(BufferType(BufferLength::try_from(17_u32).unwrap())),
         ),
         CheckErrors::UnionTypeError(
vec![
-                TypeSignature::max_string_ascii(),
-                TypeSignature::max_string_utf8(),
+                TypeSignature::max_string_ascii().unwrap(),
+                TypeSignature::max_string_utf8().unwrap(),
             ],
             IntType,
         ),
@@ -3320,14 +3323,17 @@ fn test_principal_construct() {
         (
             r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 "foooooooooooooooooooooooooooooooooooooooo")"#,
             CheckErrors::TypeError(
-                TypeSignature::contract_name_string_ascii_type(),
-                TypeSignature::bound_string_ascii_type(41),
+                TypeSignature::contract_name_string_ascii_type().unwrap(),
+                TypeSignature::bound_string_ascii_type(41).unwrap(),
             ),
         ),
         // bad argument type for `name`
         (
             r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 u123)"#,
-            CheckErrors::TypeError(TypeSignature::contract_name_string_ascii_type(), UIntType),
+            CheckErrors::TypeError(
+                TypeSignature::contract_name_string_ascii_type().unwrap(),
+                UIntType,
+            ),
         ),
         // too many arguments
         (
diff --git a/clarity/src/vm/analysis/types.rs b/clarity/src/vm/analysis/types.rs
index f82f7f0591..2471919b54 100644
--- a/clarity/src/vm/analysis/types.rs
+++ b/clarity/src/vm/analysis/types.rs
@@ -95,6 +95,7 @@ impl ContractAnalysis {
         }
     }

+    #[allow(clippy::expect_used)]
     pub fn take_contract_cost_tracker(&mut self) -> LimitedCostTracker {
         self.cost_track
             .take()
diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs
index b7d5b67387..73bba88ab0 100644
--- a/clarity/src/vm/ast/definition_sorter/mod.rs
+++ b/clarity/src/vm/ast/definition_sorter/mod.rs
@@ -117,7 +117,7 @@ impl DefinitionSorter {
             Atom(ref name) => {
                 if let Some(dep) = self.top_level_expressions_map.get(name) {
                     if dep.atom_index != expr.id {
-                        self.graph.add_directed_edge(tle_index, dep.expr_index);
+                        self.graph.add_directed_edge(tle_index, dep.expr_index)?;
                     }
                 }
                 Ok(())
@@ -125,7 +125,7 @@ impl DefinitionSorter {
             TraitReference(ref name) => {
                 if let Some(dep) = self.top_level_expressions_map.get(name) {
                     if dep.atom_index != expr.id {
-                        self.graph.add_directed_edge(tle_index, dep.expr_index);
+                        self.graph.add_directed_edge(tle_index, dep.expr_index)?;
                     }
                 }
                 Ok(())
@@ -413,9 +417,17 @@ impl Graph {
         self.adjacency_list.push(vec![]);
     }

-    fn add_directed_edge(&mut self, src_expr_index: usize, dst_expr_index: usize) {
-        let list = self.adjacency_list.get_mut(src_expr_index).unwrap();
+    fn add_directed_edge(
+        &mut self,
+        src_expr_index: usize,
+        dst_expr_index: usize,
+    ) -> ParseResult<()> {
+        let list = self
+            .adjacency_list
+            .get_mut(src_expr_index)
+            .ok_or_else(|| ParseErrors::InterpreterFailure)?;
         list.push(dst_expr_index);
+        Ok(())
     }

     fn get_node_descendants(&self, expr_index: usize) -> Vec<usize> {
diff --git a/clarity/src/vm/ast/errors.rs b/clarity/src/vm/ast/errors.rs
index ce34f09e80..c1a0914b5f 100644
--- a/clarity/src/vm/ast/errors.rs
+++ b/clarity/src/vm/ast/errors.rs
@@ -91,6 +91,8 @@ pub enum ParseErrors {
     /// Should be an unreachable error
     UnexpectedParserFailure,
+    /// Should be an unreachable failure which invalidates the transaction
+    InterpreterFailure,
 }

 #[derive(Debug, PartialEq)]
@@ -110,6 +112,13 @@ impl ParseError {
         }
     }

+    pub fn rejectable(&self) -> bool {
+        match self.err {
+            ParseErrors::InterpreterFailure => true,
+            _ => false,
+        }
+    }
+
     pub fn has_pre_expression(&self) -> bool {
         self.pre_expressions.is_some()
     }
@@ -165,6 +174,9 @@ impl From<CostErrors> for ParseError {
             CostErrors::CostContractLoadFailure => ParseError::new(
                 ParseErrors::CostComputationFailed("Failed to load cost contract".into()),
             ),
+            CostErrors::InterpreterFailure |
CostErrors::Expect(_) => {
+                ParseError::new(ParseErrors::InterpreterFailure)
+            }
         }
     }
 }
@@ -289,7 +301,8 @@ impl DiagnosableError for ParseErrors {
             ParseErrors::IllegalUtf8String(s) => format!("illegal UTF8 string \"{}\"", s),
             ParseErrors::ExpectedWhitespace => "expected whitespace before expression".into(),
             ParseErrors::NoteToMatchThis(token) => format!("to match this '{}'", token),
-            ParseErrors::UnexpectedParserFailure => "unexpected failure while parsing".into(),
+            ParseErrors::UnexpectedParserFailure => "unexpected failure while parsing".to_string(),
+            ParseErrors::InterpreterFailure => "unexpected failure while parsing".to_string(),
         }
     }
diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs
index d5c2167992..d1c21b507b 100644
--- a/clarity/src/vm/ast/mod.rs
+++ b/clarity/src/vm/ast/mod.rs
@@ -144,6 +144,7 @@ fn build_ast_typical(
 /// placeholders into the AST. Collects as many diagnostics as possible.
 /// Always returns a ContractAST, a vector of diagnostics, and a boolean
 /// that indicates if the build was successful.
+#[allow(clippy::unwrap_used)]
 pub fn build_ast_with_diagnostics(
     contract_identifier: &QualifiedContractIdentifier,
     source_code: &str,
@@ -362,7 +363,9 @@ mod test {
         fn add_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> {
             Ok(())
         }
-        fn drop_memory(&mut self, _memory: u64) {}
+        fn drop_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> {
+            Ok(())
+        }
         fn reset_memory(&mut self) {}
         fn short_circuit_contract_call(
             &mut self,
diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs
index 7ba15cfc97..bad2b13f32 100644
--- a/clarity/src/vm/ast/parser/v1.rs
+++ b/clarity/src/vm/ast/parser/v1.rs
@@ -91,6 +91,7 @@ enum ParseContext {
 impl LexMatcher {
     fn new(regex_str: &str, handles: TokenType) -> LexMatcher {
+        #[allow(clippy::unwrap_used)]
         LexMatcher {
             matcher: Regex::new(&format!("^{}", regex_str)).unwrap(),
             handler: handles,
@@ -218,7 +219,9 @@ fn inner_lex(input: &str, max_nesting: u64) -> ParseResult
 Parser<'a> {
         // Peek ahead for a '.', indicating a contract identifier
         if self.peek_next_token().token == Token::Dot {
+            #[allow(clippy::unwrap_used)]
             let dot = self.next_token().unwrap(); // skip over the dot
             let (name, contract_span) = match self.next_token() {
                 Some(PlacedToken {
@@ -595,6 +596,7 @@ impl<'a> Parser<'a> {
         // Peek ahead for a '.', indicating a trait identifier
         if self.peek_next_token().token == Token::Dot {
+            #[allow(clippy::unwrap_used)]
             let dot = self.next_token().unwrap(); // skip over the dot
             let (name, trait_span) = match self.next_token() {
                 Some(PlacedToken {
@@ -736,6 +738,7 @@ impl<'a> Parser<'a> {
         // Peek ahead for a '.', indicating a trait identifier
         if self.peek_next_token().token == Token::Dot {
+            #[allow(clippy::unwrap_used)]
             let dot = self.next_token().unwrap(); // skip over the dot
             let (name, trait_span) = match self.next_token() {
                 Some(PlacedToken {
@@ -1002,7 +1005,8 @@ impl<'a> Parser<'a> {
             | Token::LessEqual
             | Token::Greater
             | Token::GreaterEqual => {
-                let name = ClarityName::try_from(token.token.to_string()).unwrap();
+                let name = ClarityName::try_from(token.token.to_string())
+                    .map_err(|_| ParseErrors::InterpreterFailure)?;
                 let mut e = PreSymbolicExpression::atom(name);
                 e.copy_span(&token.span);
                 Some(e)
@@ -1104,6 +1108,7 @@ pub fn parse(input: &str) -> ParseResult<Vec<PreSymbolicExpression>> {
     }
 }

+#[allow(clippy::unwrap_used)]
 pub fn parse_collect_diagnostics(
     input: &str,
 ) -> (Vec<PreSymbolicExpression>, Vec<Diagnostic>, bool) {
@@ -1369,7 +1374,7 @@ mod tests {
         assert_eq!(stmts.len(), 1);
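// A standalone sketch (all types here are stand-ins, not the library's
// definitions) of how a caller is expected to use the new
// ParseError::rejectable() helper introduced above: InterpreterFailure marks
// an internal, should-be-unreachable fault, so the enclosing transaction is
// rejected instead of the error being reported as an ordinary, user-caused
// parse failure.
//
//     enum ParseErrors { InterpreterFailure, UnexpectedParserFailure }
//     struct ParseError { err: ParseErrors }
//
//     impl ParseError {
//         fn rejectable(&self) -> bool {
//             matches!(self.err, ParseErrors::InterpreterFailure)
//         }
//     }
//
//     fn handle(e: ParseError) -> &'static str {
//         if e.rejectable() {
//             "reject the transaction" // not the contract author's fault
//         } else {
//             "report a parse error to the contract author"
//         }
//     }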
assert!(diagnostics.is_empty());
         if let Some(v) = stmts[0].match_atom_value() {
-            assert_eq!(v.clone().expect_ascii(), "new\nline");
+            assert_eq!(v.clone().expect_ascii().unwrap(), "new\nline");
         } else {
             panic!("failed to parse ascii string");
         }
@@ -3392,7 +3397,7 @@ mod tests {
             }
         );
         let val = stmts[0].match_atom_value().unwrap().clone();
-        assert_eq!(val.expect_buff(2), vec![0x12, 0x34]);
+        assert_eq!(val.expect_buff(2).unwrap(), vec![0x12, 0x34]);
     }

     #[test]
diff --git a/clarity/src/vm/ast/sugar_expander/mod.rs b/clarity/src/vm/ast/sugar_expander/mod.rs
index 8f0f3c9d7f..9ec552b34e 100644
--- a/clarity/src/vm/ast/sugar_expander/mod.rs
+++ b/clarity/src/vm/ast/sugar_expander/mod.rs
@@ -90,7 +90,12 @@ impl SugarExpander {
                     .collect::<Vec<_>>();
                 pairs.insert(
                     0,
-                    SymbolicExpression::atom("tuple".to_string().try_into().unwrap()),
+                    SymbolicExpression::atom(
+                        "tuple"
+                            .to_string()
+                            .try_into()
+                            .map_err(|_| ParseErrors::InterpreterFailure)?,
+                    ),
                 );
                 SymbolicExpression::list(pairs.into_boxed_slice())
             }
diff --git a/clarity/src/vm/callables.rs b/clarity/src/vm/callables.rs
index c589e4b397..764c1479bb 100644
--- a/clarity/src/vm/callables.rs
+++ b/clarity/src/vm/callables.rs
@@ -21,7 +21,8 @@ use std::iter::FromIterator;

 use stacks_common::types::StacksEpochId;

-use super::costs::CostOverflowingMath;
+use super::costs::{CostErrors, CostOverflowingMath};
+use super::errors::InterpreterError;
 use super::types::signatures::CallableSubtype;
 use super::ClarityVersion;
 use crate::vm::analysis::errors::CheckErrors;
@@ -38,9 +39,6 @@ use crate::vm::types::{
 };
 use crate::vm::{eval, Environment, LocalContext, Value};

-type SpecialFunctionType =
-    dyn Fn(&[SymbolicExpression], &mut Environment, &LocalContext) -> Result<Value>;
-
 pub enum CallableType {
     UserFunction(DefinedFunction),
     NativeFunction(&'static str, NativeHandle, ClarityCostFunction),
@@ -53,7 +51,10 @@ pub enum CallableType {
         ClarityCostFunction,
         &'static dyn Fn(&[Value]) -> Result<Value>,
     ),
-    SpecialFunction(&'static str, &'static SpecialFunctionType),
+    SpecialFunction(
+        &'static str,
+        &'static dyn Fn(&[SymbolicExpression], &mut Environment, &LocalContext) -> Result<Value>,
+    ),
 }

 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
@@ -88,12 +89,19 @@ impl NativeHandle {
         match self {
             Self::SingleArg(function) => {
                 check_argument_count(1, &args)?;
-                function(args.pop().unwrap())
+                function(
+                    args.pop()
+                        .ok_or_else(|| InterpreterError::Expect("Unexpected list length".into()))?,
+                )
             }
             Self::DoubleArg(function) => {
                 check_argument_count(2, &args)?;
-                let second = args.pop().unwrap();
-                let first = args.pop().unwrap();
+                let second = args
+                    .pop()
+                    .ok_or_else(|| InterpreterError::Expect("Unexpected list length".into()))?;
+                let first = args
+                    .pop()
+                    .ok_or_else(|| InterpreterError::Expect("Unexpected list length".into()))?;
                 function(first, second)
             }
             Self::MoreArg(function) => function(args),
@@ -105,7 +113,10 @@ impl NativeHandle {
 pub fn cost_input_sized_vararg(args: &[Value]) -> Result<u64> {
     args.iter()
         .try_fold(0, |sum, value| {
-            (value.serialized_size() as u64).cost_overflow_add(sum)
+            (value
+                .serialized_size()
+                .map_err(|e| CostErrors::Expect(format!("{e:?}")))?
as u64) + .cost_overflow_add(sum) }) .map_err(Error::from) } @@ -152,7 +163,7 @@ impl DefinedFunction { runtime_cost( ClarityCostFunction::InnerTypeCheckCost, env, - arg_type.size(), + arg_type.size()?, )?; } @@ -235,11 +246,7 @@ impl DefinedFunction { ) .into()); } - if context - .variables - .insert(name.clone(), value.clone()) - .is_some() - { + if let Some(_) = context.variables.insert(name.clone(), value.clone()) { return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into()); } } @@ -281,7 +288,7 @@ impl DefinedFunction { } } - if context.variables.insert(name.clone(), cast_value).is_some() { + if let Some(_) = context.variables.insert(name.clone(), cast_value) { return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into()); } } @@ -318,7 +325,7 @@ impl DefinedFunction { self.name.to_string(), ))?; - let args = self.arg_types.to_vec(); + let args = self.arg_types.iter().map(|a| a.clone()).collect(); if !expected_sig.check_args_trait_compliance(epoch, args)? { return Err( CheckErrors::BadTraitImplementation(trait_name, self.name.to_string()).into(), @@ -388,12 +395,16 @@ impl CallableType { impl FunctionIdentifier { fn new_native_function(name: &str) -> FunctionIdentifier { let identifier = format!("_native_:{}", name); - FunctionIdentifier { identifier } + FunctionIdentifier { + identifier: identifier, + } } fn new_user_function(name: &str, context: &str) -> FunctionIdentifier { let identifier = format!("{}:{}", context, name); - FunctionIdentifier { identifier } + FunctionIdentifier { + identifier: identifier, + } } } @@ -530,7 +541,7 @@ mod test { trait_identifier: None, }); let cast_contract = clarity2_implicit_cast(&trait_ty, &contract).unwrap(); - let cast_trait = cast_contract.expect_callable(); + let cast_trait = cast_contract.expect_callable().unwrap(); assert_eq!(&cast_trait.contract_identifier, &contract_identifier); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); @@ -538,7 +549,7 @@ mod test { let optional_ty = TypeSignature::new_option(trait_ty.clone()).unwrap(); let optional_contract = Value::some(contract.clone()).unwrap(); let cast_optional = clarity2_implicit_cast(&optional_ty, &optional_contract).unwrap(); - match &cast_optional.expect_optional().unwrap() { + match &cast_optional.expect_optional().unwrap().unwrap() { Value::CallableContract(CallableData { contract_identifier: contract_id, trait_identifier: trait_id, @@ -554,7 +565,11 @@ mod test { TypeSignature::new_response(trait_ty.clone(), TypeSignature::UIntType).unwrap(); let response_contract = Value::okay(contract.clone()).unwrap(); let cast_response = clarity2_implicit_cast(&response_ok_ty, &response_contract).unwrap(); - let cast_trait = cast_response.expect_result_ok().expect_callable(); + let cast_trait = cast_response + .expect_result_ok() + .unwrap() + .expect_callable() + .unwrap(); assert_eq!(&cast_trait.contract_identifier, &contract_identifier); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); @@ -563,7 +578,11 @@ mod test { TypeSignature::new_response(TypeSignature::UIntType, trait_ty.clone()).unwrap(); let response_contract = Value::error(contract.clone()).unwrap(); let cast_response = clarity2_implicit_cast(&response_err_ty, &response_contract).unwrap(); - let cast_trait = cast_response.expect_result_err().expect_callable(); + let cast_trait = cast_response + .expect_result_err() + .unwrap() + .expect_callable() + .unwrap(); assert_eq!(&cast_trait.contract_identifier, &contract_identifier); assert_eq!(&cast_trait.trait_identifier.unwrap(), 
&trait_identifier); @@ -571,9 +590,9 @@ mod test { let list_ty = TypeSignature::list_of(trait_ty.clone(), 4).unwrap(); let list_contract = Value::list_from(vec![contract.clone(), contract2.clone()]).unwrap(); let cast_list = clarity2_implicit_cast(&list_ty, &list_contract).unwrap(); - let items = cast_list.expect_list(); + let items = cast_list.expect_list().unwrap(); for item in items { - let cast_trait = item.expect_callable(); + let cast_trait = item.expect_callable().unwrap(); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); } @@ -599,10 +618,12 @@ mod test { let cast_tuple = clarity2_implicit_cast(&tuple_ty, &tuple_contract).unwrap(); let cast_trait = cast_tuple .expect_tuple() + .unwrap() .get(&a_name) .unwrap() .clone() - .expect_callable(); + .expect_callable() + .unwrap(); assert_eq!(&cast_trait.contract_identifier, &contract_identifier); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); @@ -615,11 +636,14 @@ mod test { ]) .unwrap(); let cast_list = clarity2_implicit_cast(&list_opt_ty, &list_opt_contract).unwrap(); - let items = cast_list.expect_list(); + let items = cast_list.expect_list().unwrap(); for item in items { - if let Some(cast_opt) = item.expect_optional() { - let cast_trait = cast_opt.expect_callable(); - assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); + match item.expect_optional().unwrap() { + Some(cast_opt) => { + let cast_trait = cast_opt.expect_callable().unwrap(); + assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); + } + None => (), } } @@ -632,9 +656,9 @@ mod test { ]) .unwrap(); let cast_list = clarity2_implicit_cast(&list_res_ty, &list_res_contract).unwrap(); - let items = cast_list.expect_list(); + let items = cast_list.expect_list().unwrap(); for item in items { - let cast_trait = item.expect_result_ok().expect_callable(); + let cast_trait = item.expect_result_ok().unwrap().expect_callable().unwrap(); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); } @@ -647,9 +671,9 @@ mod test { ]) .unwrap(); let cast_list = clarity2_implicit_cast(&list_res_ty, &list_res_contract).unwrap(); - let items = cast_list.expect_list(); + let items = cast_list.expect_list().unwrap(); for item in items { - let cast_trait = item.expect_result_err().expect_callable(); + let cast_trait = item.expect_result_err().unwrap().expect_callable().unwrap(); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); } @@ -664,10 +688,10 @@ mod test { .unwrap(); let opt_list_res_contract = Value::some(list_res_contract).unwrap(); let cast_opt = clarity2_implicit_cast(&opt_list_res_ty, &opt_list_res_contract).unwrap(); - let inner = cast_opt.expect_optional().unwrap(); - let items = inner.expect_list(); + let inner = cast_opt.expect_optional().unwrap().unwrap(); + let items = inner.expect_list().unwrap(); for item in items { - let cast_trait = item.expect_result_err().expect_callable(); + let cast_trait = item.expect_result_err().unwrap().expect_callable().unwrap(); assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); } @@ -681,8 +705,10 @@ mod test { match &cast_optional .expect_optional() .unwrap() + .unwrap() .expect_optional() .unwrap() + .unwrap() { Value::CallableContract(CallableData { contract_identifier: contract_id, diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 1370dd7302..ff7cc427b6 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -113,7 +113,6 @@ pub trait ClarityConnection { 
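// A minimal illustration (stand-in types, not the library's definitions) of
// why the tests above now unwrap twice: accessors such as expect_optional()
// return a Result instead of panicking, so one step peels the Result and a
// second peels the inner Option.
//
//     #[derive(Debug)]
//     enum Value { Int(i64), Optional(Option<Box<Value>>) }
//
//     #[derive(Debug)]
//     struct ExpectError(&'static str);
//
//     impl Value {
//         fn expect_optional(self) -> Result<Option<Value>, ExpectError> {
//             match self {
//                 Value::Optional(inner) => Ok(inner.map(|v| *v)),
//                 _ => Err(ExpectError("expected an optional value")),
//             }
//         }
//     }
//
//     fn demo() -> Result<(), ExpectError> {
//         let v = Value::Optional(Some(Box::new(Value::Int(1))));
//         let inner = v.expect_optional()?.unwrap(); // Result first, then Option
//         println!("{inner:?}");
//         Ok(())
//     }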
self.with_clarity_db_readonly_owned(|mut db| (to_do(&mut db), db))
     }

-    #[allow(clippy::too_many_arguments)]
     fn with_readonly_clarity_env(
         &mut self,
         mainnet: bool,
@@ -137,9 +136,14 @@ pub trait ClarityConnection {
             let result = vm_env
                 .execute_in_env(sender, sponsor, Some(initial_context), to_do)
                 .map(|(result, _, _)| result);
-            let (db, _) = vm_env
-                .destruct()
-                .expect("Failed to recover database reference after executing transaction");
+            // this expect is allowed, if the database has escaped this context, then it is no longer sane
+            // and we must crash
+            #[allow(clippy::expect_used)]
+            let (db, _) = {
+                vm_env
+                    .destruct()
+                    .expect("Failed to recover database reference after executing transaction")
+            };
             (result, db)
         })
     }
@@ -161,7 +165,8 @@ pub trait TransactionConnection: ClarityConnection {
     ) -> Result<(R, AssetMap, Vec<StacksTransactionEvent>, bool), E>
     where
         A: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool,
-        F: FnOnce(&mut OwnedEnvironment) -> Result<(R, AssetMap, Vec<StacksTransactionEvent>), E>;
+        F: FnOnce(&mut OwnedEnvironment) -> Result<(R, AssetMap, Vec<StacksTransactionEvent>), E>,
+        E: From<InterpreterError>;

     /// Do something with the analysis database and cost tracker
     ///  instance of this transaction connection. This is a low-level
@@ -230,12 +235,20 @@ pub trait TransactionConnection: ClarityConnection {
                 let result = db.insert_contract(identifier, contract_analysis);
                 match result {
                     Ok(_) => {
-                        db.commit();
-                        (cost_tracker, Ok(()))
+                        let result = db
+                            .commit()
+                            .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into());
+                        (cost_tracker, result)
                     }
                     Err(e) => {
-                        db.roll_back();
-                        (cost_tracker, Err(e))
+                        let result = db
+                            .roll_back()
+                            .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into());
+                        if result.is_err() {
+                            (cost_tracker, result)
+                        } else {
+                            (cost_tracker, Err(e))
+                        }
                     }
                 }
             })
@@ -258,7 +271,7 @@ pub trait TransactionConnection: ClarityConnection {
             },
             |_, _| false,
         )
-        .map(|(value, assets, events, _)| (value, assets, events))
+        .and_then(|(value, assets, events, _)| Ok((value, assets, events)))
     }

     /// Execute a contract call in the current block.
diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs
index de2fd278c9..e90e5bc9b9 100644
--- a/clarity/src/vm/contexts.rs
+++ b/clarity/src/vm/contexts.rs
@@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-use std::collections::hash_map::Entry;
 use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
 use std::convert::TryInto;
 use std::fmt;
@@ -243,12 +242,6 @@
 pub type StackTrace = Vec<FunctionIdentifier>;

 pub const TRANSIENT_CONTRACT_NAME: &str = "__transient";

-impl Default for AssetMap {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
 impl AssetMap {
     pub fn new() -> AssetMap {
         AssetMap {
@@ -283,7 +276,7 @@ impl AssetMap {
         amount: u128,
     ) -> Result<u128> {
         let current_amount = match self.token_map.get(principal) {
-            Some(principal_map) => *principal_map.get(asset).unwrap_or(&0),
+            Some(principal_map) => *principal_map.get(&asset).unwrap_or(&0),
             None => 0,
         };
@@ -312,16 +305,15 @@ impl AssetMap {
         asset: AssetIdentifier,
         transfered: Value,
     ) {
-        if !self.asset_map.contains_key(principal) {
-            self.asset_map.insert(principal.clone(), HashMap::new());
-        }
-
-        let principal_map = self.asset_map.get_mut(principal).unwrap(); // should always exist, because of checked insert above.
+ let principal_map = self + .asset_map + .entry(principal.clone()) + .or_insert_with(|| HashMap::new()); - if let Entry::Vacant(e) = principal_map.entry(asset.clone()) { - e.insert(vec![transfered]); + if let Some(map_entry) = principal_map.get_mut(&asset) { + map_entry.push(transfered); } else { - principal_map.get_mut(&asset).unwrap().push(transfered); + principal_map.insert(asset, vec![transfered]); } } @@ -333,12 +325,10 @@ impl AssetMap { ) -> Result<()> { let next_amount = self.get_next_amount(principal, &asset, amount)?; - if !self.token_map.contains_key(principal) { - self.token_map.insert(principal.clone(), HashMap::new()); - } - - let principal_map = self.token_map.get_mut(principal).unwrap(); // should always exist, because of checked insert above. - + let principal_map = self + .token_map + .entry(principal.clone()) + .or_insert_with(|| HashMap::new()); principal_map.insert(asset, next_amount); Ok(()) @@ -371,16 +361,14 @@ impl AssetMap { // After this point, this function will not fail. for (principal, mut principal_map) in other.asset_map.drain() { for (asset, mut transfers) in principal_map.drain() { - if !self.asset_map.contains_key(&principal) { - self.asset_map.insert(principal.clone(), HashMap::new()); - } - - let landing_map = self.asset_map.get_mut(&principal).unwrap(); // should always exist, because of checked insert above. - if let Entry::Vacant(e) = landing_map.entry(asset.clone()) { - e.insert(transfers); - } else { - let landing_vec = landing_map.get_mut(&asset).unwrap(); + let landing_map = self + .asset_map + .entry(principal.clone()) + .or_insert_with(|| HashMap::new()); + if let Some(landing_vec) = landing_map.get_mut(&asset) { landing_vec.append(&mut transfers); + } else { + landing_map.insert(asset, transfers); } } } @@ -394,11 +382,10 @@ impl AssetMap { } for (principal, asset, amount) in to_add.drain(..) { - if !self.token_map.contains_key(&principal) { - self.token_map.insert(principal.clone(), HashMap::new()); - } - - let principal_map = self.token_map.get_mut(&principal).unwrap(); // should always exist, because of checked insert above. 
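// A compact sketch of the map-update idiom these hunks switch to (generic
// stand-in types, not the AssetMap definition): the entry API replaces the
// old contains_key / insert / get_mut(...).unwrap() dance, so no panic path
// remains even in principle.
//
//     use std::collections::HashMap;
//
//     fn record_transfer(
//         map: &mut HashMap<String, HashMap<String, Vec<u64>>>,
//         principal: &str,
//         asset: &str,
//         amount: u64,
//     ) {
//         map.entry(principal.to_string())
//             .or_insert_with(HashMap::new)
//             .entry(asset.to_string())
//             .or_insert_with(Vec::new)
//             .push(amount);
//     }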
+        let principal_map = self
+            .token_map
+            .entry(principal)
+            .or_insert_with(|| HashMap::new());
             principal_map.insert(asset, amount);
         }
@@ -416,60 +403,59 @@
         }

         for (principal, stx_amount) in self.stx_map.drain() {
-            let output_map = if map.contains_key(&principal) {
-                map.get_mut(&principal).unwrap()
-            } else {
-                map.insert(principal.clone(), HashMap::new());
-                map.get_mut(&principal).unwrap()
-            };
-            output_map.insert(AssetIdentifier::STX(), AssetMapEntry::STX(stx_amount));
+            let output_map = map
+                .entry(principal.clone())
+                .or_insert_with(|| HashMap::new());
+            output_map.insert(
+                AssetIdentifier::STX(),
+                AssetMapEntry::STX(stx_amount as u128),
+            );
         }

         for (principal, stx_burned_amount) in self.burn_map.drain() {
-            let output_map = if map.contains_key(&principal) {
-                map.get_mut(&principal).unwrap()
-            } else {
-                map.insert(principal.clone(), HashMap::new());
-                map.get_mut(&principal).unwrap()
-            };
+            let output_map = map
+                .entry(principal.clone())
+                .or_insert_with(|| HashMap::new());
             output_map.insert(
                 AssetIdentifier::STX_burned(),
-                AssetMapEntry::Burn(stx_burned_amount),
+                AssetMapEntry::Burn(stx_burned_amount as u128),
             );
         }

         for (principal, mut principal_map) in self.asset_map.drain() {
-            let output_map = if map.contains_key(&principal) {
-                map.get_mut(&principal).unwrap()
-            } else {
-                map.insert(principal.clone(), HashMap::new());
-                map.get_mut(&principal).unwrap()
-            };
-
+            let output_map = map
+                .entry(principal.clone())
+                .or_insert_with(|| HashMap::new());
             for (asset, transfers) in principal_map.drain() {
                 output_map.insert(asset, AssetMapEntry::Asset(transfers));
             }
         }

-        map
+        return map;
     }

     pub fn get_stx(&self, principal: &PrincipalData) -> Option<u128> {
-        self.stx_map.get(principal).copied()
+        match self.stx_map.get(principal) {
+            Some(value) => Some(*value),
+            None => None,
+        }
     }

     pub fn get_stx_burned(&self, principal: &PrincipalData) -> Option<u128> {
-        self.burn_map.get(principal).copied()
+        match self.burn_map.get(principal) {
+            Some(value) => Some(*value),
+            None => None,
+        }
     }

-    pub fn get_stx_burned_total(&self) -> u128 {
+    pub fn get_stx_burned_total(&self) -> Result<u128> {
         let mut total: u128 = 0;
         for principal in self.burn_map.keys() {
             total = total
                 .checked_add(*self.burn_map.get(principal).unwrap_or(&0u128))
-                .expect("BURN OVERFLOW");
+                .ok_or_else(|| InterpreterError::Expect("BURN OVERFLOW".into()))?;
         }
-        total
+        Ok(total)
     }

     pub fn get_fungible_tokens(
         &self,
         asset_identifier: &AssetIdentifier,
     ) -> Option<u128> {
         match self.token_map.get(principal) {
-            Some(assets) => assets.get(asset_identifier).copied(),
+            Some(ref assets) => match assets.get(asset_identifier) {
+                Some(value) => Some(*value),
+                None => None,
+            },
             None => None,
         }
     }
@@ -489,7 +478,7 @@
         asset_identifier: &AssetIdentifier,
     ) -> Option<&Vec<Value>> {
         match self.asset_map.get(principal) {
-            Some(assets) => match assets.get(asset_identifier) {
+            Some(ref assets) => match assets.get(asset_identifier) {
                 Some(values) => Some(values),
                 None => None,
             },
@@ -503,7 +492,7 @@ impl fmt::Display for AssetMap {
         write!(f, "[")?;
         for (principal, principal_map) in self.token_map.iter() {
             for (asset, amount) in principal_map.iter() {
-                writeln!(f, "{} spent {} {}", principal, amount, asset)?;
+                write!(f, "{} spent {} {}\n", principal, amount, asset)?;
             }
         }
         for (principal, principal_map) in self.asset_map.iter() {
@@ -512,25 +501,19 @@
                 for t in transfer {
                     write!(f, "{}, ", t)?;
                 }
-                writeln!(f, "] {}", asset)?;
+                write!(f, "] {}\n", asset)?;
             }
         }
         for
(principal, stx_amount) in self.stx_map.iter() { - writeln!(f, "{} spent {} microSTX", principal, stx_amount)?; + write!(f, "{} spent {} microSTX\n", principal, stx_amount)?; } for (principal, stx_burn_amount) in self.burn_map.iter() { - writeln!(f, "{} burned {} microSTX", principal, stx_burn_amount)?; + write!(f, "{} burned {} microSTX\n", principal, stx_burn_amount)?; } write!(f, "]") } } -impl Default for EventBatch { - fn default() -> Self { - Self::new() - } -} - impl EventBatch { pub fn new() -> EventBatch { EventBatch { events: vec![] } @@ -555,9 +538,9 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { #[cfg(any(test, feature = "testing"))] pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, '_> { database.begin(); - let epoch = database.get_clarity_epoch_version(); + let epoch = database.get_clarity_epoch_version().unwrap(); let version = ClarityVersion::default_for_epoch(epoch); - database.roll_back(); + database.roll_back().unwrap(); debug!( "Begin OwnedEnvironment(epoch = {}, version = {})", @@ -669,7 +652,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { Ok((return_value, asset_map, event_batch.events)) } Err(e) => { - self.context.roll_back(); + self.context.roll_back()?; Err(e) } } @@ -777,10 +760,11 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { let mut snapshot = env .global_context .database - .get_stx_balance_snapshot(recipient); + .get_stx_balance_snapshot(&recipient) + .unwrap(); - snapshot.credit(amount); - snapshot.save(); + snapshot.credit(amount).unwrap(); + snapshot.save().unwrap(); env.global_context .database @@ -879,7 +863,7 @@ impl CostTracker for Environment<'_, '_, '_> { fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.global_context.cost_track.add_memory(memory) } - fn drop_memory(&mut self, memory: u64) { + fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.global_context.cost_track.drop_memory(memory) } fn reset_memory(&mut self) { @@ -912,7 +896,7 @@ impl CostTracker for GlobalContext<'_, '_> { fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.cost_track.add_memory(memory) } - fn drop_memory(&mut self, memory: u64) { + fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { self.cost_track.drop_memory(memory) } fn reset_memory(&mut self) { @@ -988,7 +972,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { program: &str, rules: ast::ASTRules, ) -> Result { - let clarity_version = self.contract_context.clarity_version; + let clarity_version = self.contract_context.clarity_version.clone(); let parsed = ast::build_ast_with_rules( contract_identifier, @@ -1000,7 +984,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { )? 
.expressions;

-        if parsed.is_empty() {
+        if parsed.len() < 1 {
             return Err(RuntimeErrorType::ParseError(
                 "Expected a program of at least length 1".to_string(),
             )
             .into());
@@ -1016,7 +1000,7 @@

         let result = {
             let mut nested_env = Environment::new(
-                self.global_context,
+                &mut self.global_context,
                 &contract.contract_context,
                 self.call_stack,
                 self.sender.clone(),
@@ -1027,7 +1011,7 @@
             eval(&parsed[0], &mut nested_env, &local_context)
         };

-        self.global_context.roll_back();
+        self.global_context.roll_back()?;

         result
     }
@@ -1043,7 +1027,7 @@
     pub fn eval_raw_with_rules(&mut self, program: &str, rules: ast::ASTRules) -> Result<Value> {
         let contract_id = QualifiedContractIdentifier::transient();
-        let clarity_version = self.contract_context.clarity_version;
+        let clarity_version = self.contract_context.clarity_version.clone();

         let parsed = ast::build_ast_with_rules(
             &contract_id,
@@ -1055,14 +1039,15 @@
         )?
         .expressions;

-        if parsed.is_empty() {
+        if parsed.len() < 1 {
             return Err(RuntimeErrorType::ParseError(
                 "Expected a program of at least length 1".to_string(),
             )
             .into());
         }
         let local_context = LocalContext::new();
-        eval(&parsed[0], self, &local_context)
+        let result = { eval(&parsed[0], self, &local_context) };
+        result
     }

     #[cfg(any(test, feature = "testing"))]
@@ -1159,7 +1144,7 @@
                                               tx_name)))?;
         // sanitize contract-call inputs in epochs >= 2.4
         // testing todo: ensure sanitize_value() preserves trait callability!
-        let expected_type = TypeSignature::type_of(value);
+        let expected_type = TypeSignature::type_of(value)?;
         let (sanitized_value, _) = Value::sanitize_value(
             self.epoch(),
             &expected_type,
@@ -1185,7 +1170,7 @@
             Ok(value) => {
                 if let Some(handler) = self.global_context.database.get_cc_special_cases_handler() {
                     handler(
-                        self.global_context,
+                        &mut self.global_context,
                         self.sender.as_ref(),
                         self.sponsor.as_ref(),
                         contract_identifier,
@@ -1219,7 +1204,7 @@
         let result = {
             let mut nested_env = Environment::new(
-                self.global_context,
+                &mut self.global_context,
                 next_contract_context,
                 self.call_stack,
                 self.sender.clone(),
@@ -1231,7 +1216,7 @@
         };

         if make_read_only {
-            self.global_context.roll_back();
+            self.global_context.roll_back()?;
             result
         } else {
             self.global_context.handle_tx_result(result)
@@ -1255,13 +1240,15 @@
             self.global_context
                 .database
                 .set_block_hash(prior_bhh, true)
-                .expect(
-                    "ERROR: Failed to restore prior active block after time-shifted evaluation.",
-                );
+                .map_err(|_| {
+                    InterpreterError::Expect(
+                        "ERROR: Failed to restore prior active block after time-shifted evaluation."
+ .into()) + })?; result }); - self.global_context.roll_back(); + self.global_context.roll_back()?; result } @@ -1272,7 +1259,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_content: &str, ast_rules: ASTRules, ) -> Result<()> { - let clarity_version = self.contract_context.clarity_version; + let clarity_version = self.contract_context.clarity_version.clone(); let contract_ast = ast::build_ast_with_rules( &contract_identifier, @@ -1286,7 +1273,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_identifier, clarity_version, &contract_ast, - contract_content, + &contract_content, ) } @@ -1331,10 +1318,10 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_identifier.clone(), contract_content, self.sponsor.clone(), - self.global_context, + &mut self.global_context, contract_version, ); - self.drop_memory(memory_use); + self.drop_memory(memory_use)?; result })(); @@ -1343,7 +1330,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let data_size = contract.contract_context.data_size; self.global_context .database - .insert_contract(&contract_identifier, contract); + .insert_contract(&contract_identifier, contract)?; self.global_context .database .set_contract_data_size(&contract_identifier, data_size)?; @@ -1352,7 +1339,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { Ok(()) } Err(e) => { - self.global_context.roll_back(); + self.global_context.roll_back()?; Err(e) } } @@ -1378,12 +1365,12 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { Ok(value) } Err(_) => { - self.global_context.roll_back(); + self.global_context.roll_back()?; Err(InterpreterError::InsufficientBalance.into()) } }, Err(e) => { - self.global_context.roll_back(); + self.global_context.roll_back()?; Err(e) } } @@ -1402,7 +1389,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { Ok(ret) } Err(e) => { - self.global_context.roll_back(); + self.global_context.roll_back()?; Err(e) } } @@ -1593,13 +1580,13 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { } pub fn is_top_level(&self) -> bool { - self.asset_maps.is_empty() + self.asset_maps.len() == 0 } - fn get_asset_map(&mut self) -> &mut AssetMap { + fn get_asset_map(&mut self) -> Result<&mut AssetMap> { self.asset_maps .last_mut() - .expect("Failed to obtain asset map") + .ok_or_else(|| InterpreterError::Expect("Failed to obtain asset map".into()).into()) } pub fn log_asset_transfer( @@ -1608,13 +1595,14 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { contract_identifier: &QualifiedContractIdentifier, asset_name: &ClarityName, transfered: Value, - ) { + ) -> Result<()> { let asset_identifier = AssetIdentifier { contract_identifier: contract_identifier.clone(), asset_name: asset_name.clone(), }; - self.get_asset_map() - .add_asset_transfer(sender, asset_identifier, transfered) + self.get_asset_map()? + .add_asset_transfer(sender, asset_identifier, transfered); + Ok(()) } pub fn log_token_transfer( @@ -1628,16 +1616,16 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { contract_identifier: contract_identifier.clone(), asset_name: asset_name.clone(), }; - self.get_asset_map() + self.get_asset_map()? 
.add_token_transfer(sender, asset_identifier, transfered)
     }

     pub fn log_stx_transfer(&mut self, sender: &PrincipalData, transfered: u128) -> Result<()> {
-        self.get_asset_map().add_stx_transfer(sender, transfered)
+        self.get_asset_map()?.add_stx_transfer(sender, transfered)
     }

     pub fn log_stx_burn(&mut self, sender: &PrincipalData, transfered: u128) -> Result<()> {
-        self.get_asset_map().add_stx_burn(sender, transfered)
+        self.get_asset_map()?.add_stx_burn(sender, transfered)
     }

     pub fn execute<F, T>(&mut self, f: F) -> Result<T>
@@ -1645,9 +1633,9 @@
     where
         F: FnOnce(&mut Self) -> Result<T>,
     {
         self.begin();
-        let result = f(self).map_err(|e| {
-            self.roll_back();
-            e
+        let result = f(self).or_else(|e| {
+            self.roll_back()?;
+            Err(e)
         })?;
         self.commit()?;
         Ok(result)
@@ -1683,7 +1671,7 @@
             );
             f(&mut exec_env)
         };
-        self.roll_back();
+        self.roll_back().map_err(crate::vm::errors::Error::from)?;

         match result {
             Ok(return_value) => Ok(return_value),
@@ -1714,19 +1702,17 @@
     pub fn commit(&mut self) -> Result<(Option<AssetMap>, Option<EventBatch>)> {
         trace!("Calling commit");
         self.read_only.pop();
-        let asset_map = self
-            .asset_maps
-            .pop()
-            .expect("ERROR: Committed non-nested context.");
-        let mut event_batch = self
-            .event_batches
-            .pop()
-            .expect("ERROR: Committed non-nested context.");
+        let asset_map = self.asset_maps.pop().ok_or_else(|| {
+            InterpreterError::Expect("ERROR: Committed non-nested context.".into())
+        })?;
+        let mut event_batch = self.event_batches.pop().ok_or_else(|| {
+            InterpreterError::Expect("ERROR: Committed non-nested context.".into())
+        })?;

         let out_map = match self.asset_maps.last_mut() {
             Some(tail_back) => {
                 if let Err(e) = tail_back.commit_other(asset_map) {
-                    self.database.roll_back();
+                    self.database.roll_back()?;
                     return Err(e);
                 }
                 None
@@ -1742,19 +1728,25 @@
             None => Some(event_batch),
         };

-        self.database.commit();
+        self.database.commit()?;
         Ok((out_map, out_batch))
     }

-    pub fn roll_back(&mut self) {
+    pub fn roll_back(&mut self) -> Result<()> {
         let popped = self.asset_maps.pop();
-        assert!(popped.is_some());
+        if popped.is_none() {
+            return Err(InterpreterError::Expect("Expected entry to rollback".into()).into());
+        }
         let popped = self.read_only.pop();
-        assert!(popped.is_some());
+        if popped.is_none() {
+            return Err(InterpreterError::Expect("Expected entry to rollback".into()).into());
+        }
         let popped = self.event_batches.pop();
-        assert!(popped.is_some());
+        if popped.is_none() {
+            return Err(InterpreterError::Expect("Expected entry to rollback".into()).into());
+        }

-        self.database.roll_back();
+        self.database.roll_back()
     }

     pub fn handle_tx_result(&mut self, result: Result<Value>) -> Result<Value> {
@@ -1763,17 +1755,17 @@
             if data.committed {
                 self.commit()?;
             } else {
-                self.roll_back();
+                self.roll_back()?;
             }
             Ok(Value::Response(data))
         } else {
             Err(
-                CheckErrors::PublicFunctionMustReturnResponse(TypeSignature::type_of(&result))
+                CheckErrors::PublicFunctionMustReturnResponse(TypeSignature::type_of(&result)?)
.into(), ) } } else { - self.roll_back(); + self.roll_back()?; result } } @@ -1858,12 +1850,6 @@ impl ContractContext { } } -impl<'a> Default for LocalContext<'a> { - fn default() -> Self { - Self::new() - } -} - impl<'a> LocalContext<'a> { pub fn new() -> LocalContext<'a> { LocalContext { @@ -1921,12 +1907,6 @@ impl<'a> LocalContext<'a> { } } -impl Default for CallStack { - fn default() -> Self { - Self::new() - } -} - impl CallStack { pub fn new() -> CallStack { CallStack { @@ -1967,15 +1947,18 @@ impl CallStack { ) .into()); } - if tracked && !self.set.remove(function) { - panic!("Tried to remove tracked function from call stack, but could not find in current context.") + if tracked && !self.set.remove(&function) { + return Err(InterpreterError::InterpreterError( + "Tried to remove tracked function from call stack, but could not find in current context.".into() + ) + .into()); } Ok(()) } else { - Err(InterpreterError::InterpreterError( + return Err(InterpreterError::InterpreterError( "Tried to remove item from empty call stack.".to_string(), ) - .into()) + .into()); } } diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index c2c257755b..41bda80484 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -46,21 +46,24 @@ type Result = std::result::Result; pub const CLARITY_MEMORY_LIMIT: u64 = 100 * 1000 * 1000; // TODO: factor out into a boot lib? -pub const COSTS_1_NAME: &str = "costs"; -pub const COSTS_2_NAME: &str = "costs-2"; -pub const COSTS_3_NAME: &str = "costs-3"; +pub const COSTS_1_NAME: &'static str = "costs"; +pub const COSTS_2_NAME: &'static str = "costs-2"; +pub const COSTS_3_NAME: &'static str = "costs-3"; lazy_static! { - static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = TypeSignature::TupleType( - TupleTypeSignature::try_from(vec![ - ("runtime".into(), TypeSignature::UIntType), - ("write_length".into(), TypeSignature::UIntType), - ("write_count".into(), TypeSignature::UIntType), - ("read_count".into(), TypeSignature::UIntType), - ("read_length".into(), TypeSignature::UIntType), - ]) - .expect("BUG: failed to construct type signature for cost tuple") - ); + static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = { + #[allow(clippy::expect_used)] + TypeSignature::TupleType( + TupleTypeSignature::try_from(vec![ + ("runtime".into(), TypeSignature::UIntType), + ("write_length".into(), TypeSignature::UIntType), + ("write_count".into(), TypeSignature::UIntType), + ("read_count".into(), TypeSignature::UIntType), + ("read_length".into(), TypeSignature::UIntType), + ]) + .expect("BUG: failed to construct type signature for cost tuple"), + ) + }; } pub fn runtime_cost, C: CostTracker>( @@ -77,7 +80,7 @@ pub fn runtime_cost, C: CostTracker>( macro_rules! finally_drop_memory { ( $env: expr, $used_mem:expr; $exec:expr ) => {{ let result = (|| $exec)(); - $env.drop_memory($used_mem); + $env.drop_memory($used_mem)?; result }}; } @@ -97,12 +100,15 @@ pub fn analysis_typecheck_cost( } pub trait MemoryConsumer { - fn get_memory_use(&self) -> u64; + fn get_memory_use(&self) -> Result; } impl MemoryConsumer for Value { - fn get_memory_use(&self) -> u64 { - self.size().into() + fn get_memory_use(&self) -> Result { + Ok(self + .size() + .map_err(|_| CostErrors::InterpreterFailure)? 
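The hunks above introduce two new `CostErrors` variants, `InterpreterFailure` and `Expect(String)`, plus a `rejectable()` helper, and they systematically replace `expect`/`panic!` with `ok_or_else`/`map_err` conversions. Below is a minimal, self-contained sketch of that pattern; the pared-down `CostErrors` enum and `last_asset_map` are illustrative stand-ins, not the real clarity types (the patch itself uses `InterpreterError::Expect` for the asset-map case).

#[derive(Debug)]
enum CostErrors {
    InterpreterFailure,
    Expect(String),
}

impl CostErrors {
    // Mirrors the patch: both new variants signal interpreter bugs, so they
    // are "rejectable" rather than chargeable runtime cost errors.
    fn rejectable(&self) -> bool {
        matches!(self, CostErrors::InterpreterFailure | CostErrors::Expect(_))
    }
}

// Before the patch: maps.last_mut().expect("Failed to obtain asset map").
// After: the None case becomes a recoverable, rejectable error, not a panic.
fn last_asset_map(maps: &mut Vec<u64>) -> Result<&mut u64, CostErrors> {
    maps.last_mut()
        .ok_or_else(|| CostErrors::Expect("Failed to obtain asset map".into()))
}

fn main() {
    let mut maps = vec![1u64];
    assert!(last_asset_map(&mut maps).is_ok());
    maps.clear();
    assert!(last_asset_map(&mut maps).unwrap_err().rejectable());
    assert!(CostErrors::InterpreterFailure.rejectable());
}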
+ .into()) } } @@ -114,7 +120,7 @@ pub trait CostTracker { ) -> Result; fn add_cost(&mut self, cost: ExecutionCost) -> Result<()>; fn add_memory(&mut self, memory: u64) -> Result<()>; - fn drop_memory(&mut self, memory: u64); + fn drop_memory(&mut self, memory: u64) -> Result<()>; fn reset_memory(&mut self); /// Check if the given contract-call should be short-circuited. /// If so: this charges the cost to the CostTracker, and return true @@ -142,7 +148,9 @@ impl CostTracker for () { fn add_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { Ok(()) } - fn drop_memory(&mut self, _memory: u64) {} + fn drop_memory(&mut self, _memory: u64) -> Result<()> { + Ok(()) + } fn reset_memory(&mut self) {} fn short_circuit_contract_call( &mut self, @@ -245,7 +253,6 @@ pub struct TrackerData { chain_id: u32, } -#[allow(clippy::large_enum_variant)] #[derive(Clone)] pub enum LimitedCostTracker { Limited(TrackerData), @@ -321,18 +328,37 @@ pub enum CostErrors { CostBalanceExceeded(ExecutionCost, ExecutionCost), MemoryBalanceExceeded(u64, u64), CostContractLoadFailure, + InterpreterFailure, + Expect(String), +} + +impl CostErrors { + fn rejectable(&self) -> bool { + match self { + CostErrors::InterpreterFailure => true, + CostErrors::Expect(_) => true, + _ => false, + } + } } fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result { let cost_voting_contract = boot_code_id("cost-voting", mainnet); - let clarity_epoch = clarity_db.get_clarity_epoch_version(); + let clarity_epoch = clarity_db + .get_clarity_epoch_version() + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; let last_processed_at = match clarity_db.get_value( "vm-costs::last-processed-at-height", &TypeSignature::UIntType, &clarity_epoch, ) { - Ok(Some(v)) => u32::try_from(v.value.expect_u128()).expect("Block height overflowed u32"), + Ok(Some(v)) => u32::try_from( + v.value + .expect_u128() + .map_err(|_| CostErrors::InterpreterFailure)?, + ) + .map_err(|_| CostErrors::InterpreterFailure)?, Ok(None) => return Ok(CostStateSummary::empty()), Err(e) => return Err(CostErrors::CostComputationFailed(e.to_string())), }; @@ -345,7 +371,9 @@ fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result ) .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; let serialized: SerializedCostStateSummary = match metadata_result { - Some(serialized) => serde_json::from_str(&serialized).unwrap(), + Some(serialized) => { + serde_json::from_str(&serialized).map_err(|_| CostErrors::InterpreterFailure)? 
+ } None => return Ok(CostStateSummary::empty()), }; Ok(CostStateSummary::from(serialized)) @@ -358,7 +386,9 @@ fn store_state_summary( ) -> Result<()> { let block_height = clarity_db.get_current_block_height(); let cost_voting_contract = boot_code_id("cost-voting", mainnet); - let epoch = clarity_db.get_clarity_epoch_version(); + let epoch = clarity_db + .get_clarity_epoch_version() + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; clarity_db .put_value( "vm-costs::last-processed-at-height", @@ -368,12 +398,14 @@ fn store_state_summary( .map_err(|_e| CostErrors::CostContractLoadFailure)?; let serialized_summary = serde_json::to_string(&SerializedCostStateSummary::from(to_store.clone())) - .expect("BUG: failure to serialize cost state summary struct"); - clarity_db.set_metadata( - &cost_voting_contract, - "::state_summary", - &serialized_summary, - ); + .map_err(|_| CostErrors::InterpreterFailure)?; + clarity_db + .set_metadata( + &cost_voting_contract, + "::state_summary", + &serialized_summary, + ) + .map_err(|e| CostErrors::Expect(e.to_string()))?; Ok(()) } @@ -393,7 +425,9 @@ fn load_cost_functions( clarity_db: &mut ClarityDatabase, apply_updates: bool, ) -> Result { - let clarity_epoch = clarity_db.get_clarity_epoch_version(); + let clarity_epoch = clarity_db + .get_clarity_epoch_version() + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; let last_processed_count = clarity_db .get_value( "vm-costs::last_processed_count", @@ -403,7 +437,8 @@ fn load_cost_functions( .map_err(|_e| CostErrors::CostContractLoadFailure)? .map(|result| result.value) .unwrap_or(Value::UInt(0)) - .expect_u128(); + .expect_u128() + .map_err(|_| CostErrors::InterpreterFailure)?; let cost_voting_contract = boot_code_id("cost-voting", mainnet); let confirmed_proposals_count = clarity_db .lookup_variable_unknown_descriptor( @@ -412,7 +447,8 @@ fn load_cost_functions( &clarity_epoch, ) .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? - .expect_u128(); + .expect_u128() + .map_err(|_| CostErrors::InterpreterFailure)?; debug!("Check cost voting contract"; "confirmed_proposal_count" => confirmed_proposals_count, "last_processed_count" => last_processed_count); @@ -435,19 +471,26 @@ fn load_cost_functions( "confirmed-id".into(), Value::UInt(confirmed_proposal), )]) - .expect("BUG: failed to construct simple tuple"), + .map_err(|_| { + CostErrors::Expect("BUG: failed to construct simple tuple".into()) + })?, ), &clarity_epoch, ) - .expect("BUG: Failed querying confirmed-proposals") + .map_err(|_| CostErrors::Expect("BUG: Failed querying confirmed-proposals".into()))? .expect_optional() - .expect("BUG: confirmed-proposal-count exceeds stored proposals") - .expect_tuple(); + .map_err(|_| CostErrors::InterpreterFailure)? + .ok_or_else(|| { + CostErrors::Expect("BUG: confirmed-proposal-count exceeds stored proposals".into()) + })? + .expect_tuple() + .map_err(|_| CostErrors::InterpreterFailure)?; let target_contract = match entry .get("function-contract") - .expect("BUG: malformed cost proposal tuple") + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? .clone() .expect_principal() + .map_err(|_| CostErrors::InterpreterFailure)? { PrincipalData::Contract(contract_id) => contract_id, _ => { @@ -459,9 +502,10 @@ fn load_cost_functions( let target_function = match ClarityName::try_from( entry .get("function-name") - .expect("BUG: malformed cost proposal tuple") + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? 
.clone() - .expect_ascii(), + .expect_ascii() + .map_err(|_| CostErrors::InterpreterFailure)?, ) { Ok(x) => x, Err(_) => { @@ -472,9 +516,10 @@ fn load_cost_functions( }; let cost_contract = match entry .get("cost-function-contract") - .expect("BUG: malformed cost proposal tuple") + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? .clone() .expect_principal() + .map_err(|_| CostErrors::InterpreterFailure)? { PrincipalData::Contract(contract_id) => contract_id, _ => { @@ -487,8 +532,9 @@ fn load_cost_functions( let cost_function = match ClarityName::try_from( entry .get_owned("cost-function-name") - .expect("BUG: malformed cost proposal tuple") - .expect_ascii(), + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? + .expect_ascii() + .map_err(|_| CostErrors::InterpreterFailure)?, ) { Ok(x) => x, Err(_) => { @@ -507,6 +553,7 @@ fn load_cost_functions( // arithmetic-checking analysis pass let (cost_func_ref, cost_func_type) = match clarity_db .load_contract_analysis(&cost_contract) + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? { Some(c) => { if !c.is_cost_contract_eligible { @@ -588,7 +635,10 @@ fn load_cost_functions( .insert(target, cost_func_ref); } else { // referring to a user-defined function - match clarity_db.load_contract_analysis(&target_contract) { + match clarity_db + .load_contract_analysis(&target_contract) + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? + { Some(c) => { if let Some(Fixed(tf)) = c.read_only_function_types.get(&target_function) { if cost_func_type.args.len() != tf.args.len() { @@ -600,7 +650,7 @@ fn load_cost_functions( continue; } for arg in &cost_func_type.args { - if arg.signature != TypeSignature::UIntType { + if &arg.signature != &TypeSignature::UIntType { warn!("Confirmed cost proposal invalid: contains non uint argument"; "confirmed_proposal_id" => confirmed_proposal, ); @@ -713,10 +763,10 @@ impl LimitedCostTracker { Self::Free } - fn default_cost_contract_for_epoch(epoch_id: StacksEpochId) -> String { - match epoch_id { + fn default_cost_contract_for_epoch(epoch_id: StacksEpochId) -> Result { + let result = match epoch_id { StacksEpochId::Epoch10 => { - panic!("Attempted to get default cost functions for Epoch 1.0 where Clarity does not exist"); + return Err(CostErrors::Expect("Attempted to get default cost functions for Epoch 1.0 where Clarity does not exist".into())); } StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(), StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(), @@ -724,7 +774,8 @@ impl LimitedCostTracker { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => COSTS_3_NAME.to_string(), - } + }; + Ok(result) } } @@ -734,9 +785,11 @@ impl TrackerData { /// fork. 
fn load_costs(&mut self, clarity_db: &mut ClarityDatabase, apply_updates: bool) -> Result<()> { clarity_db.begin(); - let epoch_id = clarity_db.get_clarity_epoch_version(); + let epoch_id = clarity_db + .get_clarity_epoch_version() + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; let boot_costs_id = boot_code_id( - &LimitedCostTracker::default_cost_contract_for_epoch(epoch_id), + &LimitedCostTracker::default_cost_contract_for_epoch(epoch_id)?, self.mainnet, ); @@ -744,8 +797,13 @@ impl TrackerData { contract_call_circuits, mut cost_function_references, } = load_cost_functions(self.mainnet, clarity_db, apply_updates).map_err(|e| { - clarity_db.roll_back(); - e + let result = clarity_db + .roll_back() + .map_err(|e| CostErrors::Expect(e.to_string())); + match result { + Ok(_) => e, + Err(rollback_err) => rollback_err, + } })?; self.contract_call_circuits = contract_call_circuits; @@ -753,7 +811,7 @@ impl TrackerData { let mut cost_contracts = HashMap::new(); let mut m = HashMap::new(); for f in ClarityCostFunction::ALL.iter() { - let cost_function_ref = cost_function_references.remove(f).unwrap_or_else(|| { + let cost_function_ref = cost_function_references.remove(&f).unwrap_or_else(|| { ClarityCostFunctionReference::new(boot_costs_id.clone(), f.get_name()) }); if !cost_contracts.contains_key(&cost_function_ref.contract_id) { @@ -764,7 +822,9 @@ impl TrackerData { error!("Failed to load intended Clarity cost contract"; "contract" => %cost_function_ref.contract_id, "error" => ?e); - clarity_db.roll_back(); + clarity_db + .roll_back() + .map_err(|e| CostErrors::Expect(e.to_string()))?; return Err(CostErrors::CostContractLoadFailure); } }; @@ -782,7 +842,9 @@ impl TrackerData { error!("Failed to load intended Clarity cost contract"; "contract" => %boot_costs_id.to_string(), "error" => %format!("{:?}", e)); - clarity_db.roll_back(); + clarity_db + .roll_back() + .map_err(|e| CostErrors::Expect(e.to_string()))?; return Err(CostErrors::CostContractLoadFailure); } }; @@ -794,12 +856,16 @@ impl TrackerData { self.cost_contracts = cost_contracts; if apply_updates { - clarity_db.commit(); + clarity_db + .commit() + .map_err(|e| CostErrors::Expect(e.to_string()))?; } else { - clarity_db.roll_back(); + clarity_db + .roll_back() + .map_err(|e| CostErrors::Expect(e.to_string()))?; } - Ok(()) + return Ok(()); } } @@ -810,7 +876,8 @@ impl LimitedCostTracker { Self::Free => ExecutionCost::zero(), } } - pub fn set_total(&mut self, total: ExecutionCost) { + #[allow(clippy::panic)] + pub fn set_total(&mut self, total: ExecutionCost) -> () { // used by the miner to "undo" the cost of a transaction when trying to pack a block. match self { Self::Limited(ref mut data) => data.total = total, @@ -956,11 +1023,12 @@ fn add_memory(s: &mut TrackerData, memory: u64) -> std::result::Result<(), CostE } } -fn drop_memory(s: &mut TrackerData, memory: u64) { +fn drop_memory(s: &mut TrackerData, memory: u64) -> Result<()> { s.memory = s .memory .checked_sub(memory) - .expect("Underflowed dropped memory"); + .ok_or_else(|| CostErrors::Expect("Underflowed dropped memory".into()))?; + Ok(()) } impl CostTracker for LimitedCostTracker { @@ -972,11 +1040,13 @@ impl CostTracker for LimitedCostTracker { match self { Self::Free => { // tracker is free, return zero! 
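In `load_costs` above, an error from `load_cost_functions` must still roll back the database transaction opened with `clarity_db.begin()`, and a failure of the rollback itself takes precedence over the original error. A small sketch of that precedence rule follows, using hypothetical `MiniDb` and `E` stand-ins rather than the real `ClarityDatabase` and error types.

#[derive(Debug, PartialEq)]
enum E {
    Original,
    RollbackFailed,
}

struct MiniDb {
    rollback_ok: bool,
}

impl MiniDb {
    fn roll_back(&mut self) -> Result<(), E> {
        if self.rollback_ok {
            Ok(())
        } else {
            Err(E::RollbackFailed)
        }
    }
}

// Mirrors the map_err closure in `load_costs`: attempt the rollback, and let
// a rollback failure replace the original error; otherwise keep the original.
fn load(db: &mut MiniDb, inner: Result<u32, E>) -> Result<u32, E> {
    inner.map_err(|e| match db.roll_back() {
        Ok(()) => e,
        Err(rollback_err) => rollback_err,
    })
}

fn main() {
    let mut db = MiniDb { rollback_ok: true };
    assert_eq!(load(&mut db, Err(E::Original)), Err(E::Original));
    let mut db = MiniDb { rollback_ok: false };
    assert_eq!(load(&mut db, Err(E::Original)), Err(E::RollbackFailed));
    let mut db = MiniDb { rollback_ok: true };
    assert_eq!(load(&mut db, Ok(7)), Ok(7));
}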
- Ok(ExecutionCost::zero()) + return Ok(ExecutionCost::zero()); } Self::Limited(ref mut data) => { if cost_function == ClarityCostFunction::Unimplemented { - panic!("Used unimplemented cost function"); + return Err(CostErrors::Expect( + "Used unimplemented cost function".into(), + )); } let cost_function_ref = data .cost_function_references @@ -1003,9 +1073,9 @@ impl CostTracker for LimitedCostTracker { Self::Limited(ref mut data) => add_memory(data, memory), } } - fn drop_memory(&mut self, memory: u64) { + fn drop_memory(&mut self, memory: u64) -> Result<()> { match self { - Self::Free => {} + Self::Free => Ok(()), Self::Limited(ref mut data) => drop_memory(data, memory), } } @@ -1056,7 +1126,7 @@ impl CostTracker for &mut LimitedCostTracker { fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { LimitedCostTracker::add_memory(self, memory) } - fn drop_memory(&mut self, memory: u64) { + fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { LimitedCostTracker::drop_memory(self, memory) } fn reset_memory(&mut self) { @@ -1090,7 +1160,8 @@ impl fmt::Display for ExecutionCost { impl ToSql for ExecutionCost { fn to_sql(&self) -> rusqlite::Result { - let val = serde_json::to_string(self).expect("FAIL: could not serialize ExecutionCost"); + let val = serde_json::to_string(self) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?; Ok(ToSqlOutput::from(val)) } } @@ -1099,7 +1170,7 @@ impl FromSql for ExecutionCost { fn column_result(value: ValueRef) -> FromSqlResult { let str_val = String::column_result(value)?; let parsed = serde_json::from_str(&str_val) - .expect("CORRUPTION: failed to parse ExecutionCost from DB"); + .map_err(|e| rusqlite::types::FromSqlError::Other(Box::new(e)))?; Ok(parsed) } } @@ -1112,13 +1183,16 @@ pub trait CostOverflowingMath { impl CostOverflowingMath for u64 { fn cost_overflow_mul(self, other: u64) -> Result { - self.checked_mul(other).ok_or(CostErrors::CostOverflow) + self.checked_mul(other) + .ok_or_else(|| CostErrors::CostOverflow) } fn cost_overflow_add(self, other: u64) -> Result { - self.checked_add(other).ok_or(CostErrors::CostOverflow) + self.checked_add(other) + .ok_or_else(|| CostErrors::CostOverflow) } fn cost_overflow_sub(self, other: u64) -> Result { - self.checked_sub(other).ok_or(CostErrors::CostOverflow) + self.checked_sub(other) + .ok_or_else(|| CostErrors::CostOverflow) } } @@ -1135,7 +1209,9 @@ impl ExecutionCost { /// Returns the percentage of self consumed in `numerator`'s largest proportion dimension. 
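`proportion_largest_dimension` (next hunk) reports the percentage consumed in the most-used cost dimension: each dimension of `numerator` is divided by one percent of the corresponding limit in `self`, and the largest ratio wins; the rewrite only trades the leading `*` deref for a trailing `.clone()` so the `expect` can sit under `#[allow(clippy::expect_used)]`. A two-dimension worked sketch, with `Cost` as a simplified stand-in for `ExecutionCost`:

use std::cmp;

// Simplified stand-in for ExecutionCost with two of its five dimensions.
struct Cost {
    runtime: u64,
    read_count: u64,
}

impl Cost {
    // For each dimension: numerator / (limit / 100), i.e. percent consumed
    // (the max(1, ..) guard avoids dividing by zero for tiny limits).
    // The largest percentage across dimensions is returned.
    fn proportion_largest_dimension(&self, numerator: &Cost) -> u64 {
        [
            numerator.runtime / cmp::max(1, self.runtime / 100),
            numerator.read_count / cmp::max(1, self.read_count / 100),
        ]
        .iter()
        .max()
        .copied()
        // max() over a non-empty array is always Some; a sketch may default.
        .unwrap_or(0)
    }
}

fn main() {
    let limit = Cost { runtime: 1000, read_count: 200 };
    let used = Cost { runtime: 500, read_count: 150 };
    // runtime: 500 / (1000/100) = 50%; read_count: 150 / (200/100) = 75%.
    assert_eq!(limit.proportion_largest_dimension(&used), 75);
}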
pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 { - *[ + // max() should always return because there are > 0 elements + #[allow(clippy::expect_used)] + [ numerator.runtime / cmp::max(1, self.runtime / 100), numerator.write_length / cmp::max(1, self.write_length / 100), numerator.write_count / cmp::max(1, self.write_count / 100), @@ -1145,6 +1221,7 @@ impl ExecutionCost { .iter() .max() .expect("BUG: should find maximum") + .clone() } /// Returns the dot product of this execution cost with `resolution`/block_limit diff --git a/clarity/src/vm/coverage.rs b/clarity/src/vm/coverage.rs index 4ac9d459a7..bfb01e89c1 100644 --- a/clarity/src/vm/coverage.rs +++ b/clarity/src/vm/coverage.rs @@ -25,12 +25,6 @@ struct CoverageFileInfo { coverage: HashMap>, } -impl Default for CoverageReporter { - fn default() -> Self { - Self::new() - } -} - impl CoverageReporter { pub fn new() -> CoverageReporter { CoverageReporter { diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 7a74357f5e..0cfdaedb21 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -271,6 +271,7 @@ impl HeadersDB for NullHeadersDB { ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { + #[allow(clippy::unwrap_used)] let first_block_hash = BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); Some(first_block_hash) @@ -328,6 +329,7 @@ impl HeadersDB for NullHeadersDB { } } +#[allow(clippy::panic)] impl BurnStateDB for NullBurnStateDB { fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None @@ -438,13 +440,13 @@ impl<'a> ClarityDatabase<'a> { } /// Commit current key-value wrapper layer - pub fn commit(&mut self) { - self.store.commit(); + pub fn commit(&mut self) -> Result<()> { + self.store.commit().map_err(|e| e.into()) } /// Drop current key-value wrapper layer - pub fn roll_back(&mut self) { - self.store.rollback(); + pub fn roll_back(&mut self) -> Result<()> { + self.store.rollback().map_err(|e| e.into()) } pub fn set_block_hash( @@ -455,18 +457,18 @@ impl<'a> ClarityDatabase<'a> { self.store.set_block_hash(bhh, query_pending_data) } - pub fn put(&mut self, key: &str, value: &T) { - self.store.put(key, &value.serialize()); + pub fn put(&mut self, key: &str, value: &T) -> Result<()> { + self.store.put(&key, &value.serialize()) } /// Like `put()`, but returns the serialized byte size of the stored value - pub fn put_with_size(&mut self, key: &str, value: &T) -> u64 { + pub fn put_with_size(&mut self, key: &str, value: &T) -> Result { let serialized = value.serialize(); - self.store.put(key, &serialized); - byte_len_of_serialization(&serialized) + self.store.put(&key, &serialized)?; + Ok(byte_len_of_serialization(&serialized)) } - pub fn get(&mut self, key: &str) -> Option + pub fn get(&mut self, key: &str) -> Result> where T: ClarityDeserializable, { @@ -488,22 +490,26 @@ impl<'a> ClarityDatabase<'a> { let mut pre_sanitized_size = None; let serialized = if sanitize { - let value_size = value.serialized_size() as u64; + let value_size = value + .serialized_size() + .map_err(|e| InterpreterError::Expect(e.to_string()))? 
+ as u64; + let (sanitized_value, did_sanitize) = - Value::sanitize_value(epoch, &TypeSignature::type_of(&value), value) + Value::sanitize_value(epoch, &TypeSignature::type_of(&value)?, value) .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; // if data needed to be sanitized *charge* for the unsanitized cost if did_sanitize { pre_sanitized_size = Some(value_size); } - sanitized_value.serialize_to_vec() + sanitized_value.serialize_to_vec()? } else { - value.serialize_to_vec() + value.serialize_to_vec()? }; let size = serialized.len() as u64; let hex_serialized = to_hex(serialized.as_slice()); - self.store.put(key, &hex_serialized); + self.store.put(&key, &hex_serialized)?; Ok(pre_sanitized_size.unwrap_or(size)) } @@ -519,7 +525,7 @@ impl<'a> ClarityDatabase<'a> { .map_err(|e| InterpreterError::DBError(e.to_string()).into()) } - pub fn get_with_proof(&mut self, key: &str) -> Option<(T, Vec)> + pub fn get_with_proof(&mut self, key: &str) -> Result)>> where T: ClarityDeserializable, { @@ -561,15 +567,15 @@ impl<'a> ClarityDatabase<'a> { ) -> Result<()> { let hash = Sha512Trunc256Sum::from_data(contract_content.as_bytes()); self.store - .prepare_for_contract_metadata(contract_identifier, hash); + .prepare_for_contract_metadata(contract_identifier, hash)?; // insert contract-size let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); - self.insert_metadata(contract_identifier, &key, &(contract_content.len() as u64)); + self.insert_metadata(contract_identifier, &key, &(contract_content.len() as u64))?; // insert contract-src if STORE_CONTRACT_SRC_INTERFACE { let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); - self.insert_metadata(contract_identifier, &key, &contract_content.to_string()); + self.insert_metadata(contract_identifier, &key, &contract_content.to_string())?; } Ok(()) } @@ -589,8 +595,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, key: &str, data: &str, - ) { - self.store.insert_metadata(contract_identifier, key, data); + ) -> Result<()> { + self.store + .insert_metadata(contract_identifier, key, data) + .map_err(|e| e.into()) } fn insert_metadata( @@ -598,15 +606,17 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, key: &str, data: &T, - ) { + ) -> Result<()> { if self.store.has_metadata_entry(contract_identifier, key) { - panic!( + Err(InterpreterError::Expect(format!( "Metadata entry '{}' already exists for contract: {}", key, contract_identifier - ); + )) + .into()) } else { self.store - .insert_metadata(contract_identifier, key, &data.serialize()); + .insert_metadata(contract_identifier, key, &data.serialize()) + .map_err(|e| e.into()) } } @@ -618,9 +628,11 @@ impl<'a> ClarityDatabase<'a> { where T: ClarityDeserializable, { - self.store - .get_metadata(contract_identifier, key) - .map(|x_opt| x_opt.map(|x| T::deserialize(&x))) + let x_opt = self.store.get_metadata(contract_identifier, key)?; + match x_opt { + None => Ok(None), + Some(x) => T::deserialize(&x).map(|out| Some(out)), + } } pub fn fetch_metadata_manual( @@ -632,9 +644,13 @@ impl<'a> ClarityDatabase<'a> { where T: ClarityDeserializable, { - self.store - .get_metadata_manual(at_height, contract_identifier, key) - .map(|x_opt| x_opt.map(|x| T::deserialize(&x))) + let x_opt = self + .store + .get_metadata_manual(at_height, contract_identifier, key)?; + match x_opt { + None => Ok(None), + Some(x) => T::deserialize(&x).map(|out| Some(out)), + } } // load contract analysis stored by an 
analysis_db instance. @@ -644,13 +660,17 @@ impl<'a> ClarityDatabase<'a> { pub fn load_contract_analysis( &mut self, contract_identifier: &QualifiedContractIdentifier, - ) -> Option { - self.store + ) -> Result> { + let x_opt = self + .store .get_metadata(contract_identifier, AnalysisDatabase::storage_key()) // treat NoSuchContract error thrown by get_metadata as an Option::None -- // the analysis will propagate that as a CheckError anyways. - .ok()? - .map(|x| ContractAnalysis::deserialize(&x)) + .ok(); + match x_opt.flatten() { + None => Ok(None), + Some(x) => ContractAnalysis::deserialize(&x).map(|out| Some(out)), + } } pub fn get_contract_size( @@ -658,13 +678,21 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, ) -> Result { let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); - let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)?.expect( - "Failed to read non-consensus contract metadata, even though contract exists in MARF.", - ); + let contract_size: u64 = + self.fetch_metadata(contract_identifier, &key)? + .ok_or_else(|| { + InterpreterError::Expect( + "Failed to read non-consensus contract metadata, even though contract exists in MARF." + .into()) + })?; let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); - let data_size: u64 = self.fetch_metadata(contract_identifier, &key)?.expect( - "Failed to read non-consensus contract metadata, even though contract exists in MARF.", - ); + let data_size: u64 = self + .fetch_metadata(contract_identifier, &key)? + .ok_or_else(|| { + InterpreterError::Expect( + "Failed to read non-consensus contract metadata, even though contract exists in MARF." + .into()) + })?; // u64 overflow is _checked_ on insert into contract-data-size Ok(data_size + contract_size) @@ -677,13 +705,17 @@ impl<'a> ClarityDatabase<'a> { data_size: u64, ) -> Result<()> { let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); - let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)?.expect( - "Failed to read non-consensus contract metadata, even though contract exists in MARF.", - ); + let contract_size: u64 = + self.fetch_metadata(contract_identifier, &key)? + .ok_or_else(|| { + InterpreterError::Expect( + "Failed to read non-consensus contract metadata, even though contract exists in MARF." 
+ .into()) + })?; contract_size.cost_overflow_add(data_size)?; let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); - self.insert_metadata(contract_identifier, &key, &data_size); + self.insert_metadata(contract_identifier, &key, &data_size)?; Ok(()) } @@ -691,9 +723,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, contract: Contract, - ) { + ) -> Result<()> { let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); - self.insert_metadata(contract_identifier, &key, &contract); + self.insert_metadata(contract_identifier, &key, &contract)?; + Ok(()) } pub fn has_contract(&mut self, contract_identifier: &QualifiedContractIdentifier) -> bool { @@ -706,10 +739,11 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, ) -> Result { let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); - let mut data: Contract = self.fetch_metadata(contract_identifier, &key)?.expect( - "Failed to read non-consensus contract metadata, even though contract exists in MARF.", - ); - data.canonicalize_types(&self.get_clarity_epoch_version()); + let mut data: Contract = self.fetch_metadata(contract_identifier, &key)? + .ok_or_else(|| InterpreterError::Expect( + "Failed to read non-consensus contract metadata, even though contract exists in MARF." + .into()))?; + data.canonicalize_types(&self.get_clarity_epoch_version()?); Ok(data) } @@ -721,57 +755,68 @@ impl<'a> ClarityDatabase<'a> { /// Since Clarity did not exist in stacks 1.0, the lowest valid epoch ID is stacks 2.0. /// The instantiation of subsequent epochs may bump up the epoch version in the clarity DB if /// Clarity is updated in that epoch. - pub fn get_clarity_epoch_version(&mut self) -> StacksEpochId { - match self.get(Self::clarity_state_epoch_key()) { - Some(x) => u32::try_into(x).expect("Bad Clarity epoch version in stored Clarity state"), + pub fn get_clarity_epoch_version(&mut self) -> Result { + let out = match self.get(Self::clarity_state_epoch_key())? { + Some(x) => u32::try_into(x).map_err(|_| { + InterpreterError::Expect("Bad Clarity epoch version in stored Clarity state".into()) + })?, None => StacksEpochId::Epoch20, - } + }; + Ok(out) } /// Should be called _after_ all of the epoch's initialization has been invoked - pub fn set_clarity_epoch_version(&mut self, epoch: StacksEpochId) { + pub fn set_clarity_epoch_version(&mut self, epoch: StacksEpochId) -> Result<()> { self.put(Self::clarity_state_epoch_key(), &(epoch as u32)) } /// Returns the _current_ total liquid ustx - pub fn get_total_liquid_ustx(&mut self) -> u128 { - self.get_value( - ClarityDatabase::ustx_liquid_supply_key(), - &TypeSignature::UIntType, - &StacksEpochId::latest(), - ) - .expect("FATAL: failed to load ustx_liquid_supply Clarity key") - .map(|v| v.value.expect_u128()) - .unwrap_or(0) + pub fn get_total_liquid_ustx(&mut self) -> Result { + Ok(self + .get_value( + ClarityDatabase::ustx_liquid_supply_key(), + &TypeSignature::UIntType, + &StacksEpochId::latest(), + ) + .map_err(|_| { + InterpreterError::Expect( + "FATAL: failed to load ustx_liquid_supply Clarity key".into(), + ) + })? + .map(|v| v.value.expect_u128()) + .transpose()? 
+ .unwrap_or(0)) } - fn set_ustx_liquid_supply(&mut self, set_to: u128) { + fn set_ustx_liquid_supply(&mut self, set_to: u128) -> Result<()> { self.put_value( ClarityDatabase::ustx_liquid_supply_key(), Value::UInt(set_to), // okay to pin epoch, because ustx_liquid_supply does not need to sanitize &StacksEpochId::Epoch21, ) - .expect("FATAL: Failed to store STX liquid supply"); + .map_err(|_| { + InterpreterError::Expect("FATAL: Failed to store STX liquid supply".into()).into() + }) } pub fn increment_ustx_liquid_supply(&mut self, incr_by: u128) -> Result<()> { - let current = self.get_total_liquid_ustx(); + let current = self.get_total_liquid_ustx()?; let next = current.checked_add(incr_by).ok_or_else(|| { error!("Overflowed `ustx-liquid-supply`"); RuntimeErrorType::ArithmeticOverflow })?; - self.set_ustx_liquid_supply(next); + self.set_ustx_liquid_supply(next)?; Ok(()) } pub fn decrement_ustx_liquid_supply(&mut self, decr_by: u128) -> Result<()> { - let current = self.get_total_liquid_ustx(); + let current = self.get_total_liquid_ustx()?; let next = current.checked_sub(decr_by).ok_or_else(|| { error!("`stx-burn?` accepted that reduces `ustx-liquid-supply` below 0"); RuntimeErrorType::ArithmeticUnderflow })?; - self.set_ustx_liquid_supply(next); + self.set_ustx_liquid_supply(next)?; Ok(()) } @@ -790,12 +835,17 @@ impl<'a> ClarityDatabase<'a> { /// Returns the ID of a *Stacks* block, by a *Stacks* block height. /// /// Fails if `block_height` >= the "currently" under construction Stacks block height. - pub fn get_index_block_header_hash(&mut self, block_height: u32) -> StacksBlockId { + pub fn get_index_block_header_hash(&mut self, block_height: u32) -> Result { self.store .get_block_header_hash(block_height) // the caller is responsible for ensuring that the block_height given // is < current_block_height, so this should _always_ return a value. - .expect("Block header hash must return for provided block height") + .ok_or_else(|| { + InterpreterError::Expect( + "Block header hash must return for provided block height".into(), + ) + .into() + }) } /// This is the height we are currently constructing. It comes from the MARF. @@ -816,11 +866,11 @@ impl<'a> ClarityDatabase<'a> { /// Return the height for PoX v2 -> v3 auto unlocks /// from the burn state db - pub fn get_v2_unlock_height(&mut self) -> u32 { - if self.get_clarity_epoch_version() >= StacksEpochId::Epoch22 { - self.burn_state_db.get_v2_unlock_height() + pub fn get_v2_unlock_height(&mut self) -> Result { + if self.get_clarity_epoch_version()? >= StacksEpochId::Epoch22 { + Ok(self.burn_state_db.get_v2_unlock_height()) } else { - u32::MAX + Ok(u32::MAX) } } @@ -829,46 +879,52 @@ impl<'a> ClarityDatabase<'a> { /// This is the burnchain block height of the parent of the Stacks block at the current Stacks /// block height (i.e. that returned by `get_index_block_header_hash` for /// `get_current_block_height`). 
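The `ClarityDatabase` getters in these hunks trade panics for `Result`, and where a fallible decoder is mapped over an optional stored value, the resulting `Option<Result<_, _>>` is flattened with `transpose()` before `?` applies, as in `get_total_liquid_ustx`. A minimal sketch of that transpose-then-default shape; `decode_u128` and the string-backed lookup are illustrative assumptions, not the real serialization.

#[derive(Debug)]
struct DecodeError;

// Stand-in for the fallible `expect_u128` decode step.
fn decode_u128(raw: &str) -> Result<u128, DecodeError> {
    raw.parse::<u128>().map_err(|_| DecodeError)
}

// Stand-in for the `get_value(..)` lookup: the key may be absent, and a
// present value may still fail to decode. Mapping the decoder over the
// Option yields Option<Result<..>>; transpose() flips that to
// Result<Option<..>> so `?` applies, and a missing value falls back to zero.
fn total_liquid_ustx(stored: Option<&str>) -> Result<u128, DecodeError> {
    Ok(stored
        .map(decode_u128)
        .transpose()?
        .unwrap_or(0))
}

fn main() {
    assert_eq!(total_liquid_ustx(Some("42")).unwrap(), 42);
    assert_eq!(total_liquid_ustx(None).unwrap(), 0);
    assert!(total_liquid_ustx(Some("not-a-number")).is_err());
}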
- pub fn get_current_burnchain_block_height(&mut self) -> u32 { + pub fn get_current_burnchain_block_height(&mut self) -> Result { let cur_stacks_height = self.store.get_current_block_height(); let last_mined_bhh = if cur_stacks_height == 0 { - return self.burn_state_db.get_burn_start_height(); + return Ok(self.burn_state_db.get_burn_start_height()); } else { - self.get_index_block_header_hash( - cur_stacks_height - .checked_sub(1) - .expect("BUG: cannot eval burn-block-height in boot code"), - ) + self.get_index_block_header_hash(cur_stacks_height.checked_sub(1).ok_or_else( + || { + InterpreterError::Expect( + "BUG: cannot eval burn-block-height in boot code".into(), + ) + }, + )?)? }; self.get_burnchain_block_height(&last_mined_bhh) - .unwrap_or_else(|| { - panic!( + .ok_or_else(|| { + InterpreterError::Expect(format!( "Block header hash '{}' must return for provided stacks block height {}", &last_mined_bhh, cur_stacks_height - ) + )) + .into() }) } - pub fn get_block_header_hash(&mut self, block_height: u32) -> BlockHeaderHash { - let id_bhh = self.get_index_block_header_hash(block_height); + pub fn get_block_header_hash(&mut self, block_height: u32) -> Result { + let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_stacks_block_header_hash_for_block(&id_bhh) - .expect("Failed to get block data.") + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } - pub fn get_block_time(&mut self, block_height: u32) -> u64 { - let id_bhh = self.get_index_block_header_hash(block_height); + pub fn get_block_time(&mut self, block_height: u32) -> Result { + let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_burn_block_time_for_block(&id_bhh) - .expect("Failed to get block data.") + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } - pub fn get_burnchain_block_header_hash(&mut self, block_height: u32) -> BurnchainHeaderHash { - let id_bhh = self.get_index_block_header_hash(block_height); + pub fn get_burnchain_block_header_hash( + &mut self, + block_height: u32, + ) -> Result { + let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_burn_header_hash_for_block(&id_bhh) - .expect("Failed to get block data.") + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } /// 1. Get the current Stacks tip height (which is in the process of being evaluated) @@ -876,41 +932,41 @@ impl<'a> ClarityDatabase<'a> { /// This is the highest Stacks block in this fork whose consensus hash is known. /// 3. Resolve the parent StacksBlockId to its consensus hash /// 4. 
Resolve the consensus hash to the associated SortitionId - fn get_sortition_id_for_stacks_tip(&mut self) -> Option { + fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { let current_stacks_height = self.get_current_block_height(); if current_stacks_height < 1 { // we are in the Stacks genesis block - return None; + return Ok(None); } // this is the StacksBlockId of the last block evaluated in this fork - let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1); + let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; // infallible, since we always store the consensus hash with the StacksBlockId in the // headers DB let consensus_hash = self .headers_db .get_consensus_hash_for_block(&parent_id_bhh) - .unwrap_or_else(|| { - panic!( + .ok_or_else(|| { + InterpreterError::Expect(format!( "FATAL: no consensus hash found for StacksBlockId {}", &parent_id_bhh - ) - }); + )) + })?; // infallible, since every sortition has a consensus hash let sortition_id = self .burn_state_db .get_sortition_id_from_consensus_hash(&consensus_hash) - .unwrap_or_else(|| { - panic!( + .ok_or_else(|| { + InterpreterError::Expect(format!( "FATAL: no SortitionID found for consensus hash {}", &consensus_hash - ) - }); + )) + })?; - Some(sortition_id) + Ok(Some(sortition_id)) } /// Fetch the burnchain block header hash for a given burnchain height. @@ -923,10 +979,14 @@ impl<'a> ClarityDatabase<'a> { pub fn get_burnchain_block_header_hash_for_burnchain_height( &mut self, burnchain_block_height: u32, - ) -> Option { - let sortition_id = self.get_sortition_id_for_stacks_tip()?; - self.burn_state_db - .get_burn_header_hash(burnchain_block_height, &sortition_id) + ) -> Result> { + let sortition_id = match self.get_sortition_id_for_stacks_tip()? { + Some(x) => x, + None => return Ok(None), + }; + Ok(self + .burn_state_db + .get_burn_header_hash(burnchain_block_height, &sortition_id)) } /// Get the PoX reward addresses and per-address payout for a given burnchain height. Because the burnchain can fork, @@ -934,56 +994,73 @@ impl<'a> ClarityDatabase<'a> { pub fn get_pox_payout_addrs_for_burnchain_height( &mut self, burnchain_block_height: u32, - ) -> Option<(Vec, u128)> { - let sortition_id = self.get_sortition_id_for_stacks_tip()?; - self.burn_state_db - .get_pox_payout_addrs(burnchain_block_height, &sortition_id) + ) -> Result, u128)>> { + let sortition_id = match self.get_sortition_id_for_stacks_tip()? 
{ + Some(x) => x, + None => return Ok(None), + }; + Ok(self + .burn_state_db + .get_pox_payout_addrs(burnchain_block_height, &sortition_id)) } pub fn get_burnchain_block_height(&mut self, id_bhh: &StacksBlockId) -> Option { self.headers_db.get_burn_block_height_for_block(id_bhh) } - pub fn get_block_vrf_seed(&mut self, block_height: u32) -> VRFSeed { - let id_bhh = self.get_index_block_header_hash(block_height); + pub fn get_block_vrf_seed(&mut self, block_height: u32) -> Result { + let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_vrf_seed_for_block(&id_bhh) - .expect("Failed to get block data.") + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } - pub fn get_miner_address(&mut self, block_height: u32) -> StandardPrincipalData { - let id_bhh = self.get_index_block_header_hash(block_height); - self.headers_db + pub fn get_miner_address(&mut self, block_height: u32) -> Result { + let id_bhh = self.get_index_block_header_hash(block_height)?; + Ok(self + .headers_db .get_miner_address(&id_bhh) - .expect("Failed to get block data.") - .into() + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))? + .into()) } - pub fn get_miner_spend_winner(&mut self, block_height: u32) -> u128 { + pub fn get_miner_spend_winner(&mut self, block_height: u32) -> Result { if block_height == 0 { - return 0; + return Ok(0); } - let id_bhh = self.get_index_block_header_hash(block_height); - self.headers_db + let id_bhh = self.get_index_block_header_hash(block_height)?; + Ok(self + .headers_db .get_burnchain_tokens_spent_for_winning_block(&id_bhh) - .expect("FATAL: no winning burnchain token spend record for block") + .ok_or_else(|| { + InterpreterError::Expect( + "FATAL: no winning burnchain token spend record for block".into(), + ) + })? + .into()) } - pub fn get_miner_spend_total(&mut self, block_height: u32) -> u128 { + pub fn get_miner_spend_total(&mut self, block_height: u32) -> Result { if block_height == 0 { - return 0; + return Ok(0); } - let id_bhh = self.get_index_block_header_hash(block_height); - self.headers_db + let id_bhh = self.get_index_block_header_hash(block_height)?; + Ok(self + .headers_db .get_burnchain_tokens_spent_for_block(&id_bhh) - .expect("FATAL: no total burnchain token spend record for block") + .ok_or_else(|| { + InterpreterError::Expect( + "FATAL: no total burnchain token spend record for block".into(), + ) + })? 
+ .into()) } - pub fn get_block_reward(&mut self, block_height: u32) -> Option { + pub fn get_block_reward(&mut self, block_height: u32) -> Result> { if block_height == 0 { - return None; + return Ok(None); } let cur_height: u64 = self.get_current_block_height().into(); @@ -991,25 +1068,29 @@ impl<'a> ClarityDatabase<'a> { // reward for the *child* of this block must have matured, since that determines the // streamed tx fee reward portion if ((block_height + 1) as u64) + MINER_REWARD_MATURITY >= cur_height { - return None; + return Ok(None); } - let id_bhh = self.get_index_block_header_hash(block_height); - let reward = self + let id_bhh = self.get_index_block_header_hash(block_height)?; + let reward: u128 = self .headers_db .get_tokens_earned_for_block(&id_bhh) - .expect("FATAL: matured block has no recorded reward"); + .map(|x| x.into()) + .ok_or_else(|| { + InterpreterError::Expect("FATAL: matured block has no recorded reward".into()) + })?; - Some(reward) + Ok(Some(reward)) } - pub fn get_stx_btc_ops_processed(&mut self) -> u64 { - self.get("vm_pox::stx_btc_ops::processed_blocks") - .unwrap_or(0) + pub fn get_stx_btc_ops_processed(&mut self) -> Result { + Ok(self + .get("vm_pox::stx_btc_ops::processed_blocks")? + .unwrap_or(0)) } - pub fn set_stx_btc_ops_processed(&mut self, processed: u64) { - self.put("vm_pox::stx_btc_ops::processed_blocks", &processed); + pub fn set_stx_btc_ops_processed(&mut self, processed: u64) -> Result<()> { + self.put("vm_pox::stx_btc_ops::processed_blocks", &processed) } } @@ -1031,8 +1112,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result<()> { let key = ClarityDatabase::make_microblock_pubkey_height_key(pubkey_hash); let value = format!("{}", &height); - self.put(&key, &value); - Ok(()) + self.put(&key, &value) } pub fn get_cc_special_cases_handler(&self) -> Option { @@ -1049,64 +1129,95 @@ impl<'a> ClarityDatabase<'a> { let value = Value::Tuple( TupleData::from_data(vec![ ( - ClarityName::try_from("reporter").expect("BUG: valid string representation"), + ClarityName::try_from("reporter").map_err(|_| { + InterpreterError::Expect("BUG: valid string representation".into()) + })?, Value::Principal(PrincipalData::Standard(reporter.clone())), ), ( - ClarityName::try_from("sequence").expect("BUG: valid string representation"), + ClarityName::try_from("sequence").map_err(|_| { + InterpreterError::Expect("BUG: valid string representation".into()) + })?, Value::UInt(seq as u128), ), ]) - .expect("BUG: valid tuple representation"), + .map_err(|_| InterpreterError::Expect("BUG: valid tuple representation".into()))?, ); let mut value_bytes = vec![]; - value - .serialize_write(&mut value_bytes) - .expect("BUG: valid tuple representation did not serialize"); + value.serialize_write(&mut value_bytes).map_err(|_| { + InterpreterError::Expect("BUG: valid tuple representation did not serialize".into()) + })?; let value_str = to_hex(&value_bytes); - self.put(&key, &value_str); - Ok(()) + self.put(&key, &value_str) } - pub fn get_microblock_pubkey_hash_height(&mut self, pubkey_hash: &Hash160) -> Option { + pub fn get_microblock_pubkey_hash_height( + &mut self, + pubkey_hash: &Hash160, + ) -> Result> { let key = ClarityDatabase::make_microblock_pubkey_height_key(pubkey_hash); - self.get(&key).map(|height_str: String| { - height_str - .parse::() - .expect("BUG: inserted non-u32 as height of microblock pubkey hash") - }) + self.get(&key)? 
+ .map(|height_str: String| { + height_str.parse::().map_err(|_| { + InterpreterError::Expect( + "BUG: inserted non-u32 as height of microblock pubkey hash".into(), + ) + .into() + }) + }) + .transpose() } /// Returns (who-reported-the-poison-microblock, sequence-of-microblock-fork) pub fn get_microblock_poison_report( &mut self, height: u32, - ) -> Option<(StandardPrincipalData, u16)> { + ) -> Result> { let key = ClarityDatabase::make_microblock_poison_key(height); - self.get(&key).map(|reporter_hex_str: String| { - let reporter_value = Value::try_deserialize_hex_untyped(&reporter_hex_str) - .expect("BUG: failed to decode serialized poison-microblock reporter"); - let tuple_data = reporter_value.expect_tuple(); - let reporter_value = tuple_data - .get("reporter") - .expect("BUG: poison-microblock report has no 'reporter'") - .to_owned(); - let seq_value = tuple_data - .get("sequence") - .expect("BUG: poison-microblock report has no 'sequence'") - .to_owned(); - - let reporter_principal = reporter_value.expect_principal(); - let seq_u128 = seq_value.expect_u128(); - - let seq: u16 = seq_u128.try_into().expect("BUG: seq exceeds u16 max"); - if let PrincipalData::Standard(principal_data) = reporter_principal { - (principal_data, seq) - } else { - panic!("BUG: poison-microblock report principal is not a standard principal"); - } - }) + self.get(&key)? + .map(|reporter_hex_str: String| { + let reporter_value = Value::try_deserialize_hex_untyped(&reporter_hex_str) + .map_err(|_| { + InterpreterError::Expect( + "BUG: failed to decode serialized poison-microblock reporter".into(), + ) + })?; + let tuple_data = reporter_value.expect_tuple()?; + let reporter_value = tuple_data + .get("reporter") + .map_err(|_| { + InterpreterError::Expect( + "BUG: poison-microblock report has no 'reporter'".into(), + ) + })? + .to_owned(); + let seq_value = tuple_data + .get("sequence") + .map_err(|_| { + InterpreterError::Expect( + "BUG: poison-microblock report has no 'sequence'".into(), + ) + })? + .to_owned(); + + let reporter_principal = reporter_value.expect_principal()?; + let seq_u128 = seq_value.expect_u128()?; + + let seq: u16 = seq_u128 + .try_into() + .map_err(|_| InterpreterError::Expect("BUG: seq exceeds u16 max".into()))?; + if let PrincipalData::Standard(principal_data) = reporter_principal { + Ok((principal_data, seq)) + } else { + return Err(InterpreterError::Expect( + "BUG: poison-microblock report principal is not a standard principal" + .into(), + ) + .into()); + } + }) + .transpose() } } @@ -1126,12 +1237,12 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, variable_name: &str, value_type: TypeSignature, - ) -> DataVariableMetadata { + ) -> Result { let variable_data = DataVariableMetadata { value_type }; let key = ClarityDatabase::make_metadata_key(StoreType::VariableMeta, variable_name); - self.insert_metadata(contract_identifier, &key, &variable_data); - variable_data + self.insert_metadata(contract_identifier, &key, &variable_data)?; + Ok(variable_data) } pub fn load_variable( @@ -1173,7 +1284,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result { if !variable_descriptor .value_type - .admits(&self.get_clarity_epoch_version(), &value)? + .admits(&self.get_clarity_epoch_version()?, &value)? 
{ return Err( CheckErrors::TypeValueError(variable_descriptor.value_type.clone(), value).into(), @@ -1260,16 +1371,16 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_type: TypeSignature, value_type: TypeSignature, - ) -> DataMapMetadata { + ) -> Result { let data = DataMapMetadata { key_type, value_type, }; let key = ClarityDatabase::make_metadata_key(StoreType::DataMapMeta, map_name); - self.insert_metadata(contract_identifier, &key, &data); + self.insert_metadata(contract_identifier, &key, &data)?; - data + Ok(data) } pub fn load_map( @@ -1287,12 +1398,12 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, map_name: &str, key_value: &Value, - ) -> String { - ClarityDatabase::make_key_for_data_map_entry_serialized( + ) -> Result { + Ok(ClarityDatabase::make_key_for_data_map_entry_serialized( contract_identifier, map_name, - &key_value.serialize_to_hex(), - ) + &key_value.serialize_to_hex()?, + )) } fn make_key_for_data_map_entry_serialized( @@ -1330,7 +1441,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result { if !map_descriptor .key_type - .admits(&self.get_clarity_epoch_version(), key_value)? + .admits(&self.get_clarity_epoch_version()?, key_value)? { return Err(CheckErrors::TypeValueError( map_descriptor.key_type.clone(), @@ -1340,7 +1451,7 @@ impl<'a> ClarityDatabase<'a> { } let key = - ClarityDatabase::make_key_for_data_map_entry(contract_identifier, map_name, key_value); + ClarityDatabase::make_key_for_data_map_entry(contract_identifier, map_name, key_value)?; let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; let result = self.get_value(&key, &stored_type, epoch)?; @@ -1361,7 +1472,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result { if !map_descriptor .key_type - .admits(&self.get_clarity_epoch_version(), key_value)? + .admits(&self.get_clarity_epoch_version()?, key_value)? { return Err(CheckErrors::TypeValueError( map_descriptor.key_type.clone(), @@ -1370,7 +1481,7 @@ impl<'a> ClarityDatabase<'a> { .into()); } - let key_serialized = key_value.serialize_to_hex(); + let key_serialized = key_value.serialize_to_hex()?; let key = ClarityDatabase::make_key_for_data_map_entry_serialized( contract_identifier, map_name, @@ -1392,7 +1503,9 @@ impl<'a> ClarityDatabase<'a> { value, serialized_byte_len: serialized_byte_len .checked_add(byte_len_of_serialization(&key_serialized)) - .expect("Overflowed Clarity key/value size"), + .ok_or_else(|| { + InterpreterError::Expect("Overflowed Clarity key/value size".into()) + })?, }), } } @@ -1502,7 +1615,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result { if !map_descriptor .key_type - .admits(&self.get_clarity_epoch_version(), &key_value)? + .admits(&self.get_clarity_epoch_version()?, &key_value)? { return Err( CheckErrors::TypeValueError(map_descriptor.key_type.clone(), key_value).into(), @@ -1510,14 +1623,14 @@ impl<'a> ClarityDatabase<'a> { } if !map_descriptor .value_type - .admits(&self.get_clarity_epoch_version(), &value)? + .admits(&self.get_clarity_epoch_version()?, &value)? 
{ return Err( CheckErrors::TypeValueError(map_descriptor.value_type.clone(), value).into(), ); } - let key_serialized = key_value.serialize_to_hex(); + let key_serialized = key_value.serialize_to_hex()?; let key_serialized_byte_len = byte_len_of_serialization(&key_serialized); let key = ClarityDatabase::make_key_for_quad( contract_identifier, @@ -1541,7 +1654,9 @@ impl<'a> ClarityDatabase<'a> { value: Value::Bool(true), serialized_byte_len: key_serialized_byte_len .checked_add(placed_size) - .expect("Overflowed Clarity key/value size"), + .ok_or_else(|| { + InterpreterError::Expect("Overflowed Clarity key/value size".into()) + })?, }) } @@ -1555,7 +1670,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result { if !map_descriptor .key_type - .admits(&self.get_clarity_epoch_version(), key_value)? + .admits(&self.get_clarity_epoch_version()?, key_value)? { return Err(CheckErrors::TypeValueError( map_descriptor.key_type.clone(), @@ -1564,7 +1679,7 @@ impl<'a> ClarityDatabase<'a> { .into()); } - let key_serialized = key_value.serialize_to_hex(); + let key_serialized = key_value.serialize_to_hex()?; let key_serialized_byte_len = byte_len_of_serialization(&key_serialized); let key = ClarityDatabase::make_key_for_quad( contract_identifier, @@ -1586,7 +1701,9 @@ impl<'a> ClarityDatabase<'a> { value: Value::Bool(true), serialized_byte_len: key_serialized_byte_len .checked_add(*NONE_SERIALIZATION_LEN) - .expect("Overflowed Clarity key/value size"), + .ok_or_else(|| { + InterpreterError::Expect("Overflowed Clarity key/value size".into()) + })?, }) } } @@ -1599,13 +1716,13 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, token_name: &str, total_supply: &Option, - ) -> FungibleTokenMetadata { + ) -> Result { let data = FungibleTokenMetadata { total_supply: *total_supply, }; let key = ClarityDatabase::make_metadata_key(StoreType::FungibleTokenMeta, token_name); - self.insert_metadata(contract_identifier, &key, &data); + self.insert_metadata(contract_identifier, &key, &data)?; // total supply _is_ included in the consensus hash let supply_key = ClarityDatabase::make_key_for_trip( @@ -1613,9 +1730,9 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - self.put(&supply_key, &(0_u128)); + self.put(&supply_key, &(0_u128))?; - data + Ok(data) } pub fn load_ft( @@ -1634,14 +1751,14 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, token_name: &str, key_type: &TypeSignature, - ) -> NonFungibleTokenMetadata { + ) -> Result { let data = NonFungibleTokenMetadata { key_type: key_type.clone(), }; let key = ClarityDatabase::make_metadata_key(StoreType::NonFungibleTokenMeta, token_name); - self.insert_metadata(contract_identifier, &key, &data); + self.insert_metadata(contract_identifier, &key, &data)?; - data + Ok(data) } fn load_nft( @@ -1667,9 +1784,9 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - let current_supply: u128 = self - .get(&key) - .expect("ERROR: Clarity VM failed to track token supply."); + let current_supply: u128 = self.get(&key)?.ok_or_else(|| { + InterpreterError::Expect("ERROR: Clarity VM failed to track token supply.".into()) + })?; let new_supply = current_supply .checked_add(amount) @@ -1681,8 +1798,7 @@ impl<'a> ClarityDatabase<'a> { } } - self.put(&key, &new_supply); - Ok(()) + self.put(&key, &new_supply) } pub fn checked_decrease_token_supply( @@ -1696,9 +1812,9 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - let current_supply: u128 = 
self - .get(&key) - .expect("ERROR: Clarity VM failed to track token supply."); + let current_supply: u128 = self.get(&key)?.ok_or_else(|| { + InterpreterError::Expect("ERROR: Clarity VM failed to track token supply.".into()) + })?; if amount > current_supply { return Err(RuntimeErrorType::SupplyUnderflow(current_supply, amount).into()); @@ -1706,8 +1822,7 @@ impl<'a> ClarityDatabase<'a> { let new_supply = current_supply - amount; - self.put(&key, &new_supply); - Ok(()) + self.put(&key, &new_supply) } pub fn get_ft_balance( @@ -1728,7 +1843,7 @@ impl<'a> ClarityDatabase<'a> { &principal.serialize(), ); - let result = self.get(&key); + let result = self.get(&key)?; match result { None => Ok(0), Some(balance) => Ok(balance), @@ -1748,9 +1863,7 @@ impl<'a> ClarityDatabase<'a> { token_name, &principal.serialize(), ); - self.put(&key, &balance); - - Ok(()) + self.put(&key, &balance) } pub fn get_ft_supply( @@ -1763,9 +1876,9 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - let supply = self - .get(&key) - .expect("ERROR: Clarity VM failed to track token supply."); + let supply = self.get(&key)?.ok_or_else(|| { + InterpreterError::Expect("ERROR: Clarity VM failed to track token supply.".into()) + })?; Ok(supply) } @@ -1776,7 +1889,7 @@ impl<'a> ClarityDatabase<'a> { asset: &Value, key_type: &TypeSignature, ) -> Result { - if !key_type.admits(&self.get_clarity_epoch_version(), asset)? { + if !key_type.admits(&self.get_clarity_epoch_version()?, asset)? { return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); } @@ -1784,22 +1897,23 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize_to_hex(), + &asset.serialize_to_hex()?, ); - let epoch = self.get_clarity_epoch_version(); + let epoch = self.get_clarity_epoch_version()?; let value: Option = self.get_value( &key, - &TypeSignature::new_option(TypeSignature::PrincipalType).unwrap(), + &TypeSignature::new_option(TypeSignature::PrincipalType) + .map_err(|_| InterpreterError::Expect("Unexpected type failure".into()))?, &epoch, )?; let owner = match value { - Some(owner) => owner.value.expect_optional(), + Some(owner) => owner.value.expect_optional()?, None => return Err(RuntimeErrorType::NoSuchToken.into()), }; let principal = match owner { - Some(value) => value.expect_principal(), + Some(value) => value.expect_principal()?, None => return Err(RuntimeErrorType::NoSuchToken.into()), }; @@ -1824,7 +1938,7 @@ impl<'a> ClarityDatabase<'a> { key_type: &TypeSignature, epoch: &StacksEpochId, ) -> Result<()> { - if !key_type.admits(&self.get_clarity_epoch_version(), asset)? { + if !key_type.admits(&self.get_clarity_epoch_version()?, asset)? { return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); } @@ -1832,7 +1946,7 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize_to_hex(), + &asset.serialize_to_hex()?, ); let value = Value::some(Value::Principal(principal.clone()))?; @@ -1849,7 +1963,7 @@ impl<'a> ClarityDatabase<'a> { key_type: &TypeSignature, epoch: &StacksEpochId, ) -> Result<()> { - if !key_type.admits(&self.get_clarity_epoch_version(), asset)? { + if !key_type.admits(&self.get_clarity_epoch_version()?, asset)? 
{ return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); } @@ -1857,7 +1971,7 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize_to_hex(), + &asset.serialize_to_hex()?, ); self.put_value(&key, Value::none(), epoch)?; @@ -1890,9 +2004,9 @@ impl<'a> ClarityDatabase<'a> { pub fn get_stx_balance_snapshot<'conn>( &'conn mut self, principal: &PrincipalData, - ) -> STXBalanceSnapshot<'a, 'conn> { - let stx_balance = self.get_account_stx_balance(principal); - let cur_burn_height = self.get_current_burnchain_block_height() as u64; + ) -> Result> { + let stx_balance = self.get_account_stx_balance(principal)?; + let cur_burn_height = u64::from(self.get_current_burnchain_block_height()?); test_debug!("Balance of {} (raw={},locked={},unlock-height={},current-height={}) is {} (has_unlockable_tokens_at_burn_block={})", principal, @@ -1900,17 +2014,22 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()?)?, + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()?)); - STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) + Ok(STXBalanceSnapshot::new( + principal, + stx_balance, + cur_burn_height, + self, + )) } pub fn get_stx_balance_snapshot_genesis<'conn>( &'conn mut self, principal: &PrincipalData, - ) -> STXBalanceSnapshot<'a, 'conn> { - let stx_balance = self.get_account_stx_balance(principal); + ) -> Result> { + let stx_balance = self.get_account_stx_balance(principal)?; let cur_burn_height = 0; test_debug!("Balance of {} (raw={},locked={},unlock-height={},current-height={}) is {} (has_unlockable_tokens_at_burn_block={})", @@ -1919,30 +2038,39 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()?)?, + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()?)); - STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) + Ok(STXBalanceSnapshot::new( + principal, + stx_balance, + cur_burn_height, + self, + )) } - pub fn get_account_stx_balance(&mut self, principal: &PrincipalData) -> STXBalance { + pub fn get_account_stx_balance(&mut self, principal: &PrincipalData) -> Result { let key = ClarityDatabase::make_key_for_account_balance(principal); debug!("Fetching account balance"; "principal" => %principal.to_string()); - let result = self.get(&key); - match result { + let result = self.get(&key)?; + Ok(match result { None => STXBalance::zero(), Some(balance) => balance, - } + }) } - pub fn get_account_nonce(&mut self, principal: &PrincipalData) -> u64 { + pub fn get_account_nonce(&mut self, principal: 
&PrincipalData) -> Result<u64> { let key = ClarityDatabase::make_key_for_account_nonce(principal); - self.get(&key).unwrap_or(0) + let result = self.get(&key)?; + Ok(match result { + None => 0, + Some(nonce) => nonce, + }) } - pub fn set_account_nonce(&mut self, principal: &PrincipalData, nonce: u64) { + pub fn set_account_nonce(&mut self, principal: &PrincipalData, nonce: u64) -> Result<()> { let key = ClarityDatabase::make_key_for_account_nonce(principal); - self.put(&key, &nonce); + self.put(&key, &nonce) } } diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index b9400c227a..f3d9d2bb09 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -59,14 +59,14 @@ pub type SpecialCaseHandler = &'static dyn Fn( // attempt to continue processing in the event of an unexpected storage error. pub trait ClarityBackingStore { /// put K-V data into the committed datastore - fn put_all(&mut self, items: Vec<(String, String)>); + fn put_all(&mut self, items: Vec<(String, String)>) -> Result<()>; /// fetch K-V out of the committed datastore - fn get(&mut self, key: &str) -> Option<String>; + fn get(&mut self, key: &str) -> Result<Option<String>>; /// fetch K-V out of the committed datastore, along with the byte representation /// of the Merkle proof for that key-value pair - fn get_with_proof(&mut self, key: &str) -> Option<(String, Vec<u8>)>; - fn has_entry(&mut self, key: &str) -> bool { - self.get(key).is_some() + fn get_with_proof(&mut self, key: &str) -> Result<Option<(String, Vec<u8>)>>; + fn has_entry(&mut self, key: &str) -> Result<bool> { + Ok(self.get(key)?.is_some()) } /// change the current MARF context to service reads from a different chain_tip @@ -110,19 +110,24 @@ pub trait ClarityBackingStore { ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { let key = make_contract_hash_key(contract); let contract_commitment = self - .get(&key) + .get(&key)?
.map(|x| ContractCommitment::deserialize(&x)) .ok_or_else(|| CheckErrors::NoSuchContract(contract.to_string()))?; let ContractCommitment { block_height, hash: contract_hash, - } = contract_commitment; + } = contract_commitment?; let bhh = self.get_block_at_height(block_height) - .expect("Should always be able to map from height to block hash when looking up contract information."); + .ok_or_else(|| InterpreterError::Expect("Should always be able to map from height to block hash when looking up contract information.".into()))?; Ok((bhh, contract_hash)) } - fn insert_metadata(&mut self, contract: &QualifiedContractIdentifier, key: &str, value: &str) { + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> Result<()> { let bhh = self.get_open_chain_tip(); SqliteConnection::insert_metadata( self.get_side_store(), @@ -139,12 +144,7 @@ pub trait ClarityBackingStore { key: &str, ) -> Result> { let (bhh, _) = self.get_contract_hash(contract)?; - Ok(SqliteConnection::get_metadata( - self.get_side_store(), - &bhh, - &contract.to_string(), - key, - )) + SqliteConnection::get_metadata(self.get_side_store(), &bhh, &contract.to_string(), key) } fn get_metadata_manual( @@ -158,18 +158,17 @@ pub trait ClarityBackingStore { warn!("Unknown block height when manually querying metadata"; "block_height" => at_height); RuntimeErrorType::BadBlockHeight(at_height.to_string()) })?; - Ok(SqliteConnection::get_metadata( - self.get_side_store(), - &bhh, - &contract.to_string(), - key, - )) + SqliteConnection::get_metadata(self.get_side_store(), &bhh, &contract.to_string(), key) } - fn put_all_metadata(&mut self, items: Vec<((QualifiedContractIdentifier, String), String)>) { + fn put_all_metadata( + &mut self, + items: Vec<((QualifiedContractIdentifier, String), String)>, + ) -> Result<()> { for ((contract, key), value) in items.into_iter() { - self.insert_metadata(&contract, &key, &value); + self.insert_metadata(&contract, &key, &value)?; } + Ok(()) } } @@ -190,12 +189,21 @@ impl ClaritySerializable for ContractCommitment { } impl ClarityDeserializable for ContractCommitment { - fn deserialize(input: &str) -> ContractCommitment { - assert_eq!(input.len(), 72); - let hash = Sha512Trunc256Sum::from_hex(&input[0..64]).expect("Hex decode fail."); - let height_bytes = hex_bytes(&input[64..72]).expect("Hex decode fail."); - let block_height = u32::from_be_bytes(height_bytes.as_slice().try_into().unwrap()); - ContractCommitment { hash, block_height } + fn deserialize(input: &str) -> Result { + if input.len() != 72 { + return Err(InterpreterError::Expect("Unexpected input length".into()).into()); + } + let hash = Sha512Trunc256Sum::from_hex(&input[0..64]) + .map_err(|_| InterpreterError::Expect("Hex decode fail.".into()))?; + let height_bytes = hex_bytes(&input[64..72]) + .map_err(|_| InterpreterError::Expect("Hex decode fail.".into()))?; + let block_height = u32::from_be_bytes( + height_bytes + .as_slice() + .try_into() + .map_err(|_| InterpreterError::Expect("Block height decode fail.".into()))?, + ); + Ok(ContractCommitment { hash, block_height }) } } @@ -219,16 +227,17 @@ impl NullBackingStore { } } +#[allow(clippy::panic)] impl ClarityBackingStore for NullBackingStore { fn set_block_hash(&mut self, _bhh: StacksBlockId) -> Result { panic!("NullBackingStore can't set block hash") } - fn get(&mut self, _key: &str) -> Option { + fn get(&mut self, _key: &str) -> Result> { panic!("NullBackingStore can't retrieve data") } - fn get_with_proof(&mut self, _key: &str) -> 
Option<(String, Vec)> { + fn get_with_proof(&mut self, _key: &str) -> Result)>> { panic!("NullBackingStore can't retrieve data") } @@ -252,7 +261,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't get current block height") } - fn put_all(&mut self, mut _items: Vec<(String, String)>) { + fn put_all(&mut self, mut _items: Vec<(String, String)>) -> Result<()> { panic!("NullBackingStore cannot put") } } @@ -268,6 +277,7 @@ impl Default for MemoryBackingStore { } impl MemoryBackingStore { + #[allow(clippy::unwrap_used)] pub fn new() -> MemoryBackingStore { let side_store = SqliteConnection::memory().unwrap(); @@ -292,12 +302,12 @@ impl ClarityBackingStore for MemoryBackingStore { Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) } - fn get(&mut self, key: &str) -> Option { + fn get(&mut self, key: &str) -> Result> { SqliteConnection::get(self.get_side_store(), key) } - fn get_with_proof(&mut self, key: &str) -> Option<(String, Vec)> { - SqliteConnection::get(self.get_side_store(), key).map(|x| (x, vec![])) + fn get_with_proof(&mut self, key: &str) -> Result)>> { + Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } fn get_side_store(&mut self) -> &Connection { @@ -328,9 +338,10 @@ impl ClarityBackingStore for MemoryBackingStore { None } - fn put_all(&mut self, items: Vec<(String, String)>) { + fn put_all(&mut self, items: Vec<(String, String)>) -> Result<()> { for (key, value) in items.into_iter() { - SqliteConnection::put(self.get_side_store(), &key, &value); + SqliteConnection::put(self.get_side_store(), &key, &value)?; } + Ok(()) } } diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 8431a1742a..74b2c724fc 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -26,7 +26,7 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use super::clarity_store::SpecialCaseHandler; use super::{ClarityBackingStore, ClarityDeserializable}; use crate::vm::database::clarity_store::make_contract_hash_key; -use crate::vm::errors::InterpreterResult; +use crate::vm::errors::{InterpreterError, InterpreterResult}; use crate::vm::types::serialization::SerializationError; use crate::vm::types::{ QualifiedContractIdentifier, SequenceData, SequenceSubtype, TupleData, TypeSignature, @@ -53,7 +53,7 @@ fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, _val fn rollback_check_pre_bottom_commit( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap>, -) -> Vec<(T, String)> +) -> Result, InterpreterError> where T: Eq + Hash + Clone, { @@ -64,8 +64,8 @@ where let output = edits .into_iter() .map(|(key, _)| { - let value = rollback_lookup_map(&key, &(), lookup_map); - (key, value) + let value = rollback_lookup_map(&key, &(), lookup_map)?; + Ok((key, value)) }) .collect(); @@ -183,23 +183,27 @@ fn rollback_lookup_map( key: &T, value: &RollbackValueCheck, lookup_map: &mut HashMap>, -) -> String +) -> Result where T: Eq + Hash + Clone, { let popped_value; let remove_edit_deque = { - let key_edit_history = lookup_map - .get_mut(key) - .expect("ERROR: Clarity VM had edit log entry, but not lookup_map entry"); - popped_value = key_edit_history.pop().unwrap(); + let key_edit_history = lookup_map.get_mut(key).ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM had edit log entry, but not lookup_map entry".into(), + ) + })?; + popped_value = key_edit_history.pop().ok_or_else(|| { + 
InterpreterError::Expect("ERROR: expected value in edit history".into()) + })?; rollback_value_check(&popped_value, value); key_edit_history.is_empty() }; if remove_edit_deque { lookup_map.remove(key); } - popped_value + Ok(popped_value) } impl<'a> RollbackWrapper<'a> { @@ -240,58 +244,69 @@ impl<'a> RollbackWrapper<'a> { // Rollback the child's edits. // this clears all edits from the child's edit queue, // and removes any of those edits from the lookup map. - pub fn rollback(&mut self) { - let mut last_item = self - .stack - .pop() - .expect("ERROR: Clarity VM attempted to commit past the stack."); + pub fn rollback(&mut self) -> Result<(), InterpreterError> { + let mut last_item = self.stack.pop().ok_or_else(|| { + InterpreterError::Expect("ERROR: Clarity VM attempted to commit past the stack.".into()) + })?; last_item.edits.reverse(); last_item.metadata_edits.reverse(); for (key, value) in last_item.edits.drain(..) { - rollback_lookup_map(&key, &value, &mut self.lookup_map); + rollback_lookup_map(&key, &value, &mut self.lookup_map)?; } for (key, value) in last_item.metadata_edits.drain(..) { - rollback_lookup_map(&key, &value, &mut self.metadata_lookup_map); + rollback_lookup_map(&key, &value, &mut self.metadata_lookup_map)?; } + + Ok(()) } pub fn depth(&self) -> usize { self.stack.len() } - pub fn commit(&mut self) { - let mut last_item = self - .stack - .pop() - .expect("ERROR: Clarity VM attempted to commit past the stack."); - - if self.stack.is_empty() { - // committing to the backing store - let all_edits = rollback_check_pre_bottom_commit(last_item.edits, &mut self.lookup_map); - if !all_edits.is_empty() { - self.store.put_all(all_edits); - } + pub fn commit(&mut self) -> Result<(), InterpreterError> { + let mut last_item = self.stack.pop().ok_or_else(|| { + InterpreterError::Expect("ERROR: Clarity VM attempted to commit past the stack.".into()) + })?; - let metadata_edits = rollback_check_pre_bottom_commit( - last_item.metadata_edits, - &mut self.metadata_lookup_map, - ); - if !metadata_edits.is_empty() { - self.store.put_all_metadata(metadata_edits); - } - } else { + if let Some(next_up) = self.stack.last_mut() { // bubble up to the next item in the stack - let next_up = self.stack.last_mut().unwrap(); + // last_mut() must exist because of the if-statement for (key, value) in last_item.edits.drain(..) { next_up.edits.push((key, value)); } for (key, value) in last_item.metadata_edits.drain(..) 
{ next_up.metadata_edits.push((key, value)); } + } else { + // stack is empty, committing to the backing store + let all_edits = + rollback_check_pre_bottom_commit(last_item.edits, &mut self.lookup_map)?; + if all_edits.len() > 0 { + self.store.put_all(all_edits).map_err(|e| { + InterpreterError::Expect(format!( + "ERROR: Failed to commit data to sql store: {e:?}" + )) + })?; + } + + let metadata_edits = rollback_check_pre_bottom_commit( + last_item.metadata_edits, + &mut self.metadata_lookup_map, + )?; + if metadata_edits.len() > 0 { + self.store.put_all_metadata(metadata_edits).map_err(|e| { + InterpreterError::Expect(format!( + "ERROR: Failed to commit data to sql store: {e:?}" + )) + })?; + } } + + Ok(()) } } @@ -303,27 +318,25 @@ fn inner_put( ) where T: Eq + Hash + Clone, { - if !lookup_map.contains_key(&key) { - lookup_map.insert(key.clone(), Vec::new()); - } - let key_edit_deque = lookup_map.get_mut(&key).unwrap(); + let key_edit_deque = lookup_map.entry(key.clone()).or_insert_with(|| Vec::new()); rollback_edits_push(edits, key, &value); key_edit_deque.push(value); } impl<'a> RollbackWrapper<'a> { - pub fn put(&mut self, key: &str, value: &str) { - let current = self - .stack - .last_mut() - .expect("ERROR: Clarity VM attempted PUT on non-nested context."); - - inner_put( + pub fn put(&mut self, key: &str, value: &str) -> InterpreterResult<()> { + let current = self.stack.last_mut().ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM attempted PUT on non-nested context.".into(), + ) + })?; + + Ok(inner_put( &mut self.lookup_map, &mut current.edits, key.to_string(), value.to_string(), - ) + )) } /// @@ -348,33 +361,34 @@ impl<'a> RollbackWrapper<'a> { /// this function will only return commitment proofs for values _already_ materialized /// in the underlying store. otherwise it returns None. - pub fn get_with_proof(&mut self, key: &str) -> Option<(T, Vec)> + pub fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> where T: ClarityDeserializable, { self.store - .get_with_proof(key) - .map(|(value, proof)| (T::deserialize(&value), proof)) + .get_with_proof(key)? 
+ .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) + .transpose() } - pub fn get(&mut self, key: &str) -> Option + pub fn get(&mut self, key: &str) -> InterpreterResult> where T: ClarityDeserializable, { - self.stack - .last() - .expect("ERROR: Clarity VM attempted GET on non-nested context."); + self.stack.last().ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM attempted GET on non-nested context.".into(), + ) + })?; - let lookup_result = if self.query_pending_data { - self.lookup_map - .get(key) - .and_then(|x| x.last()) - .map(|x| T::deserialize(x)) - } else { - None - }; - - lookup_result.or_else(|| self.store.get(key).map(|x| T::deserialize(&x))) + if self.query_pending_data { + if let Some(pending_value) = self.lookup_map.get(key).and_then(|x| x.last()) { + // if there's pending data and we're querying pending data, return here + return Some(T::deserialize(pending_value)).transpose(); + } + } + // otherwise, lookup from store + self.store.get(key)?.map(|x| T::deserialize(&x)).transpose() } pub fn deserialize_value( @@ -400,17 +414,21 @@ impl<'a> RollbackWrapper<'a> { expected: &TypeSignature, epoch: &StacksEpochId, ) -> Result, SerializationError> { - self.stack - .last() - .expect("ERROR: Clarity VM attempted GET on non-nested context."); + self.stack.last().ok_or_else(|| { + SerializationError::DeserializationError( + "ERROR: Clarity VM attempted GET on non-nested context.".into(), + ) + })?; if self.query_pending_data { if let Some(x) = self.lookup_map.get(key).and_then(|x| x.last()) { return Ok(Some(Self::deserialize_value(x, expected, epoch)?)); } } - - match self.store.get(key) { + let stored_data = self.store.get(key).map_err(|_| { + SerializationError::DeserializationError("ERROR: Clarity backing store failure".into()) + })?; + match stored_data { Some(x) => Ok(Some(Self::deserialize_value(&x, expected, epoch)?)), None => Ok(None), } @@ -430,7 +448,7 @@ impl<'a> RollbackWrapper<'a> { &mut self, contract: &QualifiedContractIdentifier, content_hash: Sha512Trunc256Sum, - ) { + ) -> InterpreterResult<()> { let key = make_contract_hash_key(contract); let value = self.store.make_contract_commitment(content_hash); self.put(&key, &value) @@ -441,20 +459,21 @@ impl<'a> RollbackWrapper<'a> { contract: &QualifiedContractIdentifier, key: &str, value: &str, - ) { - let current = self - .stack - .last_mut() - .expect("ERROR: Clarity VM attempted PUT on non-nested context."); + ) -> Result<(), InterpreterError> { + let current = self.stack.last_mut().ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM attempted PUT on non-nested context.".into(), + ) + })?; let metadata_key = (contract.clone(), key.to_string()); - inner_put( + Ok(inner_put( &mut self.metadata_lookup_map, &mut current.metadata_edits, metadata_key, value.to_string(), - ) + )) } // Throws a NoSuchContract error if contract doesn't exist, @@ -464,9 +483,11 @@ impl<'a> RollbackWrapper<'a> { contract: &QualifiedContractIdentifier, key: &str, ) -> InterpreterResult> { - self.stack - .last() - .expect("ERROR: Clarity VM attempted GET on non-nested context."); + self.stack.last().ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM attempted GET on non-nested context.".into(), + ) + })?; // This is THEORETICALLY a spurious clone, but it's hard to turn something like // (&A, &B) into &(A, B). 
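Nearly every hunk in these files follows one refactoring pattern: a lookup that used to `expect(...)` or `unwrap()` now returns a `Result`, with the failure mapped onto `InterpreterError::Expect` and propagated via `?`, so a corrupt store degrades into a recoverable error instead of a node-killing panic. A minimal sketch of the pattern, using simplified stand-in types rather than the real `InterpreterError`/`InterpreterResult` definitions from `clarity::vm::errors`:

```rust
use std::collections::HashMap;

// Simplified stand-ins: only the shape matches the clarity error
// plumbing, not the actual definitions (assumption for illustration).
#[derive(Debug)]
enum InterpreterError {
    Expect(String),
}
type InterpreterResult<T> = Result<T, InterpreterError>;

struct Store {
    data: HashMap<String, String>,
}

impl Store {
    // Before: `self.data.get(key).cloned().expect("ERROR: missing key")`
    // would abort the whole process on a corrupt store.
    // After: absence becomes a recoverable interpreter error.
    fn get_expected(&self, key: &str) -> InterpreterResult<String> {
        self.data
            .get(key)
            .cloned()
            .ok_or_else(|| InterpreterError::Expect("ERROR: missing key".into()))
    }
}

fn caller(store: &Store) -> InterpreterResult<usize> {
    // `?` replaces the old panic site; the error flows to the caller.
    let value = store.get_expected("some-key")?;
    Ok(value.len())
}

fn main() {
    let store = Store { data: HashMap::new() };
    // On an empty store this now prints an Err instead of panicking.
    println!("{:?}", caller(&store));
}
```

The same shape appears in `rollback_lookup_map` and `RollbackWrapper::commit` above: the old panic message becomes the `Expect` payload, so log output stays comparable while the process survives.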
@@ -493,9 +514,11 @@ impl<'a> RollbackWrapper<'a> { contract: &QualifiedContractIdentifier, key: &str, ) -> InterpreterResult> { - self.stack - .last() - .expect("ERROR: Clarity VM attempted GET on non-nested context."); + self.stack.last().ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM attempted GET on non-nested context.".into(), + ) + })?; // This is THEORETICALLY a spurious clone, but it's hard to turn something like // (&A, &B) into &(A, B). @@ -514,12 +537,14 @@ impl<'a> RollbackWrapper<'a> { } } - pub fn has_entry(&mut self, key: &str) -> bool { - self.stack - .last() - .expect("ERROR: Clarity VM attempted GET on non-nested context."); + pub fn has_entry(&mut self, key: &str) -> InterpreterResult { + self.stack.last().ok_or_else(|| { + InterpreterError::Expect( + "ERROR: Clarity VM attempted GET on non-nested context.".into(), + ) + })?; if self.query_pending_data && self.lookup_map.contains_key(key) { - true + Ok(true) } else { self.store.has_entry(key) } diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 273473c9b3..6b2d64afa5 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -33,21 +33,21 @@ pub struct SqliteConnection { conn: Connection, } -fn sqlite_put(conn: &Connection, key: &str, value: &str) { +fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> { let params: [&dyn ToSql; 2] = [&key, &value]; match conn.execute( "REPLACE INTO data_table (key, value) VALUES (?, ?)", ¶ms, ) { - Ok(_) => {} + Ok(_) => Ok(()), Err(e) => { error!("Failed to insert/replace ({},{}): {:?}", key, value, &e); - panic!("{}", SQL_FAIL_MESSAGE); + Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()) } - }; + } } -fn sqlite_get(conn: &Connection, key: &str) -> Option { +fn sqlite_get(conn: &Connection, key: &str) -> Result> { trace!("sqlite_get {}", key); let params: [&dyn ToSql; 1] = [&key]; let res = match conn @@ -58,10 +58,10 @@ fn sqlite_get(conn: &Connection, key: &str) -> Option { ) .optional() { - Ok(x) => x, + Ok(x) => Ok(x), Err(e) => { error!("Failed to query '{}': {:?}", key, &e); - panic!("{}", SQL_FAIL_MESSAGE); + Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()) } }; @@ -69,16 +69,16 @@ fn sqlite_get(conn: &Connection, key: &str) -> Option { res } -fn sqlite_has_entry(conn: &Connection, key: &str) -> bool { - sqlite_get(conn, key).is_some() +fn sqlite_has_entry(conn: &Connection, key: &str) -> Result { + Ok(sqlite_get(conn, key)?.is_some()) } impl SqliteConnection { - pub fn put(conn: &Connection, key: &str, value: &str) { + pub fn put(conn: &Connection, key: &str, value: &str) -> Result<()> { sqlite_put(conn, key, value) } - pub fn get(conn: &Connection, key: &str) -> Option { + pub fn get(conn: &Connection, key: &str) -> Result> { sqlite_get(conn, key) } @@ -88,7 +88,7 @@ impl SqliteConnection { contract_hash: &str, key: &str, value: &str, - ) { + ) -> Result<()> { let key = format!("clr-meta::{}::{}", contract_hash, key); let params: [&dyn ToSql; 3] = [&bhh, &key, &value]; @@ -103,26 +103,33 @@ impl SqliteConnection { &value.to_string(), &e ); - panic!("{}", SQL_FAIL_MESSAGE); + return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()); } + Ok(()) } - pub fn commit_metadata_to(conn: &Connection, from: &StacksBlockId, to: &StacksBlockId) { + pub fn commit_metadata_to( + conn: &Connection, + from: &StacksBlockId, + to: &StacksBlockId, + ) -> Result<()> { let params = [to, from]; if let Err(e) = conn.execute( "UPDATE metadata_table SET 
blockhash = ? WHERE blockhash = ?", ¶ms, ) { error!("Failed to update {} to {}: {:?}", &from, &to, &e); - panic!("{}", SQL_FAIL_MESSAGE); + return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()); } + Ok(()) } - pub fn drop_metadata(conn: &Connection, from: &StacksBlockId) { + pub fn drop_metadata(conn: &Connection, from: &StacksBlockId) -> Result<()> { if let Err(e) = conn.execute("DELETE FROM metadata_table WHERE blockhash = ?", &[from]) { error!("Failed to drop metadata from {}: {:?}", &from, &e); - panic!("{}", SQL_FAIL_MESSAGE); + return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()); } + Ok(()) } pub fn get_metadata( @@ -130,7 +137,7 @@ impl SqliteConnection { bhh: &StacksBlockId, contract_hash: &str, key: &str, - ) -> Option { + ) -> Result> { let key = format!("clr-meta::{}::{}", contract_hash, key); let params: [&dyn ToSql; 2] = [&bhh, &key]; @@ -142,15 +149,15 @@ impl SqliteConnection { ) .optional() { - Ok(x) => x, + Ok(x) => Ok(x), Err(e) => { error!("Failed to query ({},{}): {:?}", &bhh, &key, &e); - panic!("{}", SQL_FAIL_MESSAGE); + Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()) } } } - pub fn has_entry(conn: &Connection, key: &str) -> bool { + pub fn has_entry(conn: &Connection, key: &str) -> Result { sqlite_has_entry(conn, key) } } diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 89a635765e..d352ba8d72 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -35,7 +35,7 @@ pub trait ClaritySerializable { } pub trait ClarityDeserializable { - fn deserialize(json: &str) -> T; + fn deserialize(json: &str) -> Result; } impl ClaritySerializable for String { @@ -45,8 +45,8 @@ impl ClaritySerializable for String { } impl ClarityDeserializable for String { - fn deserialize(serialized: &str) -> String { - serialized.into() + fn deserialize(serialized: &str) -> Result { + Ok(serialized.into()) } } @@ -58,7 +58,7 @@ macro_rules! clarity_serializable { } } impl ClarityDeserializable<$Name> for $Name { - fn deserialize(json: &str) -> Self { + fn deserialize(json: &str) -> Result { let mut deserializer = serde_json::Deserializer::from_str(&json); // serde's default 128 depth limit can be exhausted // by a 64-stack-depth AST, so disable the recursion limit @@ -66,7 +66,9 @@ macro_rules! clarity_serializable { // use stacker to prevent the deserializer from overflowing. 
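For context on the recursion-limit handling inside `clarity_serializable!` here: serde_json imposes a 128-level depth limit that a 64-deep Clarity AST can exhaust once each value nests a few JSON levels, so the macro disables the limit and hands recursion to `serde_stacker`, which spills deep call frames onto the heap. A self-contained sketch of that combination (assumes a crate depending on `serde`, `serde_stacker`, and `serde_json` built with its `unbounded_depth` feature, which `disable_recursion_limit` requires):

```rust
use serde::Deserialize;

// Deserialize deeply nested JSON without overflowing the call stack:
// serde_json's recursion limit is turned off and serde_stacker grows
// the stack onto the heap as nesting deepens.
fn deserialize_deep(json: &str) -> Result<serde_json::Value, serde_json::Error> {
    let mut deserializer = serde_json::Deserializer::from_str(json);
    deserializer.disable_recursion_limit();
    let deserializer = serde_stacker::Deserializer::new(&mut deserializer);
    serde_json::Value::deserialize(deserializer)
}

fn main() {
    // 1000 nested arrays, far past the default 128-depth limit.
    let deep = "[".repeat(1000) + &"]".repeat(1000);
    assert!(deserialize_deep(&deep).is_ok());
}
```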
// this will instead spill to the heap let deserializer = serde_stacker::Deserializer::new(&mut deserializer); - Deserialize::deserialize(deserializer).expect("Failed to deserialize vm.Value") + Deserialize::deserialize(deserializer).map_err(|_| { + InterpreterError::Expect("Failed to deserialize vm.Value".into()).into() + }) } } }; @@ -160,6 +162,7 @@ pub struct STXBalanceSnapshot<'db, 'conn> { type Result = std::result::Result; impl ClaritySerializable for STXBalance { + #[allow(clippy::expect_used)] fn serialize(&self) -> String { let mut buffer = Vec::new(); match self { @@ -231,24 +234,26 @@ impl ClaritySerializable for STXBalance { } impl ClarityDeserializable for STXBalance { - fn deserialize(input: &str) -> Self { - let bytes = hex_bytes(input).expect("STXBalance deserialization: failed decoding bytes."); - if bytes.len() == STXBalance::unlocked_and_v1_size { - let amount_unlocked = u128::from_be_bytes( - bytes[0..16] - .try_into() - .expect("STXBalance deserialization: failed reading amount_unlocked."), - ); - let amount_locked = u128::from_be_bytes( - bytes[16..32] - .try_into() - .expect("STXBalance deserialization: failed reading amount_locked."), - ); - let unlock_height = u64::from_be_bytes( - bytes[32..40] - .try_into() - .expect("STXBalance deserialization: failed reading unlock_height."), - ); + fn deserialize(input: &str) -> Result { + let bytes = hex_bytes(&input).map_err(|_| { + InterpreterError::Expect("STXBalance deserialization: failed decoding bytes.".into()) + })?; + let result = if bytes.len() == STXBalance::unlocked_and_v1_size { + let amount_unlocked = u128::from_be_bytes(bytes[0..16].try_into().map_err(|_| { + InterpreterError::Expect( + "STXBalance deserialization: failed reading amount_unlocked.".into(), + ) + })?); + let amount_locked = u128::from_be_bytes(bytes[16..32].try_into().map_err(|_| { + InterpreterError::Expect( + "STXBalance deserialization: failed reading amount_locked.".into(), + ) + })?); + let unlock_height = u64::from_be_bytes(bytes[32..40].try_into().map_err(|_| { + InterpreterError::Expect( + "STXBalance deserialization: failed reading unlock_height.".into(), + ) + })?); if amount_locked == 0 { STXBalance::Unlocked { @@ -264,26 +269,26 @@ impl ClarityDeserializable for STXBalance { } else if bytes.len() == STXBalance::v2_and_v3_size { let version = &bytes[0]; if version != &STXBalance::pox_2_version && version != &STXBalance::pox_3_version { - panic!( - "Bad version byte in STX Balance serialization = {}", - version - ); + return Err(InterpreterError::Expect(format!( + "Bad version byte in STX Balance serialization = {version}" + )) + .into()); } - let amount_unlocked = u128::from_be_bytes( - bytes[1..17] - .try_into() - .expect("STXBalance deserialization: failed reading amount_unlocked."), - ); - let amount_locked = u128::from_be_bytes( - bytes[17..33] - .try_into() - .expect("STXBalance deserialization: failed reading amount_locked."), - ); - let unlock_height = u64::from_be_bytes( - bytes[33..41] - .try_into() - .expect("STXBalance deserialization: failed reading unlock_height."), - ); + let amount_unlocked = u128::from_be_bytes(bytes[1..17].try_into().map_err(|_| { + InterpreterError::Expect( + "STXBalance deserialization: failed reading amount_unlocked.".into(), + ) + })?); + let amount_locked = u128::from_be_bytes(bytes[17..33].try_into().map_err(|_| { + InterpreterError::Expect( + "STXBalance deserialization: failed reading amount_locked.".into(), + ) + })?); + let unlock_height = 
u64::from_be_bytes(bytes[33..41].try_into().map_err(|_| { + InterpreterError::Expect( + "STXBalance deserialization: failed reading unlock_height.".into(), + ) + })?); if amount_locked == 0 { STXBalance::Unlocked { @@ -302,11 +307,19 @@ impl ClarityDeserializable for STXBalance { unlock_height, } } else { - unreachable!("Version is checked for pox_3 or pox_2 version compliance above"); + return Err(InterpreterError::Expect( + "Version is checked for pox_3 or pox_2 version compliance above".into(), + ) + .into()); } } else { - panic!("Bad STX Balance serialization size = {}", bytes.len()); - } + return Err(InterpreterError::Expect(format!( + "Bad STX Balance serialization size = {}", + bytes.len() + )) + .into()); + }; + Ok(result) } } @@ -329,35 +342,35 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { &self.balance } - pub fn save(self) { + pub fn save(self) -> Result<()> { let key = ClarityDatabase::make_key_for_account_balance(&self.principal); self.db_ref.put(&key, &self.balance) } pub fn transfer_to(mut self, recipient: &PrincipalData, amount: u128) -> Result<()> { - if !self.can_transfer(amount) { + if !self.can_transfer(amount)? { return Err(InterpreterError::InsufficientBalance.into()); } let recipient_key = ClarityDatabase::make_key_for_account_balance(recipient); let mut recipient_balance = self .db_ref - .get(&recipient_key) + .get(&recipient_key)? .unwrap_or(STXBalance::zero()); recipient_balance .checked_add_unlocked_amount(amount) .ok_or(Error::Runtime(RuntimeErrorType::ArithmeticOverflow, None))?; - self.debit(amount); - self.db_ref.put(&recipient_key, &recipient_balance); - self.save(); + self.debit(amount)?; + self.db_ref.put(&recipient_key, &recipient_balance)?; + self.save()?; Ok(()) } - pub fn get_available_balance(&mut self) -> u128 { + pub fn get_available_balance(&mut self) -> Result { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v2_unlock_height = self.db_ref.get_v2_unlock_height()?; self.balance.get_available_balance_at_burn_block( self.burn_block_height, v1_unlock_height, @@ -365,40 +378,41 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ) } - pub fn canonical_balance_repr(&mut self) -> STXBalance { + pub fn canonical_balance_repr(&mut self) -> Result { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - let v2_unlock_height = self.db_ref.get_v2_unlock_height(); - self.balance - .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height) - .0 + let v2_unlock_height = self.db_ref.get_v2_unlock_height()?; + Ok(self + .balance + .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height)? 
+ .0) } - pub fn has_locked_tokens(&mut self) -> bool { + pub fn has_locked_tokens(&mut self) -> Result { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - let v2_unlock_height = self.db_ref.get_v2_unlock_height(); - self.balance.has_locked_tokens_at_burn_block( + let v2_unlock_height = self.db_ref.get_v2_unlock_height()?; + Ok(self.balance.has_locked_tokens_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, - ) + )) } - pub fn has_unlockable_tokens(&mut self) -> bool { + pub fn has_unlockable_tokens(&mut self) -> Result { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - let v2_unlock_height = self.db_ref.get_v2_unlock_height(); - self.balance.has_unlockable_tokens_at_burn_block( + let v2_unlock_height = self.db_ref.get_v2_unlock_height()?; + Ok(self.balance.has_unlockable_tokens_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, - ) + )) } - pub fn can_transfer(&mut self, amount: u128) -> bool { - self.get_available_balance() >= amount + pub fn can_transfer(&mut self, amount: u128) -> Result { + Ok(self.get_available_balance()? >= amount) } - pub fn debit(&mut self, amount: u128) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn debit(&mut self, amount: u128) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after account-debit"); } @@ -406,23 +420,24 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { self.balance.debit_unlocked_amount(amount) } - pub fn credit(&mut self, amount: u128) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn credit(&mut self, amount: u128) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after account-credit"); } self.balance .checked_add_unlocked_amount(amount) - .expect("STX balance overflow"); + .ok_or_else(|| InterpreterError::Expect("STX balance overflow".into()))?; + Ok(()) } pub fn set_balance(&mut self, balance: STXBalance) { self.balance = balance; } - pub fn lock_tokens_v1(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn lock_tokens_v1(&mut self, amount_to_lock: u128, unlock_burn_height: u64) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after account-token-lock"); } @@ -432,12 +447,18 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { if unlock_burn_height <= self.burn_block_height { // caller needs to have checked this - panic!("FATAL: cannot set a lock with expired unlock burn height"); + return Err(InterpreterError::Expect( + "FATAL: cannot set a lock with expired unlock burn height".into(), + ) + .into()); } - if self.has_locked_tokens() { + if self.has_locked_tokens()? { // caller needs to have checked this - panic!("FATAL: account already has locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account already has locked tokens".into(), + ) + .into()); } // from `unlock_available_tokens_if_any` call above, `self.balance` should @@ -445,84 +466,100 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { let new_amount_unlocked = self .balance - .get_total_balance() + .get_total_balance()? 
.checked_sub(amount_to_lock) - .expect("STX underflow"); + .ok_or_else(|| InterpreterError::Expect("STX underflow".into()))?; self.balance = STXBalance::LockedPoxOne { amount_unlocked: new_amount_unlocked, amount_locked: amount_to_lock, unlock_height: unlock_burn_height, }; + Ok(()) } ////////////// Pox-2 ///////////////// /// Return true iff `self` represents a snapshot that has a lock /// created by PoX v2. - pub fn is_v2_locked(&mut self) -> bool { - matches!( - self.canonical_balance_repr(), - STXBalance::LockedPoxTwo { .. } - ) + pub fn is_v2_locked(&mut self) -> Result { + match self.canonical_balance_repr()? { + STXBalance::LockedPoxTwo { .. } => Ok(true), + _ => Ok(false), + } } /// Increase the account's current lock to `new_total_locked`. /// Panics if `self` was not locked by V2 PoX. - pub fn increase_lock_v2(&mut self, new_total_locked: u128) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn increase_lock_v2(&mut self, new_total_locked: u128) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after extend-token-lock"); } - if !self.has_locked_tokens() { + if !self.has_locked_tokens()? { // caller needs to have checked this - panic!("FATAL: account does not have locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account does not have locked tokens".into(), + ) + .into()); } - if !self.is_v2_locked() { + if !self.is_v2_locked()? { // caller needs to have checked this - panic!("FATAL: account must be locked by pox-2"); + return Err( + InterpreterError::Expect("FATAL: account must be locked by pox-2".into()).into(), + ); } - assert!( - self.balance.amount_locked() <= new_total_locked, - "FATAL: account must lock more after `increase_lock_v2`" - ); + if !(self.balance.amount_locked() <= new_total_locked) { + return Err(InterpreterError::Expect( + "FATAL: account must lock more after `increase_lock_v2`".into(), + ) + .into()); + } let total_amount = self .balance .amount_unlocked() .checked_add(self.balance.amount_locked()) - .expect("STX balance overflowed u128"); - let amount_unlocked = total_amount - .checked_sub(new_total_locked) - .expect("STX underflow: more is locked than total balance"); + .ok_or_else(|| InterpreterError::Expect("STX balance overflowed u128".into()))?; + let amount_unlocked = total_amount.checked_sub(new_total_locked).ok_or_else(|| { + InterpreterError::Expect("STX underflow: more is locked than total balance".into()) + })?; self.balance = STXBalance::LockedPoxTwo { amount_unlocked, amount_locked: new_total_locked, unlock_height: self.balance.unlock_height(), }; + + Ok(()) } /// Extend this account's current lock to `unlock_burn_height`. /// After calling, this method will set the balance to a "LockedPoxTwo" balance, /// because this method is only invoked as a result of PoX2 interactions - pub fn extend_lock_v2(&mut self, unlock_burn_height: u64) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn extend_lock_v2(&mut self, unlock_burn_height: u64) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after extend-token-lock"); } - if !self.has_locked_tokens() { + if !self.has_locked_tokens()? 
{ // caller needs to have checked this - panic!("FATAL: account does not have locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account does not have locked tokens".into(), + ) + .into()); } if unlock_burn_height <= self.burn_block_height { // caller needs to have checked this - panic!("FATAL: cannot set a lock with expired unlock burn height"); + return Err(InterpreterError::Expect( + "FATAL: cannot set a lock with expired unlock burn height".into(), + ) + .into()); } self.balance = STXBalance::LockedPoxTwo { @@ -530,28 +567,37 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { amount_locked: self.balance.amount_locked(), unlock_height: unlock_burn_height, }; + Ok(()) } /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. /// After calling, this method will set the balance to a "LockedPoxTwo" balance, /// because this method is only invoked as a result of PoX2 interactions - pub fn lock_tokens_v2(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn lock_tokens_v2(&mut self, amount_to_lock: u128, unlock_burn_height: u64) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after account-token-lock"); } // caller needs to have checked this - assert!(amount_to_lock > 0, "BUG: cannot lock 0 tokens"); + if !(amount_to_lock > 0) { + return Err(InterpreterError::Expect("BUG: cannot lock 0 tokens".into()).into()); + } if unlock_burn_height <= self.burn_block_height { // caller needs to have checked this - panic!("FATAL: cannot set a lock with expired unlock burn height"); + return Err(InterpreterError::Expect( + "FATAL: cannot set a lock with expired unlock burn height".into(), + ) + .into()); } - if self.has_locked_tokens() { + if self.has_locked_tokens()? { // caller needs to have checked this - panic!("FATAL: account already has locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account already has locked tokens".into(), + ) + .into()); } // from `unlock_available_tokens_if_any` call above, `self.balance` should @@ -559,15 +605,16 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { let new_amount_unlocked = self .balance - .get_total_balance() + .get_total_balance()? .checked_sub(amount_to_lock) - .expect("STX underflow"); + .ok_or_else(|| InterpreterError::Expect("STX underflow".into()))?; self.balance = STXBalance::LockedPoxTwo { amount_unlocked: new_amount_unlocked, amount_locked: amount_to_lock, unlock_height: unlock_burn_height, }; + Ok(()) } //////////////// Pox-3 ////////////////// @@ -575,8 +622,8 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. 
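The PoX lock helpers in this file (`lock_tokens_v1`/`v2`/`v3`, `extend_lock_*`, `increase_lock_*`) all enforce the same caller-must-check invariants: a non-zero lock amount, an unlock height strictly in the future, and no existing lock (or, for extend/increase, an existing lock of the matching PoX version). The diff converts each former `panic!`/`assert!` into an early `Err` return while preserving the original message. A condensed sketch of that guard structure over a hypothetical, simplified snapshot type:

```rust
// Illustrative stand-ins; the real code operates on STXBalanceSnapshot
// and returns the crate's InterpreterError (assumption for brevity).
#[derive(Debug)]
enum InterpreterError {
    Expect(String),
}

struct Snapshot {
    burn_block_height: u64,
    locked: u128,
    unlocked: u128,
    unlock_height: u64,
}

impl Snapshot {
    fn lock_tokens(
        &mut self,
        amount_to_lock: u128,
        unlock_burn_height: u64,
    ) -> Result<(), InterpreterError> {
        // Each guard used to be a panic!/assert!; callers are still
        // expected to pre-check, but a violation is now a soft error.
        if amount_to_lock == 0 {
            return Err(InterpreterError::Expect("BUG: cannot lock 0 tokens".into()));
        }
        if unlock_burn_height <= self.burn_block_height {
            return Err(InterpreterError::Expect(
                "FATAL: cannot set a lock with expired unlock burn height".into(),
            ));
        }
        if self.locked > 0 {
            return Err(InterpreterError::Expect(
                "FATAL: account already has locked tokens".into(),
            ));
        }
        // Checked arithmetic replaces `.expect("STX underflow")`.
        self.unlocked = self
            .unlocked
            .checked_sub(amount_to_lock)
            .ok_or_else(|| InterpreterError::Expect("STX underflow".into()))?;
        self.locked = amount_to_lock;
        self.unlock_height = unlock_burn_height;
        Ok(())
    }
}

fn main() {
    let mut snap = Snapshot { burn_block_height: 100, locked: 0, unlocked: 500, unlock_height: 0 };
    assert!(snap.lock_tokens(200, 150).is_ok());
    assert!(snap.lock_tokens(100, 160).is_err()); // already locked
}
```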
/// After calling, this method will set the balance to a "LockedPoxThree" balance, /// because this method is only invoked as a result of PoX3 interactions - pub fn lock_tokens_v3(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn lock_tokens_v3(&mut self, amount_to_lock: u128, unlock_burn_height: u64) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after account-token-lock"); } @@ -586,12 +633,18 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { if unlock_burn_height <= self.burn_block_height { // caller needs to have checked this - panic!("FATAL: cannot set a lock with expired unlock burn height"); + return Err(InterpreterError::Expect( + "FATAL: cannot set a lock with expired unlock burn height".into(), + ) + .into()); } - if self.has_locked_tokens() { + if self.has_locked_tokens()? { // caller needs to have checked this - panic!("FATAL: account already has locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account already has locked tokens".into(), + ) + .into()); } // from `unlock_available_tokens_if_any` call above, `self.balance` should @@ -599,34 +652,46 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { let new_amount_unlocked = self .balance - .get_total_balance() + .get_total_balance()? .checked_sub(amount_to_lock) - .expect("FATAL: account locks more STX than balance possessed"); + .ok_or_else(|| { + InterpreterError::Expect( + "FATAL: account locks more STX than balance possessed".into(), + ) + })?; self.balance = STXBalance::LockedPoxThree { amount_unlocked: new_amount_unlocked, amount_locked: amount_to_lock, unlock_height: unlock_burn_height, }; + + Ok(()) } /// Extend this account's current lock to `unlock_burn_height`. /// After calling, this method will set the balance to a "LockedPoxThree" balance, /// because this method is only invoked as a result of PoX3 interactions - pub fn extend_lock_v3(&mut self, unlock_burn_height: u64) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn extend_lock_v3(&mut self, unlock_burn_height: u64) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after extend-token-lock"); } - if !self.has_locked_tokens() { + if !self.has_locked_tokens()? { // caller needs to have checked this - panic!("FATAL: account does not have locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account does not have locked tokens".into(), + ) + .into()); } if unlock_burn_height <= self.burn_block_height { // caller needs to have checked this - panic!("FATAL: cannot set a lock with expired unlock burn height"); + return Err(InterpreterError::Expect( + "FATAL: cannot set a lock with expired unlock burn height".into(), + ) + .into()); } self.balance = STXBalance::LockedPoxThree { @@ -634,24 +699,30 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { amount_locked: self.balance.amount_locked(), unlock_height: unlock_burn_height, }; + Ok(()) } /// Increase the account's current lock to `new_total_locked`. /// Panics if `self` was not locked by V3 PoX. 
- pub fn increase_lock_v3(&mut self, new_total_locked: u128) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn increase_lock_v3(&mut self, new_total_locked: u128) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after extend-token-lock"); } - if !self.has_locked_tokens() { + if !self.has_locked_tokens()? { // caller needs to have checked this - panic!("FATAL: account does not have locked tokens"); + return Err(InterpreterError::Expect( + "FATAL: account does not have locked tokens".into(), + ) + .into()); } - if !self.is_v3_locked() { + if !self.is_v3_locked()? { // caller needs to have checked this - panic!("FATAL: account must be locked by pox-3"); + return Err( + InterpreterError::Expect("FATAL: account must be locked by pox-3".into()).into(), + ); } assert!( @@ -663,33 +734,34 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { .balance .amount_unlocked() .checked_add(self.balance.amount_locked()) - .expect("STX balance overflowed u128"); - let amount_unlocked = total_amount - .checked_sub(new_total_locked) - .expect("STX underflow: more is locked than total balance"); + .ok_or_else(|| InterpreterError::Expect("STX balance overflowed u128".into()))?; + let amount_unlocked = total_amount.checked_sub(new_total_locked).ok_or_else(|| { + InterpreterError::Expect("STX underflow: more is locked than total balance".into()) + })?; self.balance = STXBalance::LockedPoxThree { amount_unlocked, amount_locked: new_total_locked, unlock_height: self.balance.unlock_height(), }; + Ok(()) } /// Return true iff `self` represents a snapshot that has a lock /// created by PoX v3. - pub fn is_v3_locked(&mut self) -> bool { - matches!( - self.canonical_balance_repr(), - STXBalance::LockedPoxThree { .. } - ) + pub fn is_v3_locked(&mut self) -> Result { + match self.canonical_balance_repr()? { + STXBalance::LockedPoxThree { .. } => Ok(true), + _ => Ok(false), + } } /////////////// GENERAL ////////////////////// /// If this snapshot is locked, then alter the lock height to be /// the next burn block (i.e., `self.burn_block_height + 1`) - pub fn accelerate_unlock(&mut self) { - let unlocked = self.unlock_available_tokens_if_any(); + pub fn accelerate_unlock(&mut self) -> Result<()> { + let unlocked = self.unlock_available_tokens_if_any()?; if unlocked > 0 { debug!("Consolidated after account-token-lock"); } @@ -698,7 +770,10 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { self.balance = match self.balance { STXBalance::Unlocked { amount } => STXBalance::Unlocked { amount }, STXBalance::LockedPoxOne { .. 
} => { - unreachable!("Attempted to accelerate the unlock of a lockup created by PoX-1") + return Err(InterpreterError::Expect( + "Attempted to accelerate the unlock of a lockup created by PoX-1".into(), + ) + .into()) } STXBalance::LockedPoxTwo { amount_unlocked, @@ -719,18 +794,19 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { unlock_height: new_unlock_height, }, }; + Ok(()) } /// Unlock any tokens that are unlockable at the current /// burn block height, and return the amount newly unlocked - fn unlock_available_tokens_if_any(&mut self) -> u128 { + fn unlock_available_tokens_if_any(&mut self) -> Result { let (new_balance, unlocked) = self.balance.canonical_repr_at_block( self.burn_block_height, self.db_ref.get_v1_unlock_height(), - self.db_ref.get_v2_unlock_height(), - ); + self.db_ref.get_v2_unlock_height()?, + )?; self.balance = new_balance; - unlocked + Ok(unlocked) } } @@ -815,7 +891,7 @@ impl STXBalance { } } - fn debit_unlocked_amount(&mut self, delta: u128) { + fn debit_unlocked_amount(&mut self, delta: u128) -> Result<()> { match self { STXBalance::Unlocked { amount: amount_unlocked, @@ -829,7 +905,10 @@ impl STXBalance { | STXBalance::LockedPoxThree { amount_unlocked, .. } => { - *amount_unlocked = amount_unlocked.checked_sub(delta).expect("STX underflow"); + *amount_unlocked = amount_unlocked + .checked_sub(delta) + .ok_or_else(|| InterpreterError::Expect("STX underflow".into()))?; + Ok(()) } } } @@ -867,20 +946,20 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, - ) -> (STXBalance, u128) { + ) -> Result<(STXBalance, u128)> { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, ) { - ( + Ok(( STXBalance::Unlocked { - amount: self.get_total_balance(), + amount: self.get_total_balance()?, }, self.amount_locked(), - ) + )) } else { - (self.clone(), 0) + Ok((self.clone(), 0)) } } @@ -889,7 +968,7 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, - ) -> u128 { + ) -> Result { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, @@ -897,7 +976,7 @@ impl STXBalance { ) { self.get_total_balance() } else { - match self { + let out = match self { STXBalance::Unlocked { amount } => *amount, STXBalance::LockedPoxOne { amount_unlocked, .. @@ -908,7 +987,8 @@ impl STXBalance { STXBalance::LockedPoxThree { amount_unlocked, .. } => *amount_unlocked, - } + }; + Ok(out) } } @@ -946,7 +1026,7 @@ impl STXBalance { } } - pub fn get_total_balance(&self) -> u128 { + pub fn get_total_balance(&self) -> Result { let (unlocked, locked) = match self { STXBalance::Unlocked { amount } => (*amount, 0), STXBalance::LockedPoxOne { @@ -965,7 +1045,9 @@ impl STXBalance { .. } => (*amount_unlocked, *amount_locked), }; - unlocked.checked_add(locked).expect("STX overflow") + unlocked + .checked_add(locked) + .ok_or_else(|| InterpreterError::Expect("STX overflow".into()).into()) } pub fn was_locked_by_v1(&self) -> bool { @@ -1106,11 +1188,11 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, - ) -> bool { - self.get_available_balance_at_burn_block( + ) -> Result { + Ok(self.get_available_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, - ) >= amount + )? 
>= amount) } } diff --git a/clarity/src/vm/diagnostic.rs b/clarity/src/vm/diagnostic.rs index ee0ac0b56d..81939237d7 100644 --- a/clarity/src/vm/diagnostic.rs +++ b/clarity/src/vm/diagnostic.rs @@ -66,26 +66,24 @@ impl Diagnostic { impl fmt::Display for Diagnostic { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.level)?; - match self.spans.len().cmp(&1) { - std::cmp::Ordering::Equal => write!( + if self.spans.len() == 1 { + write!( f, - " (line {}, column {}): ", + " (line {}, column {})", self.spans[0].start_line, self.spans[0].start_column - )?, - std::cmp::Ordering::Greater => { - let lines: Vec = self - .spans - .iter() - .map(|s| format!("line: {}", s.start_line)) - .collect(); - write!(f, " ({}): ", lines.join(", "))?; - } - std::cmp::Ordering::Less => {} + )?; + } else if self.spans.len() > 1 { + let lines: Vec = self + .spans + .iter() + .map(|s| format!("line: {}", s.start_line)) + .collect(); + write!(f, " ({})", lines.join(", "))?; } write!(f, ": {}.", &self.message)?; if let Some(suggestion) = &self.suggestion { write!(f, "\n{}", suggestion)?; } - writeln!(f) + write!(f, "\n") } } diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index a8b9a5e3f9..ff864b26db 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -45,6 +45,7 @@ pub struct ContractSupportDocs { pub skip_func_display: HashSet<&'static str>, } +#[allow(clippy::expect_used)] fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) -> FunctionRef { let input_type = get_input_type_string(func_type); let output_type = get_output_type_string(func_type); @@ -59,6 +60,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) - } } +#[allow(clippy::expect_used)] fn get_constant_value(var_name: &str, contract_content: &str) -> Value { let to_eval = format!("{}\n{}", contract_content, var_name); doc_execute(&to_eval) @@ -92,6 +94,7 @@ fn doc_execute(program: &str) -> Result, vm::Error> { }) } +#[allow(clippy::expect_used)] pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractRef { let (_, contract_analysis) = mem_type_check(content, ClarityVersion::latest(), StacksEpochId::latest()) @@ -110,7 +113,7 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR let description = support_docs .descriptions .get(func_name.as_str()) - .unwrap_or_else(|| panic!("BUG: no description for {}", func_name.as_str())); + .expect(&format!("BUG: no description for {}", func_name.as_str())); make_func_ref(func_name, func_type, description) }) .collect(); @@ -122,7 +125,7 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR let description = support_docs .descriptions .get(func_name.as_str()) - .unwrap_or_else(|| panic!("BUG: no description for {}", func_name.as_str())); + .expect(&format!("BUG: no description for {}", func_name.as_str())); make_func_ref(func_name, func_type, description) }) .collect(); @@ -142,7 +145,8 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR let ecode_result = doc_execute(&ecode_to_eval) .expect("BUG: failed to evaluate contract for constant value") .expect("BUG: failed to return constant value") - .expect_tuple(); + .expect_tuple() + .expect("BUG: failed to build tuple"); let error_codes = variable_types .iter() diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 034616c0f7..dd89db8ec6 100644 --- a/clarity/src/vm/docs/mod.rs +++ 
diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs
index a8b9a5e3f9..ff864b26db 100644
--- a/clarity/src/vm/docs/contracts.rs
+++ b/clarity/src/vm/docs/contracts.rs
@@ -45,6 +45,7 @@ pub struct ContractSupportDocs {
     pub skip_func_display: HashSet<&'static str>,
 }
 
+#[allow(clippy::expect_used)]
 fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) -> FunctionRef {
     let input_type = get_input_type_string(func_type);
     let output_type = get_output_type_string(func_type);
@@ -59,6 +60,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) -
     }
 }
 
+#[allow(clippy::expect_used)]
 fn get_constant_value(var_name: &str, contract_content: &str) -> Value {
     let to_eval = format!("{}\n{}", contract_content, var_name);
     doc_execute(&to_eval)
@@ -92,6 +94,7 @@ fn doc_execute(program: &str) -> Result<Option<Value>, vm::Error> {
     })
 }
 
+#[allow(clippy::expect_used)]
 pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractRef {
     let (_, contract_analysis) =
         mem_type_check(content, ClarityVersion::latest(), StacksEpochId::latest())
@@ -110,7 +113,7 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR
             let description = support_docs
                 .descriptions
                 .get(func_name.as_str())
-                .unwrap_or_else(|| panic!("BUG: no description for {}", func_name.as_str()));
+                .expect(&format!("BUG: no description for {}", func_name.as_str()));
             make_func_ref(func_name, func_type, description)
         })
         .collect();
@@ -122,7 +125,7 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR
             let description = support_docs
                 .descriptions
                 .get(func_name.as_str())
-                .unwrap_or_else(|| panic!("BUG: no description for {}", func_name.as_str()));
+                .expect(&format!("BUG: no description for {}", func_name.as_str()));
             make_func_ref(func_name, func_type, description)
         })
         .collect();
@@ -142,7 +145,8 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR
     let ecode_result = doc_execute(&ecode_to_eval)
         .expect("BUG: failed to evaluate contract for constant value")
         .expect("BUG: failed to return constant value")
-        .expect_tuple();
+        .expect_tuple()
+        .expect("BUG: failed to build tuple");
 
     let error_codes = variable_types
         .iter()
diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs
index 034616c0f7..dd89db8ec6 100644
--- a/clarity/src/vm/docs/mod.rs
+++ b/clarity/src/vm/docs/mod.rs
@@ -776,6 +776,7 @@ pub fn get_input_type_string(function_type: &FunctionType) -> String {
     }
 }
 
+#[allow(clippy::panic)]
 pub fn get_output_type_string(function_type: &FunctionType) -> String {
     match function_type {
         FunctionType::Variadic(_, ref out_type) => format!("{}", out_type),
@@ -788,11 +789,12 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String {
         FunctionType::Binary(left, right, ref out_sig) => match out_sig {
             FunctionReturnsSignature::Fixed(out_type) => format!("{}", out_type),
             FunctionReturnsSignature::TypeOfArgAtPosition(pos) => {
-                let arg_sig = match pos {
-                    0 => left,
-                    1 => right,
+                let arg_sig: &FunctionArgSignature;
+                match pos {
+                    0 => arg_sig = left,
+                    1 => arg_sig = right,
                     _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary can only handle two arguments, zero-indexed (0 or 1).")
-                };
+                }
                 match arg_sig {
                     FunctionArgSignature::Single(arg_type) => format!("{}", arg_type),
                     FunctionArgSignature::Union(arg_types) => {
@@ -808,12 +810,15 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String {
 
 pub fn get_signature(function_name: &str, function_type: &FunctionType) -> Option<String> {
     if let FunctionType::Fixed(FixedFunction { ref args, .. }) = function_type {
-        let in_names: Vec<String> = args.iter().map(|x| x.name.to_string()).collect();
+        let in_names: Vec<String> = args
+            .iter()
+            .map(|x| format!("{}", x.name.as_str()))
+            .collect();
         let arg_examples = in_names.join(" ");
         Some(format!(
             "({}{}{})",
             function_name,
-            if arg_examples.is_empty() { "" } else { " " },
+            if arg_examples.len() == 0 { "" } else { " " },
             arg_examples
         ))
     } else {
@@ -821,6 +826,8 @@ pub fn get_signature(function_name: &str, function_type: &FunctionType) -> Optio
     }
 }
 
+#[allow(clippy::expect_used)]
+#[allow(clippy::panic)]
 fn make_for_simple_native(
     api: &SimpleFunctionAPI,
     function: &NativeFunctions,
@@ -828,7 +835,8 @@ fn make_for_simple_native(
 ) -> FunctionAPI {
     let (input_type, output_type) = {
         if let TypedNativeFunction::Simple(SimpleNativeFunction(function_type)) =
-            TypedNativeFunction::type_native_function(function)
+            TypedNativeFunction::type_native_function(&function)
+                .expect("Failed to type a native function")
         {
             let input_type = get_input_type_string(&function_type);
             let output_type = get_output_type_string(&function_type);
@@ -844,8 +852,8 @@ fn make_for_simple_native(
     FunctionAPI {
         name: api.name.map_or(name, |x| x.to_string()),
         snippet: api.snippet.to_string(),
-        input_type,
-        output_type,
+        input_type: input_type,
+        output_type: output_type,
         signature: api.signature.to_string(),
         description: api.description.to_string(),
         example: api.example.to_string(),
@@ -2420,35 +2428,35 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI {
     use crate::vm::functions::NativeFunctions::*;
     let name = function.get_name();
     match function {
-        Add => make_for_simple_native(&ADD_API, function, name),
-        ToUInt => make_for_simple_native(&TO_UINT_API, function, name),
-        ToInt => make_for_simple_native(&TO_INT_API, function, name),
-        Subtract => make_for_simple_native(&SUB_API, function, name),
-        Multiply => make_for_simple_native(&MUL_API, function, name),
-        Divide => make_for_simple_native(&DIV_API, function, name),
-        BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, function, name),
-        BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, function, name),
-        BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, function, name),
-        BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, function, name),
-        IsStandard => make_for_simple_native(&IS_STANDARD_API, function, name),
-        PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, function, name),
-        PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, function),
-        StringToInt => make_for_simple_native(&STRING_TO_INT_API, function, name),
-        StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, function, name),
-        IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, function, name),
-        IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, function, name),
-        CmpGeq => make_for_simple_native(&GEQ_API, function, name),
-        CmpLeq => make_for_simple_native(&LEQ_API, function, name),
-        CmpLess => make_for_simple_native(&LESS_API, function, name),
-        CmpGreater => make_for_simple_native(&GREATER_API, function, name),
-        Modulo => make_for_simple_native(&MOD_API, function, name),
-        Power => make_for_simple_native(&POW_API, function, name),
-        Sqrti => make_for_simple_native(&SQRTI_API, function, name),
-        Log2 => make_for_simple_native(&LOG2_API, function, name),
-        BitwiseXor => make_for_simple_native(&XOR_API, function, name),
-        And => make_for_simple_native(&AND_API, function, name),
-        Or => make_for_simple_native(&OR_API, function, name),
-        Not => make_for_simple_native(&NOT_API, function, name),
+        Add => make_for_simple_native(&ADD_API, &function, name),
+        ToUInt => make_for_simple_native(&TO_UINT_API, &function, name),
+        ToInt => make_for_simple_native(&TO_INT_API, &function, name),
+        Subtract => make_for_simple_native(&SUB_API, &function, name),
+        Multiply => make_for_simple_native(&MUL_API, &function, name),
+        Divide => make_for_simple_native(&DIV_API, &function, name),
+        BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &function, name),
+        BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &function, name),
+        BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &function, name),
+        BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &function, name),
+        IsStandard => make_for_simple_native(&IS_STANDARD_API, &function, name),
+        PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, &function, name),
+        PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, &function),
+        StringToInt => make_for_simple_native(&STRING_TO_INT_API, &function, name),
+        StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &function, name),
+        IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &function, name),
+        IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &function, name),
+        CmpGeq => make_for_simple_native(&GEQ_API, &function, name),
+        CmpLeq => make_for_simple_native(&LEQ_API, &function, name),
+        CmpLess => make_for_simple_native(&LESS_API, &function, name),
+        CmpGreater => make_for_simple_native(&GREATER_API, &function, name),
+        Modulo => make_for_simple_native(&MOD_API, &function, name),
+        Power => make_for_simple_native(&POW_API, &function, name),
+        Sqrti => make_for_simple_native(&SQRTI_API, &function, name),
+        Log2 => make_for_simple_native(&LOG2_API, &function, name),
+        BitwiseXor => make_for_simple_native(&XOR_API, &function, name),
+        And => make_for_simple_native(&AND_API, &function, name),
+        Or => make_for_simple_native(&OR_API, &function, name),
+        Not => make_for_simple_native(&NOT_API, &function, name),
         Equals => make_for_special(&EQUALS_API, function),
         If => make_for_special(&IF_API, function),
         Let => make_for_special(&LET_API, function),
@@ -2512,20 +2520,20 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI {
         BurnAsset => make_for_special(&BURN_ASSET, function),
         GetTokenSupply => make_for_special(&GET_TOKEN_SUPPLY, function),
         AtBlock => make_for_special(&AT_BLOCK, function),
-        GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, function, name),
-        StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, function, name),
+        GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &function, name),
+        StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &function, name),
         StxTransfer => make_for_special(&STX_TRANSFER, function),
         StxTransferMemo => make_for_special(&STX_TRANSFER_MEMO, function),
-        StxBurn => make_for_simple_native(&STX_BURN, function, name),
+        StxBurn => make_for_simple_native(&STX_BURN, &function, name),
         ToConsensusBuff => make_for_special(&TO_CONSENSUS_BUFF, function),
         FromConsensusBuff => make_for_special(&FROM_CONSENSUS_BUFF, function),
         ReplaceAt => make_for_special(&REPLACE_AT, function),
-        BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, function, name),
-        BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, function, name),
-        BitwiseOr => make_for_simple_native(&BITWISE_OR_API, function, name),
-        BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, function, name),
-        BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, function, name),
-        BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, function, name),
+        BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &function, name),
+        BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &function, name),
+        BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &function, name),
+        BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &function, name),
+        BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &function, name),
+        BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &function, name),
     }
 }
 
@@ -2601,7 +2609,7 @@ pub fn make_define_reference(define_type: &DefineFunctions) -> FunctionAPI {
 fn make_all_api_reference() -> ReferenceAPIs {
     let mut functions: Vec<_> = NativeFunctions::ALL
         .iter()
-        .map(make_api_reference)
+        .map(|x| make_api_reference(x))
         .collect();
     for data_type in DefineFunctions::ALL.iter() {
         functions.push(make_define_reference(data_type))
@@ -2615,7 +2623,7 @@ fn make_all_api_reference() -> ReferenceAPIs {
             keywords.push(api_ref)
         }
     }
-    keywords.sort_by(|x, y| x.name.cmp(y.name));
+    keywords.sort_by(|x, y| x.name.cmp(&y.name));
 
     ReferenceAPIs {
         functions,
@@ -2623,9 +2631,13 @@ fn make_all_api_reference() -> ReferenceAPIs {
     }
 }
 
+#[allow(clippy::expect_used)]
 pub fn make_json_api_reference() -> String {
     let api_out = make_all_api_reference();
-    serde_json::to_string(&api_out).expect("Failed to serialize documentation")
+    format!(
+        "{}",
+        serde_json::to_string(&api_out).expect("Failed to serialize documentation")
+    )
 }
 
 #[cfg(test)]
@@ -2662,7 +2674,7 @@ mod test {
     const DOC_HEADER_DB: DocHeadersDB = DocHeadersDB {};
 
     impl MemoryBackingStore {
-        pub fn as_docs_clarity_db(&mut self) -> ClarityDatabase {
+        pub fn as_docs_clarity_db<'a>(&'a mut self) -> ClarityDatabase<'a> {
             ClarityDatabase::new(self, &DOC_HEADER_DB, &DOC_POX_STATE_DB)
         }
     }
@@ -2833,13 +2845,13 @@ mod test {
         let mut current_segment: String = "".into();
         for line in program.lines() {
             current_segment.push_str(line);
-            current_segment.push('\n');
+            current_segment.push_str("\n");
             if line.contains(";;") && line.contains("Returns ") {
                 segments.push(current_segment);
                 current_segment = "".into();
             }
         }
-        if !current_segment.is_empty() {
+        if current_segment.len() > 0 {
            segments.push(current_segment);
        }
 
@@ -2899,7 +2911,7 @@ mod test {
             .type_map
             .as_ref()
             .unwrap()
-            .get_type(analysis.expressions.last().unwrap())
+            .get_type(&analysis.expressions.last().unwrap())
             .cloned(),
         );
     }
@@ -2994,7 +3006,7 @@ mod test {
         let mut analysis_db = store.as_analysis_db();
         let mut parsed = ast::build_ast(
             &contract_id,
-            token_contract_content,
+            &token_contract_content,
             &mut (),
             ClarityVersion::latest(),
             StacksEpochId::latest(),
@@ -3049,9 +3061,10 @@ mod test {
             let mut snapshot = e
                 .global_context
                 .database
-                .get_stx_balance_snapshot_genesis(&docs_principal_id);
+                .get_stx_balance_snapshot_genesis(&docs_principal_id)
+                .unwrap();
             snapshot.set_balance(balance);
-            snapshot.save();
+            snapshot.save().unwrap();
             e.global_context
                 .database
                 .increment_ustx_liquid_supply(100000)
@@ -3063,7 +3076,7 @@ mod test {
 
         env.initialize_contract(
             contract_id,
-            token_contract_content,
+            &token_contract_content,
             None,
             ASTRules::PrecheckSize,
         )
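The thread running through both docs modules above is that the remaining panicking calls are fenced behind explicit `#[allow(clippy::expect_used)]` / `#[allow(clippy::panic)]` attributes, so the corresponding lints can be denied everywhere else. A minimal sketch of that idea, assuming a `serde_json` dependency; the function and fixture below are hypothetical, not from the patch:

```rust
// Deny the panicking escape hatches by default at the crate root...
#![deny(clippy::expect_used, clippy::panic)]

// ...and opt back in only where a failure really is a programming
// error, e.g. doc generation over input the build controls.
#[allow(clippy::expect_used)]
fn load_fixture(json: &str) -> serde_json::Value {
    serde_json::from_str(json).expect("BUG: fixture must be valid JSON")
}

fn main() {
    let v = load_fixture("{\"ok\": true}");
    println!("{}", v["ok"]);
}
```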
diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs
index d03e75e034..fb8808936a 100644
--- a/clarity/src/vm/errors.rs
+++ b/clarity/src/vm/errors.rs
@@ -21,6 +21,7 @@ use rusqlite::Error as SqliteError;
 use serde_json::Error as SerdeJSONErr;
 use stacks_common::types::chainstate::BlockHeaderHash;
 
+use super::ast::errors::ParseErrors;
 pub use crate::vm::analysis::errors::{
     check_argument_count, check_arguments_at_least, check_arguments_at_most, CheckErrors,
 };
@@ -64,6 +65,7 @@ pub enum InterpreterError {
     InsufficientBalance,
     CostContractLoadFailure,
     DBError(String),
+    Expect(String),
 }
 
 /// RuntimeErrors are errors that smart contracts are expected
@@ -112,7 +114,7 @@ pub type InterpreterResult<R> = Result<R, Error>;
 
 impl<T: PartialEq> PartialEq<IncomparableError<T>> for IncomparableError<T> {
     fn eq(&self, _other: &IncomparableError<T>) -> bool {
-        false
+        return false;
     }
 }
 
@@ -132,12 +134,14 @@ impl fmt::Display for Error {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
             Error::Runtime(ref err, ref stack) => {
-                write!(f, "{}", err)?;
+                match err {
+                    _ => write!(f, "{}", err),
+                }?;
 
                 if let Some(ref stack_trace) = stack {
                     write!(f, "\n Stack Trace: \n")?;
                     for item in stack_trace.iter() {
-                        writeln!(f, "{}", item)?;
+                        write!(f, "{}\n", item)?;
                     }
                 }
                 Ok(())
@@ -165,21 +169,28 @@ impl error::Error for RuntimeErrorType {
     }
 }
 
-impl From<CostErrors> for Error {
-    fn from(err: CostErrors) -> Self {
-        Error::from(CheckErrors::from(err))
-    }
-}
-
 impl From<ParseError> for Error {
     fn from(err: ParseError) -> Self {
-        Error::from(RuntimeErrorType::ASTError(err))
+        match &err.err {
+            ParseErrors::InterpreterFailure => Error::from(InterpreterError::Expect(
+                "Unexpected interpreter failure during parsing".into(),
+            )),
+            _ => Error::from(RuntimeErrorType::ASTError(err)),
+        }
     }
 }
 
-impl From<SerdeJSONErr> for Error {
-    fn from(err: SerdeJSONErr) -> Self {
-        Error::from(RuntimeErrorType::JSONParseError(IncomparableError { err }))
+impl From<CostErrors> for Error {
+    fn from(err: CostErrors) -> Self {
+        match err {
+            CostErrors::InterpreterFailure => Error::from(InterpreterError::Expect(
+                "Interpreter failure during cost calculation".into(),
+            )),
+            CostErrors::Expect(s) => Error::from(InterpreterError::Expect(format!(
+                "Interpreter failure during cost calculation: {s}"
+            ))),
+            other_err => Error::from(CheckErrors::from(other_err)),
+        }
     }
 }
 
@@ -212,9 +223,9 @@ impl From<Error> for () {
     fn from(err: Error) -> Self {}
 }
 
-impl From<ShortReturnType> for Value {
-    fn from(val: ShortReturnType) -> Self {
-        match val {
+impl Into<Value> for ShortReturnType {
+    fn into(self) -> Value {
+        match self {
             ShortReturnType::ExpectedValue(v) => v,
             ShortReturnType::AssertionFailed(v) => v,
         }
     }
 }
diff --git a/clarity/src/vm/events.rs b/clarity/src/vm/events.rs
index 18fc84bb2e..c56f9e4a6b 100644
--- a/clarity/src/vm/events.rs
+++ b/clarity/src/vm/events.rs
@@ -17,6 +17,7 @@
 use stacks_common::codec::StacksMessageCodec;
 use stacks_common::types::chainstate::StacksAddress;
 
+use super::types::serialization::SerializationError;
 use crate::vm::analysis::ContractAnalysis;
 use crate::vm::costs::ExecutionCost;
 use crate::vm::types::{
@@ -38,14 +39,14 @@ impl StacksTransactionEvent {
         event_index: usize,
         txid: &dyn std::fmt::Debug,
         committed: bool,
-    ) -> serde_json::Value {
-        match self {
+    ) -> Result<serde_json::Value, SerializationError> {
+        let out = match self {
             StacksTransactionEvent::SmartContractEvent(event_data) => json!({
                 "txid": format!("0x{:?}", txid),
                 "event_index": event_index,
                 "committed": committed,
                 "type": "contract_event",
-                "contract_event": event_data.json_serialize()
+                "contract_event": event_data.json_serialize()?
             }),
             StacksTransactionEvent::STXEvent(STXEventType::STXTransferEvent(event_data)) => json!({
                 "txid": format!("0x{:?}", txid),
@@ -80,21 +81,21 @@ impl StacksTransactionEvent {
                 "event_index": event_index,
                 "committed": committed,
                 "type": "nft_transfer_event",
-                "nft_transfer_event": event_data.json_serialize()
+                "nft_transfer_event": event_data.json_serialize()?
             }),
             StacksTransactionEvent::NFTEvent(NFTEventType::NFTMintEvent(event_data)) => json!({
                 "txid": format!("0x{:?}", txid),
                 "event_index": event_index,
                 "committed": committed,
                 "type": "nft_mint_event",
-                "nft_mint_event": event_data.json_serialize()
+                "nft_mint_event": event_data.json_serialize()?
             }),
             StacksTransactionEvent::NFTEvent(NFTEventType::NFTBurnEvent(event_data)) => json!({
                 "txid": format!("0x{:?}", txid),
                 "event_index": event_index,
                 "committed": committed,
                 "type": "nft_burn_event",
-                "nft_burn_event": event_data.json_serialize()
+                "nft_burn_event": event_data.json_serialize()?
             }),
             StacksTransactionEvent::FTEvent(FTEventType::FTTransferEvent(event_data)) => json!({
                 "txid": format!("0x{:?}", txid),
@@ -117,7 +118,8 @@ impl StacksTransactionEvent {
                 "type": "ft_burn_event",
                 "ft_burn_event": event_data.json_serialize()
             }),
-        }
+        };
+        Ok(out)
     }
 }
 
@@ -220,20 +222,20 @@ pub struct NFTTransferEventData {
 }
 
 impl NFTTransferEventData {
-    pub fn json_serialize(&self) -> serde_json::Value {
+    pub fn json_serialize(&self) -> Result<serde_json::Value, SerializationError> {
         let raw_value = {
             let mut bytes = vec![];
-            self.value.serialize_write(&mut bytes).unwrap();
+            self.value.serialize_write(&mut bytes)?;
             let formatted_bytes: Vec<String> = bytes.iter().map(|b| format!("{:02x}", b)).collect();
             formatted_bytes
         };
-        json!({
+        Ok(json!({
             "asset_identifier": format!("{}", self.asset_identifier),
             "sender": format!("{}",self.sender),
             "recipient": format!("{}",self.recipient),
             "value": self.value,
             "raw_value": format!("0x{}", raw_value.join("")),
-        })
+        }))
     }
 }
 
@@ -245,19 +247,19 @@ pub struct NFTMintEventData {
 }
 
 impl NFTMintEventData {
-    pub fn json_serialize(&self) -> serde_json::Value {
+    pub fn json_serialize(&self) -> Result<serde_json::Value, SerializationError> {
         let raw_value = {
             let mut bytes = vec![];
-            self.value.serialize_write(&mut bytes).unwrap();
+            self.value.serialize_write(&mut bytes)?;
             let formatted_bytes: Vec<String> = bytes.iter().map(|b| format!("{:02x}", b)).collect();
             formatted_bytes
         };
-        json!({
+        Ok(json!({
             "asset_identifier": format!("{}", self.asset_identifier),
             "recipient": format!("{}",self.recipient),
             "value": self.value,
             "raw_value": format!("0x{}", raw_value.join("")),
-        })
+        }))
     }
 }
 
@@ -269,19 +271,19 @@ pub struct NFTBurnEventData {
 }
 
 impl NFTBurnEventData {
-    pub fn json_serialize(&self) -> serde_json::Value {
+    pub fn json_serialize(&self) -> Result<serde_json::Value, SerializationError> {
         let raw_value = {
             let mut bytes = vec![];
-            self.value.serialize_write(&mut bytes).unwrap();
+            self.value.serialize_write(&mut bytes)?;
             let formatted_bytes: Vec<String> = bytes.iter().map(|b| format!("{:02x}", b)).collect();
             formatted_bytes
         };
-        json!({
+        Ok(json!({
             "asset_identifier": format!("{}", self.asset_identifier),
             "sender": format!("{}",self.sender),
             "value": self.value,
             "raw_value": format!("0x{}", raw_value.join("")),
-        })
+        }))
     }
 }
 
@@ -345,18 +347,18 @@ pub struct SmartContractEventData {
 }
 
 impl SmartContractEventData {
-    pub fn json_serialize(&self) -> serde_json::Value {
+    pub fn json_serialize(&self) -> Result<serde_json::Value, SerializationError> {
         let raw_value = {
             let mut bytes = vec![];
-            self.value.serialize_write(&mut bytes).unwrap();
+            self.value.serialize_write(&mut bytes)?;
             let formatted_bytes: Vec<String> = bytes.iter().map(|b| format!("{:02x}", b)).collect();
             formatted_bytes
        };
-        json!({
+        Ok(json!({
             "contract_identifier": self.key.0.to_string(),
             "topic": self.key.1,
             "value": self.value,
             "raw_value": format!("0x{}", raw_value.join("")),
-        })
+        }))
     }
 }
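The events change above is uniform: every `json_serialize` becomes `Result<serde_json::Value, SerializationError>`, so a failed `serialize_write` propagates with `?` instead of `.unwrap()`. A compact sketch of the shape of this refactor, with hypothetical stand-in types rather than the real VM ones:

```rust
// Illustrative stand-ins: a fallible writer replaces `.unwrap()` with `?`,
// and the serialized JSON is wrapped in Ok(...).
#[derive(Debug)]
struct SerializationError(String);

fn serialize_write(value: u64, out: &mut Vec<u8>) -> Result<(), SerializationError> {
    out.extend_from_slice(&value.to_be_bytes());
    Ok(())
}

fn json_serialize(value: u64) -> Result<String, SerializationError> {
    let mut bytes = vec![];
    serialize_write(value, &mut bytes)?; // was: .unwrap()
    let hex: Vec<String> = bytes.iter().map(|b| format!("{:02x}", b)).collect();
    Ok(format!("{{\"raw_value\": \"0x{}\"}}", hex.join("")))
}

fn main() {
    println!("{}", json_serialize(513).unwrap());
}
```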
diff --git a/clarity/src/vm/functions/arithmetic.rs b/clarity/src/vm/functions/arithmetic.rs
index 6b30ce0852..bd0edbf5eb 100644
--- a/clarity/src/vm/functions/arithmetic.rs
+++ b/clarity/src/vm/functions/arithmetic.rs
@@ -21,7 +21,9 @@ use integer_sqrt::IntegerSquareRoot;
 
 use crate::vm::costs::cost_functions::ClarityCostFunction;
 use crate::vm::costs::runtime_cost;
-use crate::vm::errors::{check_argument_count, CheckErrors, InterpreterResult, RuntimeErrorType};
+use crate::vm::errors::{
+    check_argument_count, CheckErrors, InterpreterError, InterpreterResult, RuntimeErrorType,
+};
 use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType};
 use crate::vm::types::signatures::ListTypeData;
 use crate::vm::types::TypeSignature::BoolType;
@@ -124,9 +126,9 @@ macro_rules! type_force_binary_comparison_v2 {
                 vec![
                     TypeSignature::IntType,
                     TypeSignature::UIntType,
-                    TypeSignature::max_string_ascii(),
-                    TypeSignature::max_string_utf8(),
-                    TypeSignature::max_buffer(),
+                    TypeSignature::max_string_ascii()?,
+                    TypeSignature::max_string_utf8()?,
+                    TypeSignature::max_buffer()?,
                 ],
                 x,
             )
@@ -406,7 +408,11 @@ fn special_geq_v2(
     check_argument_count(2, args)?;
     let a = eval(&args[0], env, context)?;
     let b = eval(&args[1], env, context)?;
-    runtime_cost(ClarityCostFunction::Geq, env, cmp::min(a.size(), b.size()))?;
+    runtime_cost(
+        ClarityCostFunction::Geq,
+        env,
+        cmp::min(a.size()?, b.size()?),
+    )?;
     type_force_binary_comparison_v2!(geq, a, b)
 }
 
@@ -449,7 +455,11 @@ fn special_leq_v2(
     check_argument_count(2, args)?;
     let a = eval(&args[0], env, context)?;
     let b = eval(&args[1], env, context)?;
-    runtime_cost(ClarityCostFunction::Leq, env, cmp::min(a.size(), b.size()))?;
+    runtime_cost(
+        ClarityCostFunction::Leq,
+        env,
+        cmp::min(a.size()?, b.size()?),
+    )?;
     type_force_binary_comparison_v2!(leq, a, b)
 }
 
@@ -491,7 +501,7 @@ fn special_greater_v2(
     check_argument_count(2, args)?;
     let a = eval(&args[0], env, context)?;
     let b = eval(&args[1], env, context)?;
-    runtime_cost(ClarityCostFunction::Ge, env, cmp::min(a.size(), b.size()))?;
+    runtime_cost(ClarityCostFunction::Ge, env, cmp::min(a.size()?, b.size()?))?;
     type_force_binary_comparison_v2!(greater, a, b)
 }
 
@@ -533,7 +543,7 @@ fn special_less_v2(
     check_argument_count(2, args)?;
     let a = eval(&args[0], env, context)?;
     let b = eval(&args[1], env, context)?;
-    runtime_cost(ClarityCostFunction::Le, env, cmp::min(a.size(), b.size()))?;
+    runtime_cost(ClarityCostFunction::Le, env, cmp::min(a.size()?, b.size()?))?;
     type_force_binary_comparison_v2!(less, a, b)
 }
 
@@ -578,8 +588,9 @@ pub fn native_mod(a: Value, b: Value) -> InterpreterResult<Value> {
 
 pub fn native_bitwise_left_shift(input: Value, pos: Value) -> InterpreterResult<Value> {
     if let Value::UInt(u128_val) = pos {
-        let shamt =
-            u32::try_from(u128_val & 0x7f).expect("FATAL: lower 32 bits did not convert to u32");
+        let shamt = u32::try_from(u128_val & 0x7f).map_err(|_| {
+            InterpreterError::Expect("FATAL: lower 32 bits did not convert to u32".into())
+        })?;
 
         match input {
             Value::Int(input) => {
@@ -592,7 +603,7 @@ pub fn native_bitwise_left_shift(input: Value, pos: Value) -> InterpreterResult<
             }
             _ => Err(CheckErrors::UnionTypeError(
                 vec![TypeSignature::IntType, TypeSignature::UIntType],
-                TypeSignature::type_of(&input),
+                TypeSignature::type_of(&input)?,
             )
             .into()),
         }
@@ -603,8 +614,9 @@ pub fn native_bitwise_left_shift(input: Value, pos: Value) -> InterpreterResult<
 
 pub fn native_bitwise_right_shift(input: Value, pos: Value) -> InterpreterResult<Value> {
     if let Value::UInt(u128_val) = pos {
-        let shamt =
-            u32::try_from(u128_val & 0x7f).expect("FATAL: lower 32 bits did not convert to u32");
+        let shamt = u32::try_from(u128_val & 0x7f).map_err(|_| {
+            InterpreterError::Expect("FATAL: lower 32 bits did not convert to u32".into())
+        })?;
 
         match input {
             Value::Int(input) => {
@@ -617,7 +629,7 @@ pub fn native_bitwise_right_shift(input: Value, pos: Value) -> InterpreterResult
             }
             _ => Err(CheckErrors::UnionTypeError(
                 vec![TypeSignature::IntType, TypeSignature::UIntType],
-                TypeSignature::type_of(&input),
+                TypeSignature::type_of(&input)?,
             )
             .into()),
         }
diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs
index e117b81afc..d590f4675e 100644
--- a/clarity/src/vm/functions/assets.rs
+++ b/clarity/src/vm/functions/assets.rs
@@ -107,8 +107,8 @@ pub fn special_stx_balance(
         let mut snapshot = env
             .global_context
             .database
-            .get_stx_balance_snapshot(principal);
-        snapshot.get_available_balance()
+            .get_stx_balance_snapshot(principal)?;
+        snapshot.get_available_balance()?
     };
     Ok(Value::UInt(balance))
 } else {
@@ -139,16 +139,16 @@ pub fn stx_transfer_consolidated(
     }
 
     // loading from/to principals and balances
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
     // loading from's locked amount and height
     // TODO: this does not count the inner stacks block header load, but arguably,
     // this could be optimized away, so it shouldn't penalize the caller.
     env.add_memory(STXBalance::unlocked_and_v1_size as u64)?;
     env.add_memory(STXBalance::unlocked_and_v1_size as u64)?;
 
-    let mut sender_snapshot = env.global_context.database.get_stx_balance_snapshot(from);
-    if !sender_snapshot.can_transfer(amount) {
+    let mut sender_snapshot = env.global_context.database.get_stx_balance_snapshot(from)?;
+    if !sender_snapshot.can_transfer(amount)? {
         return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE);
     }
 
@@ -231,23 +231,31 @@ pub fn special_stx_account(
     let stx_balance = env
         .global_context
         .database
-        .get_stx_balance_snapshot(&principal)
-        .canonical_balance_repr();
+        .get_stx_balance_snapshot(&principal)?
+        .canonical_balance_repr()?;
     let v1_unlock_ht = env.global_context.database.get_v1_unlock_height();
-    let v2_unlock_ht = env.global_context.database.get_v2_unlock_height();
+    let v2_unlock_ht = env.global_context.database.get_v2_unlock_height()?;
 
     TupleData::from_data(vec![
         (
-            "unlocked".try_into().unwrap(),
+            "unlocked"
+                .try_into()
+                .map_err(|_| InterpreterError::Expect("Bad special tuple name".into()))?,
             Value::UInt(stx_balance.amount_unlocked()),
         ),
         (
-            "locked".try_into().unwrap(),
+            "locked"
+                .try_into()
+                .map_err(|_| InterpreterError::Expect("Bad special tuple name".into()))?,
             Value::UInt(stx_balance.amount_locked()),
         ),
         (
-            "unlock-height".try_into().unwrap(),
-            Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht) as u128),
+            "unlock-height"
+                .try_into()
+                .map_err(|_| InterpreterError::Expect("Bad special tuple name".into()))?,
+            Value::UInt(u128::from(
+                stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht),
+            )),
         ),
     ])
     .map(Value::Tuple)
@@ -274,16 +282,19 @@ pub fn special_stx_burn(
         return clarity_ecode!(StxErrorCodes::SENDER_IS_NOT_TX_SENDER);
     }
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
     env.add_memory(STXBalance::unlocked_and_v1_size as u64)?;
 
-    let mut burner_snapshot = env.global_context.database.get_stx_balance_snapshot(from);
-    if !burner_snapshot.can_transfer(amount) {
+    let mut burner_snapshot = env
+        .global_context
+        .database
+        .get_stx_balance_snapshot(&from)?;
+    if !burner_snapshot.can_transfer(amount)? {
         return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE);
     }
 
-    burner_snapshot.debit(amount);
-    burner_snapshot.save();
+    burner_snapshot.debit(amount)?;
+    burner_snapshot.save()?;
 
     env.global_context
         .database
@@ -337,10 +348,12 @@ pub fn special_mint_token(
         Some(ft_info),
     )?;
 
-    let final_to_bal = to_bal.checked_add(amount).expect("STX overflow");
+    let final_to_bal = to_bal
+        .checked_add(amount)
+        .ok_or_else(|| InterpreterError::Expect("STX overflow".into()))?;
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(TypeSignature::UIntType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(TypeSignature::UIntType.size()? as u64)?;
 
     env.global_context.database.set_ft_balance(
         &env.contract_context.contract_identifier,
@@ -383,7 +396,7 @@ pub fn special_mint_asset_v200(
     runtime_cost(
         ClarityCostFunction::NftMint,
         env,
-        expected_asset_type.size(),
+        expected_asset_type.size()?,
     )?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -402,8 +415,8 @@ pub fn special_mint_asset_v200(
         Err(e) => Err(e),
     }?;
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(expected_asset_type.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(expected_asset_type.size()? as u64)?;
 
     let epoch = *env.epoch();
     env.global_context.database.set_nft_owner(
@@ -448,7 +461,9 @@ pub fn special_mint_asset_v205(
         .ok_or(CheckErrors::NoSuchNFT(asset_name.to_string()))?;
     let expected_asset_type = &nft_metadata.key_type;
 
-    let asset_size = asset.serialized_size() as u64;
+    let asset_size = asset
+        .serialized_size()
+        .map_err(|e| InterpreterError::Expect(e.to_string()))? as u64;
     runtime_cost(ClarityCostFunction::NftMint, env, asset_size)?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -467,7 +482,7 @@ pub fn special_mint_asset_v205(
         Err(e) => Err(e),
     }?;
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
     env.add_memory(asset_size)?;
 
     let epoch = *env.epoch();
@@ -515,7 +530,7 @@ pub fn special_transfer_asset_v200(
     runtime_cost(
         ClarityCostFunction::NftTransfer,
         env,
-        expected_asset_type.size(),
+        expected_asset_type.size()?,
     )?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -544,8 +559,8 @@ pub fn special_transfer_asset_v200(
         return clarity_ecode!(TransferAssetErrorCodes::NOT_OWNED_BY);
     }
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(expected_asset_type.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(expected_asset_type.size()? as u64)?;
 
     let epoch = *env.epoch();
     env.global_context.database.set_nft_owner(
@@ -562,7 +577,7 @@ pub fn special_transfer_asset_v200(
         &env.contract_context.contract_identifier,
         asset_name,
         asset.clone(),
-    );
+    )?;
 
     let asset_identifier = AssetIdentifier {
         contract_identifier: env.contract_context.contract_identifier.clone(),
@@ -603,7 +618,9 @@ pub fn special_transfer_asset_v205(
         .ok_or(CheckErrors::NoSuchNFT(asset_name.to_string()))?;
     let expected_asset_type = &nft_metadata.key_type;
 
-    let asset_size = asset.serialized_size() as u64;
+    let asset_size = asset
+        .serialized_size()
+        .map_err(|e| InterpreterError::Expect(e.to_string()))? as u64;
     runtime_cost(ClarityCostFunction::NftTransfer, env, asset_size)?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -632,7 +649,7 @@ pub fn special_transfer_asset_v205(
         return clarity_ecode!(TransferAssetErrorCodes::NOT_OWNED_BY);
     }
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
     env.add_memory(asset_size)?;
 
     let epoch = *env.epoch();
@@ -650,7 +667,7 @@ pub fn special_transfer_asset_v205(
         &env.contract_context.contract_identifier,
         asset_name,
         asset.clone(),
-    );
+    )?;
 
     let asset_identifier = AssetIdentifier {
         contract_identifier: env.contract_context.contract_identifier.clone(),
@@ -728,10 +745,10 @@ pub fn special_transfer_token(
         .checked_add(amount)
         .ok_or(RuntimeErrorType::ArithmeticOverflow)?;
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(TypeSignature::UIntType.size() as u64)?;
-    env.add_memory(TypeSignature::UIntType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(TypeSignature::UIntType.size()? as u64)?;
+    env.add_memory(TypeSignature::UIntType.size()? as u64)?;
 
     env.global_context.database.set_ft_balance(
         &env.contract_context.contract_identifier,
@@ -823,7 +840,7 @@ pub fn special_get_owner_v200(
     runtime_cost(
         ClarityCostFunction::NftOwner,
         env,
-        expected_asset_type.size(),
+        expected_asset_type.size()?,
     )?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -836,10 +853,9 @@ pub fn special_get_owner_v200(
         &asset,
         expected_asset_type,
     ) {
-        Ok(owner) => {
-            Ok(Value::some(Value::Principal(owner))
-                .expect("Principal should always fit in optional."))
-        }
+        Ok(owner) => Ok(Value::some(Value::Principal(owner)).map_err(|_| {
+            InterpreterError::Expect("Principal should always fit in optional.".into())
+        })?),
         Err(Error::Runtime(RuntimeErrorType::NoSuchToken, _)) => Ok(Value::none()),
         Err(e) => Err(e),
     }
@@ -865,7 +881,9 @@ pub fn special_get_owner_v205(
         .ok_or(CheckErrors::NoSuchNFT(asset_name.to_string()))?;
     let expected_asset_type = &nft_metadata.key_type;
 
-    let asset_size = asset.serialized_size() as u64;
+    let asset_size = asset
+        .serialized_size()
+        .map_err(|e| InterpreterError::Expect(e.to_string()))? as u64;
     runtime_cost(ClarityCostFunction::NftOwner, env, asset_size)?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -878,10 +896,9 @@ pub fn special_get_owner_v205(
         &asset,
         expected_asset_type,
     ) {
-        Ok(owner) => {
-            Ok(Value::some(Value::Principal(owner))
-                .expect("Principal should always fit in optional."))
-        }
+        Ok(owner) => Ok(Value::some(Value::Principal(owner)).map_err(|_| {
+            InterpreterError::Expect("Principal should always fit in optional.".into())
+        })?),
         Err(Error::Runtime(RuntimeErrorType::NoSuchToken, _)) => Ok(Value::none()),
         Err(e) => Err(e),
     }
@@ -956,8 +973,8 @@ pub fn special_burn_token(
     };
     env.register_ft_burn_event(burner.clone(), amount, asset_identifier)?;
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(TypeSignature::UIntType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(TypeSignature::UIntType.size()? as u64)?;
 
     env.global_context.log_token_transfer(
         burner,
@@ -996,7 +1013,7 @@ pub fn special_burn_asset_v200(
     runtime_cost(
         ClarityCostFunction::NftBurn,
         env,
-        expected_asset_type.size(),
+        expected_asset_type.size()?,
     )?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -1021,8 +1038,8 @@ pub fn special_burn_asset_v200(
         return clarity_ecode!(BurnAssetErrorCodes::NOT_OWNED_BY);
     }
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
-    env.add_memory(expected_asset_type.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
+    env.add_memory(expected_asset_type.size()? as u64)?;
 
     let epoch = *env.epoch();
     env.global_context.database.burn_nft(
@@ -1038,7 +1055,7 @@ pub fn special_burn_asset_v200(
         &env.contract_context.contract_identifier,
         asset_name,
         asset.clone(),
-    );
+    )?;
 
     let asset_identifier = AssetIdentifier {
         contract_identifier: env.contract_context.contract_identifier.clone(),
@@ -1075,7 +1092,9 @@ pub fn special_burn_asset_v205(
         .ok_or(CheckErrors::NoSuchNFT(asset_name.to_string()))?;
     let expected_asset_type = &nft_metadata.key_type;
 
-    let asset_size = asset.serialized_size() as u64;
+    let asset_size = asset
+        .serialized_size()
+        .map_err(|e| InterpreterError::Expect(e.to_string()))? as u64;
     runtime_cost(ClarityCostFunction::NftBurn, env, asset_size)?;
 
     if !expected_asset_type.admits(env.epoch(), &asset)? {
@@ -1100,7 +1119,7 @@ pub fn special_burn_asset_v205(
         return clarity_ecode!(BurnAssetErrorCodes::NOT_OWNED_BY);
    }
 
-    env.add_memory(TypeSignature::PrincipalType.size() as u64)?;
+    env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
     env.add_memory(asset_size)?;
 
     let epoch = *env.epoch();
@@ -1117,7 +1136,7 @@ pub fn special_burn_asset_v205(
         &env.contract_context.contract_identifier,
         asset_name,
         asset.clone(),
-    );
+    )?;
 
     let asset_identifier = AssetIdentifier {
         contract_identifier: env.contract_context.contract_identifier.clone(),
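The recurring change across assets.rs is that `TypeSignature::size()`, `serialized_size()`, and the balance-snapshot operations now return `Result`, so memory accounting propagates failures with `?` rather than panicking mid-transaction. A minimal sketch of that shape under stated assumptions; all names below are hypothetical stand-ins, not the VM's real API:

```rust
// Hypothetical stand-ins illustrating the fallible-size pattern above.
#[derive(Debug)]
struct ExpectError(String);

fn size_of_principal() -> Result<u32, ExpectError> {
    Ok(148) // a fixed illustrative bound, not the real constant
}

fn add_memory(used: &mut u64, amount: u64, limit: u64) -> Result<(), ExpectError> {
    *used += amount;
    if *used > limit {
        return Err(ExpectError("memory limit exceeded".into()));
    }
    Ok(())
}

fn charge_for_transfer(used: &mut u64) -> Result<(), ExpectError> {
    // was: add_memory(size() as u64)? with size() able to panic internally;
    // now both failure modes surface as Err values the caller can handle.
    add_memory(used, size_of_principal()? as u64, 1_000_000)
}

fn main() {
    let mut used = 0;
    charge_for_transfer(&mut used).unwrap();
    println!("memory used: {}", used);
}
```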
diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs
index b42e8490cb..b788455f9c 100644
--- a/clarity/src/vm/functions/conversions.rs
+++ b/clarity/src/vm/functions/conversions.rs
@@ -21,7 +21,9 @@ use stacks_common::types::StacksEpochId;
 
 use crate::vm::costs::cost_functions::ClarityCostFunction;
 use crate::vm::costs::runtime_cost;
-use crate::vm::errors::{check_argument_count, CheckErrors, InterpreterResult as Result};
+use crate::vm::errors::{
+    check_argument_count, CheckErrors, InterpreterError, InterpreterResult as Result,
+};
 use crate::vm::representations::SymbolicExpression;
 use crate::vm::types::SequenceSubtype::{BufferType, StringType};
 use crate::vm::types::StringSubtype::ASCII;
@@ -53,12 +55,17 @@ pub fn buff_to_int_generic(
 ) -> Result<Value> {
     match value {
         Value::Sequence(SequenceData::Buffer(ref sequence_data)) => {
-            if sequence_data.len() > BufferLength::try_from(16_u32).unwrap() {
-                Err(CheckErrors::TypeValueError(
-                    SequenceType(BufferType(BufferLength::try_from(16_u32).unwrap())),
+            if sequence_data.len()?
+                > BufferLength::try_from(16_u32)
+                    .map_err(|_| InterpreterError::Expect("Failed to construct".into()))?
+            {
+                return Err(CheckErrors::TypeValueError(
+                    SequenceType(BufferType(BufferLength::try_from(16_u32).map_err(
+                        |_| InterpreterError::Expect("Failed to construct".into()),
+                    )?)),
                     value,
                 )
-                .into())
+                .into());
             } else {
                 let mut transfer_buffer = [0u8; 16];
                 let original_slice = sequence_data.as_slice();
@@ -77,11 +84,15 @@ pub fn buff_to_int_generic(
                 Ok(value)
             }
         }
-        _ => Err(CheckErrors::TypeValueError(
-            SequenceType(BufferType(BufferLength::try_from(16_u32).unwrap())),
-            value,
-        )
-        .into()),
+        _ => {
+            return Err(CheckErrors::TypeValueError(
+                SequenceType(BufferType(BufferLength::try_from(16_u32).map_err(
+                    |_| InterpreterError::Expect("Failed to construct".into()),
+                )?)),
+                value,
+            )
+            .into())
+        }
     }
 }
 
@@ -142,8 +153,8 @@ pub fn native_string_to_int_generic(
         }
         _ => Err(CheckErrors::UnionTypeValueError(
             vec![
-                TypeSignature::max_string_ascii(),
-                TypeSignature::max_string_utf8(),
+                TypeSignature::max_string_ascii()?,
+                TypeSignature::max_string_utf8()?,
             ],
             value,
         )
@@ -186,13 +197,15 @@ pub fn native_int_to_string_generic(
     match value {
         Value::Int(ref int_value) => {
             let as_string = int_value.to_string();
-            Ok(bytes_to_value_fn(as_string.into())
-                .expect("Unexpected error converting Int to string."))
+            Ok(bytes_to_value_fn(as_string.into()).map_err(|_| {
+                InterpreterError::Expect("Unexpected error converting Int to string.".into())
+            })?)
         }
         Value::UInt(ref uint_value) => {
             let as_string = uint_value.to_string();
-            Ok(bytes_to_value_fn(as_string.into())
-                .expect("Unexpected error converting UInt to string."))
+            Ok(bytes_to_value_fn(as_string.into()).map_err(|_| {
+                InterpreterError::Expect("Unexpected error converting UInt to string.".into())
+            })?)
         }
         _ => Err(CheckErrors::UnionTypeValueError(
             vec![TypeSignature::IntType, TypeSignature::UIntType],
@@ -219,7 +232,7 @@ pub fn to_consensus_buff(value: Value) -> Result<Value> {
     let mut clar_buff_serialized = vec![];
     value
         .serialize_write(&mut clar_buff_serialized)
-        .expect("FATAL: failed to serialize to vec");
+        .map_err(|_| InterpreterError::Expect("FATAL: failed to serialize to vec".into()))?;
 
     let clar_buff_serialized = match Value::buff_from(clar_buff_serialized) {
         Ok(x) => x,
@@ -251,7 +264,7 @@ pub fn from_consensus_buff(
         Ok(buff_data.data)
     } else {
         Err(CheckErrors::TypeValueError(
-            TypeSignature::max_buffer(),
+            TypeSignature::max_buffer()?,
             value,
         ))
     }?;
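The `buff_to_int_generic` path above bounds its input at 16 bytes, the width of Clarity's 128-bit integers, and copies it through a fixed `transfer_buffer`. A sketch of the underlying conversion for the little-endian case, detached from the VM types; the function name below is illustrative only:

```rust
// Illustrative only: convert an LE byte buffer (<= 16 bytes) to u128,
// mirroring the transfer-buffer copy in buff_to_int_generic above.
fn buff_to_uint_le(buff: &[u8]) -> Option<u128> {
    if buff.len() > 16 {
        return None; // corresponds to the TypeValueError branch
    }
    let mut transfer_buffer = [0u8; 16];
    transfer_buffer[..buff.len()].copy_from_slice(buff);
    Some(u128::from_le_bytes(transfer_buffer))
}

fn main() {
    assert_eq!(buff_to_uint_le(&[0x01, 0x02]), Some(0x0201));
    assert_eq!(buff_to_uint_le(&[0u8; 17]), None);
}
```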
diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs
index 30de1e9afd..dd55f3a56f 100644
--- a/clarity/src/vm/functions/crypto.rs
+++ b/clarity/src/vm/functions/crypto.rs
@@ -27,7 +27,7 @@ use crate::vm::costs::{
     constants as cost_constants, cost_functions, runtime_cost, CostTracker, MemoryConsumer,
 };
 use crate::vm::errors::{
-    check_argument_count, check_arguments_at_least, CheckErrors, Error,
+    check_argument_count, check_arguments_at_least, CheckErrors, Error, InterpreterError,
     InterpreterResult as Result, RuntimeErrorType, ShortReturnType,
 };
 use crate::vm::representations::SymbolicExpressionType::{Atom, List};
@@ -49,7 +49,7 @@ macro_rules! native_hash_func {
                 vec![
                     TypeSignature::IntType,
                     TypeSignature::UIntType,
-                    TypeSignature::max_buffer(),
+                    TypeSignature::max_buffer()?,
                 ],
                 input,
             )),
@@ -68,19 +68,19 @@ native_hash_func!(native_keccak256, hash::Keccak256Hash);
 
 // Note: Clarity1 had a bug in how the address is computed (issues/2619).
 // This method preserves the old, incorrect behavior for those running Clarity1.
-fn pubkey_to_address_v1(pub_key: Secp256k1PublicKey) -> StacksAddress {
+fn pubkey_to_address_v1(pub_key: Secp256k1PublicKey) -> Result<StacksAddress> {
     StacksAddress::from_public_keys(
         C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
         &AddressHashMode::SerializeP2PKH,
         1,
         &vec![pub_key],
     )
-    .unwrap()
+    .ok_or_else(|| InterpreterError::Expect("Failed to create address from pubkey".into()).into())
 }
 
 // Note: Clarity1 had a bug in how the address is computed (issues/2619).
 // This version contains the code for Clarity2 and going forward.
-fn pubkey_to_address_v2(pub_key: Secp256k1PublicKey, is_mainnet: bool) -> StacksAddress {
+fn pubkey_to_address_v2(pub_key: Secp256k1PublicKey, is_mainnet: bool) -> Result<StacksAddress> {
     let network_byte = if is_mainnet {
         C32_ADDRESS_VERSION_MAINNET_SINGLESIG
     } else {
@@ -92,7 +92,7 @@ fn pubkey_to_address_v2(pub_key: Secp256k1PublicKey, is_mainnet: bool) -> Stacks
         1,
         &vec![pub_key],
     )
-    .unwrap()
+    .ok_or_else(|| InterpreterError::Expect("Failed to create address from pubkey".into()).into())
 }
 
 pub fn special_principal_of(
@@ -121,12 +121,13 @@ pub fn special_principal_of(
     // Note: Clarity1 had a bug in how the address is computed (issues/2619).
     // We want to preserve the old behavior unless the version is greater.
     let addr = if *env.contract_context.get_clarity_version() > ClarityVersion::Clarity1 {
-        pubkey_to_address_v2(pub_key, env.global_context.mainnet)
+        pubkey_to_address_v2(pub_key, env.global_context.mainnet)?
     } else {
-        pubkey_to_address_v1(pub_key)
+        pubkey_to_address_v1(pub_key)?
     };
     let principal = addr.to_account_principal();
-    Ok(Value::okay(Value::Principal(principal)).unwrap())
+    return Ok(Value::okay(Value::Principal(principal))
+        .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?);
 } else {
     Ok(Value::err_uint(1))
 }
@@ -168,10 +169,17 @@ pub fn special_secp256k1_recover(
         _ => return Err(CheckErrors::TypeValueError(BUFF_65.clone(), param1).into()),
     };
 
-    match secp256k1_recover(message, signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature) {
-        Ok(pubkey) => Ok(Value::okay(Value::buff_from(pubkey.to_vec()).unwrap()).unwrap()),
-        _ => Ok(Value::err_uint(1)),
-    }
+    match secp256k1_recover(&message, &signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature)
+    {
+        Ok(pubkey) => {
+            return Ok(Value::okay(
+                Value::buff_from(pubkey.to_vec())
+                    .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?,
+            )
+            .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?)
+        }
+        _ => return Ok(Value::err_uint(1)),
+    };
 }
 
 pub fn special_secp256k1_verify(
diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs
index ead11ab0eb..d036dd27c2 100644
--- a/clarity/src/vm/functions/database.rs
+++ b/clarity/src/vm/functions/database.rs
@@ -79,7 +79,7 @@ pub fn special_contract_call(
     let mut rest_args_sizes = vec![];
     for arg in args[2..].iter() {
         let evaluated_arg = eval(arg, env, context)?;
-        rest_args_sizes.push(evaluated_arg.size() as u64);
+        rest_args_sizes.push(evaluated_arg.size()? as u64);
         rest_args.push(SymbolicExpression::atom_value(evaluated_arg));
     }
 
@@ -204,14 +204,14 @@ pub fn special_contract_call(
     }?;
 
     // sanitize contract-call outputs in epochs >= 2.4
-    let result_type = TypeSignature::type_of(&result);
+    let result_type = TypeSignature::type_of(&result)?;
     let (result, _) = Value::sanitize_value(env.epoch(), &result_type, result)
         .ok_or_else(|| CheckErrors::CouldNotDetermineType)?;
 
     // Ensure that the expected type from the trait spec admits
     // the type of the value returned by the dynamic dispatch.
     if let Some(returns_type_signature) = type_returns_constraint {
-        let actual_returns = TypeSignature::type_of(&result);
+        let actual_returns = TypeSignature::type_of(&result)?;
         if !returns_type_signature.admits_type(env.epoch(), &actual_returns)? {
             return Err(
                 CheckErrors::ReturnTypesMustMatch(returns_type_signature, actual_returns).into(),
@@ -242,7 +242,7 @@ pub fn special_fetch_variable_v200(
     runtime_cost(
         ClarityCostFunction::FetchVar,
         env,
-        data_types.value_type.size(),
+        data_types.value_type.size()?,
     )?;
 
     let epoch = *env.epoch();
@@ -278,7 +278,7 @@ pub fn special_fetch_variable_v205(
 
     let result_size = match &result {
         Ok(data) => data.serialized_byte_len,
-        Err(_e) => data_types.value_type.size() as u64,
+        Err(_e) => data_types.value_type.size()? as u64,
     };
 
     runtime_cost(ClarityCostFunction::FetchVar, env, result_size)?;
@@ -312,10 +312,10 @@ pub fn special_set_variable_v200(
     runtime_cost(
         ClarityCostFunction::SetVar,
         env,
-        data_types.value_type.size(),
+        data_types.value_type.size()?,
     )?;
 
-    env.add_memory(value.get_memory_use())?;
+    env.add_memory(value.get_memory_use()?)?;
 
     let epoch = *env.epoch();
     env.global_context
@@ -357,7 +357,7 @@ pub fn special_set_variable_v205(
 
     let result_size = match &result {
         Ok(data) => data.serialized_byte_len,
-        Err(_e) => data_types.value_type.size() as u64,
+        Err(_e) => data_types.value_type.size()? as u64,
     };
 
     runtime_cost(ClarityCostFunction::SetVar, env, result_size)?;
@@ -389,7 +389,7 @@ pub fn special_fetch_entry_v200(
     runtime_cost(
         ClarityCostFunction::FetchEntry,
         env,
-        data_types.value_type.size() + data_types.key_type.size(),
+        data_types.value_type.size()? + data_types.key_type.size()?,
     )?;
 
     let epoch = *env.epoch();
@@ -427,7 +427,7 @@ pub fn special_fetch_entry_v205(
 
     let result_size = match &result {
         Ok(data) => data.serialized_byte_len,
-        Err(_e) => (data_types.value_type.size() + data_types.key_type.size()) as u64,
+        Err(_e) => (data_types.value_type.size()? + data_types.key_type.size()?) as u64,
     };
 
     runtime_cost(ClarityCostFunction::FetchEntry, env, result_size)?;
@@ -457,7 +457,7 @@ pub fn special_at_block(
     env.add_memory(cost_constants::AT_BLOCK_MEMORY)?;
 
     let result = env.evaluate_at_block(bhh, &args[1], context);
-    env.drop_memory(cost_constants::AT_BLOCK_MEMORY);
+    env.drop_memory(cost_constants::AT_BLOCK_MEMORY)?;
 
     result
 }
@@ -490,11 +490,11 @@ pub fn special_set_entry_v200(
     runtime_cost(
         ClarityCostFunction::SetEntry,
         env,
-        data_types.value_type.size() + data_types.key_type.size(),
+        data_types.value_type.size()? + data_types.key_type.size()?,
     )?;
 
-    env.add_memory(key.get_memory_use())?;
-    env.add_memory(value.get_memory_use())?;
+    env.add_memory(key.get_memory_use()?)?;
+    env.add_memory(value.get_memory_use()?)?;
 
     let epoch = *env.epoch();
     env.global_context
@@ -538,7 +538,7 @@ pub fn special_set_entry_v205(
 
     let result_size = match &result {
         Ok(data) => data.serialized_byte_len,
-        Err(_e) => (data_types.value_type.size() + data_types.key_type.size()) as u64,
+        Err(_e) => (data_types.value_type.size()? + data_types.key_type.size()?) as u64,
     };
 
     runtime_cost(ClarityCostFunction::SetEntry, env, result_size)?;
@@ -576,11 +576,11 @@ pub fn special_insert_entry_v200(
     runtime_cost(
         ClarityCostFunction::SetEntry,
         env,
-        data_types.value_type.size() + data_types.key_type.size(),
+        data_types.value_type.size()? + data_types.key_type.size()?,
     )?;
 
-    env.add_memory(key.get_memory_use())?;
-    env.add_memory(value.get_memory_use())?;
+    env.add_memory(key.get_memory_use()?)?;
+    env.add_memory(value.get_memory_use()?)?;
 
     let epoch = *env.epoch();
 
@@ -625,7 +625,7 @@ pub fn special_insert_entry_v205(
 
     let result_size = match &result {
         Ok(data) => data.serialized_byte_len,
-        Err(_e) => (data_types.value_type.size() + data_types.key_type.size()) as u64,
+        Err(_e) => (data_types.value_type.size()? + data_types.key_type.size()?) as u64,
     };
 
     runtime_cost(ClarityCostFunction::SetEntry, env, result_size)?;
@@ -661,10 +661,10 @@ pub fn special_delete_entry_v200(
     runtime_cost(
         ClarityCostFunction::SetEntry,
         env,
-        data_types.key_type.size(),
+        data_types.key_type.size()?,
     )?;
 
-    env.add_memory(key.get_memory_use())?;
+    env.add_memory(key.get_memory_use()?)?;
 
     let epoch = *env.epoch();
     env.global_context
@@ -706,7 +706,7 @@ pub fn special_delete_entry_v205(
 
     let result_size = match &result {
         Ok(data) => data.serialized_byte_len,
-        Err(_e) => data_types.key_type.size() as u64,
+        Err(_e) => data_types.key_type.size()? as u64,
     };
 
     runtime_cost(ClarityCostFunction::SetEntry, env, result_size)?;
@@ -756,11 +756,14 @@ pub fn special_get_block_info(
 
     let result = match block_info_prop {
         BlockInfoProperty::Time => {
-            let block_time = env.global_context.database.get_block_time(height_value);
-            Value::UInt(block_time as u128)
+            let block_time = env.global_context.database.get_block_time(height_value)?;
+            Value::UInt(u128::from(block_time))
         }
         BlockInfoProperty::VrfSeed => {
-            let vrf_seed = env.global_context.database.get_block_vrf_seed(height_value);
+            let vrf_seed = env
+                .global_context
+                .database
+                .get_block_vrf_seed(height_value)?;
             Value::Sequence(SequenceData::Buffer(BuffData {
                 data: vrf_seed.as_bytes().to_vec(),
             }))
@@ -769,7 +772,7 @@ pub fn special_get_block_info(
             let header_hash = env
                 .global_context
                 .database
-                .get_block_header_hash(height_value);
+                .get_block_header_hash(height_value)?;
             Value::Sequence(SequenceData::Buffer(BuffData {
                 data: header_hash.as_bytes().to_vec(),
             }))
@@ -778,7 +781,7 @@ pub fn special_get_block_info(
             let burnchain_header_hash = env
                 .global_context
                 .database
-                .get_burnchain_block_header_hash(height_value);
+                .get_burnchain_block_header_hash(height_value)?;
             Value::Sequence(SequenceData::Buffer(BuffData {
                 data: burnchain_header_hash.as_bytes().to_vec(),
             }))
@@ -787,32 +790,35 @@ pub fn special_get_block_info(
             let id_header_hash = env
                 .global_context
                 .database
-                .get_index_block_header_hash(height_value);
+                .get_index_block_header_hash(height_value)?;
             Value::Sequence(SequenceData::Buffer(BuffData {
                 data: id_header_hash.as_bytes().to_vec(),
             }))
         }
         BlockInfoProperty::MinerAddress => {
-            let miner_address = env.global_context.database.get_miner_address(height_value);
+            let miner_address = env
+                .global_context
+                .database
+                .get_miner_address(height_value)?;
             Value::from(miner_address)
         }
         BlockInfoProperty::MinerSpendWinner => {
             let winner_spend = env
                 .global_context
                 .database
-                .get_miner_spend_winner(height_value);
+                .get_miner_spend_winner(height_value)?;
             Value::UInt(winner_spend)
         }
         BlockInfoProperty::MinerSpendTotal => {
             let total_spend = env
                 .global_context
                 .database
-                .get_miner_spend_total(height_value);
+                .get_miner_spend_total(height_value)?;
             Value::UInt(total_spend)
         }
         BlockInfoProperty::BlockReward => {
             // this is already an optional
-            let block_reward_opt = env.global_context.database.get_block_reward(height_value);
+            let block_reward_opt = env.global_context.database.get_block_reward(height_value)?;
             return Ok(match block_reward_opt {
                 Some(x) => Value::some(Value::UInt(x))?,
                 None => Value::none(),
@@ -871,7 +877,7 @@ pub fn special_get_burn_block_info(
             let burnchain_header_hash_opt = env
                 .global_context
                 .database
-                .get_burnchain_block_header_hash_for_burnchain_height(height_value);
+                .get_burnchain_block_header_hash_for_burnchain_height(height_value)?;
 
             match burnchain_header_hash_opt {
                 Some(burnchain_header_hash) => {
@@ -886,7 +892,7 @@ pub fn special_get_burn_block_info(
             let pox_addrs_and_payout = env
                 .global_context
                 .database
-                .get_pox_payout_addrs_for_burnchain_height(height_value);
+                .get_pox_payout_addrs_for_burnchain_height(height_value)?;
 
             match pox_addrs_and_payout {
                 Some((addrs, payout)) => Ok(Value::some(Value::Tuple(
@@ -897,13 +903,21 @@ pub fn special_get_burn_block_info(
                             addrs.into_iter().map(Value::Tuple).collect(),
                             env.epoch(),
                         )
-                        .expect("FATAL: could not convert address list to Value"),
+                        .map_err(|_| {
+                            InterpreterError::Expect(
+                                "FATAL: could not convert address list to Value".into(),
+                            )
+                        })?,
                     ),
                     ("payout".into(), Value::UInt(payout)),
                 ])
-                .expect("FATAL: failed to build pox addrs and payout tuple"),
+                .map_err(|_| {
+                    InterpreterError::Expect(
+                        "FATAL: failed to build pox addrs and payout tuple".into(),
+                    )
+                })?,
                 ))
-                .expect("FATAL: could not build Some(..)")),
+                .map_err(|_| InterpreterError::Expect("FATAL: could not build Some(..)".into()))?),
                 None => Ok(Value::none()),
             }
         }
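In database.rs, every header accessor (`get_block_time`, `get_block_vrf_seed`, `get_miner_address`, and so on) becomes fallible, so `get-block-info?` threads database errors outward instead of unwrapping them. The shape of that dispatch, sketched with hypothetical stand-ins rather than the real `ClarityDatabase` API:

```rust
// Hypothetical sketch of fallible block-info dispatch.
#[allow(dead_code)]
#[derive(Debug)]
enum DbError {
    Corruption(String),
}

enum BlockInfoProperty {
    Time,
    MinerSpendTotal,
}

fn get_block_time(height: u32) -> Result<u64, DbError> {
    Ok(1_600_000_000 + u64::from(height) * 600) // placeholder data
}

fn get_miner_spend_total(height: u32) -> Result<u128, DbError> {
    Ok(20_000 * u128::from(height)) // placeholder data
}

fn block_info(prop: BlockInfoProperty, height: u32) -> Result<u128, DbError> {
    // Each accessor returns Result, so `?` replaces the old panics.
    let value = match prop {
        BlockInfoProperty::Time => u128::from(get_block_time(height)?),
        BlockInfoProperty::MinerSpendTotal => get_miner_spend_total(height)?,
    };
    Ok(value)
}

fn main() {
    println!("{:?}", block_info(BlockInfoProperty::Time, 10));
}
```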
diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs
index b653ebab76..7873854e0a 100644
--- a/clarity/src/vm/functions/mod.rs
+++ b/clarity/src/vm/functions/mod.rs
@@ -581,10 +581,13 @@ fn native_eq(args: Vec<Value>, env: &mut Environment) -> Result<Value> {
     } else {
         let first = &args[0];
         // check types:
-        let mut arg_type = TypeSignature::type_of(first);
+        let mut arg_type = TypeSignature::type_of(first)?;
         for x in args.iter() {
-            arg_type =
-                TypeSignature::least_supertype(env.epoch(), &TypeSignature::type_of(x), &arg_type)?;
+            arg_type = TypeSignature::least_supertype(
+                env.epoch(),
+                &TypeSignature::type_of(x)?,
+                &arg_type,
+            )?;
             if x != first {
                 return Ok(Value::Bool(false));
             }
@@ -610,7 +613,7 @@ fn special_print(
     })?;
 
     let input = eval(arg, env, context)?;
-    runtime_cost(ClarityCostFunction::Print, env, input.size())?;
+    runtime_cost(ClarityCostFunction::Print, env, input.size()?)?;
 
     if cfg!(feature = "developer-mode") {
         debug!("{}", &input);
@@ -732,7 +735,7 @@ fn special_let(
 
             let binding_value = eval(var_sexp, env, &inner_context)?;
 
-            let bind_mem_use = binding_value.get_memory_use();
+            let bind_mem_use = binding_value.get_memory_use()?;
             env.add_memory(bind_mem_use)?;
             memory_use += bind_mem_use; // no check needed, b/c it's done in add_memory.
             if *env.contract_context.get_clarity_version() >= ClarityVersion::Clarity2 {
@@ -751,7 +754,7 @@ fn special_let(
             last_result.replace(body_result);
         }
         // last_result should always be Some(...), because of the arg len check above.
-        Ok(last_result.unwrap())
+        last_result.ok_or_else(|| InterpreterError::Expect("Failed to get let result".into()).into())
     })
 }
 
@@ -777,7 +780,7 @@ fn special_as_contract(
 
     let result = eval(&args[0], &mut nested_env, context);
 
-    env.drop_memory(cost_constants::AS_CONTRACT_MEMORY);
+    env.drop_memory(cost_constants::AS_CONTRACT_MEMORY)?;
 
     result
 }
diff --git a/clarity/src/vm/functions/options.rs b/clarity/src/vm/functions/options.rs
index b03d464be8..26829618af 100644
--- a/clarity/src/vm/functions/options.rs
+++ b/clarity/src/vm/functions/options.rs
@@ -18,8 +18,8 @@ use crate::vm::contexts::{Environment, LocalContext};
 use crate::vm::costs::cost_functions::ClarityCostFunction;
 use crate::vm::costs::{cost_functions, runtime_cost, CostTracker, MemoryConsumer};
 use crate::vm::errors::{
-    check_argument_count, check_arguments_at_least, CheckErrors, InterpreterResult as Result,
-    RuntimeErrorType, ShortReturnType,
+    check_argument_count, check_arguments_at_least, CheckErrors, InterpreterError,
+    InterpreterResult as Result, RuntimeErrorType, ShortReturnType,
 };
 use crate::vm::types::{CallableData, OptionalData, ResponseData, TypeSignature, Value};
 use crate::vm::Value::CallableContract;
@@ -94,8 +94,11 @@ pub fn native_try_ret(input: Value) -> Result<Value> {
             if data.committed {
                 Ok(*data.data)
             } else {
-                let short_return_val = Value::error(*data.data)
-                    .expect("BUG: Failed to construct new response type from old response type");
+                let short_return_val = Value::error(*data.data).map_err(|_| {
+                    InterpreterError::Expect(
+                        "BUG: Failed to construct new response type from old response type".into(),
+                    )
+                })?;
                 Err(ShortReturnType::ExpectedValue(short_return_val).into())
             }
         }
@@ -118,7 +121,7 @@ fn eval_with_new_binding(
         return Err(CheckErrors::NameAlreadyUsed(bind_name.into()).into());
     }
 
-    let memory_use = bind_value.get_memory_use();
+    let memory_use = bind_value.get_memory_use()?;
     env.add_memory(memory_use)?;
 
     if *env.contract_context.get_clarity_version() >= ClarityVersion::Clarity2 {
@@ -135,7 +138,7 @@ fn eval_with_new_binding(
     inner_context.variables.insert(bind_name, bind_value);
     let result = vm::eval(body, env, &inner_context);
 
-    env.drop_memory(memory_use);
+    env.drop_memory(memory_use)?;
 
     result
 }
@@ -209,7 +212,7 @@ pub fn special_match(
     match input {
         Value::Response(data) => special_match_resp(data, &args[1..], env, context),
         Value::Optional(data) => special_match_opt(data, &args[1..], env, context),
-        _ => Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)).into()),
+        _ => return Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()),
    }
 }
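`native_try_ret` above implements Clarity's `try!`: an `(err ..)` or `none` input aborts evaluation by raising a short-return carrying the value the caller should yield. A sketch of that control flow outside the VM, using Rust's `Err` channel as the stand-in for `ShortReturnType` (everything below is illustrative):

```rust
// Stand-in sketch of the short-return trick used by `try!` above:
// the Err channel carries an early-return value up the evaluator.
fn try_unwrap(committed: bool, value: i64) -> Result<i64, i64> {
    if committed {
        Ok(value) // (ok value): keep evaluating with the inner value
    } else {
        Err(value) // (err value): short-circuit the whole expression
    }
}

fn eval_body() -> Result<i64, i64> {
    let x = try_unwrap(true, 2)?;
    let y = try_unwrap(false, 99)?; // stops here, yielding Err(99)
    Ok(x + y)
}

fn main() {
    assert_eq!(eval_body(), Err(99));
}
```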
-fn create_principal_true_error_response(error_int: PrincipalConstructErrorCode) -> Value {
+fn create_principal_true_error_response(error_int: PrincipalConstructErrorCode) -> Result<Value> {
     Value::error(Value::Tuple(
         TupleData::from_data(vec![
             ("error_code".into(), Value::UInt(error_int as u128)),
             ("value".into(), Value::none()),
         ])
-        .expect("FAIL: Failed to initialize tuple."),
+        .map_err(|_| InterpreterError::Expect("FAIL: Failed to initialize tuple.".into()))?,
     ))
-    .expect("FAIL: Failed to initialize (err ..) response")
+    .map_err(|_| {
+        InterpreterError::Expect("FAIL: Failed to initialize (err ..) response".into()).into()
+    })
 }
 
 /// Creates Response return type, to wrap a *return value returned as an error* result of a
@@ -134,18 +136,22 @@ fn create_principal_true_error_response(error_int: PrincipalConstructErrorCode)
 fn create_principal_value_error_response(
     error_int: PrincipalConstructErrorCode,
     value: Value,
-) -> Value {
+) -> Result<Value> {
     Value::error(Value::Tuple(
         TupleData::from_data(vec![
             ("error_code".into(), Value::UInt(error_int as u128)),
             (
                 "value".into(),
-                Value::some(value).expect("Unexpected problem creating Value."),
+                Value::some(value).map_err(|_| {
+                    InterpreterError::Expect("Unexpected problem creating Value.".into())
+                })?,
             ),
         ])
-        .expect("FAIL: Failed to initialize tuple."),
+        .map_err(|_| InterpreterError::Expect("FAIL: Failed to initialize tuple.".into()))?,
     ))
-    .expect("FAIL: Failed to initialize (err ..) response")
+    .map_err(|_| {
+        InterpreterError::Expect("FAIL: Failed to initialize (err ..) response".into()).into()
+    })
 }
 
 pub fn special_principal_destruct(
@@ -174,7 +180,7 @@ pub fn special_principal_destruct(
     // channel or the error channel.
     let version_byte_is_valid = version_matches_current_network(version_byte, env.global_context);
 
-    let tuple = create_principal_destruct_tuple(version_byte, &hash_bytes, name_opt);
+    let tuple = create_principal_destruct_tuple(version_byte, &hash_bytes, name_opt)?;
     Ok(Value::Response(ResponseData {
         committed: version_byte_is_valid,
         data: Box::new(tuple),
@@ -215,9 +221,7 @@ pub fn special_principal_construct(
     } else if verified_version.is_empty() {
         // the type checker does not check the actual length of the buffer, but a 0-length buffer
         // will type-check to (buff 1)
-        return Ok(create_principal_true_error_response(
-            PrincipalConstructErrorCode::BUFFER_LENGTH,
-        ));
+        return create_principal_true_error_response(PrincipalConstructErrorCode::BUFFER_LENGTH);
     } else {
         (*verified_version)[0]
     };
 
@@ -225,9 +229,7 @@ pub fn special_principal_construct(
     // If the version byte is >= 32, this is a runtime error, because it wasn't the job of the
     // type system. This is a requirement for c32check encoding.
     if version_byte >= 32 {
-        return Ok(create_principal_true_error_response(
-            PrincipalConstructErrorCode::BUFFER_LENGTH,
-        ));
+        return create_principal_true_error_response(PrincipalConstructErrorCode::BUFFER_LENGTH);
     }
 
     // `version_byte_is_valid` determines whether the returned `Response` is through the success
@@ -250,9 +252,7 @@ pub fn special_principal_construct(
     // If the hash-bytes buffer has less than 20 bytes, this is a runtime error, because it
     // wasn't the job of the type system (i.e. (buff X) for all X < 20 are all also (buff 20))
     if verified_hash_bytes.len() < 20 {
-        return Ok(create_principal_true_error_response(
-            PrincipalConstructErrorCode::BUFFER_LENGTH,
-        ));
+        return create_principal_true_error_response(PrincipalConstructErrorCode::BUFFER_LENGTH);
     }
 
     // Construct the principal.
@@ -267,7 +267,7 @@ pub fn special_principal_construct(
         Value::Sequence(SequenceData::String(CharType::ASCII(ascii_data))) => ascii_data,
         _ => {
             return Err(CheckErrors::TypeValueError(
-                TypeSignature::contract_name_string_ascii_type(),
+                TypeSignature::contract_name_string_ascii_type()?,
                 name,
             )
             .into())
@@ -276,15 +276,15 @@ pub fn special_principal_construct(
 
     // If it's not long enough, then it's a runtime error that warrants an (err ..) response.
     if name_bytes.data.len() < CONTRACT_MIN_NAME_LENGTH {
-        return Ok(create_principal_true_error_response(
+        return create_principal_true_error_response(
             PrincipalConstructErrorCode::CONTRACT_NAME,
-        ));
+        );
     }
 
     // if it's too long, then this should have been caught by the type-checker
     if name_bytes.data.len() > CONTRACT_MAX_NAME_LENGTH {
         return Err(CheckErrors::TypeValueError(
-            TypeSignature::contract_name_string_ascii_type(),
+            TypeSignature::contract_name_string_ascii_type()?,
             Value::from(name_bytes),
         )
         .into());
@@ -293,17 +293,20 @@ pub fn special_principal_construct(
     // The type-checker can't verify that the name is a valid ContractName, so we'll need to do
     // it here at runtime. If it's not valid, then it warrants this function evaluating to
     // (err ..).
-    let name_string = String::from_utf8(name_bytes.data).expect(
-        "FAIL: could not convert bytes of type (string-ascii 40) back to a UTF-8 string",
-    );
+    let name_string = String::from_utf8(name_bytes.data).map_err(|_| {
+        InterpreterError::Expect(
+            "FAIL: could not convert bytes of type (string-ascii 40) back to a UTF-8 string"
+                .into(),
+        )
+    })?;
     let contract_name = match ContractName::try_from(name_string) {
         Ok(cn) => cn,
         Err(_) => {
             // not a valid contract name
-            return Ok(create_principal_true_error_response(
+            return create_principal_true_error_response(
                 PrincipalConstructErrorCode::CONTRACT_NAME,
-            ));
+            );
         }
     };
 
@@ -317,11 +320,10 @@ pub fn special_principal_construct(
     };
 
     if version_byte_is_valid {
-        Ok(Value::okay(principal).expect("FAIL: failed to build an (ok ..) response"))
+        Ok(Value::okay(principal).map_err(|_| {
+            InterpreterError::Expect("FAIL: failed to build an (ok ..) response".into())
+        })?)
     } else {
-        Ok(create_principal_value_error_response(
-            PrincipalConstructErrorCode::VERSION_BYTE,
-            principal,
-        ))
+        create_principal_value_error_response(PrincipalConstructErrorCode::VERSION_BYTE, principal)
     }
 }
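The change above is representative of the whole patch: helpers that used to `expect(..)` (and so could panic the node on an internal invariant violation) now return `Result<Value>` and surface the violation as an `InterpreterError::Expect`. A minimal sketch of the pattern, using identifiers as they appear in this diff rather than a verbatim excerpt of the crate:

```rust
// Hedged sketch; assumes the `clarity` crate layout shown in this diff.
use clarity::vm::errors::{InterpreterError, InterpreterResult as Result};
use clarity::vm::Value;

fn err_code_response(code: u128) -> Result<Value> {
    // Before: Value::error(..).expect("FAIL: ...") aborted the process.
    // After: the same failure becomes a recoverable error that callers
    // propagate with `?`, which is why the `Ok(..)` wrappers around the
    // call sites above were removed.
    Value::error(Value::UInt(code))
        .map_err(|_| InterpreterError::Expect("FAIL: could not build (err ..)".into()).into())
}
```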
diff --git a/clarity/src/vm/functions/sequences.rs b/clarity/src/vm/functions/sequences.rs
index 16e7d8d077..029e62484a 100644
--- a/clarity/src/vm/functions/sequences.rs
+++ b/clarity/src/vm/functions/sequences.rs
@@ -41,7 +41,7 @@ pub fn list_cons(
     let mut arg_size = 0;
     for a in args.iter() {
-        arg_size = arg_size.cost_overflow_add(a.size().into())?;
+        arg_size = arg_size.cost_overflow_add(a.size()?.into())?;
     }
     runtime_cost(ClarityCostFunction::ListCons, env, arg_size)?;
 
@@ -75,7 +75,7 @@ pub fn special_filter(
                 }
             })?;
         }
-        _ => return Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into()),
+        _ => return Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into()),
     };
     Ok(sequence)
 }
@@ -96,20 +96,18 @@ pub fn special_fold(
     let initial = eval(&args[2], env, context)?;
 
     match sequence {
-        Value::Sequence(ref mut sequence_data) => {
-            sequence_data
-                .atom_values()
-                .into_iter()
-                .try_fold(initial, |acc, x| {
-                    apply(
-                        &function,
-                        &[x, SymbolicExpression::atom_value(acc)],
-                        env,
-                        context,
-                    )
-                })
-        }
-        _ => Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into()),
+        Value::Sequence(ref mut sequence_data) => sequence_data
+            .atom_values()?
+            .into_iter()
+            .try_fold(initial, |acc, x| {
+                apply(
+                    &function,
+                    &[x, SymbolicExpression::atom_value(acc)],
+                    env,
+                    context,
+                )
+            }),
+        _ => Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into()),
     }
 }
 
@@ -135,7 +133,7 @@ pub fn special_map(
     match sequence {
         Value::Sequence(ref mut sequence_data) => {
             min_args_len = min_args_len.min(sequence_data.len());
-            for (apply_index, value) in sequence_data.atom_values().into_iter().enumerate() {
+            for (apply_index, value) in sequence_data.atom_values()?.into_iter().enumerate() {
                 if apply_index > min_args_len {
                     break;
                 }
@@ -147,7 +145,9 @@ pub fn special_map(
             }
         }
         _ => {
-            return Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into())
+            return Err(
+                CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into(),
+            )
         }
     }
 }
@@ -187,11 +187,11 @@ pub fn special_append(
         type_signature,
     } = list;
     let (entry_type, size) = type_signature.destruct();
-    let element_type = TypeSignature::type_of(&element);
+    let element_type = TypeSignature::type_of(&element)?;
     runtime_cost(
         ClarityCostFunction::Append,
         env,
-        u64::from(cmp::max(entry_type.size(), element_type.size())),
+        u64::from(cmp::max(entry_type.size()?, element_type.size()?)),
     )?;
     if entry_type.is_no_type() {
         assert_eq!(size, 0);
@@ -232,7 +232,7 @@ pub fn special_concat_v200(
     runtime_cost(
         ClarityCostFunction::Concat,
         env,
-        u64::from(wrapped_seq.size()).cost_overflow_add(u64::from(other_wrapped_seq.size()))?,
+        u64::from(wrapped_seq.size()?).cost_overflow_add(u64::from(other_wrapped_seq.size()?))?,
     )?;
 
     match (&mut wrapped_seq, other_wrapped_seq) {
@@ -289,7 +289,9 @@ pub fn special_as_max_len(
     let sequence_len = match sequence {
         Value::Sequence(ref sequence_data) => sequence_data.len() as u128,
         _ => {
-            return Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into())
+            return Err(
+                CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into(),
+            )
         }
     };
     if sequence_len > *expected_len {
@@ -302,17 +304,18 @@ pub fn special_as_max_len(
         }
     } else {
         let actual_len = eval(&args[1], env, context)?;
-        Err(
-            CheckErrors::TypeError(TypeSignature::UIntType, TypeSignature::type_of(&actual_len))
-                .into(),
+        Err(CheckErrors::TypeError(
+            TypeSignature::UIntType,
+            TypeSignature::type_of(&actual_len)?,
         )
+        .into())
     }
 }
 
 pub fn native_len(sequence: Value) -> Result<Value> {
     match sequence {
         Value::Sequence(sequence_data) => Ok(Value::UInt(sequence_data.len() as u128)),
-        _ => Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into()),
+        _ => Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into()),
     }
 }
 
@@ -323,7 +326,7 @@ pub fn native_index_of(sequence: Value, to_find: Value) -> Result<Value> {
             None => Ok(Value::none()),
         }
     } else {
-        Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into())
+        Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into())
    }
 }
 
@@ -331,7 +334,7 @@ pub fn native_element_at(sequence: Value, index: Value) -> Result<Value> {
     let sequence_data = if let Value::Sequence(sequence_data) = sequence {
         sequence_data
     } else {
-        return Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)).into());
+        return Err(CheckErrors::ExpectedSequence(TypeSignature::type_of(&sequence)?).into());
     };
 
     let index = if let Value::UInt(index_u128) = index {
@@ -344,7 +347,7 @@ pub fn native_element_at(sequence: Value, index: Value) -> Result<Value> {
         return Err(CheckErrors::TypeValueError(TypeSignature::UIntType, index).into());
     };
 
-    if let Some(result) = sequence_data.element_at(index) {
+    if let Some(result) = sequence_data.element_at(index)? {
         Value::some(result)
     } else {
         Ok(Value::none())
@@ -383,7 +386,7 @@ pub fn special_slice(
     runtime_cost(
         ClarityCostFunction::Slice,
         env,
-        (right_position - left_position) * seq.element_size(),
+        (right_position - left_position) * seq.element_size()?,
     )?;
 
     let seq_value = seq.slice(env.epoch(), left_position as usize, right_position as usize)?;
@@ -410,13 +413,13 @@ pub fn special_replace_at(
     check_argument_count(3, args)?;
 
     let seq = eval(&args[0], env, context)?;
-    let seq_type = TypeSignature::type_of(&seq);
+    let seq_type = TypeSignature::type_of(&seq)?;
 
     // runtime is the cost to copy over one element into its place
-    runtime_cost(ClarityCostFunction::ReplaceAt, env, seq_type.size())?;
+    runtime_cost(ClarityCostFunction::ReplaceAt, env, seq_type.size()?)?;
 
     let expected_elem_type = if let TypeSignature::SequenceType(seq_subtype) = &seq_type {
-        seq_subtype.unit_type()
+        seq_subtype.unit_type()?
     } else {
         return Err(CheckErrors::ExpectedSequence(seq_type).into());
    };
diff --git a/clarity/src/vm/functions/tuples.rs b/clarity/src/vm/functions/tuples.rs
index 266ccffbf7..9a509ccfbe 100644
--- a/clarity/src/vm/functions/tuples.rs
+++ b/clarity/src/vm/functions/tuples.rs
@@ -16,7 +16,8 @@
 use crate::vm::costs::cost_functions::ClarityCostFunction;
 use crate::vm::costs::{cost_functions, runtime_cost};
 use crate::vm::errors::{
-    check_argument_count, check_arguments_at_least, CheckErrors, InterpreterResult as Result,
+    check_argument_count, check_arguments_at_least, CheckErrors, InterpreterError,
+    InterpreterResult as Result,
 };
 use crate::vm::representations::SymbolicExpressionType::List;
 use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType};
@@ -59,10 +60,13 @@ pub fn tuple_get(
         Some(data) => {
             if let Value::Tuple(tuple_data) = *data {
                 runtime_cost(ClarityCostFunction::TupleGet, env, tuple_data.len())?;
-                Ok(Value::some(tuple_data.get_owned(arg_name)?)
-                    .expect("Tuple contents should *always* fit in a some wrapper"))
+                Ok(Value::some(tuple_data.get_owned(arg_name)?).map_err(|_| {
+                    InterpreterError::Expect(
+                        "Tuple contents should *always* fit in a some wrapper".into(),
+                    )
+                })?)
             } else {
-                Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&data)).into())
+                Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&data)?).into())
             }
         }
         None => Ok(Value::none()), // just pass through none-types.
@@ -72,19 +76,19 @@ pub fn tuple_get(
             runtime_cost(ClarityCostFunction::TupleGet, env, tuple_data.len())?;
             tuple_data.get_owned(arg_name)
         }
-        _ => Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&value)).into()),
+        _ => Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&value)?).into()),
     }
 }
 
 pub fn tuple_merge(base: Value, update: Value) -> Result<Value> {
     let initial_values = match base {
         Value::Tuple(initial_values) => Ok(initial_values),
-        _ => Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&base))),
+        _ => Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&base)?)),
     }?;
     let new_values = match update {
         Value::Tuple(new_values) => Ok(new_values),
-        _ => Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&update))),
+        _ => Err(CheckErrors::ExpectedTuple(TypeSignature::type_of(&update)?)),
     }?;
 
     let combined = TupleData::shallow_merge(initial_values, new_values)?;
diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs
index 9c1a483ac3..58b3b87a93 100644
--- a/clarity/src/vm/mod.rs
+++ b/clarity/src/vm/mod.rs
@@ -49,7 +49,6 @@ pub mod tests;
 #[cfg(any(test, feature = "testing"))]
 pub mod test_util;
 
-#[allow(clippy::result_large_err)]
 pub mod clarity;
 
 use std::collections::BTreeMap;
@@ -173,31 +172,33 @@ fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) ->
             name
         ))
         .into())
-    } else if let Some(value) = variables::lookup_reserved_variable(name, context, env)? {
-        Ok(value)
     } else {
-        runtime_cost(
-            ClarityCostFunction::LookupVariableDepth,
-            env,
-            context.depth(),
-        )?;
-        if let Some(value) = context.lookup_variable(name) {
-            runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size())?;
-            Ok(value.clone())
-        } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() {
-            runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size())?;
-            let (value, _) =
-                Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value), value)
-                    .ok_or_else(|| CheckErrors::CouldNotDetermineType)?;
+        if let Some(value) = variables::lookup_reserved_variable(name, context, env)? {
             Ok(value)
-        } else if let Some(callable_data) = context.lookup_callable_contract(name) {
-            if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 {
-                Ok(callable_data.contract_identifier.clone().into())
+        } else {
+            runtime_cost(
+                ClarityCostFunction::LookupVariableDepth,
+                env,
+                context.depth(),
+            )?;
+            if let Some(value) = context.lookup_variable(name) {
+                runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?;
+                Ok(value.clone())
+            } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() {
+                runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?;
+                let (value, _) =
+                    Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value)
+                        .ok_or_else(|| CheckErrors::CouldNotDetermineType)?;
+                Ok(value)
+            } else if let Some(callable_data) = context.lookup_callable_contract(name) {
+                if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 {
+                    Ok(callable_data.contract_identifier.clone().into())
+                } else {
+                    Ok(Value::CallableContract(callable_data.clone()))
+                }
             } else {
-                Ok(Value::CallableContract(callable_data.clone()))
+                Err(CheckErrors::UndefinedVariable(name.to_string()).into())
             }
-        } else {
-            Err(CheckErrors::UndefinedVariable(name.to_string()).into())
         }
     }
 }
@@ -237,7 +238,10 @@ pub fn apply(
     // only enough to do recursion detection.
 
     // do recursion check on user functions.
-    let track_recursion = matches!(function, CallableType::UserFunction(_));
+    let track_recursion = match function {
+        CallableType::UserFunction(_) => true,
+        _ => false,
+    };
 
     if track_recursion && env.call_stack.contains(&identifier) {
         return Err(CheckErrors::CircularReference(vec![identifier.to_string()]).into());
@@ -261,21 +265,21 @@ pub fn apply(
             let arg_value = match eval(arg_x, env, context) {
                 Ok(x) => x,
                 Err(e) => {
-                    env.drop_memory(used_memory);
+                    env.drop_memory(used_memory)?;
                     env.call_stack.decr_apply_depth();
                     return Err(e);
                 }
             };
-            let arg_use = arg_value.get_memory_use();
+            let arg_use = arg_value.get_memory_use()?;
             match env.add_memory(arg_use) {
                 Ok(_x) => {}
                 Err(e) => {
-                    env.drop_memory(used_memory);
+                    env.drop_memory(used_memory)?;
                     env.call_stack.decr_apply_depth();
                     return Err(Error::from(e));
                 }
             };
-            used_memory += arg_value.get_memory_use();
+            used_memory += arg_value.get_memory_use()?;
             evaluated_args.push(arg_value);
         }
         env.call_stack.decr_apply_depth();
@@ -298,18 +302,18 @@ pub fn apply(
                 .and_then(|_| function.apply(evaluated_args, env))
         }
         CallableType::UserFunction(function) => function.apply(&evaluated_args, env),
-        _ => panic!("Should be unreachable."),
+        _ => return Err(InterpreterError::Expect("Should be unreachable.".into()).into()),
     };
     add_stack_trace(&mut resp, env);
-    env.drop_memory(used_memory);
+    env.drop_memory(used_memory)?;
     env.call_stack.remove(&identifier, track_recursion)?;
     resp
 }
 
-pub fn eval(
+pub fn eval<'a>(
     exp: &SymbolicExpression,
-    env: &mut Environment,
+    env: &'a mut Environment,
     context: &LocalContext,
 ) -> Result<Value> {
     use crate::vm::representations::SymbolicExpressionType::{
@@ -325,7 +329,7 @@ pub fn eval(
 
     let res = match exp.expr {
         AtomValue(ref value) | LiteralValue(ref value) => Ok(value.clone()),
-        Atom(ref value) => lookup_variable(value, context, env),
+        Atom(ref value) => lookup_variable(&value, context, env),
         List(ref children) => {
             let (function_variable, rest) = children
                 .split_first()
@@ -334,8 +338,8 @@ pub fn eval(
             let function_name = function_variable
                 .match_atom()
                 .ok_or(CheckErrors::BadFunctionName)?;
-            let f = lookup_function(function_name, env)?;
-            apply(&f, rest, env, context)
+            let f = lookup_function(&function_name, env)?;
+            apply(&f, &rest, env, context)
         }
         TraitReference(_, _) | Field(_) => {
             return Err(InterpreterError::BadSymbolicRepresentation(
@@ -358,8 +362,10 @@ pub fn eval(
 pub fn is_reserved(name: &str, version: &ClarityVersion) -> bool {
     if let Some(_result) = functions::lookup_reserved_functions(name, version) {
         true
+    } else if variables::is_reserved_name(name, version) {
+        true
     } else {
-        variables::is_reserved_name(name, version)
+        false
     }
 }
 
@@ -389,7 +395,7 @@ pub fn eval_all(
             match try_define {
                 DefineResult::Variable(name, value) => {
                     runtime_cost(ClarityCostFunction::BindName, global_context, 0)?;
-                    let value_memory_use = value.get_memory_use();
+                    let value_memory_use = value.get_memory_use()?;
                     global_context.add_memory(value_memory_use)?;
                     total_memory_use += value_memory_use;
 
@@ -401,31 +407,31 @@ pub fn eval_all(
                     contract_context.functions.insert(name, value);
                 },
                 DefineResult::PersistedVariable(name, value_type, value) => {
-                    runtime_cost(ClarityCostFunction::CreateVar, global_context, value_type.size())?;
+                    runtime_cost(ClarityCostFunction::CreateVar, global_context, value_type.size()?)?;
                     contract_context.persisted_names.insert(name.clone());
 
                     global_context.add_memory(value_type.type_size()
-                        .expect("type size should be realizable") as u64)?;
+                        .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?;
 
-                    global_context.add_memory(value.size() as u64)?;
+                    global_context.add_memory(value.size()? as u64)?;
 
-                    let data_type = global_context.database.create_variable(&contract_context.contract_identifier, &name, value_type);
+                    let data_type = global_context.database.create_variable(&contract_context.contract_identifier, &name, value_type)?;
                     global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type, &global_context.epoch_id)?;
 
                     contract_context.meta_data_var.insert(name, data_type);
                 },
                 DefineResult::Map(name, key_type, value_type) => {
                     runtime_cost(ClarityCostFunction::CreateMap, global_context,
-                                 u64::from(key_type.size()).cost_overflow_add(
-                                     u64::from(value_type.size()))?)?;
+                                 u64::from(key_type.size()?).cost_overflow_add(
+                                     u64::from(value_type.size()?))?)?;
                     contract_context.persisted_names.insert(name.clone());
 
                     global_context.add_memory(key_type.type_size()
-                        .expect("type size should be realizable") as u64)?;
+                        .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?;
                     global_context.add_memory(value_type.type_size()
-                        .expect("type size should be realizable") as u64)?;
+                        .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?;
 
-                    let data_type = global_context.database.create_map(&contract_context.contract_identifier, &name, key_type, value_type);
+                    let data_type = global_context.database.create_map(&contract_context.contract_identifier, &name, key_type, value_type)?;
 
                     contract_context.meta_data_map.insert(name, data_type);
                 },
@@ -434,20 +440,20 @@ pub fn eval_all(
                     contract_context.persisted_names.insert(name.clone());
 
                     global_context.add_memory(TypeSignature::UIntType.type_size()
-                        .expect("type size should be realizable") as u64)?;
+                        .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?;
 
-                    let data_type = global_context.database.create_fungible_token(&contract_context.contract_identifier, &name, &total_supply);
+                    let data_type = global_context.database.create_fungible_token(&contract_context.contract_identifier, &name, &total_supply)?;
 
                     contract_context.meta_ft.insert(name, data_type);
                 },
                 DefineResult::NonFungibleAsset(name, asset_type) => {
-                    runtime_cost(ClarityCostFunction::CreateNft, global_context, asset_type.size())?;
+                    runtime_cost(ClarityCostFunction::CreateNft, global_context, asset_type.size()?)?;
                     contract_context.persisted_names.insert(name.clone());
 
                     global_context.add_memory(asset_type.type_size()
-                        .expect("type size should be realizable") as u64)?;
+                        .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?;
 
-                    let data_type = global_context.database.create_non_fungible_token(&contract_context.contract_identifier, &name, &asset_type);
+                    let data_type = global_context.database.create_non_fungible_token(&contract_context.contract_identifier, &name, &asset_type)?;
 
                     contract_context.meta_nft.insert(name, data_type);
                 },
@@ -624,7 +630,7 @@ mod test {
             func_body,
             DefineType::Private,
             &"do_work".into(),
-            "",
+            &"",
        );
 
         let context = LocalContext::new();
diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs
index a7cc12fcef..490f812269 100644
--- a/clarity/src/vm/representations.rs
+++ b/clarity/src/vm/representations.rs
@@ -54,10 +54,16 @@ lazy_static! {
     pub static ref CLARITY_NAME_REGEX_STRING: String =
        "^[a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*$|^[-+=/*]$|^[<>]=?$".into();
     pub static ref CLARITY_NAME_REGEX: Regex =
-        Regex::new(CLARITY_NAME_REGEX_STRING.as_str()).unwrap();
+    {
+        #[allow(clippy::unwrap_used)]
+        Regex::new(CLARITY_NAME_REGEX_STRING.as_str()).unwrap()
+    };
     pub static ref CONTRACT_NAME_REGEX: Regex =
+    {
+        #[allow(clippy::unwrap_used)]
         Regex::new(format!("^{}$|^__transient$", CONTRACT_NAME_REGEX_STRING.as_str()).as_str())
-            .unwrap();
+            .unwrap()
+    };
 }
 
 guarded_string!(
@@ -119,8 +125,8 @@ impl StacksMessageCodec for ClarityName {
 impl StacksMessageCodec for ContractName {
     fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> {
-        if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH
-            || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH
+        if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH as usize
+            || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH as usize
         {
             return Err(codec_error::SerializeError(format!(
                 "Failed to serialize contract name: too short or too long: {}",
@@ -418,7 +424,7 @@ pub enum TraitDefinition {
     Imported(TraitIdentifier),
 }
 
-pub fn depth_traverse<F, T, E>(expr: &SymbolicExpression, mut visit: F) -> Result<T, E>
+pub fn depth_traverse<F, T, E>(expr: &SymbolicExpression, mut visit: F) -> Result<Option<T>, E>
 where
     F: FnMut(&SymbolicExpression) -> Result<T, E>,
 {
@@ -434,7 +440,7 @@ where
         }
     }
 
-    Ok(last.unwrap())
+    Ok(last)
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs
index 1368660275..d800b9b738 100644
--- a/clarity/src/vm/test_util/mod.rs
+++ b/clarity/src/vm/test_util/mod.rs
@@ -64,7 +64,7 @@ pub fn execute_on_network(s: &str, use_mainnet: bool) -> Value {
 
 pub fn symbols_from_values(vec: Vec<Value>) -> Vec<SymbolicExpression> {
     vec.into_iter()
-        .map(SymbolicExpression::atom_value)
+        .map(|value| SymbolicExpression::atom_value(value))
         .collect()
 }
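In `apply()` above, the memory accounting bracket (`get_memory_use` / `add_memory` / `drop_memory`) is now fallible end to end, and the error paths must still release what was reserved. A hedged sketch of that bracket as a standalone helper; `with_arg_memory` is hypothetical and not part of the crate, and the `Environment` lifetime parameters are elided:

```rust
// Sketch only, assuming the Environment API exercised in this diff.
fn with_arg_memory<T>(
    env: &mut Environment,
    arg: &Value,
    f: impl FnOnce(&mut Environment) -> Result<T>,
) -> Result<T> {
    let mem = arg.get_memory_use()?;          // now Result, was infallible
    env.add_memory(mem).map_err(Error::from)?; // cost error -> vm error
    let out = f(env);
    // drop_memory is now fallible too, so an accounting underflow is
    // reported as an error instead of panicking the node mid-block.
    env.drop_memory(mem)?;
    out
}
```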
diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs
index 4f30b9f777..98a0342e4f 100644
--- a/clarity/src/vm/tests/contracts.rs
+++ b/clarity/src/vm/tests/contracts.rs
@@ -197,7 +197,7 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment
         QualifiedContractIdentifier::local("contract-b").unwrap(),
     ));
     let mut env = owned_env.get_exec_environment(
-        Some(p1.clone().expect_principal()),
+        Some(p1.clone().expect_principal().unwrap()),
         None,
         &mut placeholder_context,
     );
@@ -307,7 +307,9 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener
         (define-read-only (as-contract-cc-get-sponsor)
             (as-contract (contract-call? .contract-a get-sponsor)))";
 
-    let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR").expect_principal();
+    let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR")
+        .expect_principal()
+        .unwrap();
     let p2 = execute("'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G");
     let mut placeholder_context = ContractContext::new(
         QualifiedContractIdentifier::transient(),
@@ -407,7 +409,7 @@ fn test_fully_qualified_contract_call(
         QualifiedContractIdentifier::local("contract-b").unwrap(),
     ));
     let mut env = owned_env.get_exec_environment(
-        Some(p1.clone().expect_principal()),
+        Some(p1.clone().expect_principal().unwrap()),
         None,
         &mut placeholder_context,
     );
@@ -539,7 +541,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro
 
     {
         let mut env = owned_env.get_exec_environment(
-            Some(p2.clone().expect_principal()),
+            Some(p2.clone().expect_principal().unwrap()),
             None,
             &mut placeholder_context,
         );
@@ -558,7 +560,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro
 
     {
         let mut env = owned_env.get_exec_environment(
-            Some(p1.clone().expect_principal()),
+            Some(p1.clone().expect_principal().unwrap()),
             None,
             &mut placeholder_context,
         );
@@ -586,7 +588,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro
     {
         // shouldn't be able to register a name you didn't preorder!
         let mut env = owned_env.get_exec_environment(
-            Some(p2.clone().expect_principal()),
+            Some(p2.clone().expect_principal().unwrap()),
             None,
             &mut placeholder_context,
         );
@@ -605,7 +607,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro
     {
         // should work!
         let mut env = owned_env.get_exec_environment(
-            Some(p1.clone().expect_principal()),
+            Some(p1.clone().expect_principal().unwrap()),
             None,
             &mut placeholder_context,
        );
@@ -623,7 +625,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro
     {
         // try to underpay!
         let mut env = owned_env.get_exec_environment(
-            Some(p2.clone().expect_principal()),
+            Some(p2.clone().expect_principal().unwrap()),
             None,
             &mut placeholder_context,
         );
@@ -696,7 +698,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro
     );
 
     let mut env = owned_env.get_exec_environment(
-        Some(get_principal().expect_principal()),
+        Some(get_principal().expect_principal().unwrap()),
         None,
         &mut placeholder_context,
     );
diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs
index 92de68254d..da1bb738ad 100644
--- a/clarity/src/vm/tests/principals.rs
+++ b/clarity/src/vm/tests/principals.rs
@@ -1004,7 +1004,7 @@ fn test_principal_construct_check_errors() {
     let input = r#"(principal-construct? 0x16 0x0102030405060708091011121314151617181920 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")"#;
     assert_eq!(
         Err(CheckErrors::TypeValueError(
-            TypeSignature::contract_name_string_ascii_type(),
+            TypeSignature::contract_name_string_ascii_type().unwrap(),
             Value::Sequence(SequenceData::String(CharType::ASCII(ASCIIData {
                 data: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
                     .as_bytes()
diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs
index 62cca99e67..bba6413dd9 100644
--- a/clarity/src/vm/tests/sequences.rs
+++ b/clarity/src/vm/tests/sequences.rs
@@ -107,15 +107,15 @@ fn test_index_of() {
     let bad_expected = [
         CheckErrors::ExpectedSequence(TypeSignature::IntType),
         CheckErrors::TypeValueError(
-            TypeSignature::min_buffer(),
+            TypeSignature::min_buffer().unwrap(),
             execute("\"a\"").unwrap().unwrap(),
         ),
         CheckErrors::TypeValueError(
-            TypeSignature::min_string_utf8(),
+            TypeSignature::min_string_utf8().unwrap(),
             execute("\"a\"").unwrap().unwrap(),
         ),
         CheckErrors::TypeValueError(
-            TypeSignature::min_string_ascii(),
+            TypeSignature::min_string_ascii().unwrap(),
             execute("u\"a\"").unwrap().unwrap(),
         ),
     ];
@@ -1094,10 +1094,10 @@ fn test_list_tuple_admission() {
            (tuple (value 0x3031))
            (tuple (value 0x3032)))";
 
-    let result_type = TypeSignature::type_of(&execute(test).unwrap().unwrap());
-    let expected_type = TypeSignature::type_of(&execute(expected_type).unwrap().unwrap());
+    let result_type = TypeSignature::type_of(&execute(test).unwrap().unwrap()).unwrap();
+    let expected_type = TypeSignature::type_of(&execute(expected_type).unwrap().unwrap()).unwrap();
     let testing_value = &execute(not_expected_type).unwrap().unwrap();
-    let not_expected_type = TypeSignature::type_of(testing_value);
+    let not_expected_type = TypeSignature::type_of(testing_value).unwrap();
 
     assert_eq!(expected_type, result_type);
     assert!(not_expected_type != result_type);
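The test-suite changes above are mechanical: helpers such as `expect_principal()` and `TypeSignature::min_buffer()` now return `Result`, so tests make the panic explicit at the call site instead of inside the library. For example, directly mirroring the `test_tx_sponsor` change:

```rust
// Inside a test, a type mismatch should still fail loudly, so the
// unwrap moves to the test itself:
let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR")
    .expect_principal()
    .unwrap();
```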
should return") .expect_optional() + .unwrap() .expect("to-consensus-buff? should return (some buff)"); let expected_buff = execute(buff_repr); assert_eq!(result_buffer, expected_buff); @@ -1003,9 +1006,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), - TypeSignature::max_buffer(), + TypeSignature::max_string_ascii().unwrap(), + TypeSignature::max_string_utf8().unwrap(), + TypeSignature::max_buffer().unwrap(), ], Value::Int(0), ) @@ -1014,9 +1017,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), - TypeSignature::max_buffer(), + TypeSignature::max_string_ascii().unwrap(), + TypeSignature::max_string_utf8().unwrap(), + TypeSignature::max_buffer().unwrap(), ], Value::Int(0), ) @@ -1037,9 +1040,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), - TypeSignature::max_buffer(), + TypeSignature::max_string_ascii().unwrap(), + TypeSignature::max_string_utf8().unwrap(), + TypeSignature::max_buffer().unwrap(), ], Value::Sequence(SequenceData::String(CharType::ASCII(ASCIIData { data: "baa".as_bytes().to_vec(), @@ -1050,9 +1053,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii(), - TypeSignature::max_string_utf8(), - TypeSignature::max_buffer(), + TypeSignature::max_string_ascii().unwrap(), + TypeSignature::max_string_utf8().unwrap(), + TypeSignature::max_buffer().unwrap(), ], Value::Sequence(SequenceData::String(CharType::ASCII(ASCIIData { data: "baa".as_bytes().to_vec(), @@ -1457,7 +1460,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer(), + TypeSignature::max_buffer().unwrap(), ], Value::Bool(true), ) @@ -1466,7 +1469,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer(), + TypeSignature::max_buffer().unwrap(), ], Value::Bool(true), ) @@ -1475,7 +1478,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer(), + TypeSignature::max_buffer().unwrap(), ], Value::Bool(true), ) @@ -1484,7 +1487,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer(), + TypeSignature::max_buffer().unwrap(), ], Value::Bool(true), ) @@ -1494,7 +1497,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer(), + TypeSignature::max_buffer().unwrap(), ], Value::Bool(true), ) diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 1905031789..f8e9548814 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -64,7 +64,7 @@ fn test_dynamic_dispatch_by_defining_trait( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -122,7 +122,7 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - 
Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -179,7 +179,7 @@ fn test_dynamic_dispatch_pass_trait( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -235,7 +235,7 @@ fn test_dynamic_dispatch_intra_contract_call( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -300,7 +300,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -365,7 +365,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -427,7 +427,7 @@ fn test_dynamic_dispatch_by_importing_trait( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -511,7 +511,7 @@ fn test_dynamic_dispatch_including_nested_trait( QualifiedContractIdentifier::local("target-nested-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -566,7 +566,7 @@ fn test_dynamic_dispatch_mismatched_args( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -623,7 +623,7 @@ fn test_dynamic_dispatch_mismatched_returned( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -683,7 +683,7 @@ fn test_reentrant_dynamic_dispatch( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -740,7 +740,7 @@ fn test_readwrite_dynamic_dispatch( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -797,7 +797,7 @@ fn test_readwrite_violation_dynamic_dispatch( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -870,7 +870,7 @@ fn test_bad_call_with_trait( { let mut env = owned_env.get_exec_environment( - 
Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -938,7 +938,7 @@ fn test_good_call_with_trait( { let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1010,7 +1010,7 @@ fn test_good_call_2_with_trait( QualifiedContractIdentifier::local("implem").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1075,7 +1075,7 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1139,7 +1139,7 @@ fn test_contract_of_value( )); let result_contract = target_contract.clone(); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1206,7 +1206,7 @@ fn test_contract_of_no_impl( )); let result_contract = target_contract.clone(); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1264,7 +1264,7 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1321,7 +1321,7 @@ fn test_return_trait_with_contract_of_wrapped_in_let( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1376,7 +1376,7 @@ fn test_return_trait_with_contract_of( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1441,7 +1441,7 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1504,7 +1504,7 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG )); let opt_target = Value::some(target_contract).unwrap(); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1576,7 +1576,7 @@ fn test_pass_embedded_trait_to_subtrait_optional( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1648,7 +1648,7 @@ fn test_pass_embedded_trait_to_subtrait_ok( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = 
owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1720,7 +1720,7 @@ fn test_pass_embedded_trait_to_subtrait_err( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1792,7 +1792,7 @@ fn test_pass_embedded_trait_to_subtrait_list( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1867,7 +1867,7 @@ fn test_pass_embedded_trait_to_subtrait_list_option( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -1942,7 +1942,7 @@ fn test_pass_embedded_trait_to_subtrait_option_list( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -2003,7 +2003,7 @@ fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -2068,7 +2068,7 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -2129,7 +2129,7 @@ fn test_pass_principal_literal_to_trait( QualifiedContractIdentifier::local("target-contract").unwrap(), )); let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 5aa298a139..cab909ecc5 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -98,6 +98,7 @@ impl QualifiedContractIdentifier { Ok(Self::new(StandardPrincipalData::transient(), name)) } + #[allow(clippy::unwrap_used)] pub fn transient() -> QualifiedContractIdentifier { let name = String::from("__transient").try_into().unwrap(); Self { @@ -244,7 +245,7 @@ pub enum SequenceData { } impl SequenceData { - pub fn atom_values(&mut self) -> Vec { + pub fn atom_values(&mut self) -> Result> { match self { SequenceData::Buffer(ref mut data) => data.atom_values(), SequenceData::List(ref mut data) => data.atom_values(), @@ -253,13 +254,14 @@ impl SequenceData { } } - pub fn element_size(&self) -> u32 { - match self { - SequenceData::Buffer(..) => TypeSignature::min_buffer().size(), + pub fn element_size(&self) -> Result { + let out = match self { + SequenceData::Buffer(..) 
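Note the companion pattern used in the library code that follows: where a failure is impossible by construction (a hard-coded literal, a constant regex), the `unwrap` is deliberately kept, but scoped under an explicit lint allowance rather than being converted to a `Result`. A hedged sketch of that idiom (`transient_name` is a hypothetical stand-in; the real sites are `QualifiedContractIdentifier::transient()` below and the lazy regexes above):

```rust
use clarity::vm::representations::ContractName;

#[allow(clippy::unwrap_used)]
fn transient_name() -> ContractName {
    // "__transient" always satisfies the contract-name grammar, so this
    // conversion cannot fail at runtime; the lint is silenced locally
    // instead of widening the return type.
    "__transient".to_string().try_into().unwrap()
}
```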
diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs
index 5aa298a139..cab909ecc5 100644
--- a/clarity/src/vm/types/mod.rs
+++ b/clarity/src/vm/types/mod.rs
@@ -98,6 +98,7 @@ impl QualifiedContractIdentifier {
         Ok(Self::new(StandardPrincipalData::transient(), name))
     }
 
+    #[allow(clippy::unwrap_used)]
     pub fn transient() -> QualifiedContractIdentifier {
         let name = String::from("__transient").try_into().unwrap();
         Self {
@@ -244,7 +245,7 @@ pub enum SequenceData {
 }
 
 impl SequenceData {
-    pub fn atom_values(&mut self) -> Vec<SymbolicExpression> {
+    pub fn atom_values(&mut self) -> Result<Vec<SymbolicExpression>> {
         match self {
             SequenceData::Buffer(ref mut data) => data.atom_values(),
             SequenceData::List(ref mut data) => data.atom_values(),
@@ -253,13 +254,14 @@ impl SequenceData {
         }
     }
 
-    pub fn element_size(&self) -> u32 {
-        match self {
-            SequenceData::Buffer(..) => TypeSignature::min_buffer().size(),
+    pub fn element_size(&self) -> Result<u32> {
+        let out = match self {
+            SequenceData::Buffer(..) => TypeSignature::min_buffer()?.size(),
             SequenceData::List(ref data) => data.type_signature.get_list_item_type().size(),
-            SequenceData::String(CharType::ASCII(..)) => TypeSignature::min_string_ascii().size(),
-            SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8().size(),
-        }
+            SequenceData::String(CharType::ASCII(..)) => TypeSignature::min_string_ascii()?.size(),
+            SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8()?.size(),
+        }?;
+        Ok(out)
     }
 
     pub fn len(&self) -> usize {
@@ -271,20 +273,19 @@ impl SequenceData {
         }
     }
 
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    pub fn element_at(self, index: usize) -> Option<Value> {
+    pub fn element_at(self, index: usize) -> Result<Option<Value>> {
         if self.len() <= index {
-            return None;
+            return Ok(None);
         }
         let result = match self {
             SequenceData::Buffer(data) => Value::buff_from_byte(data.data[index]),
             SequenceData::List(mut data) => data.data.remove(index),
             SequenceData::String(CharType::ASCII(data)) => {
-                Value::string_ascii_from_bytes(vec![data.data[index]])
-                    .expect("BUG: failed to initialize single-byte ASCII buffer")
+                Value::string_ascii_from_bytes(vec![data.data[index]]).map_err(|_| {
+                    InterpreterError::Expect(
+                        "BUG: failed to initialize single-byte ASCII buffer".into(),
+                    )
+                })?
             }
             SequenceData::String(CharType::UTF8(mut data)) => {
                 Value::Sequence(SequenceData::String(CharType::UTF8(UTF8Data {
@@ -293,7 +294,7 @@ impl SequenceData {
             }
         };
 
-        Some(result)
+        Ok(Some(result))
     }
 
     pub fn replace_at(self, epoch: &StacksEpochId, index: usize, element: Value) -> Result<Value> {
@@ -363,7 +364,7 @@ impl SequenceData {
                     Ok(None)
                 }
             } else {
-                Err(CheckErrors::TypeValueError(TypeSignature::min_buffer(), to_find).into())
+                Err(CheckErrors::TypeValueError(TypeSignature::min_buffer()?, to_find).into())
             }
         }
         SequenceData::List(ref data) => {
@@ -389,7 +390,7 @@ impl SequenceData {
                 }
             } else {
                 Err(
-                    CheckErrors::TypeValueError(TypeSignature::min_string_ascii(), to_find)
+                    CheckErrors::TypeValueError(TypeSignature::min_string_ascii()?, to_find)
                         .into(),
                 )
             }
@@ -409,7 +410,7 @@ impl SequenceData {
                 }
             } else {
                 Err(
-                    CheckErrors::TypeValueError(TypeSignature::min_string_utf8(), to_find)
+                    CheckErrors::TypeValueError(TypeSignature::min_string_utf8()?, to_find)
                         .into(),
                 )
             }
@@ -429,7 +430,7 @@ impl SequenceData {
             let mut i = 0;
             while i != $data.data.len() {
                 let atom_value =
-                    SymbolicExpression::atom_value($seq_type::to_value(&$data.data[i]));
+                    SymbolicExpression::atom_value($seq_type::to_value(&$data.data[i])?);
                 match filter(atom_value) {
                     Ok(res) if res == false => {
                         $data.data.remove(i);
@@ -595,18 +596,18 @@ impl fmt::Display for UTF8Data {
 }
 
 pub trait SequencedValue<T> {
-    fn type_signature(&self) -> TypeSignature;
+    fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors>;
 
     fn items(&self) -> &Vec<T>;
 
     fn drained_items(&mut self) -> Vec<T>;
 
-    fn to_value(v: &T) -> Value;
+    fn to_value(v: &T) -> Result<Value>;
 
-    fn atom_values(&mut self) -> Vec<SymbolicExpression> {
+    fn atom_values(&mut self) -> Result<Vec<SymbolicExpression>> {
         self.drained_items()
             .iter()
-            .map(|item| SymbolicExpression::atom_value(Self::to_value(item)))
+            .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(&item)?)))
             .collect()
     }
 }
@@ -620,12 +621,14 @@ impl SequencedValue<Value> for ListData {
         self.data.drain(..).collect()
     }
 
-    fn type_signature(&self) -> TypeSignature {
-        TypeSignature::SequenceType(SequenceSubtype::ListType(self.type_signature.clone()))
+    fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
+        Ok(TypeSignature::SequenceType(SequenceSubtype::ListType(
+            self.type_signature.clone(),
+        )))
     }
 
-    fn to_value(v: &Value) -> Value {
-        v.clone()
+    fn to_value(v: &Value) -> Result<Value> {
+        Ok(v.clone())
     }
 }
 
@@ -638,14 +641,17 @@ impl SequencedValue<u8> for BuffData {
         self.data.drain(..).collect()
     }
 
-    fn type_signature(&self) -> TypeSignature {
-        let buff_length = BufferLength::try_from(self.data.len())
-            .expect("ERROR: Too large of a buffer successfully constructed.");
-        TypeSignature::SequenceType(SequenceSubtype::BufferType(buff_length))
+    fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
+        let buff_length = BufferLength::try_from(self.data.len()).map_err(|_| {
+            CheckErrors::Expects("ERROR: Too large of a buffer successfully constructed.".into())
+        })?;
+        Ok(TypeSignature::SequenceType(SequenceSubtype::BufferType(
+            buff_length,
+        )))
     }
 
-    fn to_value(v: &u8) -> Value {
-        Value::buff_from_byte(*v)
+    fn to_value(v: &u8) -> Result<Value> {
+        Ok(Value::buff_from_byte(*v))
     }
 }
 
@@ -658,17 +664,20 @@ impl SequencedValue<u8> for ASCIIData {
         self.data.drain(..).collect()
     }
 
-    fn type_signature(&self) -> TypeSignature {
-        let buff_length = BufferLength::try_from(self.data.len())
-            .expect("ERROR: Too large of a buffer successfully constructed.");
-        TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII(
-            buff_length,
+    fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
+        let buff_length = BufferLength::try_from(self.data.len()).map_err(|_| {
+            CheckErrors::Expects("ERROR: Too large of a buffer successfully constructed.".into())
+        })?;
+        Ok(TypeSignature::SequenceType(SequenceSubtype::StringType(
+            StringSubtype::ASCII(buff_length),
         )))
     }
 
-    fn to_value(v: &u8) -> Value {
-        Value::string_ascii_from_bytes(vec![*v])
-            .expect("ERROR: Invalid ASCII string successfully constructed")
+    fn to_value(v: &u8) -> Result<Value> {
+        Value::string_ascii_from_bytes(vec![*v]).map_err(|_| {
+            InterpreterError::Expect("ERROR: Invalid ASCII string successfully constructed".into())
+                .into()
+        })
     }
 }
 
@@ -681,15 +690,20 @@ impl SequencedValue<Vec<u8>> for UTF8Data {
         self.data.drain(..).collect()
     }
 
-    fn type_signature(&self) -> TypeSignature {
-        let str_len = StringUTF8Length::try_from(self.data.len())
-            .expect("ERROR: Too large of a buffer successfully constructed.");
-        TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8(str_len)))
+    fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
+        let str_len = StringUTF8Length::try_from(self.data.len()).map_err(|_| {
+            CheckErrors::Expects("ERROR: Too large of a buffer successfully constructed.".into())
+        })?;
+        Ok(TypeSignature::SequenceType(SequenceSubtype::StringType(
+            StringSubtype::UTF8(str_len),
+        )))
     }
 
-    fn to_value(v: &Vec<u8>) -> Value {
-        Value::string_utf8_from_bytes(v.clone())
-            .expect("ERROR: Invalid UTF8 string successfully constructed")
+    fn to_value(v: &Vec<u8>) -> Result<Value> {
+        Value::string_utf8_from_bytes(v.clone()).map_err(|_| {
+            InterpreterError::Expect("ERROR: Invalid UTF8 string successfully constructed".into())
+                .into()
+        })
     }
 }
 
@@ -713,28 +727,32 @@ define_named_enum!(BurnBlockInfoProperty {
 });
 
 impl OptionalData {
-    pub fn type_signature(&self) -> TypeSignature {
+    pub fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
         let type_result = match self.data {
-            Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(v)),
+            Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(&v)?),
             None => TypeSignature::new_option(TypeSignature::NoType),
         };
-        type_result.expect("Should not have constructed too large of a type.")
+        type_result.map_err(|_| {
+            CheckErrors::Expects("Should not have constructed too large of a type.".into()).into()
+        })
     }
 }
 
 impl ResponseData {
-    pub fn type_signature(&self) -> TypeSignature {
+    pub fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
         let type_result = match self.committed {
             true => TypeSignature::new_response(
-                TypeSignature::type_of(&self.data),
+                TypeSignature::type_of(&self.data)?,
                 TypeSignature::NoType,
             ),
             false => TypeSignature::new_response(
                 TypeSignature::NoType,
-                TypeSignature::type_of(&self.data),
+                TypeSignature::type_of(&self.data)?,
             ),
         };
-        type_result.expect("Should not have constructed too large of a type.")
+        type_result.map_err(|_| {
+            CheckErrors::Expects("Should not have constructed too large of a type.".into()).into()
+        })
     }
 }
 
@@ -763,9 +781,9 @@ impl BlockInfoProperty {
 }
 
 impl BurnBlockInfoProperty {
-    pub fn type_result(&self) -> TypeSignature {
+    pub fn type_result(&self) -> std::result::Result<TypeSignature, CheckErrors> {
         use self::BurnBlockInfoProperty::*;
-        match self {
+        let result = match self {
             HeaderHash => BUFF_32.clone(),
             PoxAddrs => TupleTypeSignature::try_from(vec![
                 (
@@ -776,17 +794,22 @@ impl BurnBlockInfoProperty {
                             ("version".into(), BUFF_1.clone()),
                             ("hashbytes".into(), BUFF_32.clone()),
                         ])
-                        .expect("FATAL: bad type signature for pox addr"),
+                        .map_err(|_| {
+                            CheckErrors::Expects(
+                                "FATAL: bad type signature for pox addr".into(),
+                            )
+                        })?,
                     ),
                     2,
                 )
-                .expect("FATAL: bad list type signature"),
+                .map_err(|_| CheckErrors::Expects("FATAL: bad list type signature".into()))?,
             ),
                 ("payout".into(), TypeSignature::UIntType),
             ])
-            .expect("FATAL: bad type signature for pox addr")
+            .map_err(|_| CheckErrors::Expects("FATAL: bad type signature for pox addr".into()))?
             .into(),
-        }
+        };
+        Ok(result)
     }
 }
 
@@ -806,9 +829,9 @@ pub const NONE: Value = Value::Optional(OptionalData { data: None });
 
 impl Value {
     pub fn some(data: Value) -> Result<Value> {
-        if data.size() + WRAPPER_VALUE_SIZE > MAX_VALUE_SIZE {
+        if data.size()? + WRAPPER_VALUE_SIZE > MAX_VALUE_SIZE {
             Err(CheckErrors::ValueTooLarge.into())
-        } else if data.depth() + 1 > MAX_TYPE_DEPTH {
+        } else if data.depth()? + 1 > MAX_TYPE_DEPTH {
             Err(CheckErrors::TypeSignatureTooDeep.into())
         } else {
             Ok(Value::Optional(OptionalData {
@@ -843,9 +866,9 @@ impl Value {
     }
 
     pub fn okay(data: Value) -> Result<Value> {
-        if data.size() + WRAPPER_VALUE_SIZE > MAX_VALUE_SIZE {
+        if data.size()? + WRAPPER_VALUE_SIZE > MAX_VALUE_SIZE {
             Err(CheckErrors::ValueTooLarge.into())
-        } else if data.depth() + 1 > MAX_TYPE_DEPTH {
+        } else if data.depth()? + 1 > MAX_TYPE_DEPTH {
             Err(CheckErrors::TypeSignatureTooDeep.into())
         } else {
             Ok(Value::Response(ResponseData {
@@ -856,9 +879,9 @@ impl Value {
     }
 
     pub fn error(data: Value) -> Result<Value> {
-        if data.size() + WRAPPER_VALUE_SIZE > MAX_VALUE_SIZE {
+        if data.size()? + WRAPPER_VALUE_SIZE > MAX_VALUE_SIZE {
             Err(CheckErrors::ValueTooLarge.into())
-        } else if data.depth() + 1 > MAX_TYPE_DEPTH {
+        } else if data.depth()? + 1 > MAX_TYPE_DEPTH {
             Err(CheckErrors::TypeSignatureTooDeep.into())
         } else {
             Ok(Value::Response(ResponseData {
@@ -868,12 +891,12 @@ impl Value {
         }
     }
 
-    pub fn size(&self) -> u32 {
-        TypeSignature::type_of(self).size()
+    pub fn size(&self) -> Result<u32> {
+        Ok(TypeSignature::type_of(self)?.size()?)
     }
 
-    pub fn depth(&self) -> u8 {
-        TypeSignature::type_of(self).depth()
+    pub fn depth(&self) -> Result<u8> {
+        Ok(TypeSignature::type_of(self)?.depth())
     }
 
     /// Invariant: the supplied Values have already been "checked", i.e., it's a valid Value object
@@ -968,14 +991,16 @@ impl Value {
     }
 
     pub fn string_utf8_from_string_utf8_literal(tokenized_str: String) -> Result<Value> {
-        let wrapped_codepoints_matcher =
-            Regex::new("^\\\\u\\{(?P<value>[[:xdigit:]]+)\\}").unwrap();
+        let wrapped_codepoints_matcher = Regex::new("^\\\\u\\{(?P<value>[[:xdigit:]]+)\\}")
+            .map_err(|_| InterpreterError::Expect("Bad regex".into()))?;
         let mut window = tokenized_str.as_str();
         let mut cursor = 0;
         let mut data: Vec<Vec<u8>> = vec![];
         while !window.is_empty() {
             if let Some(captures) = wrapped_codepoints_matcher.captures(window) {
-                let matched = captures.name("value").unwrap();
+                let matched = captures
+                    .name("value")
+                    .ok_or_else(|| InterpreterError::Expect("Expected capture".into()))?;
                 let scalar_value = window[matched.start()..matched.end()].to_string();
                 let unicode_char = {
                     let u = u32::from_str_radix(&scalar_value, 16)
@@ -1023,160 +1048,167 @@ impl Value {
         ))))
     }
 
-    pub fn expect_ascii(self) -> String {
+    pub fn expect_ascii(self) -> Result<String> {
         if let Value::Sequence(SequenceData::String(CharType::ASCII(ASCIIData { data }))) = self {
-            String::from_utf8(data).unwrap()
+            Ok(String::from_utf8(data)
+                .map_err(|_| InterpreterError::Expect("Non UTF-8 data in string".into()))?)
         } else {
             error!("Value '{:?}' is not an ASCII string", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected ASCII string".into()).into())
         }
     }
 
-    pub fn expect_u128(self) -> u128 {
+    pub fn expect_u128(self) -> Result<u128> {
         if let Value::UInt(inner) = self {
-            inner
+            Ok(inner)
         } else {
             error!("Value '{:?}' is not a u128", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected u128".into()).into())
         }
     }
 
-    pub fn expect_i128(self) -> i128 {
+    pub fn expect_i128(self) -> Result<i128> {
         if let Value::Int(inner) = self {
-            inner
+            Ok(inner)
         } else {
             error!("Value '{:?}' is not an i128", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected i128".into()).into())
        }
     }
 
-    pub fn expect_buff(self, sz: usize) -> Vec<u8> {
+    pub fn expect_buff(self, sz: usize) -> Result<Vec<u8>> {
         if let Value::Sequence(SequenceData::Buffer(buffdata)) = self {
             if buffdata.data.len() <= sz {
-                buffdata.data
+                Ok(buffdata.data)
             } else {
                 error!(
                     "Value buffer has len {}, expected {}",
                     buffdata.data.len(),
                     sz
                 );
-                panic!();
+                Err(InterpreterError::Expect("Unexpected buff length".into()).into())
             }
         } else {
             error!("Value '{:?}' is not a buff", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected buff".into()).into())
         }
     }
 
-    pub fn expect_list(self) -> Vec<Value> {
        if let Value::Sequence(SequenceData::List(listdata)) = self {
-            listdata.data
+            Ok(listdata.data)
         } else {
             error!("Value '{:?}' is not a list", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected list".into()).into())
         }
     }
 
-    pub fn expect_buff_padded(self, sz: usize, pad: u8) -> Vec<u8> {
-        let mut data = self.expect_buff(sz);
+    pub fn expect_buff_padded(self, sz: usize, pad: u8) -> Result<Vec<u8>> {
+        let mut data = self.expect_buff(sz)?;
         if sz > data.len() {
             for _ in data.len()..sz {
                 data.push(pad)
             }
         }
-        data
+        Ok(data)
     }
 
-    pub fn expect_bool(self) -> bool {
+    pub fn expect_bool(self) -> Result<bool> {
         if let Value::Bool(b) = self {
-            b
+            Ok(b)
         } else {
             error!("Value '{:?}' is not a bool", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected bool".into()).into())
         }
     }
 
-    pub fn expect_tuple(self) -> TupleData {
+    pub fn expect_tuple(self) -> Result<TupleData> {
         if let Value::Tuple(data) = self {
-            data
+            Ok(data)
         } else {
             error!("Value '{:?}' is not a tuple", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected tuple".into()).into())
         }
     }
 
-    pub fn expect_optional(self) -> Option<Value> {
+    pub fn expect_optional(self) -> Result<Option<Value>> {
         if let Value::Optional(opt) = self {
-            opt.data.map(|boxed_value| *boxed_value)
+            match opt.data {
+                Some(boxed_value) => Ok(Some(*boxed_value)),
+                None => Ok(None),
+            }
         } else {
             error!("Value '{:?}' is not an optional", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected optional".into()).into())
         }
     }
 
-    pub fn expect_principal(self) -> PrincipalData {
+    pub fn expect_principal(self) -> Result<PrincipalData> {
         if let Value::Principal(p) = self {
-            p
+            Ok(p)
         } else {
             error!("Value '{:?}' is not a principal", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected principal".into()).into())
        }
     }
 
-    pub fn expect_callable(self) -> CallableData {
+    pub fn expect_callable(self) -> Result<CallableData> {
         if let Value::CallableContract(t) = self {
-            t
+            Ok(t)
         } else {
             error!("Value '{:?}' is not a callable contract", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected callable".into()).into())
         }
     }
 
-    pub fn expect_result(self) -> std::result::Result<Value, Value> {
+    pub fn expect_result(self) -> Result<std::result::Result<Value, Value>> {
         if let Value::Response(res_data) = self {
             if res_data.committed {
-                Ok(*res_data.data)
+                Ok(Ok(*res_data.data))
             } else {
-                Err(*res_data.data)
+                Ok(Err(*res_data.data))
             }
         } else {
             error!("Value '{:?}' is not a response", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected response".into()).into())
         }
     }
 
-    pub fn expect_result_ok(self) -> Value {
+    pub fn expect_result_ok(self) -> Result<Value> {
         if let Value::Response(res_data) = self {
             if res_data.committed {
-                *res_data.data
+                Ok(*res_data.data)
             } else {
                 error!("Value is not a (ok ..)");
-                panic!();
+                Err(InterpreterError::Expect("Expected ok response".into()).into())
             }
         } else {
             error!("Value '{:?}' is not a response", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected response".into()).into())
         }
     }
 
-    pub fn expect_result_err(self) -> Value {
+    pub fn expect_result_err(self) -> Result<Value> {
         if let Value::Response(res_data) = self {
             if !res_data.committed {
-                *res_data.data
+                Ok(*res_data.data)
             } else {
                 error!("Value is not a (err ..)");
-                panic!();
+                Err(InterpreterError::Expect("Expected err response".into()).into())
             }
         } else {
             error!("Value '{:?}' is not a response", &self);
-            panic!();
+            Err(InterpreterError::Expect("Expected response".into()).into())
        }
     }
 }
 
 impl BuffData {
-    pub fn len(&self) -> BufferLength {
-        self.data.len().try_into().unwrap()
+    pub fn len(&self) -> Result<BufferLength> {
+        self.data
+            .len()
+            .try_into()
+            .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into())
     }
 
     pub fn as_slice(&self) -> &[u8] {
@@ -1194,12 +1226,11 @@ impl BuffData {
 }
 
 impl ListData {
-    pub fn len(&self) -> u32 {
-        self.data.len().try_into().unwrap()
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
+    pub fn len(&self) -> Result<u32> {
+        self.data
+            .len()
+            .try_into()
+            .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into())
     }
 
     fn append(&mut self, epoch: &StacksEpochId, other_seq: ListData) -> Result<()> {
@@ -1224,8 +1255,11 @@ impl ASCIIData {
         Ok(())
     }
 
-    pub fn len(&self) -> BufferLength {
-        self.data.len().try_into().unwrap()
+    pub fn len(&self) -> Result<BufferLength> {
+        self.data
+            .len()
+            .try_into()
+            .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into())
     }
 }
 
@@ -1235,8 +1269,11 @@ impl UTF8Data {
         Ok(())
     }
 
-    pub fn len(&self) -> BufferLength {
-        self.data.len().try_into().unwrap()
+    pub fn len(&self) -> Result<BufferLength> {
+        self.data
+            .len()
+            .try_into()
+            .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into())
     }
 }
 
@@ -1488,7 +1525,7 @@ impl TupleData {
         let mut type_map = BTreeMap::new();
         let mut data_map = BTreeMap::new();
         for (name, value) in data.drain(..) {
-            let type_info = TypeSignature::type_of(&value);
+            let type_info = TypeSignature::type_of(&value)?;
             if type_map.contains_key(&name) {
                 return Err(CheckErrors::NameAlreadyUsed(name.into()).into());
             } else {
@@ -1659,7 +1696,7 @@ mod test {
 
     #[test]
     fn simple_size_test() {
-        assert_eq!(Value::Int(10).size(), 16);
+        assert_eq!(Value::Int(10).size().unwrap(), 16);
     }
 
     #[test]
@@ -1719,15 +1756,15 @@ mod test {
         let buff = Value::Sequence(SequenceData::Buffer(BuffData {
             data: vec![1, 2, 3, 4, 5],
         }));
-        assert_eq!(buff.clone().expect_buff(5), vec![1, 2, 3, 4, 5]);
-        assert_eq!(buff.clone().expect_buff(6), vec![1, 2, 3, 4, 5]);
+        assert_eq!(buff.clone().expect_buff(5).unwrap(), vec![1, 2, 3, 4, 5]);
+        assert_eq!(buff.clone().expect_buff(6).unwrap(), vec![1, 2, 3, 4, 5]);
         assert_eq!(
-            buff.clone().expect_buff_padded(6, 0),
+            buff.clone().expect_buff_padded(6, 0).unwrap(),
             vec![1, 2, 3, 4, 5, 0]
         );
-        assert_eq!(buff.clone().expect_buff(10), vec![1, 2, 3, 4, 5]);
+        assert_eq!(buff.clone().expect_buff(10).unwrap(), vec![1, 2, 3, 4, 5]);
         assert_eq!(
-            buff.clone().expect_buff_padded(10, 1),
+            buff.clone().expect_buff_padded(10, 1).unwrap(),
             vec![1, 2, 3, 4, 5, 1, 1, 1, 1, 1]
         );
     }
@@ -1738,6 +1775,6 @@ mod test {
         let buff = Value::Sequence(SequenceData::Buffer(BuffData {
             data: vec![1, 2, 3, 4, 5],
         }));
-        let _ = buff.expect_buff(4);
+        let _ = buff.expect_buff(4).unwrap();
     }
 }
{ - pub static ref NONE_SERIALIZATION_LEN: u64 = Value::none().serialize_to_vec().len() as u64; + pub static ref NONE_SERIALIZATION_LEN: u64 = { + #[allow(clippy::unwrap_used)] + u64::try_from(Value::none().serialize_to_vec().unwrap().len()).unwrap() + }; } /// Deserialization uses a specific epoch for passing to the type signature checks @@ -86,6 +90,9 @@ impl std::fmt::Display for SerializationError { SerializationError::DeserializationError(e) => { write!(f, "Deserialization error: {}", e) } + SerializationError::SerializationError(e) => { + write!(f, "Serialization error: {}", e) + } SerializationError::DeserializeExpected(e) => write!( f, "Deserialization expected the type of the input to be: {}", @@ -589,6 +596,7 @@ impl Value { return Err(CheckErrors::TypeSignatureTooDeep.into()); } + #[allow(clippy::expect_used)] let expected_type = stack .last() .expect("FATAL: stack.last() should always be some() because of loop condition") @@ -716,6 +724,8 @@ impl Value { None => (None, None), Some(TypeSignature::SequenceType(SequenceSubtype::ListType(list_type))) => { if len > list_type.get_max_len() { + // unwrap is safe because of the match condition + #[allow(clippy::unwrap_used)] return Err(SerializationError::DeserializeExpected( expected_type.unwrap().clone(), )); @@ -767,14 +777,20 @@ impl Value { Some(TypeSignature::TupleType(tuple_type)) => { if sanitize { if u64::from(len) < tuple_type.len() { + // unwrap is safe because of the match condition + #[allow(clippy::unwrap_used)] + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + } else { + if len as u64 != tuple_type.len() { + // unwrap is safe because of the match condition + #[allow(clippy::unwrap_used)] return Err(SerializationError::DeserializeExpected( expected_type.unwrap().clone(), )); } - } else if len as u64 != tuple_type.len() { - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap().clone(), - )); } Some(tuple_type) } @@ -1014,7 +1030,7 @@ impl Value { )) } - pub fn serialize_write(&self, w: &mut W) -> std::io::Result<()> { + pub fn serialize_write(&self, w: &mut W) -> Result<(), SerializationError> { use super::CharType::*; use super::PrincipalData::*; use super::SequenceData::{self, *}; @@ -1042,13 +1058,23 @@ impl Value { value.serialize_write(w)?; } Sequence(List(data)) => { - w.write_all(&data.len().to_be_bytes())?; + let len_bytes = data + .len() + .map_err(|e| SerializationError::SerializationError(e.to_string()))? + .to_be_bytes(); + w.write_all(&len_bytes)?; for item in data.data.iter() { item.serialize_write(w)?; } } Sequence(Buffer(value)) => { - w.write_all(&(u32::from(value.len()).to_be_bytes()))?; + let len_bytes = u32::from( + value + .len() + .map_err(|e| SerializationError::SerializationError(e.to_string()))?, + ) + .to_be_bytes(); + w.write_all(&len_bytes)?; w.write_all(&value.data)? } Sequence(SequenceData::String(UTF8(value))) => { @@ -1059,11 +1085,20 @@ impl Value { } } Sequence(SequenceData::String(ASCII(value))) => { - w.write_all(&(u32::from(value.len()).to_be_bytes()))?; + let len_bytes = u32::from( + value + .len() + .map_err(|e| SerializationError::SerializationError(e.to_string()))?, + ) + .to_be_bytes(); + w.write_all(&len_bytes)?; w.write_all(&value.data)? } Tuple(data) => { - w.write_all(&u32::try_from(data.data_map.len()).unwrap().to_be_bytes())?; + let len_bytes = u32::try_from(data.data_map.len()) + .map_err(|e| SerializationError::SerializationError(e.to_string()))? 
+ .to_be_bytes(); + w.write_all(&len_bytes)?; for (key, value) in data.data_map.iter() { key.serialize_write(w)?; value.serialize_write(w)?; @@ -1136,11 +1171,14 @@ impl Value { Value::try_deserialize_bytes_untyped(&data) } - pub fn serialized_size(&self) -> u32 { + pub fn serialized_size(&self) -> Result { let mut counter = WriteCounter { count: 0 }; - self.serialize_write(&mut counter) - .expect("Error: Failed to count serialization length of Clarity value"); - counter.count + self.serialize_write(&mut counter).map_err(|_| { + SerializationError::DeserializationError( + "Error: Failed to count serialization length of Clarity value".into(), + ) + })?; + Ok(counter.count) } } @@ -1172,17 +1210,17 @@ impl Write for WriteCounter { } impl Value { - pub fn serialize_to_vec(&self) -> Vec { + pub fn serialize_to_vec(&self) -> Result, InterpreterError> { let mut byte_serialization = Vec::new(); self.serialize_write(&mut byte_serialization) - .expect("IOError filling byte buffer."); - byte_serialization + .map_err(|_| InterpreterError::Expect("IOError filling byte buffer.".into()))?; + Ok(byte_serialization) } /// This does *not* perform any data sanitization - pub fn serialize_to_hex(&self) -> String { - let byte_serialization = self.serialize_to_vec(); - to_hex(byte_serialization.as_slice()) + pub fn serialize_to_hex(&self) -> Result { + let byte_serialization = self.serialize_to_vec()?; + Ok(to_hex(byte_serialization.as_slice())) } /// Sanitize `value` against pre-2.4 serialization @@ -1206,7 +1244,8 @@ impl Value { TypeSignature::SequenceType(SequenceSubtype::ListType(lt)) => lt, _ => return None, }; - if l.len() > lt.get_max_len() { + // if cannot compute l.len(), sanitization fails, so use ? operator can short return + if l.len().ok()? > lt.get_max_len() { return None; } let mut sanitized_items = vec![]; @@ -1299,23 +1338,19 @@ impl Value { impl ClaritySerializable for u32 { fn serialize(&self) -> String { - let mut buffer = Vec::new(); - buffer - .write_all(&self.to_be_bytes()) - .expect("u32 serialization: failed writing."); - to_hex(buffer.as_slice()) + to_hex(&self.to_be_bytes()) } } impl ClarityDeserializable for u32 { - fn deserialize(input: &str) -> Self { - let bytes = hex_bytes(input).expect("u32 deserialization: failed decoding bytes."); + fn deserialize(input: &str) -> Result { + let bytes = hex_bytes(&input).map_err(|_| { + InterpreterError::Expect("u32 deserialization: failed decoding bytes.".into()) + })?; assert_eq!(bytes.len(), 4); - u32::from_be_bytes( - bytes[0..4] - .try_into() - .expect("u32 deserialization: failed reading."), - ) + Ok(u32::from_be_bytes(bytes[0..4].try_into().map_err( + |_| InterpreterError::Expect("u32 deserialization: failed reading.".into()), + )?)) } } @@ -1323,7 +1358,10 @@ impl ClarityDeserializable for u32 { /// sanitize its serialization or deserialization. 
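The `serialized_size()` rewrite above keeps the crate's streaming approach: the value is serialized into a counting `Write` sink, so the byte length is computed without materializing the serialized bytes, and any failure is reported as an error rather than a panic. A self-contained sketch of that technique follows; `CountingWriter` is an illustrative stand-in, not the crate's own `WriteCounter`.

```
use std::io::{self, Write};

/// Discards bytes but remembers how many were written.
struct CountingWriter {
    count: u32,
}

impl Write for CountingWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Surface overflow as an I/O error instead of panicking.
        self.count = u32::try_from(buf.len())
            .ok()
            .and_then(|n| self.count.checked_add(n))
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "size overflow"))?;
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let mut counter = CountingWriter { count: 0 };
    // Anything that serializes into `impl Write` can be measured this way.
    counter.write_all(&5u32.to_be_bytes())?;
    counter.write_all(b"hello")?;
    assert_eq!(counter.count, 9);
    Ok(())
}
```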
impl StacksMessageCodec for Value { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - self.serialize_write(fd).map_err(codec_error::WriteError) + self.serialize_write(fd).map_err(|e| match e { + SerializationError::IOError(io_e) => codec_error::WriteError(io_e.err), + other => codec_error::SerializeError(other.to_string()), + }) } fn consensus_deserialize(fd: &mut R) -> Result { @@ -1334,15 +1372,6 @@ impl StacksMessageCodec for Value { } } -impl std::hash::Hash for Value { - fn hash(&self, state: &mut H) { - let mut s = vec![]; - self.serialize_write(&mut s) - .expect("FATAL: failed to serialize to vec"); - s.hash(state); - } -} - #[cfg(test)] pub mod tests { use std::io::Write; @@ -1366,30 +1395,37 @@ pub mod tests { fn test_deser_ser(v: Value) { assert_eq!( &v, - &Value::try_deserialize_hex(&v.serialize_to_hex(), &TypeSignature::type_of(&v), false) - .unwrap() + &Value::try_deserialize_hex( + &v.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&v).unwrap(), + false + ) + .unwrap() ); assert_eq!( &v, - &Value::try_deserialize_hex_untyped(&v.serialize_to_hex()).unwrap() + &Value::try_deserialize_hex_untyped(&v.serialize_to_hex().unwrap()).unwrap() ); // test the serialized_size implementation assert_eq!( - v.serialized_size(), - v.serialize_to_hex().len() as u32 / 2, + v.serialized_size().unwrap(), + v.serialize_to_hex().unwrap().len() as u32 / 2, "serialized_size() should return the byte length of the serialization (half the length of the hex encoding)", ); } fn test_deser_u32_helper(num: u32) { - assert_eq!(num, u32::deserialize(&num.serialize())); + assert_eq!(num, u32::deserialize(&num.serialize()).unwrap()); } fn test_bad_expectation(v: Value, e: TypeSignature) { - assert!(matches!( - Value::try_deserialize_hex(&v.serialize_to_hex(), &e, false).unwrap_err(), - SerializationError::DeserializeExpected(_) - )) + assert!( + match Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + } + ) } #[test] @@ -1415,19 +1451,22 @@ pub mod tests { // Should be legal! 
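The `expect_*` conversions earlier in this file all follow one mechanical recipe: keep the shape check and the `error!` log site, but return an `Err` instead of calling `panic!()`, so the caller decides whether a mismatch is fatal. (The `Hash` impl is removed outright, presumably because `Hash` must be infallible while `serialize_write` can now fail.) A toy version of the accessor pattern, using stand-in types rather than the real `Value`:

```
// Hypothetical stand-ins; the real accessors return the crate's `Result`
// built on `InterpreterError::Expect`.
#[derive(Debug)]
enum Value {
    Int(i128),
    UInt(u128),
}

#[derive(Debug)]
struct ExpectError(String);

impl Value {
    // Same shape check as before, but the mismatch is now recoverable.
    fn expect_u128(self) -> Result<u128, ExpectError> {
        if let Value::UInt(inner) = self {
            Ok(inner)
        } else {
            Err(ExpectError(format!("Value '{:?}' is not a u128", &self)))
        }
    }
}

fn main() -> Result<(), ExpectError> {
    assert_eq!(Value::UInt(42).expect_u128()?, 42);
    assert!(Value::Int(-1).expect_u128().is_err());
    Ok(())
}
```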
Value::try_deserialize_hex( - &Value::list_from(vec![]).unwrap().serialize_to_hex(), + &Value::list_from(vec![]) + .unwrap() + .serialize_to_hex() + .unwrap(), &TypeSignature::from_string("(list 2 (list 3 int))", version, epoch), false, ) .unwrap(); Value::try_deserialize_hex( - &list_list_int.serialize_to_hex(), + &list_list_int.serialize_to_hex().unwrap(), &TypeSignature::from_string("(list 2 (list 3 int))", version, epoch), false, ) .unwrap(); Value::try_deserialize_hex( - &list_list_int.serialize_to_hex(), + &list_list_int.serialize_to_hex().unwrap(), &TypeSignature::from_string("(list 1 (list 4 int))", version, epoch), false, ) @@ -1651,51 +1690,54 @@ pub mod tests { // t_0 and t_1 are actually the same assert_eq!( Value::try_deserialize_hex( - &t_1.serialize_to_hex(), - &TypeSignature::type_of(&t_0), + &t_1.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_0).unwrap(), false ) .unwrap(), Value::try_deserialize_hex( - &t_0.serialize_to_hex(), - &TypeSignature::type_of(&t_0), + &t_0.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_0).unwrap(), false ) .unwrap() ); // field number not equal to expectations - assert!(matches!( - Value::try_deserialize_hex( - &t_3.serialize_to_hex(), - &TypeSignature::type_of(&t_1), - false - ) - .unwrap_err(), - SerializationError::DeserializeExpected(_) - )); + assert!(match Value::try_deserialize_hex( + &t_3.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); // field type mismatch - assert!(matches!( - Value::try_deserialize_hex( - &t_2.serialize_to_hex(), - &TypeSignature::type_of(&t_1), - false - ) - .unwrap_err(), - SerializationError::DeserializeExpected(_) - )); + assert!(match Value::try_deserialize_hex( + &t_2.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); // field not-present in expected - assert!(matches!( - Value::try_deserialize_hex( - &t_1.serialize_to_hex(), - &TypeSignature::type_of(&t_4), - false - ) - .unwrap_err(), - SerializationError::DeserializeExpected(_) - )); + assert!(match Value::try_deserialize_hex( + &t_1.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_4).unwrap(), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); } #[apply(test_clarity_versions)] @@ -2029,7 +2071,7 @@ pub mod tests { "Testing {}. 
Expected sanitization = {}", input_val, expected_out ); - let serialized = input_val.serialize_to_hex(); + let serialized = input_val.serialize_to_hex().unwrap(); let result = RollbackWrapper::deserialize_value(&serialized, good_type, &epoch).map(|x| x.value); @@ -2111,7 +2153,7 @@ pub mod tests { for (test, expected) in tests.iter() { if let Ok(x) = expected { - assert_eq!(test, &x.serialize_to_hex()); + assert_eq!(test, &x.serialize_to_hex().unwrap()); } assert_eq!(expected, &Value::try_deserialize_hex_untyped(test)); assert_eq!( @@ -2124,7 +2166,7 @@ pub mod tests { for (test, expected) in tests.iter() { if let Ok(value) = expected { assert_eq!( - value.serialized_size(), + value.serialized_size().unwrap(), test.len() as u32 / 2, "serialized_size() should return the byte length of the serialization (half the length of the hex encoding)", ); diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 86d0c83496..517cb9ec25 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -46,6 +46,7 @@ pub struct AssetIdentifier { } impl AssetIdentifier { + #[allow(clippy::unwrap_used)] pub fn STX() -> AssetIdentifier { AssetIdentifier { contract_identifier: QualifiedContractIdentifier::new( @@ -56,6 +57,7 @@ impl AssetIdentifier { } } + #[allow(clippy::unwrap_used)] pub fn STX_burned() -> AssetIdentifier { AssetIdentifier { contract_identifier: QualifiedContractIdentifier::new( @@ -125,9 +127,9 @@ pub enum SequenceSubtype { } impl SequenceSubtype { - pub fn unit_type(&self) -> TypeSignature { + pub fn unit_type(&self) -> Result { match &self { - SequenceSubtype::ListType(ref list_data) => list_data.clone().destruct().0, + SequenceSubtype::ListType(ref list_data) => Ok(list_data.clone().destruct().0), SequenceSubtype::BufferType(_) => TypeSignature::min_buffer(), SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { TypeSignature::min_string_ascii() @@ -159,30 +161,54 @@ use self::TypeSignature::{ }; lazy_static! 
{ - pub static ref BUFF_64: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(64u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_65: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(65u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_32: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(32u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_33: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(33u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_20: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(20u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_21: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(21u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_1: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(1u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); - pub static ref BUFF_16: TypeSignature = SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(16u32).expect("BUG: Legal Clarity buffer length marked invalid") - )); + pub static ref BUFF_64: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(64u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_65: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(65u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_32: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(32u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_33: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(33u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_20: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(20u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_21: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(21u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_1: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(1u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; + pub static ref BUFF_16: TypeSignature = { + #[allow(clippy::expect_used)] + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(16u32).expect("BUG: Legal Clarity buffer length marked invalid"), + )) + }; } pub const ASCII_40: TypeSignature = SequenceType(SequenceSubtype::StringType( @@ -452,7 +478,7 @@ impl ListTypeData { max_len, }; let would_be_size = list_data - .inner_size() + .inner_size()? 
.ok_or_else(|| CheckErrors::ValueTooLarge)?; if would_be_size > MAX_VALUE_SIZE { Err(CheckErrors::ValueTooLarge) @@ -484,7 +510,7 @@ impl ListTypeData { impl TypeSignature { pub fn new_option(inner_type: TypeSignature) -> Result { - let new_size = WRAPPER_VALUE_SIZE + inner_type.size(); + let new_size = WRAPPER_VALUE_SIZE + inner_type.size()?; let new_depth = 1 + inner_type.depth(); if new_size > MAX_VALUE_SIZE { Err(CheckErrors::ValueTooLarge) @@ -496,7 +522,7 @@ impl TypeSignature { } pub fn new_response(ok_type: TypeSignature, err_type: TypeSignature) -> Result { - let new_size = WRAPPER_VALUE_SIZE + cmp::max(ok_type.size(), err_type.size()); + let new_size = WRAPPER_VALUE_SIZE + cmp::max(ok_type.size()?, err_type.size()?); let new_depth = 1 + cmp::max(ok_type.depth(), err_type.depth()); if new_size > MAX_VALUE_SIZE { @@ -517,7 +543,7 @@ impl TypeSignature { } pub fn admits(&self, epoch: &StacksEpochId, x: &Value) -> Result { - let x_type = TypeSignature::type_of(x); + let x_type = TypeSignature::type_of(x)?; self.admits_type(epoch, &x_type) } @@ -528,7 +554,9 @@ impl TypeSignature { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => self.admits_type_v2_1(other), - StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), + StacksEpochId::Epoch10 => { + return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) + } } } @@ -615,8 +643,16 @@ impl TypeSignature { } } NoType => Err(CheckErrors::CouldNotDetermineType), - CallableType(_) => unreachable!("CallableType should not be used in epoch v2.0"), - ListUnionType(_) => unreachable!("ListUnionType should not be used in epoch v2.0"), + CallableType(_) => { + return Err(CheckErrors::Expects( + "CallableType should not be used in epoch v2.0".into(), + )) + } + ListUnionType(_) => { + return Err(CheckErrors::Expects( + "ListUnionType should not be used in epoch v2.0".into(), + )) + } _ => Ok(other == self), } } @@ -833,7 +869,7 @@ impl TryFrom> for TupleTypeSignature { } let result = TupleTypeSignature { type_map }; let would_be_size = result - .inner_size() + .inner_size()? 
.ok_or_else(|| CheckErrors::ValueTooLarge)?; if would_be_size > MAX_VALUE_SIZE { Err(CheckErrors::ValueTooLarge) @@ -955,59 +991,81 @@ impl FunctionArg { } impl TypeSignature { - pub fn empty_buffer() -> TypeSignature { - SequenceType(SequenceSubtype::BufferType(0_u32.try_into().unwrap())) + pub fn empty_buffer() -> Result { + Ok(SequenceType(SequenceSubtype::BufferType( + 0_u32.try_into().map_err(|_| { + CheckErrors::Expects("FAIL: Empty clarity value size is not realizable".into()) + })?, + ))) } - pub fn min_buffer() -> TypeSignature { - SequenceType(SequenceSubtype::BufferType(1_u32.try_into().unwrap())) + pub fn min_buffer() -> Result { + Ok(SequenceType(SequenceSubtype::BufferType( + 1_u32.try_into().map_err(|_| { + CheckErrors::Expects("FAIL: Min clarity value size is not realizable".into()) + })?, + ))) } - pub fn min_string_ascii() -> TypeSignature { - SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( - 1_u32.try_into().unwrap(), + pub fn min_string_ascii() -> Result { + Ok(SequenceType(SequenceSubtype::StringType( + StringSubtype::ASCII(1_u32.try_into().map_err(|_| { + CheckErrors::Expects("FAIL: Min clarity value size is not realizable".into()) + })?), ))) } - pub fn min_string_utf8() -> TypeSignature { - SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( - 1_u32.try_into().unwrap(), + pub fn min_string_utf8() -> Result { + Ok(SequenceType(SequenceSubtype::StringType( + StringSubtype::UTF8(1_u32.try_into().map_err(|_| { + CheckErrors::Expects("FAIL: Min clarity value size is not realizable".into()) + })?), ))) } - pub fn max_string_ascii() -> TypeSignature { - SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( - BufferLength::try_from(MAX_VALUE_SIZE) - .expect("FAIL: Max Clarity Value Size is no longer realizable in ASCII Type"), + pub fn max_string_ascii() -> Result { + Ok(SequenceType(SequenceSubtype::StringType( + StringSubtype::ASCII(BufferLength::try_from(MAX_VALUE_SIZE).map_err(|_| { + CheckErrors::Expects( + "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".into(), + ) + })?), ))) } - pub fn max_string_utf8() -> TypeSignature { - SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( - StringUTF8Length::try_from(MAX_VALUE_SIZE / 4) - .expect("FAIL: Max Clarity Value Size is no longer realizable in UTF8 Type"), + pub fn max_string_utf8() -> Result { + Ok(SequenceType(SequenceSubtype::StringType( + StringSubtype::UTF8(StringUTF8Length::try_from(MAX_VALUE_SIZE / 4).map_err(|_| { + CheckErrors::Expects( + "FAIL: Max Clarity Value Size is no longer realizable in UTF8 Type".into(), + ) + })?), ))) } - pub fn max_buffer() -> TypeSignature { - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(MAX_VALUE_SIZE) - .expect("FAIL: Max Clarity Value Size is no longer realizable in Buffer Type"), - )) + pub fn max_buffer() -> Result { + Ok(SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from(MAX_VALUE_SIZE).map_err(|_| { + CheckErrors::Expects( + "FAIL: Max Clarity Value Size is no longer realizable in Buffer Type".into(), + ) + })?, + ))) } - pub fn contract_name_string_ascii_type() -> TypeSignature { - TypeSignature::bound_string_ascii_type( - CONTRACT_MAX_NAME_LENGTH - .try_into() - .expect("FAIL: contract name max length exceeds u32 space"), - ) + pub fn contract_name_string_ascii_type() -> Result { + TypeSignature::bound_string_ascii_type(CONTRACT_MAX_NAME_LENGTH.try_into().map_err( + |_| CheckErrors::Expects("FAIL: contract name max length exceeds u32 space".into()), + )?) 
} - pub fn bound_string_ascii_type(max_len: u32) -> TypeSignature { - SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( - BufferLength::try_from(max_len) - .expect("FAIL: Max Clarity Value Size is no longer realizable in ASCII Type"), + pub fn bound_string_ascii_type(max_len: u32) -> Result { + Ok(SequenceType(SequenceSubtype::StringType( + StringSubtype::ASCII(BufferLength::try_from(max_len).map_err(|_| { + CheckErrors::Expects( + "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".into(), + ) + })?), ))) } @@ -1058,7 +1116,9 @@ impl TypeSignature { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => Self::least_supertype_v2_1(a, b), - StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), + StacksEpochId::Epoch10 => { + return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) + } } } @@ -1309,23 +1369,23 @@ impl TypeSignature { } } - pub fn type_of(x: &Value) -> TypeSignature { - match x { + pub fn type_of(x: &Value) -> Result { + let out = match x { Value::Principal(_) => PrincipalType, Value::Int(_v) => IntType, Value::UInt(_v) => UIntType, Value::Bool(_v) => BoolType, Value::Tuple(v) => TupleType(v.type_signature.clone()), - Value::Sequence(SequenceData::List(list_data)) => list_data.type_signature(), - Value::Sequence(SequenceData::Buffer(buff_data)) => buff_data.type_signature(), + Value::Sequence(SequenceData::List(list_data)) => list_data.type_signature()?, + Value::Sequence(SequenceData::Buffer(buff_data)) => buff_data.type_signature()?, Value::Sequence(SequenceData::String(CharType::ASCII(ascii_data))) => { - ascii_data.type_signature() + ascii_data.type_signature()? } Value::Sequence(SequenceData::String(CharType::UTF8(utf8_data))) => { - utf8_data.type_signature() + utf8_data.type_signature()? } - Value::Optional(v) => v.type_signature(), - Value::Response(v) => v.type_signature(), + Value::Optional(v) => v.type_signature()?, + Value::Response(v) => v.type_signature()?, Value::CallableContract(v) => { if let Some(trait_identifier) = &v.trait_identifier { CallableType(CallableSubtype::Trait(trait_identifier.clone())) @@ -1333,22 +1393,25 @@ impl TypeSignature { CallableType(CallableSubtype::Principal(v.contract_identifier.clone())) } } - } + }; + + Ok(out) } - pub fn literal_type_of(x: &Value) -> TypeSignature { + pub fn literal_type_of(x: &Value) -> Result { match x { - Value::Principal(PrincipalData::Contract(contract_id)) => { - CallableType(CallableSubtype::Principal(contract_id.clone())) - } + Value::Principal(PrincipalData::Contract(contract_id)) => Ok(CallableType( + CallableSubtype::Principal(contract_id.clone()), + )), _ => Self::type_of(x), } } // Checks if resulting type signature is of valid size. pub fn construct_parent_list_type(args: &[Value]) -> Result { - let children_types: Vec<_> = args.iter().map(TypeSignature::type_of).collect(); - TypeSignature::parent_list_type(&children_types) + let children_types: Result> = + args.iter().map(|x| TypeSignature::type_of(x)).collect(); + TypeSignature::parent_list_type(&children_types?) } pub fn parent_list_type( @@ -1649,14 +1712,18 @@ impl TypeSignature { } } - pub fn size(&self) -> u32 { - self.inner_size().expect( - "FAIL: .size() overflowed on too large of a type. construction should have failed!", - ) + pub fn size(&self) -> Result { + self.inner_size()?.ok_or_else(|| { + CheckErrors::Expects( + "FAIL: .size() overflowed on too large of a type. construction should have failed!" 
+ .into(), + ) + .into() + }) } - fn inner_size(&self) -> Option { - match self { + fn inner_size(&self) -> Result> { + let out = match self { // NoType's may be asked for their size at runtime -- // legal constructions like `(ok 1)` have NoType parts (if they have unknown error variant types). NoType => Some(1), @@ -1664,27 +1731,28 @@ impl TypeSignature { UIntType => Some(16), BoolType => Some(1), PrincipalType => Some(148), // 20+128 - TupleType(tuple_sig) => tuple_sig.inner_size(), + TupleType(tuple_sig) => tuple_sig.inner_size()?, SequenceType(SequenceSubtype::BufferType(len)) | SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII(len))) => { Some(4 + u32::from(len)) } - SequenceType(SequenceSubtype::ListType(list_type)) => list_type.inner_size(), + SequenceType(SequenceSubtype::ListType(list_type)) => list_type.inner_size()?, SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8(len))) => { Some(4 + 4 * u32::from(len)) } - OptionalType(t) => t.size().checked_add(WRAPPER_VALUE_SIZE), + OptionalType(t) => t.size()?.checked_add(WRAPPER_VALUE_SIZE), ResponseType(v) => { // ResponseTypes are 1 byte for the committed bool, // plus max(err_type, ok_type) let (t, s) = (&v.0, &v.1); - let t_size = t.size(); - let s_size = s.size(); + let t_size = t.size()?; + let s_size = s.size()?; cmp::max(t_size, s_size).checked_add(WRAPPER_VALUE_SIZE) } CallableType(CallableSubtype::Principal(_)) | ListUnionType(_) => Some(148), // 20+128 CallableType(CallableSubtype::Trait(_)) | TraitReferenceType(_) => Some(276), // 20+128+128 - } + }; + Ok(out) } pub fn type_size(&self) -> Result { @@ -1719,16 +1787,21 @@ impl TypeSignature { impl ListTypeData { /// List Size: type_signature_size + max_len * entry_type.size() - fn inner_size(&self) -> Option { + fn inner_size(&self) -> Result> { let total_size = self .entry_type - .size() - .checked_mul(self.max_len)? - .checked_add(self.type_size()?)?; - if total_size > MAX_VALUE_SIZE { - None - } else { - Some(total_size) + .size()? + .checked_mul(self.max_len) + .and_then(|x| x.checked_add(self.type_size()?)); + match total_size { + Some(total_size) => { + if total_size > MAX_VALUE_SIZE { + Ok(None) + } else { + Ok(Some(total_size)) + } + } + None => Ok(None), } } @@ -1763,9 +1836,10 @@ impl TupleTypeSignature { } } - pub fn size(&self) -> u32 { - self.inner_size() - .expect("size() overflowed on a constructed type.") + pub fn size(&self) -> Result { + self.inner_size()?.ok_or_else(|| { + CheckErrors::Expects("size() overflowed on a constructed type.".into()).into() + }) } fn max_depth(&self) -> u8 { @@ -1779,24 +1853,33 @@ impl TupleTypeSignature { /// Tuple Size: /// size( btreemap ) + type_size /// size( btreemap ) = 2*map.len() + sum(names) + sum(values) - fn inner_size(&self) -> Option { - let mut total_size = u32::try_from(self.type_map.len()) - .ok()? - .checked_mul(2)? - .checked_add(self.type_size()?)?; + fn inner_size(&self) -> Result> { + let Some(mut total_size) = u32::try_from(self.type_map.len()) + .ok() + .and_then(|x| x.checked_mul(2)) + .and_then(|x| x.checked_add(self.type_size()?)) + else { + return Ok(None); + }; for (name, type_signature) in self.type_map.iter() { // we only accept ascii names, so 1 char = 1 byte. - total_size = total_size - .checked_add(type_signature.size())? - // name.len() is bound to MAX_STRING_LEN (128), so `as u32` won't ever truncate - .checked_add(name.len() as u32)?; + total_size = if let Some(new_size) = total_size.checked_add(type_signature.size()?) 
{ + new_size + } else { + return Ok(None); + }; + total_size = if let Some(new_size) = total_size.checked_add(name.len() as u32) { + new_size + } else { + return Ok(None); + }; } if total_size > MAX_VALUE_SIZE { - None + Ok(None) } else { - Some(total_size) + Ok(Some(total_size)) } } } @@ -1959,7 +2042,7 @@ mod test { fn type_of_list_of_buffs(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let value = execute("(list \"abc\" \"abcde\")").unwrap().unwrap(); let type_descr = TypeSignature::from_string("(list 2 (string-ascii 5))", version, epoch); - assert_eq!(TypeSignature::type_of(&value), type_descr); + assert_eq!(TypeSignature::type_of(&value).unwrap(), type_descr); } #[apply(test_clarity_versions)] @@ -2075,8 +2158,8 @@ mod test { TypeSignature::BoolType, ), ( - (TypeSignature::NoType, TypeSignature::min_buffer()), - TypeSignature::min_buffer(), + (TypeSignature::NoType, TypeSignature::min_buffer().unwrap()), + TypeSignature::min_buffer().unwrap(), ), ( ( @@ -2088,13 +2171,16 @@ mod test { ( ( TypeSignature::NoType, - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), ( - (TypeSignature::NoType, TypeSignature::max_string_utf8()), - TypeSignature::max_string_utf8(), + ( + TypeSignature::NoType, + TypeSignature::max_string_utf8().unwrap(), + ), + TypeSignature::max_string_utf8().unwrap(), ), ( (TypeSignature::NoType, TypeSignature::PrincipalType), @@ -2175,8 +2261,11 @@ mod test { ((UIntType, UIntType), UIntType), ((BoolType, BoolType), BoolType), ( - (TypeSignature::max_buffer(), TypeSignature::max_buffer()), - TypeSignature::max_buffer(), + ( + TypeSignature::max_buffer().unwrap(), + TypeSignature::max_buffer().unwrap(), + ), + TypeSignature::max_buffer().unwrap(), ), ( ( @@ -2187,17 +2276,17 @@ mod test { ), ( ( - TypeSignature::bound_string_ascii_type(17), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), ( ( - TypeSignature::max_string_utf8(), - TypeSignature::max_string_utf8(), + TypeSignature::max_string_utf8().unwrap(), + TypeSignature::max_string_utf8().unwrap(), ), - TypeSignature::max_string_utf8(), + TypeSignature::max_string_utf8().unwrap(), ), ( (TypeSignature::PrincipalType, TypeSignature::PrincipalType), @@ -2281,8 +2370,11 @@ mod test { let matched_pairs = [ ( - (TypeSignature::max_buffer(), TypeSignature::min_buffer()), - TypeSignature::max_buffer(), + ( + TypeSignature::max_buffer().unwrap(), + TypeSignature::min_buffer().unwrap(), + ), + TypeSignature::max_buffer().unwrap(), ), ( ( @@ -2293,17 +2385,17 @@ mod test { ), ( ( - TypeSignature::min_string_ascii(), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::min_string_ascii().unwrap(), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), ( ( - TypeSignature::min_string_utf8(), - TypeSignature::max_string_utf8(), + TypeSignature::min_string_utf8().unwrap(), + TypeSignature::max_string_utf8().unwrap(), ), - TypeSignature::max_string_utf8(), + TypeSignature::max_string_utf8().unwrap(), ), ( ( @@ -2356,7 +2448,7 @@ mod test { 5, ) .unwrap(), - TypeSignature::list_of(TypeSignature::min_buffer(), 3).unwrap(), + 
TypeSignature::list_of(TypeSignature::min_buffer().unwrap(), 3).unwrap(), ), TypeSignature::list_of( TypeSignature::SequenceType(SequenceSubtype::BufferType( @@ -2371,14 +2463,14 @@ mod test { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::min_string_ascii(), + TypeSignature::min_string_ascii().unwrap(), )]) .unwrap(), ), TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), )]) .unwrap(), ), @@ -2386,17 +2478,19 @@ mod test { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17).unwrap(), )]) .unwrap(), ), ), ( ( - TypeSignature::new_option(TypeSignature::min_string_ascii()).unwrap(), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), + TypeSignature::new_option(TypeSignature::min_string_ascii().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()) + .unwrap(), ), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), + TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()) + .unwrap(), ), ( ( @@ -2433,20 +2527,20 @@ mod test { (IntType, UIntType), (BoolType, IntType), ( - TypeSignature::max_buffer(), - TypeSignature::max_string_ascii(), + TypeSignature::max_buffer().unwrap(), + TypeSignature::max_string_ascii().unwrap(), ), ( TypeSignature::list_of(TypeSignature::UIntType, 42).unwrap(), TypeSignature::list_of(TypeSignature::IntType, 42).unwrap(), ), ( - TypeSignature::min_string_utf8(), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::min_string_utf8().unwrap(), + TypeSignature::bound_string_ascii_type(17).unwrap(), ), ( - TypeSignature::min_string_utf8(), - TypeSignature::min_buffer(), + TypeSignature::min_string_utf8().unwrap(), + TypeSignature::min_buffer().unwrap(), ), ( TypeSignature::TupleType( @@ -2460,7 +2554,7 @@ mod test { ), ( TypeSignature::new_option(TypeSignature::IntType).unwrap(), - TypeSignature::new_option(TypeSignature::min_string_utf8()).unwrap(), + TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(), ), ( TypeSignature::new_response(TypeSignature::IntType, TypeSignature::BoolType) @@ -2483,7 +2577,7 @@ mod test { ), (list_union.clone(), TypeSignature::PrincipalType), ( - TypeSignature::min_string_ascii(), + TypeSignature::min_string_ascii().unwrap(), list_union_principals.clone(), ), ( @@ -2494,13 +2588,13 @@ mod test { 5, ) .unwrap(), - TypeSignature::list_of(TypeSignature::min_string_ascii(), 3).unwrap(), + TypeSignature::list_of(TypeSignature::min_string_ascii().unwrap(), 3).unwrap(), ), ( TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::min_string_ascii(), + TypeSignature::min_string_ascii().unwrap(), )]) .unwrap(), ), @@ -2510,8 +2604,8 @@ mod test { ), ), ( - TypeSignature::new_option(TypeSignature::min_string_ascii()).unwrap(), - TypeSignature::new_option(TypeSignature::min_string_utf8()).unwrap(), + TypeSignature::new_option(TypeSignature::min_string_ascii().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(), ), ( TypeSignature::new_response(TypeSignature::PrincipalType, list_union.clone()) diff --git a/clarity/src/vm/variables.rs b/clarity/src/vm/variables.rs index 286d661b41..66de0f3b6e 100644 --- a/clarity/src/vm/variables.rs +++ 
b/clarity/src/vm/variables.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::convert::TryFrom; +use super::errors::InterpreterError; use crate::vm::contexts::{Environment, LocalContext}; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::runtime_cost; @@ -82,8 +83,11 @@ pub fn lookup_reserved_variable( NativeVariables::TxSponsor => { let sponsor = match env.sponsor.clone() { None => Value::none(), - Some(p) => Value::some(Value::Principal(p)) - .expect("ERROR: principal should be a valid Clarity object"), + Some(p) => Value::some(Value::Principal(p)).map_err(|_| { + InterpreterError::Expect( + "ERROR: principal should be a valid Clarity object".into(), + ) + })?, }; Ok(Some(sponsor)) } @@ -97,15 +101,15 @@ pub fn lookup_reserved_variable( let burn_block_height = env .global_context .database - .get_current_burnchain_block_height(); - Ok(Some(Value::UInt(burn_block_height as u128))) + .get_current_burnchain_block_height()?; + Ok(Some(Value::UInt(u128::from(burn_block_height)))) } NativeVariables::NativeNone => Ok(Some(Value::none())), NativeVariables::NativeTrue => Ok(Some(Value::Bool(true))), NativeVariables::NativeFalse => Ok(Some(Value::Bool(false))), NativeVariables::TotalLiquidMicroSTX => { runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; - let liq = env.global_context.database.get_total_liquid_ustx(); + let liq = env.global_context.database.get_total_liquid_ustx()?; Ok(Some(Value::UInt(liq))) } NativeVariables::Regtest => { diff --git a/contrib/init/org.stacks.stacks-blockchain.plist b/contrib/init/org.stacks.stacks-blockchain.plist index 965b4fa300..e0429e75f7 100644 --- a/contrib/init/org.stacks.stacks-blockchain.plist +++ b/contrib/init/org.stacks.stacks-blockchain.plist @@ -8,7 +8,7 @@ /usr/local/bin/stacks-node start - --config=/etc/stacks-blockchain/Config.toml + --config /etc/stacks-blockchain/Config.toml ProcessType diff --git a/contrib/init/stacks.init b/contrib/init/stacks.init index fb60c297e8..9ef1e8bbe4 100644 --- a/contrib/init/stacks.init +++ b/contrib/init/stacks.init @@ -60,7 +60,7 @@ start() { return 1 fi echo -n $"Starting $prog: " - $stacks_bin start --config="$stacks_config" > "$stacks_log" 2>&1 & + $stacks_bin start --config "$stacks_config" > "$stacks_log" 2>&1 & RETVAL=$? [ $RETVAL -eq 0 ] && touch "$lockfile" echo diff --git a/contrib/init/stacks.service b/contrib/init/stacks.service index edbb3cba65..9b83409ab1 100644 --- a/contrib/init/stacks.service +++ b/contrib/init/stacks.service @@ -11,7 +11,7 @@ ConditionPathExists=/etc/stacks-blockchain/Config.toml ConditionPathIsDirectory=/stacks-blockchain [Service] -ExecStart=/usr/local/bin/stacks-node start --config=/etc/stacks-blockchain/Config.toml +ExecStart=/usr/local/bin/stacks-node start --config /etc/stacks-blockchain/Config.toml # Make sure the config directory is readable by the service user PermissionsStartOnly=true diff --git a/docs/profiling.md b/docs/profiling.md index 35bbaf2f18..25f821d2c9 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -1,6 +1,7 @@ # Profiling Tools This document describes several techniques to profile (i.e. 
find performance bottlenecks) the stacks-node mining loop, including: + - configuring debug logging, - setting up a mock mining node, - recording inbound transactions, @@ -15,7 +16,7 @@ Note that all bash commands in this document are run from the stacks-blockchain Validating the config file using `stacks-node check-config`: ``` -$ cargo run -r -p stacks-node --bin stacks-node check-config --config=testnet/stacks-node/conf/mainnet-mockminer-conf.toml +$ cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! @@ -24,7 +25,7 @@ INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid conf Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: ``` -$ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config=testnet/stacks-node/conf/mainnet-mockminer-conf.toml +$ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } @@ -34,7 +35,7 @@ INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid conf Enabling json logging using environment variable `STACKS_LOG_JSON=1` and feature flag `slog_json`: ``` -$ STACKS_LOG_JSON=1 cargo run -F slog_json -r -p stacks-node --bin stacks-node check-config 
--config=testnet/stacks-node/conf/mainnet-mockminer-conf.toml +$ STACKS_LOG_JSON=1 cargo run -F slog_json -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml {"msg":"stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64])","level":"INFO","ts":"2022-08-23T12:44:28.072462-05:00","thread":"main","line":82,"file":"testnet/stacks-node/src/main.rs"} {"msg":"Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml","level":"INFO","ts":"2022-08-23T12:44:28.074238-05:00","thread":"main","line":115,"file":"testnet/stacks-node/src/main.rs"} {"msg":"Valid config!","level":"INFO","ts":"2022-08-23T12:44:28.089960-05:00","thread":"main","line":128,"file":"testnet/stacks-node/src/main.rs"} @@ -53,6 +54,7 @@ $ export STACKS_SNAPSHOT_DIR=$STACKS_DIR/snapshot ## Setting up the mock mining node Download and extract an archived snapshot of mainnet working directory, provided by Hiro. + ``` $ wget -P $STACKS_DIR https://storage.googleapis.com/blockstack-publish/archiver-main/follower/mainnet-follower-latest.tar.gz $ tar xzvf $STACKS_DIR/mainnet-follower-latest.tar.gz -C $STACKS_DIR @@ -65,17 +67,19 @@ We'll be using the `stacks-node` config file available at: Note that, for convenience, the `stacks-node` binary uses the environment variable `$STACKS_WORKING_DIR` to override the working directory location in the config file. ``` -$ cargo run -r -p stacks-node --bin stacks-node start --config=testnet/stacks-node/conf/mocknet-miner-conf.toml +$ cargo run -r -p stacks-node --bin stacks-node start --config testnet/stacks-node/conf/mocknet-miner-conf.toml ``` The `stacks-node` process will receive blocks starting from the latest block available in the Hiro archive. Check the latest tip height of our node. + ``` $ curl -s 127.0.0.1:20443/v2/info | jq .stacks_tip_height ``` Compare our node's tip height to a public node's tip height to check when our node is fully synchronized. + ``` $ curl -s seed-0.mainnet.stacks.co:20443/v2/info | jq .stacks_tip_height ``` @@ -85,16 +89,19 @@ Once the node is synchronized, terminate the `stacks-node` process so we can set ## Recording blockchain events Run `stacks-events` to receive and archive events: + ``` $ cargo run -r -p stacks-node --bin stacks-events | tee $STACKS_DIR/events.log ``` Run `stacks-node` with an event observer: + ``` -$ STACKS_EVENT_OBSERVER=localhost:3700 cargo run -r -p stacks-node --bin stacks-node start --config=testnet/stacks-node/conf/mocknet-miner-conf.toml +$ STACKS_EVENT_OBSERVER=localhost:3700 cargo run -r -p stacks-node --bin stacks-node start --config testnet/stacks-node/conf/mocknet-miner-conf.toml ``` You should see output from `stacks-events` in `events.logs` similar to: + ``` $ tail -F $STACKS_DIR/events.log {"path":"drop_mempool_tx","payload":{"dropped_txids":["0x6f78047f15ac3309153fc34be94ed8895111304336aec1ff106b7de051021e17, ..., "ts":"2022-08-12T05:03:08.577Z"} @@ -103,21 +110,25 @@ $ tail -F $STACKS_DIR/events.log ## Historical Mining Discover the first recorded block height: + ``` $ cat $STACKS_DIR/events.log | egrep new_block | head -1 | jq .payload.block_height ``` Discover a lower bound number of recorded transactions. This is a lower bound because each line in the events file is a list of transactions. 
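For an exact figure the per-event payload lists would have to be unpacked; the quick line count shown next still gives the lower bound. A hypothetical Rust sketch (assuming the `serde_json` crate, and assuming each `new_mempool_tx` event's `payload` is the JSON list of raw transactions, as the note above suggests) that sums list lengths instead of counting lines:

```
use std::fs::File;
use std::io::{BufRead, BufReader};

fn main() -> std::io::Result<()> {
    // Path is illustrative; point this at $STACKS_DIR/events.log.
    let file = File::open("events.log")?;
    let mut total = 0usize;
    for line in BufReader::new(file).lines() {
        let line = line?;
        // Skip lines that are not well-formed JSON events.
        let Ok(event) = serde_json::from_str::<serde_json::Value>(&line) else {
            continue;
        };
        if event["path"] == "new_mempool_tx" {
            // Assumed shape: payload is the JSON list of raw transactions.
            total += event["payload"].as_array().map_or(0, |txs| txs.len());
        }
    }
    println!("recorded transactions: {}", total);
    Ok(())
}
```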
+ ``` $ cat $STACKS_DIR/events.log | egrep new_mempool_tx | wc -l ``` Make a snapshot of the working directory: + ``` $ cp -r $STACKS_WORKING_DIR $STACKS_SNAPSHOT_DIR ``` Run the `tip-mine` benchmark: + ``` $ export STACKS_TIP_MINE_BLOCK_HEIGHT=71294 $ export STACKS_TIP_MINE_NUM_TXS=100 @@ -137,9 +148,11 @@ INFO [1661274285.417171] [src/chainstate/stacks/miner.rs:1628] [main] Miner: min Successfully mined block @ height = 71295 off of bd4fa09ece02e7fd53493c96bd69b89155058f7b28d4a659d87d89644208f41e (96cc06519e670eefb674aa2e9cfe0cfae103d4da/f0f0caa2afaae75417f14fe2fad1e3fd52b0169e66cb045b4954b9ab78611f31) in 7310ms. Block 4a64e0a4012acb6748a08784876c23f6f61aba08b7c826db5b57832935278f33: 3227082 uSTX, 31587 bytes, cost ExecutionCost { write_length: 84090, write_count: 1170, read_length: 20381499, read_count: 7529, runtime: 103717315 } ``` + In this run, `tip-mine` mined a block with 87 transactions. Alternatively, you can run `cargo build` separately from the target binary `stacks-inspect` to avoid re-building and speed up profiling: + ``` $ cargo build -F disable-costs -r --bin stacks-inspect $ ./target/release/stacks-inspect tip-mine $STACKS_SNAPSHOT_DIR $STACKS_DIR/events.log $STACKS_TIP_MINE_BLOCK_HEIGHT $STACKS_TIP_MINE_NUM_TXS @@ -158,16 +171,19 @@ $ cargo install flamegraph flamegraph-rs uses [dtrace](https://en.wikipedia.org/wiki/DTrace) for profiling on Mac. Build `stacks-inspect` using the feature `disable-costs` to disable the block cost limits: + ``` $ cargo build -F disable-costs -r --bin stacks-inspect ``` Generate a flame graph: + ``` $ flamegraph --root -o perf.svg -e cpu-clock --min-width 1 --deterministic -- ./target/release/stacks-inspect tip-mine $STACKS_SNAPSHOT_DIR $STACKS_DIR/events.log $STACKS_TIP_MINE_BLOCK_HEIGHT $STACKS_TIP_MINE_NUM_TXS ``` You can open the flame graph using a browser: + ``` $ open perf.svg ``` @@ -183,21 +199,25 @@ The Linux performance tool `perf` has a performance bug which has been fixed. 
If your distribution's `perf` predates the fix, you can build a patched `perf` from the kernel sources, as described below. Background on the `perf` performance bug: https://eighty-twenty.org/2021/09/09/perf-addr2line-speed-improvement Find out your kernel version: + ``` $ uname -a Linux localhost 5.15.0-25-generic #26~16.04.1-Ubuntu SMP Tue Oct 1 16:30:39 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux ``` Install dependencies, clone the linux kernel source, checkout the version tag matching your kernel version and build perf: + ``` -$ sudo apt install -y git libzstd-dev libunwind-dev libcap-dev libdw-dev libdwarf-dev libbfd-dev libelf-dev systemtap-sdt-dev binutils-dev libnuma-dev libiberty-dev bison flex +$ sudo apt install -y git libzstd-dev libunwind-dev libcap-dev libdw-dev libdwarf-dev libbfd-dev libelf-dev systemtap-sdt-dev binutils-dev libnuma-dev libiberty-dev bison flex $ git clone https://github.com/torvalds/linux.git $ git checkout v5.15 $ cd linux/tools/perf && make ``` #### Running perf + Grant kernel permissions to perf: + ``` $ sudo sed -i "$ a kernel.perf_event_paranoid = -1" /etc/sysctl.conf $ sudo sed -i "$ a kernel.kptr_restrict = 0" /etc/sysctl.conf @@ -205,6 +225,7 @@ $ sysctl --system ``` Note that you need to uncomment the following in `.cargo/config` (see [flamegraph-rs](https://github.com/flamegraph-rs/flamegraph) for details) + ``` [target.x86_64-unknown-linux-gnu] linker = "/usr/bin/clang" @@ -212,11 +233,13 @@ rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] ``` Build `stacks-inspect` using the feature `disable-costs` to disable the block cost limits: + ``` $ cargo build -F disable-costs -r --bin stacks-inspect ``` Generate a flame graph using the locally built `perf` binary: + ``` $ PERF=~/linux/tools/perf/perf flamegraph --cmd "record -F 97 --call-graph dwarf,65528 -g -e cpu-clock" -o perf.svg --min-width 0.5 --deterministic -- ./target/release/stacks-inspect tip-mine $STACKS_SNAPSHOT_DIR $STACKS_DIR/events.log $STACKS_TIP_MINE_BLOCK_HEIGHT $STACKS_TIP_MINE_NUM_TXS ``` @@ -225,11 +248,11 @@ Output flame graph is in `perf.svg`. ## Profiling SQLite queries -Set the environment variable `STACKS_LOG_DEBUG=1` and use the cargo feature `profile-sqlite`: +Set the environment variable `STACKS_LOG_DEBUG=1` and use the cargo feature `profile-sqlite`: ``` $ STACKS_LOG_DEBUG=1 cargo run -F profile-sqlite,disable-costs -r --bin stacks-inspect try-mine $STACKS_WORKING_DIR ... DEBG [1661217664.809057] [src/util_lib/db.rs:666] [main] sqlite trace profile {"millis":1,"query":"SELECT value FROM data_table WHERE key = ?"} ... -``` \ No newline at end of file +``` diff --git a/net-test/bin/start.sh b/net-test/bin/start.sh index 4b750ad060..d21bb90f70 100755 --- a/net-test/bin/start.sh +++ b/net-test/bin/start.sh @@ -145,7 +145,7 @@ start_stacks_master_node() { logln "ok" log "[$$] Starting Stacks master node..." - BLOCKSTACK_DEBUG=1 RUST_BACKTRACE=full stacks-node start --config="$STACKS_MASTER_CONF" >"$STACKS_MASTER_LOGFILE" 2>&1 & + BLOCKSTACK_DEBUG=1 RUST_BACKTRACE=full stacks-node start --config "$STACKS_MASTER_CONF" >"$STACKS_MASTER_LOGFILE" 2>&1 & local STACKS_PID=$! logln "PID $STACKS_PID" @@ -233,7 +233,7 @@ start_stacks_miner_node() { logln "ok" log "[$$] Starting Stacks miner node..." - BLOCKSTACK_DEBUG=1 RUST_BACKTRACE=full stacks-node start --config="$STACKS_MINER_CONF" >"$STACKS_MINER_LOGFILE" 2>&1 & + BLOCKSTACK_DEBUG=1 RUST_BACKTRACE=full stacks-node start --config "$STACKS_MINER_CONF" >"$STACKS_MINER_LOGFILE" 2>&1 & local STACKS_PID=$! logln "PID $STACKS_PID" @@ -265,7 +265,7 @@ start_stacks_follower_node() { logln "ok" log "[$$] Starting Stacks follower node..."
- BLOCKSTACK_DEBUG=1 RUST_BACKTRACE=full stacks-node start --config="$STACKS_FOLLOWER_CONF" >"$STACKS_FOLLOWER_LOGFILE" 2>&1 & + BLOCKSTACK_DEBUG=1 RUST_BACKTRACE=full stacks-node start --config "$STACKS_FOLLOWER_CONF" >"$STACKS_FOLLOWER_LOGFILE" 2>&1 & local STACKS_PID=$! logln "PID $STACKS_PID" diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 9f44330c33..49ca9c38cd 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -382,10 +382,7 @@ pub fn synthesize_pox_2_or_3_event_info( test_debug!("Evaluate snippet:\n{}", &code_snippet); test_debug!("Evaluate data code:\n{}", &data_snippet); - let pox_2_contract = global_context - .database - .get_contract(contract_id) - .expect("FATAL: could not load PoX contract metadata"); + let pox_2_contract = global_context.database.get_contract(contract_id)?; let event_info = global_context .special_cc_handler_execute_read_only( @@ -414,8 +411,12 @@ pub fn synthesize_pox_2_or_3_event_info( })?; // merge them - let base_event_tuple = base_event_info.expect_tuple(); - let data_tuple = data_event_info.expect_tuple(); + let base_event_tuple = base_event_info + .expect_tuple() + .expect("FATAL: unexpected clarity value"); + let data_tuple = data_event_info + .expect_tuple() + .expect("FATAL: unexpected clarity value"); let event_tuple = TupleData::shallow_merge(base_event_tuple, data_tuple).map_err(|e| { error!("Failed to merge data-info and event-info: {:?}", &e); diff --git a/pox-locking/src/lib.rs b/pox-locking/src/lib.rs index b195f4cc9b..8829696452 100644 --- a/pox-locking/src/lib.rs +++ b/pox-locking/src/lib.rs @@ -47,6 +47,13 @@ pub enum LockingError { PoxExtendNotLocked, PoxIncreaseOnV1, PoxInvalidIncrease, + Clarity(ClarityError), +} + +impl From for LockingError { + fn from(e: ClarityError) -> LockingError { + LockingError::Clarity(e) + } } pub const POX_1_NAME: &str = "pox"; @@ -66,12 +73,14 @@ pub fn handle_contract_call_special_cases( if *contract_id == boot_code_id(POX_1_NAME, global_context.mainnet) { if !pox_1::is_read_only(function_name) && global_context.database.get_v1_unlock_height() - <= global_context.database.get_current_burnchain_block_height() + <= global_context + .database + .get_current_burnchain_block_height()? 
{ // NOTE: get-pox-info is read-only, so it can call old pox v1 stuff warn!("PoX-1 function call attempted on an account after v1 unlock height"; "v1_unlock_ht" => global_context.database.get_v1_unlock_height(), - "current_burn_ht" => global_context.database.get_current_burnchain_block_height(), + "current_burn_ht" => global_context.database.get_current_burnchain_block_height()?, "function_name" => function_name, "contract_id" => %contract_id ); @@ -85,8 +94,8 @@ pub fn handle_contract_call_special_cases( if !pox_2::is_read_only(function_name) && global_context.epoch_id >= StacksEpochId::Epoch22 { warn!("PoX-2 function call attempted on an account after Epoch 2.2"; - "v2_unlock_ht" => global_context.database.get_v2_unlock_height(), - "current_burn_ht" => global_context.database.get_current_burnchain_block_height(), + "v2_unlock_ht" => global_context.database.get_v2_unlock_height()?, + "current_burn_ht" => global_context.database.get_current_burnchain_block_height()?, "function_name" => function_name, "contract_id" => %contract_id ); diff --git a/pox-locking/src/pox_1.rs b/pox-locking/src/pox_1.rs index 95e44809d3..e28ccb917e 100644 --- a/pox-locking/src/pox_1.rs +++ b/pox-locking/src/pox_1.rs @@ -34,33 +34,40 @@ use crate::LockingError; fn parse_pox_stacking_result_v1( result: &Value, ) -> std::result::Result<(PrincipalData, u128, u64), i128> { - match result.clone().expect_result() { + match result + .clone() + .expect_result() + .expect("FATAL: unexpected clarity value") + { Ok(res) => { // should have gotten back (ok (tuple (stacker principal) (lock-amount uint) (unlock-burn-height uint))) - let tuple_data = res.expect_tuple(); + let tuple_data = res.expect_tuple().expect("FATAL: unexpected clarity value"); let stacker = tuple_data .get("stacker") .expect("FATAL: no 'stacker'") .to_owned() - .expect_principal(); + .expect_principal() + .expect("FATAL: unexpected clarity value"); let lock_amount = tuple_data .get("lock-amount") .expect("FATAL: no 'lock-amount'") .to_owned() - .expect_u128(); + .expect_u128() + .expect("FATAL: unexpected clarity value"); let unlock_burn_height = tuple_data .get("unlock-burn-height") .expect("FATAL: no 'unlock-burn-height'") .to_owned() .expect_u128() + .expect("FATAL: unexpected clarity value") .try_into() .expect("FATAL: 'unlock-burn-height' overflow"); Ok((stacker, lock_amount, unlock_burn_height)) } - Err(e) => Err(e.expect_i128()), + Err(e) => Err(e.expect_i128().expect("FATAL: unexpected clarity value")), } } @@ -91,20 +98,20 @@ pub fn pox_lock_v1( assert!(unlock_burn_height > 0); assert!(lock_amount > 0); - let mut snapshot = db.get_stx_balance_snapshot(principal); + let mut snapshot = db.get_stx_balance_snapshot(principal)?; if snapshot.balance().was_locked_by_v2() { debug!("PoX Lock attempted on an account locked by v2"); return Err(LockingError::DefunctPoxContract); } - if snapshot.has_locked_tokens() { + if snapshot.has_locked_tokens()? { return Err(LockingError::PoxAlreadyLocked); } - if !snapshot.can_transfer(lock_amount) { + if !snapshot.can_transfer(lock_amount)? 
{ return Err(LockingError::PoxInsufficientBalance); } - snapshot.lock_tokens_v1(lock_amount, unlock_burn_height); + snapshot.lock_tokens_v1(lock_amount, unlock_burn_height)?; debug!( "PoX v1 lock applied"; @@ -114,7 +121,7 @@ pub fn pox_lock_v1( "account" => %principal, ); - snapshot.save(); + snapshot.save()?; Ok(()) } diff --git a/pox-locking/src/pox_2.rs b/pox-locking/src/pox_2.rs index 34c2f3a957..aca2a5da86 100644 --- a/pox-locking/src/pox_2.rs +++ b/pox-locking/src/pox_2.rs @@ -62,33 +62,40 @@ pub fn is_read_only(func_name: &str) -> bool { pub fn parse_pox_stacking_result( result: &Value, ) -> std::result::Result<(PrincipalData, u128, u64), i128> { - match result.clone().expect_result() { + match result + .clone() + .expect_result() + .expect("FATAL: unexpected clarity value") + { Ok(res) => { // should have gotten back (ok { stacker: principal, lock-amount: uint, unlock-burn-height: uint .. } .. }))) - let tuple_data = res.expect_tuple(); + let tuple_data = res.expect_tuple().expect("FATAL: unexpected clarity value"); let stacker = tuple_data .get("stacker") .expect("FATAL: no 'stacker'") .to_owned() - .expect_principal(); + .expect_principal() + .expect("FATAL: unexpected clarity value"); let lock_amount = tuple_data .get("lock-amount") .expect("FATAL: no 'lock-amount'") .to_owned() - .expect_u128(); + .expect_u128() + .expect("FATAL: unexpected clarity value"); let unlock_burn_height = tuple_data .get("unlock-burn-height") .expect("FATAL: no 'unlock-burn-height'") .to_owned() .expect_u128() + .expect("FATAL: unexpected clarity value") .try_into() .expect("FATAL: 'unlock-burn-height' overflow"); Ok((stacker, lock_amount, unlock_burn_height)) } - Err(e) => Err(e.expect_i128()), + Err(e) => Err(e.expect_i128().expect("FATAL: unexpected clarity value")), } } @@ -96,28 +103,34 @@ pub fn parse_pox_stacking_result( /// into a format more readily digestible in rust. /// Panics if the supplied value doesn't match the expected tuple structure pub fn parse_pox_extend_result(result: &Value) -> std::result::Result<(PrincipalData, u64), i128> { - match result.clone().expect_result() { + match result + .clone() + .expect_result() + .expect("FATAL: unexpected clarity value") + { Ok(res) => { // should have gotten back (ok { stacker: principal, unlock-burn-height: uint .. } .. }) - let tuple_data = res.expect_tuple(); + let tuple_data = res.expect_tuple().expect("FATAL: unexpected clarity value"); let stacker = tuple_data .get("stacker") .expect("FATAL: no 'stacker'") .to_owned() - .expect_principal(); + .expect_principal() + .expect("FATAL: unexpected clarity value"); let unlock_burn_height = tuple_data .get("unlock-burn-height") .expect("FATAL: no 'unlock-burn-height'") .to_owned() .expect_u128() + .expect("FATAL: unexpected clarity value") .try_into() .expect("FATAL: 'unlock-burn-height' overflow"); Ok((stacker, unlock_burn_height)) } // in the error case, the function should have returned `int` error code - Err(e) => Err(e.expect_i128()), + Err(e) => Err(e.expect_i128().expect("FATAL: unexpected clarity value")), } } @@ -125,26 +138,32 @@ pub fn parse_pox_extend_result(result: &Value) -> std::result::Result<(Principal /// into a format more readily digestible in rust. 
/// Panics if the supplied value doesn't match the expected tuple structure
pub fn parse_pox_increase(result: &Value) -> std::result::Result<(PrincipalData, u128), i128> {
- match result.clone().expect_result() {
+ match result
+ .clone()
+ .expect_result()
+ .expect("FATAL: unexpected clarity value")
+ {
Ok(res) => {
// should have gotten back (ok { stacker: principal, total-locked: uint .. } .. })
- let tuple_data = res.expect_tuple();
+ let tuple_data = res.expect_tuple().expect("FATAL: unexpected clarity value");
let stacker = tuple_data
.get("stacker")
.expect("FATAL: no 'stacker'")
.to_owned()
- .expect_principal();
+ .expect_principal()
+ .expect("FATAL: unexpected clarity value");
let total_locked = tuple_data
.get("total-locked")
.expect("FATAL: no 'total-locked'")
.to_owned()
- .expect_u128();
+ .expect_u128()
+ .expect("FATAL: unexpected clarity value");
Ok((stacker, total_locked))
}
// in the error case, the function should have returned `int` error code
- Err(e) => Err(e.expect_i128()),
+ Err(e) => Err(e.expect_i128().expect("FATAL: unexpected clarity value")),
}
}
@@ -164,17 +183,17 @@ pub fn pox_lock_increase_v2(
) -> Result<STXBalance, LockingError> {
assert!(new_total_locked > 0);
- let mut snapshot = db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
- if !snapshot.has_locked_tokens() {
+ if !snapshot.has_locked_tokens()? {
return Err(LockingError::PoxExtendNotLocked);
}
- if !snapshot.is_v2_locked() {
+ if !snapshot.is_v2_locked()? {
return Err(LockingError::PoxIncreaseOnV1);
}
- let bal = snapshot.canonical_balance_repr();
+ let bal = snapshot.canonical_balance_repr()?;
let total_amount = bal
.amount_unlocked()
.checked_add(bal.amount_locked())
@@ -187,9 +206,9 @@ pub fn pox_lock_increase_v2(
return Err(LockingError::PoxInvalidIncrease);
}
- snapshot.increase_lock_v2(new_total_locked);
+ snapshot.increase_lock_v2(new_total_locked)?;
- let out_balance = snapshot.canonical_balance_repr();
+ let out_balance = snapshot.canonical_balance_repr()?;
debug!(
"PoX v2 lock increased";
@@ -199,7 +218,7 @@ pub fn pox_lock_increase_v2(
"account" => %principal,
);
- snapshot.save();
+ snapshot.save()?;
Ok(out_balance)
}
@@ -217,13 +236,13 @@ pub fn pox_lock_extend_v2(
) -> Result<u128, LockingError> {
assert!(unlock_burn_height > 0);
- let mut snapshot = db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
- if !snapshot.has_locked_tokens() {
+ if !snapshot.has_locked_tokens()? {
return Err(LockingError::PoxExtendNotLocked);
}
- snapshot.extend_lock_v2(unlock_burn_height);
+ snapshot.extend_lock_v2(unlock_burn_height)?;
let amount_locked = snapshot.balance().amount_locked();
@@ -235,7 +254,7 @@ pub fn pox_lock_extend_v2(
"account" => %principal,
);
- snapshot.save();
+ snapshot.save()?;
Ok(amount_locked)
}
@@ -249,15 +268,15 @@ fn pox_lock_v2(
assert!(unlock_burn_height > 0);
assert!(lock_amount > 0);
- let mut snapshot = db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
- if snapshot.has_locked_tokens() {
+ if snapshot.has_locked_tokens()? {
return Err(LockingError::PoxAlreadyLocked);
}
- if !snapshot.can_transfer(lock_amount) {
+ if !snapshot.can_transfer(lock_amount)?
{
return Err(LockingError::PoxInsufficientBalance);
}
- snapshot.lock_tokens_v2(lock_amount, unlock_burn_height);
+ snapshot.lock_tokens_v2(lock_amount, unlock_burn_height)?;
debug!(
"PoX v2 lock applied";
@@ -267,7 +286,7 @@ fn pox_lock_v2(
"account" => %principal,
);
- snapshot.save();
+ snapshot.save()?;
Ok(())
}
diff --git a/pox-locking/src/pox_3.rs b/pox-locking/src/pox_3.rs
index cccfbb2644..1b082703cd 100644
--- a/pox-locking/src/pox_3.rs
+++ b/pox-locking/src/pox_3.rs
@@ -43,15 +43,15 @@ pub fn pox_lock_v3(
assert!(unlock_burn_height > 0);
assert!(lock_amount > 0);
- let mut snapshot = db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
- if snapshot.has_locked_tokens() {
+ if snapshot.has_locked_tokens()? {
return Err(LockingError::PoxAlreadyLocked);
}
- if !snapshot.can_transfer(lock_amount) {
+ if !snapshot.can_transfer(lock_amount)? {
return Err(LockingError::PoxInsufficientBalance);
}
- snapshot.lock_tokens_v3(lock_amount, unlock_burn_height);
+ snapshot.lock_tokens_v3(lock_amount, unlock_burn_height)?;
debug!(
"PoX v3 lock applied";
@@ -61,7 +61,7 @@ pub fn pox_lock_v3(
"account" => %principal,
);
- snapshot.save();
+ snapshot.save()?;
Ok(())
}
@@ -79,13 +79,13 @@ pub fn pox_lock_extend_v3(
) -> Result<u128, LockingError> {
assert!(unlock_burn_height > 0);
- let mut snapshot = db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
- if !snapshot.has_locked_tokens() {
+ if !snapshot.has_locked_tokens()? {
return Err(LockingError::PoxExtendNotLocked);
}
- snapshot.extend_lock_v3(unlock_burn_height);
+ snapshot.extend_lock_v3(unlock_burn_height)?;
let amount_locked = snapshot.balance().amount_locked();
@@ -97,7 +97,7 @@ pub fn pox_lock_extend_v3(
"account" => %principal,
);
- snapshot.save();
+ snapshot.save()?;
Ok(amount_locked)
}
@@ -115,13 +115,13 @@ pub fn pox_lock_increase_v3(
) -> Result<STXBalance, LockingError> {
assert!(new_total_locked > 0);
- let mut snapshot = db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
- if !snapshot.has_locked_tokens() {
+ if !snapshot.has_locked_tokens()? {
return Err(LockingError::PoxExtendNotLocked);
}
- let bal = snapshot.canonical_balance_repr();
+ let bal = snapshot.canonical_balance_repr()?;
let total_amount = bal
.amount_unlocked()
.checked_add(bal.amount_locked())
@@ -134,9 +134,9 @@ pub fn pox_lock_increase_v3(
return Err(LockingError::PoxInvalidIncrease);
}
- snapshot.increase_lock_v3(new_total_locked);
+ snapshot.increase_lock_v3(new_total_locked)?;
- let out_balance = snapshot.canonical_balance_repr();
+ let out_balance = snapshot.canonical_balance_repr()?;
debug!(
"PoX v3 lock increased";
@@ -146,7 +146,7 @@ pub fn pox_lock_increase_v3(
"account" => %principal,
);
- snapshot.save();
+ snapshot.save()?;
Ok(out_balance)
}
diff --git a/stacks-common/src/types/net.rs b/stacks-common/src/types/net.rs
index 25c86a82de..0e3285b59a 100644
--- a/stacks-common/src/types/net.rs
+++ b/stacks-common/src/types/net.rs
@@ -209,12 +209,14 @@ impl PeerAddress {
/// Is this a private IP address?
pub fn is_in_private_range(&self) -> bool {
if self.is_ipv4() {
- // 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16
+ // 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or 127.0.0.0/8
self.0[12] == 10
|| (self.0[12] == 172 && self.0[13] >= 16 && self.0[13] <= 31)
|| (self.0[12] == 192 && self.0[13] == 168)
+ || self.0[12] == 127
} else {
- self.0[0] >= 0xfc
+ // private address (fc00::/7) or localhost (::1)
self.0[0] >= 0xfc || (self.0[0..15] == [0u8; 15] && self.0[15] == 1)
}
}
diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs
index 87cf461b8f..bb7482f949 100644
--- a/stacks-common/src/util/pipe.rs
+++ b/stacks-common/src/util/pipe.rs
@@ -168,12 +168,12 @@ impl PipeRead {
}
}
- if disconnected && copied == 0 && self.buf.is_empty() {
+ if disconnected && copied == 0 {
// out of data, and will never get more
return Err(io::Error::from(io::ErrorKind::BrokenPipe));
}
- if blocked && copied == 0 && self.buf.is_empty() {
+ if blocked && copied == 0 {
return Err(io::Error::from(io::ErrorKind::WouldBlock));
}
@@ -187,6 +187,10 @@ impl PipeWrite {
}
fn write_or_buffer(&mut self, buf: &[u8]) -> io::Result<usize> {
+ if buf.len() == 0 {
+ return Ok(0);
+ }
+
// add buf to our internal buffer...
if self.buf.is_none() {
let data = buf.to_vec();
@@ -528,10 +532,37 @@ mod test {
assert_eq!(nr, segment.len());
assert_eq!(*segment, bytes);
+ // subsequent read fails with EWOULDBLOCK
+ let mut next_bytes = vec![0u8];
+ let res = pipe_read.read(&mut next_bytes).unwrap_err();
+ assert_eq!(res.kind(), io::ErrorKind::WouldBlock);
+
// flush should have succeeded
let res = pipe_write.try_flush().unwrap();
assert!(res);
}
+
+ // subsequent read fails with EWOULDBLOCK
+ let mut next_bytes = vec![0u8];
+ let res = pipe_read.read(&mut next_bytes).unwrap_err();
+ assert_eq!(res.kind(), io::ErrorKind::WouldBlock);
+
+ // once the write end is dropped, then this data is still consumable but we get broken-pipe
+ // once it's all been read.
+ let _ = pipe_write.write(&[1u8, 1u8]).unwrap();
+ drop(pipe_write);
+
+ let mut next_bytes = vec![0u8];
+ let res = pipe_read.read(&mut next_bytes).unwrap();
+ assert_eq!(res, 1);
+
+ let mut next_bytes = vec![0u8];
+ let res = pipe_read.read(&mut next_bytes).unwrap();
+ assert_eq!(res, 1);
+
+ let mut next_bytes = vec![0u8];
+ let res = pipe_read.read(&mut next_bytes).unwrap_err();
+ assert_eq!(res.kind(), io::ErrorKind::BrokenPipe);
}
#[test]
@@ -586,6 +617,11 @@ mod test {
assert_eq!(nr, segment.len() - 1);
assert_eq!(*segment, bytes);
+ // subsequent read fails with EWOULDBLOCK
+ let mut next_bytes = vec![0u8];
+ let res = pipe_read.read(&mut next_bytes).unwrap_err();
+ assert_eq!(res.kind(), io::ErrorKind::WouldBlock);
+
// flush should have succeeded
let res = pipe_write.try_flush().unwrap();
assert!(res);
diff --git a/stacks-signer/src/stacks_client.rs b/stacks-signer/src/stacks_client.rs
index 0621df4b09..729d77629c 100644
--- a/stacks-signer/src/stacks_client.rs
+++ b/stacks-signer/src/stacks_client.rs
@@ -359,8 +359,14 @@ impl StacksClient {
debug!("Calling read-only function {}...", function_name);
let args = function_args
.iter()
- .map(|arg| arg.serialize_to_hex())
+ .filter_map(|arg| arg.serialize_to_hex().ok())
.collect::<Vec<String>>();
+ if args.len() != function_args.len() {
+ return Err(ClientError::ReadOnlyFailure(
+ "Failed to serialize Clarity function arguments".into(),
+ ));
+ }
+
let body = json!({"sender": self.stacks_address.to_string(), "arguments": args}).to_string();
let path = self.read_only_path(contract_addr, contract_name, function_name);
diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs
index 42fbffa50f..1ff1a423d3 100644
--- a/stackslib/src/chainstate/coordinator/tests.rs
+++ b/stackslib/src/chainstate/coordinator/tests.rs
@@ -641,7 +641,7 @@ fn make_genesis_block_with_recipients(
.unwrap();
let block = builder.mine_anchored_block(&mut epoch_tx);
- builder.epoch_finish(epoch_tx);
+ builder.epoch_finish(epoch_tx).unwrap();
let commit_outs = if let Some(recipients) = recipients {
let mut commit_outs = recipients
@@ -908,7 +908,7 @@ fn make_stacks_block_with_input(
}
let block = builder.mine_anchored_block(&mut epoch_tx);
- builder.epoch_finish(epoch_tx);
+ builder.epoch_finish(epoch_tx).unwrap();
let commit_outs = if let Some(recipients) = recipients {
let mut commit_outs = recipients
@@ -2866,7 +2866,7 @@ fn test_pox_btc_ops() {
|conn| {
conn.with_clarity_db_readonly(|db| {
(
- db.get_account_stx_balance(&stacker.clone().into()),
+ db.get_account_stx_balance(&stacker.clone().into()).unwrap(),
db.get_current_block_height(),
)
})
@@ -2883,11 +2883,13 @@ fn test_pox_btc_ops() {
assert_eq!(stacker_balance.amount_locked(), stacked_amt);
} else {
assert_eq!(
- stacker_balance.get_available_balance_at_burn_block(
- burn_height as u64,
- pox_v1_unlock_ht,
- pox_v2_unlock_ht
- ),
+ stacker_balance
+ .get_available_balance_at_burn_block(
+ burn_height as u64,
+ pox_v1_unlock_ht,
+ pox_v2_unlock_ht
+ )
+ .unwrap(),
balance as u128,
"No lock should be active"
);
@@ -3163,7 +3165,7 @@ fn test_stx_transfer_btc_ops() {
|conn| {
conn.with_clarity_db_readonly(|db| {
(
- db.get_account_stx_balance(&stacker.clone().into()),
+ db.get_account_stx_balance(&stacker.clone().into()).unwrap(),
db.get_current_block_height(),
)
})
@@ -3178,7 +3180,8 @@ fn test_stx_transfer_btc_ops() {
|conn| {
conn.with_clarity_db_readonly(|db| {
(
- db.get_account_stx_balance(&recipient.clone().into()),
+
db.get_account_stx_balance(&recipient.clone().into()) + .unwrap(), db.get_current_block_height(), ) }) @@ -3188,38 +3191,46 @@ fn test_stx_transfer_btc_ops() { if ix > 2 { assert_eq!( - sender_balance.get_available_balance_at_burn_block( - burn_height as u64, - pox_v1_unlock_ht, - pox_v2_unlock_ht - ), + sender_balance + .get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ) + .unwrap(), (balance as u128) - transfer_amt, "Transfer should have decremented balance" ); assert_eq!( - recipient_balance.get_available_balance_at_burn_block( - burn_height as u64, - pox_v1_unlock_ht, - pox_v2_unlock_ht - ), + recipient_balance + .get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ) + .unwrap(), transfer_amt, "Recipient should have incremented balance" ); } else { assert_eq!( - sender_balance.get_available_balance_at_burn_block( - burn_height as u64, - pox_v1_unlock_ht, - pox_v2_unlock_ht - ), + sender_balance + .get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ) + .unwrap(), balance as u128, ); assert_eq!( - recipient_balance.get_available_balance_at_burn_block( - burn_height as u64, - pox_v1_unlock_ht, - pox_v2_unlock_ht - ), + recipient_balance + .get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ) + .unwrap(), 0, ); } @@ -3334,14 +3345,24 @@ fn get_delegation_info_pox_2( .unwrap() }) .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); match result { None => None, Some(tuple) => { - let data = tuple.expect_tuple().data_map; - let delegated_amt = data.get("amount-ustx").cloned().unwrap().expect_u128(); - let reward_addr_opt = if let Some(reward_addr) = - data.get("pox-addr").cloned().unwrap().expect_optional() + let data = tuple.expect_tuple().unwrap().data_map; + let delegated_amt = data + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128() + .unwrap(); + let reward_addr_opt = if let Some(reward_addr) = data + .get("pox-addr") + .cloned() + .unwrap() + .expect_optional() + .unwrap() { Some(PoxAddress::try_from_pox_tuple(false, &reward_addr).unwrap()) } else { @@ -4814,7 +4835,7 @@ fn get_total_stacked_info( reward_cycle ); - let result = env.eval_raw(&eval_str).map(|v| v.expect_u128()); + let result = env.eval_raw(&eval_str).map(|v| v.expect_u128().unwrap()); Ok(result) }, ) diff --git a/stackslib/src/chainstate/stacks/address.rs b/stackslib/src/chainstate/stacks/address.rs index d87d164da5..151dd037cc 100644 --- a/stackslib/src/chainstate/stacks/address.rs +++ b/stackslib/src/chainstate/stacks/address.rs @@ -926,7 +926,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x00, vec![0x01; 20]).expect_tuple() + make_pox_addr_raw(0x00, vec![0x01; 20]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Standard( @@ -938,7 +940,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x00, vec![0x02; 20]).expect_tuple() + make_pox_addr_raw(0x00, vec![0x02; 20]) + .expect_tuple() + .unwrap() ); assert!(PoxAddress::Standard( StacksAddress { @@ -969,7 +973,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x01, vec![0x01; 20]).expect_tuple() + make_pox_addr_raw(0x01, vec![0x01; 20]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Standard( @@ -981,7 +987,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x01, vec![0x02; 20]).expect_tuple() + make_pox_addr_raw(0x01, vec![0x02; 20]) + .expect_tuple() + 
.unwrap() ); assert!(PoxAddress::Standard( StacksAddress { @@ -1012,7 +1020,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x02, vec![0x01; 20]).expect_tuple() + make_pox_addr_raw(0x02, vec![0x01; 20]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Standard( @@ -1024,7 +1034,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x02, vec![0x02; 20]).expect_tuple() + make_pox_addr_raw(0x02, vec![0x02; 20]) + .expect_tuple() + .unwrap() ); assert!(PoxAddress::Standard( StacksAddress { @@ -1055,7 +1067,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x03, vec![0x01; 20]).expect_tuple() + make_pox_addr_raw(0x03, vec![0x01; 20]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Standard( @@ -1067,7 +1081,9 @@ mod test { ) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x03, vec![0x02; 20]).expect_tuple() + make_pox_addr_raw(0x03, vec![0x02; 20]) + .expect_tuple() + .unwrap() ); assert!(PoxAddress::Standard( StacksAddress { @@ -1092,39 +1108,51 @@ mod test { PoxAddress::Addr20(true, PoxAddressType20::P2WPKH, [0x09; 20]) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x04, vec![0x09; 20]).expect_tuple() + make_pox_addr_raw(0x04, vec![0x09; 20]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Addr20(false, PoxAddressType20::P2WPKH, [0x09; 20]) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x04, vec![0x09; 20]).expect_tuple() + make_pox_addr_raw(0x04, vec![0x09; 20]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Addr32(true, PoxAddressType32::P2WSH, [0x09; 32]) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x05, vec![0x09; 32]).expect_tuple() + make_pox_addr_raw(0x05, vec![0x09; 32]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Addr32(false, PoxAddressType32::P2WSH, [0x09; 32]) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x05, vec![0x09; 32]).expect_tuple() + make_pox_addr_raw(0x05, vec![0x09; 32]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Addr32(true, PoxAddressType32::P2TR, [0x09; 32]) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x06, vec![0x09; 32]).expect_tuple() + make_pox_addr_raw(0x06, vec![0x09; 32]) + .expect_tuple() + .unwrap() ); assert_eq!( PoxAddress::Addr32(false, PoxAddressType32::P2TR, [0x09; 32]) .as_clarity_tuple() .unwrap(), - make_pox_addr_raw(0x06, vec![0x09; 32]).expect_tuple() + make_pox_addr_raw(0x06, vec![0x09; 32]) + .expect_tuple() + .unwrap() ); } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 0c36912ec6..e2ee1736e1 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -212,7 +212,7 @@ impl ClarityTestSim { ) -> StacksEpochId { let mut clarity_db = store.as_clarity_db(headers_db, burn_db); clarity_db.begin(); - let parent_epoch = clarity_db.get_clarity_epoch_version(); + let parent_epoch = clarity_db.get_clarity_epoch_version().unwrap(); let sortition_epoch = clarity_db .get_stacks_epoch(headers_db.height as u32) .unwrap() @@ -220,10 +220,12 @@ impl ClarityTestSim { if parent_epoch != sortition_epoch { debug!("Set epoch to {}", &sortition_epoch); - clarity_db.set_clarity_epoch_version(sortition_epoch); + clarity_db + .set_clarity_epoch_version(sortition_epoch) + .unwrap(); } - clarity_db.commit(); + clarity_db.commit().unwrap(); sortition_epoch } @@ -708,7 +710,7 @@ fn pox_2_contract_caller_units() { "After revocation, stack-through still shouldn't be an 
allowed caller for User 1 in the PoX2 contract",
);
- let until_height = Value::UInt(burn_height.clone().expect_u128() + 1);
+ let until_height = Value::UInt(burn_height.clone().expect_u128().unwrap() + 1);
assert_eq!(
env.execute_transaction(
diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs
index 2dbeecd563..f5b78fa72f 100644
--- a/stackslib/src/chainstate/stacks/boot/mod.rs
+++ b/stackslib/src/chainstate/stacks/boot/mod.rs
@@ -196,15 +196,22 @@ impl StacksChainState {
/// Stacks fork in the opened `clarity_db`.
pub fn handled_pox_cycle_start(clarity_db: &mut ClarityDatabase, cycle_number: u64) -> bool {
let db_key = Self::handled_pox_cycle_start_key(cycle_number);
- match clarity_db.get::<String>(&db_key) {
+ match clarity_db
+ .get::<String>(&db_key)
+ .expect("FATAL: DB error when checking PoX cycle start")
+ {
Some(x) => x == POX_CYCLE_START_HANDLED_VALUE,
None => false,
}
}
- fn mark_pox_cycle_handled(db: &mut ClarityDatabase, cycle_number: u64) {
+ fn mark_pox_cycle_handled(
+ db: &mut ClarityDatabase,
+ cycle_number: u64,
+ ) -> Result<(), clarity::vm::errors::Error> {
let db_key = Self::handled_pox_cycle_start_key(cycle_number);
- db.put(&db_key, &POX_CYCLE_START_HANDLED_VALUE.to_string());
+ db.put(&db_key, &POX_CYCLE_START_HANDLED_VALUE.to_string())?;
+ Ok(())
}
/// Get the stacking state for a user, before deleting it as part of an unlock
@@ -239,7 +246,9 @@ impl StacksChainState {
})
.expect("FATAL: failed to query unlocked principal");
- user_stacking_state.expect_tuple()
+ user_stacking_state
+ .expect_tuple()
+ .expect("FATAL: unexpected PoX structure")
}
/// Synthesize the handle-unlock print event. This is done here, instead of pox-2, so we can
@@ -345,7 +354,7 @@ impl StacksChainState {
cycle_info: Option<PoxStartCycleInfo>,
pox_contract_name: &str,
) -> Result<Vec<StacksTransactionEvent>, Error> {
- clarity.with_clarity_db(|db| Ok(Self::mark_pox_cycle_handled(db, cycle_number)))?;
+ clarity.with_clarity_db(|db| Ok(Self::mark_pox_cycle_handled(db, cycle_number)))??;
debug!(
"Handling PoX reward cycle start";
@@ -374,13 +383,14 @@ impl StacksChainState {
// 4. delete the user's stacking-state entry.
clarity.with_clarity_db(|db| {
// lookup the Stacks account and alter their unlock height to next block
- let mut balance = db.get_stx_balance_snapshot(&principal);
- if balance.canonical_balance_repr().amount_locked() < *amount_locked {
- panic!("Principal missed reward slots, but did not have as many locked tokens as expected. Actual: {}, Expected: {}", balance.canonical_balance_repr().amount_locked(), *amount_locked);
+ let mut balance = db.get_stx_balance_snapshot(&principal)?;
+ let canonical_locked = balance.canonical_balance_repr()?.amount_locked();
+ if canonical_locked < *amount_locked {
+ panic!("Principal missed reward slots, but did not have as many locked tokens as expected.
Actual: {}, Expected: {}", canonical_locked, *amount_locked); } - balance.accelerate_unlock(); - balance.save(); + balance.accelerate_unlock()?; + balance.save()?; Ok(()) }).expect("FATAL: failed to accelerate PoX unlock"); @@ -411,7 +421,9 @@ impl StacksChainState { .expect("FATAL: failed to handle PoX unlock"); // this must be infallible - result.expect_result_ok(); + result + .expect_result_ok() + .expect("FATAL: unexpected PoX structure"); // extract metadata about the unlock let event_info = @@ -454,9 +466,11 @@ impl StacksChainState { &NULL_HEADER_DB, &NULL_BURN_STATE_DB, ); - connection.with_clarity_db_readonly_owned(|mut clarity_db| { - (clarity_db.get_total_liquid_ustx(), clarity_db) - }) + connection + .with_clarity_db_readonly_owned(|mut clarity_db| { + (clarity_db.get_total_liquid_ustx(), clarity_db) + }) + .expect("FATAL: failed to get total liquid ustx") } /// Determine the minimum amount of STX per reward address required to stack in the _next_ @@ -473,7 +487,11 @@ impl StacksChainState { "pox", &format!("(get-stacking-minimum)"), ) - .map(|value| value.expect_u128()) + .map(|value| { + value + .expect_u128() + .expect("FATAL: unexpected PoX structure") + }) } pub fn get_total_ustx_stacked( @@ -509,7 +527,8 @@ impl StacksChainState { ) })? .ok_or_else(|| Error::NoSuchBlockError)?? - .expect_u128(); + .expect_u128() + .expect("FATAL: unexpected PoX structure"); Ok(result) } @@ -527,7 +546,11 @@ impl StacksChainState { "pox", &format!("(get-total-ustx-stacked u{})", reward_cycle), ) - .map(|value| value.expect_u128()) + .map(|value| { + value + .expect_u128() + .expect("FATAL: unexpected PoX structure") + }) } /// Is PoX active in the given reward cycle? @@ -544,7 +567,11 @@ impl StacksChainState { pox_contract, &format!("(is-pox-active u{})", reward_cycle), ) - .map(|value| value.expect_bool()) + .map(|value| { + value + .expect_bool() + .expect("FATAL: unexpected PoX structure") + }) } /// Given a threshold and set of registered addresses, return a reward set where @@ -721,7 +748,8 @@ impl StacksChainState { POX_1_NAME, &format!("(get-reward-set-size u{})", reward_cycle), )? - .expect_u128(); + .expect_u128() + .expect("FATAL: unexpected PoX structure"); debug!( "At block {:?} (reward cycle {}): {} PoX reward addresses", @@ -740,11 +768,13 @@ impl StacksChainState { &format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i), )? .expect_optional() + .expect("FATAL: unexpected PoX structure") .expect(&format!( "FATAL: missing PoX address in slot {} out of {} in reward cycle {}", i, num_addrs, reward_cycle )) - .expect_tuple(); + .expect_tuple() + .expect("FATAL: unexpected PoX structure"); let pox_addr_tuple = tuple_data .get("pox-addr") @@ -761,7 +791,8 @@ impl StacksChainState { .get("total-ustx") .expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) .to_owned() - .expect_u128(); + .expect_u128() .expect("FATAL: unexpected PoX structure") +; debug!( "PoX reward address (for {} ustx): {}", @@ -799,7 +830,8 @@ impl StacksChainState { POX_2_NAME, &format!("(get-reward-set-size u{})", reward_cycle), )? - .expect_u128(); + .expect_u128() + .expect("FATAL: unexpected PoX structure"); debug!( "At block {:?} (reward cycle {}): {} PoX reward addresses", @@ -817,11 +849,13 @@ impl StacksChainState { &format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i), )? 
.expect_optional()
+ .expect("FATAL: unexpected PoX structure")
.expect(&format!(
"FATAL: missing PoX address in slot {} out of {} in reward cycle {}",
i, num_addrs, reward_cycle
))
- .expect_tuple();
+ .expect_tuple()
+ .expect("FATAL: unexpected PoX structure");
let pox_addr_tuple = tuple
.get("pox-addr")
@@ -838,7 +872,8 @@ impl StacksChainState {
.get("total-ustx")
.expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i))
.to_owned()
- .expect_u128();
+ .expect_u128()
+ .expect("FATAL: unexpected PoX structure");
let stacker = tuple
.get("stacker")
@@ -848,7 +883,12 @@ impl StacksChainState {
))
.to_owned()
.expect_optional()
- .map(|value| value.expect_principal());
+ .expect("FATAL: unexpected PoX structure")
+ .map(|value| {
+ value
+ .expect_principal()
+ .expect("FATAL: unexpected PoX structure")
+ });
debug!(
"Parsed PoX reward address";
@@ -888,7 +928,8 @@ impl StacksChainState {
POX_3_NAME,
&format!("(get-reward-set-size u{})", reward_cycle),
)?
- .expect_u128();
+ .expect_u128()
+ .expect("FATAL: unexpected PoX structure");
debug!(
"At block {:?} (reward cycle {}): {} PoX reward addresses",
@@ -906,11 +947,13 @@ impl StacksChainState {
&format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i),
)?
.expect_optional()
+ .expect("FATAL: unexpected PoX structure")
.expect(&format!(
"FATAL: missing PoX address in slot {} out of {} in reward cycle {}",
i, num_addrs, reward_cycle
))
- .expect_tuple();
+ .expect_tuple()
+ .expect("FATAL: unexpected PoX structure");
let pox_addr_tuple = tuple
.get("pox-addr")
@@ -927,7 +970,8 @@ impl StacksChainState {
.get("total-ustx")
.expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i))
.to_owned()
- .expect_u128();
+ .expect_u128()
+ .expect("FATAL: unexpected PoX structure");
let stacker = tuple
.get("stacker")
@@ -937,7 +981,12 @@ impl StacksChainState {
))
.to_owned()
.expect_optional()
- .map(|value| value.expect_principal());
+ .expect("FATAL: unexpected PoX structure")
+ .map(|value| {
+ value
+ .expect_principal()
+ .expect("FATAL: unexpected PoX structure")
+ });
debug!(
"Parsed PoX reward address";
@@ -1357,22 +1406,39 @@ pub mod test {
"pox",
&format!("(get-stacker-info '{})", addr.to_string()),
);
- let data = if let Some(d) = value_opt.expect_optional() {
+ let data = if let Some(d) = value_opt.expect_optional().unwrap() {
d
} else {
return None;
};
- let data = data.expect_tuple();
+ let data = data.expect_tuple().unwrap();
- let amount_ustx = data.get("amount-ustx").unwrap().to_owned().expect_u128();
- let pox_addr = tuple_to_pox_addr(data.get("pox-addr").unwrap().to_owned().expect_tuple());
- let lock_period = data.get("lock-period").unwrap().to_owned().expect_u128();
+ let amount_ustx = data
+ .get("amount-ustx")
+ .unwrap()
+ .to_owned()
+ .expect_u128()
+ .unwrap();
+ let pox_addr = tuple_to_pox_addr(
+ data.get("pox-addr")
+ .unwrap()
+ .to_owned()
+ .expect_tuple()
+ .unwrap(),
+ );
+ let lock_period = data
+ .get("lock-period")
+ .unwrap()
+ .to_owned()
+ .expect_u128()
+ .unwrap();
let first_reward_cycle = data
.get("first-reward-cycle")
.unwrap()
.to_owned()
- .expect_u128();
+ .expect_u128()
+ .unwrap();
Some((amount_ustx, pox_addr, lock_period, first_reward_cycle))
}
@@ -3402,9 +3468,9 @@ pub mod test {
"(var-get test-run)",
);
- assert!(alice_test_result.expect_bool());
- assert!(bob_test_result.expect_bool());
- assert!(charlie_test_result.expect_bool());
+
assert!(alice_test_result.expect_bool().unwrap()); + assert!(bob_test_result.expect_bool().unwrap()); + assert!(charlie_test_result.expect_bool().unwrap()); let alice_test_result = eval_contract_at_tip( &mut peer, @@ -4836,7 +4902,8 @@ pub mod test { "charlie-try-stack", "(var-get test-passed)", ) - .expect_bool(); + .expect_bool() + .unwrap(); assert!(result, "charlie-try-stack test should be `true`"); let result = eval_contract_at_tip( &mut peer, @@ -4844,7 +4911,8 @@ pub mod test { "charlie-try-reject", "(var-get test-passed)", ) - .expect_bool(); + .expect_bool() + .unwrap(); assert!(result, "charlie-try-reject test should be `true`"); let result = eval_contract_at_tip( &mut peer, @@ -4852,7 +4920,8 @@ pub mod test { "alice-try-reject", "(var-get test-passed)", ) - .expect_bool(); + .expect_bool() + .unwrap(); assert!(result, "alice-try-reject test should be `true`"); } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 54bdb226e2..5ac29b7a62 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -95,7 +95,9 @@ pub fn get_stx_account_at( ) -> STXBalance { with_clarity_db_ro(peer, tip, |db| { db.get_stx_balance_snapshot(account) + .unwrap() .canonical_balance_repr() + .unwrap() }) } @@ -110,7 +112,7 @@ pub fn get_stacking_state_pox( let lookup_tuple = Value::Tuple( TupleData::from_data(vec![("stacker".into(), account.clone().into())]).unwrap(), ); - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.fetch_entry_unknown_descriptor( &boot_code_id(pox_contract, false), "stacking-state", @@ -119,6 +121,7 @@ pub fn get_stacking_state_pox( ) .unwrap() .expect_optional() + .unwrap() }) } @@ -139,7 +142,7 @@ pub fn check_all_stacker_link_invariants( max_cycle_number: u64, ) { // if PoX-2 hasn't published yet, just return. 
- let epoch = with_clarity_db_ro(peer, tip, |db| db.get_clarity_epoch_version()); + let epoch = with_clarity_db_ro(peer, tip, |db| db.get_clarity_epoch_version()).unwrap(); if epoch < StacksEpochId::Epoch21 { eprintln!("Skipping invariant checks when PoX-2 has not published yet"); return; @@ -203,7 +206,14 @@ pub fn check_pox_print_event( data ); assert_eq!(data.key.1, "print"); - let outer_tuple = data.value.clone().expect_result().unwrap().expect_tuple(); + let outer_tuple = data + .value + .clone() + .expect_result() + .unwrap() + .unwrap() + .expect_tuple() + .unwrap(); test_debug!( "Check name: {:?} =?= {:?}", &outer_tuple @@ -211,7 +221,8 @@ pub fn check_pox_print_event( .get("name") .unwrap() .clone() - .expect_ascii(), + .expect_ascii() + .unwrap(), common_data.op_name ); assert_eq!( @@ -220,7 +231,8 @@ pub fn check_pox_print_event( .get("name") .unwrap() .clone() - .expect_ascii(), + .expect_ascii() + .unwrap(), common_data.op_name ); assert_eq!( @@ -244,7 +256,7 @@ pub fn check_pox_print_event( .data_map .get("data") .expect("The event tuple should have a field named `data`"); - let inner_tuple = args.clone().expect_tuple(); + let inner_tuple = args.clone().expect_tuple().unwrap(); test_debug!("Check for ops {:?}", &op_data); test_debug!("Inner tuple is {:?}", &inner_tuple); @@ -297,7 +309,9 @@ pub fn check_stacking_state_invariants( ) -> StackingStateCheckData { let account_state = with_clarity_db_ro(peer, tip, |db| { db.get_stx_balance_snapshot(stacker) + .unwrap() .canonical_balance_repr() + .unwrap() }); let tip_burn_height = StacksChainState::get_stacks_block_header_info_by_index_block_hash( @@ -313,17 +327,19 @@ pub fn check_stacking_state_invariants( "Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state (pox_contract = {})", active_pox_contract, )) - .expect_tuple(); + .expect_tuple().unwrap(); let first_cycle = stacking_state_entry .get("first-reward-cycle") .unwrap() .clone() - .expect_u128(); + .expect_u128() + .unwrap(); let lock_period = stacking_state_entry .get("lock-period") .unwrap() .clone() - .expect_u128(); + .expect_u128() + .unwrap(); let pox_addr = stacking_state_entry.get("pox-addr").unwrap(); let pox_addr = PoxAddress::try_from_pox_tuple(false, pox_addr).unwrap(); @@ -331,8 +347,9 @@ pub fn check_stacking_state_invariants( .get_owned("reward-set-indexes") .unwrap() .expect_list() + .unwrap() .into_iter() - .map(|x| x.expect_u128()) + .map(|x| x.expect_u128().unwrap()) .collect(); let stacking_state_unlock_ht = peer @@ -369,7 +386,7 @@ pub fn check_stacking_state_invariants( .unwrap(), ); let entry_value = with_clarity_db_ro(peer, tip, |db| { - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.fetch_entry_unknown_descriptor( &boot_code_id(active_pox_contract, false), "reward-cycle-pox-address-list", @@ -377,17 +394,17 @@ pub fn check_stacking_state_invariants( &epoch, ) .unwrap() - .expect_optional() + .expect_optional().unwrap() .expect("Invariant violated: stacking-state.reward-set-indexes pointed at a non-existent entry") - .expect_tuple() + .expect_tuple().unwrap() }); let entry_stacker = entry_value.get("stacker") .unwrap() .clone() - .expect_optional() + .expect_optional().unwrap() .expect("Invariant violated: stacking-state.reward-set-indexes pointed at an entry without a stacker set") - .expect_principal(); + .expect_principal().unwrap(); assert_eq!( &entry_stacker, stacker, @@ -553,7 +570,7 @@ pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: 
&StacksBlockId, cycle_nu )]) .unwrap() .into(); - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.fetch_entry_unknown_descriptor( &boot_code_id(active_pox_contract, false), "reward-cycle-total-stacked", @@ -562,11 +579,14 @@ pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_nu ) .map(|v| { v.expect_optional() + .unwrap() .map(|v| { v.expect_tuple() + .unwrap() .get_owned("total-ustx") .expect("Malformed tuple returned by PoX contract") .expect_u128() + .unwrap() }) // if no entry yet, return 0 .unwrap_or(0) @@ -593,7 +613,7 @@ pub fn get_partial_stacked( ]) .unwrap() .into(); - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.fetch_entry_unknown_descriptor( &boot_code_id(pox_contract, false), "partial-stacked-by-cycle", @@ -602,13 +622,16 @@ pub fn get_partial_stacked( ) .map(|v| { v.expect_optional() + .unwrap() .expect("Expected fetch_entry to return a value") }) .unwrap() .expect_tuple() + .unwrap() .get_owned("stacked-amount") .expect("Malformed tuple returned by PoX contract") .expect_u128() + .unwrap() }) } @@ -1226,7 +1249,8 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, - ); + ) + .unwrap(); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); while get_tip(peer.sortdb.as_ref()).block_height < height_target { @@ -1255,7 +1279,8 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, - ); + ) + .unwrap(); assert_eq!(bob_bal.amount_locked(), 0); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block @@ -1268,7 +1293,8 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, - ); + ) + .unwrap(); assert_eq!(bob_bal.amount_locked(), 0); // check that the total reward cycle amounts have decremented correctly @@ -1296,7 +1322,8 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &key_to_stacks_addr(&alice).to_account_principal(), ) .expect("Alice should have stacking-state entry") - .expect_tuple(); + .expect_tuple() + .unwrap(); let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); @@ -1838,7 +1865,9 @@ fn stack_increase() { ); assert_eq!( - get_stx_account_at(&mut peer, &latest_block, &alice_principal).get_total_balance(), + get_stx_account_at(&mut peer, &latest_block, &alice_principal) + .get_total_balance() + .unwrap(), total_balance, ); @@ -1897,7 +1926,9 @@ fn stack_increase() { ); assert_eq!( - get_stx_account_at(&mut peer, &latest_block, &alice_principal).get_total_balance(), + get_stx_account_at(&mut peer, &latest_block, &alice_principal) + .get_total_balance() + .unwrap(), total_balance, ); @@ -3541,13 +3572,14 @@ fn test_pox_2_getters() { )); eprintln!("{}", &result); - let data = result.expect_tuple().data_map; + let data = result.expect_tuple().unwrap().data_map; let alice_delegation_info = data .get("get-delegation-info-alice") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert!(alice_delegation_info.is_none()); let bob_delegation_info = data @@ -3556,23 +3588,28 @@ fn test_pox_2_getters() { .unwrap() .expect_optional() .unwrap() + .unwrap() 
.expect_tuple() + .unwrap() .data_map; let bob_delegation_addr = bob_delegation_info .get("delegated-to") .cloned() .unwrap() - .expect_principal(); + .expect_principal() + .unwrap(); let bob_delegation_amt = bob_delegation_info .get("amount-ustx") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); let bob_pox_addr_opt = bob_delegation_info .get("pox-addr") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); assert!(bob_pox_addr_opt.is_none()); @@ -3581,27 +3618,30 @@ fn test_pox_2_getters() { .get("get-allowance-contract-callers") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert!(allowance.is_none()); let current_num_reward_addrs = data .get("get-num-reward-set-pox-addresses-current") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(current_num_reward_addrs, 2); let future_num_reward_addrs = data .get("get-num-reward-set-pox-addresses-future") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(future_num_reward_addrs, 0); for i in 0..3 { let key = ClarityName::try_from(format!("get-partial-stacked-by-cycle-bob-{}", &i)).unwrap(); - let partial_stacked = data.get(&key).cloned().unwrap().expect_optional(); + let partial_stacked = data.get(&key).cloned().unwrap().expect_optional().unwrap(); assert!(partial_stacked.is_none()); } let partial_stacked = data @@ -3610,33 +3650,39 @@ fn test_pox_2_getters() { .unwrap() .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .data_map .get("stacked-amount") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(partial_stacked, LOCKUP_AMT as u128); let rejected = data .get("get-total-pox-rejection-now") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(rejected, 0); let rejected = data .get("get-total-pox-rejection-next") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(rejected, LOCKUP_AMT as u128); let rejected = data .get("get-total-pox-rejection-future") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(rejected, 0); } @@ -3859,8 +3905,10 @@ fn test_get_pox_addrs() { }) .unwrap() .expect_optional() + .unwrap() .expect("FATAL: expected list") - .expect_tuple(); + .expect_tuple() + .unwrap(); eprintln!( "At block height {}: {:?}", @@ -3872,13 +3920,15 @@ fn test_get_pox_addrs() { .get("addrs") .unwrap() .to_owned() - .expect_list(); + .expect_list() + .unwrap(); let payout = addrs_and_payout .get("payout") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); // there's always some burnchain tokens spent. assert!(payout > 0); @@ -4152,8 +4202,10 @@ fn test_stack_with_segwit() { }) .unwrap() .expect_optional() + .unwrap() .expect("FATAL: expected list") - .expect_tuple(); + .expect_tuple() + .unwrap(); eprintln!( "At block height {}: {:?}", @@ -4165,13 +4217,15 @@ fn test_stack_with_segwit() { .get("addrs") .unwrap() .to_owned() - .expect_list(); + .expect_list() + .unwrap(); let payout = addrs_and_payout .get("payout") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); // there's always some burnchain tokens spent. 
assert!(payout > 0); @@ -4343,14 +4397,15 @@ fn test_pox_2_delegate_stx_addr_validation() { ); eprintln!("{}", &result); - let data = result.expect_tuple().data_map; + let data = result.expect_tuple().unwrap().data_map; // bob had an invalid PoX address let bob_delegation_info = data .get("get-delegation-info-bob") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert!(bob_delegation_info.is_none()); // alice was valid @@ -4360,23 +4415,28 @@ fn test_pox_2_delegate_stx_addr_validation() { .unwrap() .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .data_map; let alice_delegation_addr = alice_delegation_info .get("delegated-to") .cloned() .unwrap() - .expect_principal(); + .expect_principal() + .unwrap(); let alice_delegation_amt = alice_delegation_info .get("amount-ustx") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); let alice_pox_addr_opt = alice_delegation_info .get("pox-addr") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert_eq!( alice_delegation_addr, charlie_address.to_account_principal() @@ -4993,14 +5053,33 @@ fn stack_in_both_pox1_and_pox2() { } // alice's and bob's second transactions both failed with runtime errors - alice_txs.get(&0).unwrap().result.clone().expect_result_ok(); + alice_txs + .get(&0) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); alice_txs .get(&1) .unwrap() .result .clone() - .expect_result_err(); + .expect_result_err() + .unwrap(); - bob_txs.get(&0).unwrap().result.clone().expect_result_ok(); - bob_txs.get(&1).unwrap().result.clone().expect_result_err(); + bob_txs + .get(&0) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + bob_txs + .get(&1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); } diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 4baf4b74e4..92497a2046 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -739,7 +739,8 @@ fn pox_auto_unlock(alice_first: bool) { &key_to_stacks_addr(&alice).to_account_principal(), ) .expect("Alice should have stacking-state entry") - .expect_tuple(); + .expect_tuple() + .unwrap(); let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); @@ -878,7 +879,8 @@ fn pox_auto_unlock(alice_first: bool) { POX_3_NAME, ) .expect("Alice should have stacking-state entry") - .expect_tuple(); + .expect_tuple() + .unwrap(); let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); @@ -1684,7 +1686,7 @@ fn stack_increase() { let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); assert_eq!(alice_bal.amount_locked(), first_lockup_amt); assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); - assert_eq!(alice_bal.get_total_balance(), total_balance,); + assert_eq!(alice_bal.get_total_balance().unwrap(), total_balance,); // check that the "raw" reward set will contain entries for alice at the cycle start for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { @@ -1738,7 +1740,7 @@ fn stack_increase() { let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); assert_eq!(alice_bal.amount_locked(), first_lockup_amt + increase_amt,); assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); - 
assert_eq!(alice_bal.get_total_balance(), total_balance,); + assert_eq!(alice_bal.get_total_balance().unwrap(), total_balance,); // check that the total reward cycle amounts have incremented correctly for cycle_number in first_v2_cycle..(first_v2_cycle + 2) { @@ -1850,7 +1852,7 @@ fn stack_increase() { let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); assert_eq!(alice_bal.amount_locked(), first_lockup_amt); assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); - assert_eq!(alice_bal.get_total_balance(), total_balance,); + assert_eq!(alice_bal.get_total_balance().unwrap(), total_balance,); // check that the "raw" reward set will contain entries for alice at the cycle start for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { @@ -1914,7 +1916,7 @@ fn stack_increase() { let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); assert_eq!(alice_bal.amount_locked(), first_lockup_amt + increase_amt,); assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); - assert_eq!(alice_bal.get_total_balance(), total_balance,); + assert_eq!(alice_bal.get_total_balance().unwrap(), total_balance,); // check that the total reward cycle amounts have incremented correctly for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { @@ -3216,13 +3218,14 @@ fn pox_3_getters() { )); eprintln!("{}", &result); - let data = result.expect_tuple().data_map; + let data = result.expect_tuple().unwrap().data_map; let alice_delegation_info = data .get("get-delegation-info-alice") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert!(alice_delegation_info.is_none()); let bob_delegation_info = data @@ -3231,23 +3234,28 @@ fn pox_3_getters() { .unwrap() .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .data_map; let bob_delegation_addr = bob_delegation_info .get("delegated-to") .cloned() .unwrap() - .expect_principal(); + .expect_principal() + .unwrap(); let bob_delegation_amt = bob_delegation_info .get("amount-ustx") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); let bob_pox_addr_opt = bob_delegation_info .get("pox-addr") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); assert!(bob_pox_addr_opt.is_none()); @@ -3256,27 +3264,30 @@ fn pox_3_getters() { .get("get-allowance-contract-callers") .cloned() .unwrap() - .expect_optional(); + .expect_optional() + .unwrap(); assert!(allowance.is_none()); let current_num_reward_addrs = data .get("get-num-reward-set-pox-addresses-current") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(current_num_reward_addrs, 2); let future_num_reward_addrs = data .get("get-num-reward-set-pox-addresses-future") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(future_num_reward_addrs, 0); for i in 0..3 { let key = ClarityName::try_from(format!("get-partial-stacked-by-cycle-bob-{}", &i)).unwrap(); - let partial_stacked = data.get(&key).cloned().unwrap().expect_optional(); + let partial_stacked = data.get(&key).cloned().unwrap().expect_optional().unwrap(); assert!(partial_stacked.is_none()); } let partial_stacked = data @@ -3285,33 +3296,39 @@ fn pox_3_getters() { .unwrap() .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .data_map .get("stacked-amount") .cloned() .unwrap() - .expect_u128(); + .expect_u128() + .unwrap(); 
assert_eq!(partial_stacked, LOCKUP_AMT as u128);
let rejected = data
.get("get-total-pox-rejection-now")
.cloned()
.unwrap()
- .expect_u128();
+ .expect_u128()
+ .unwrap();
assert_eq!(rejected, LOCKUP_AMT as u128);
let rejected = data
.get("get-total-pox-rejection-next")
.cloned()
.unwrap()
- .expect_u128();
+ .expect_u128()
+ .unwrap();
assert_eq!(rejected, 0);
let rejected = data
.get("get-total-pox-rejection-future")
.cloned()
.unwrap()
- .expect_u128();
+ .expect_u128()
+ .unwrap();
assert_eq!(rejected, 0);
}
@@ -3344,14 +3361,17 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) {
})
.unwrap()
.expect_optional()
+ .unwrap()
.expect("FATAL: expected list")
- .expect_tuple();
+ .expect_tuple()
+ .unwrap();
let addrs = addrs_and_payout
.get("addrs")
.unwrap()
.to_owned()
.expect_list()
+ .unwrap()
.into_iter()
.map(|tuple| PoxAddress::try_from_pox_tuple(false, &tuple).unwrap())
.collect();
@@ -3360,7 +3380,8 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) {
.get("payout")
.unwrap()
.to_owned()
- .expect_u128();
+ .expect_u128()
+ .unwrap();
(addrs, payout)
}
@@ -4350,14 +4371,15 @@ fn pox_3_delegate_stx_addr_validation() {
);
eprintln!("{}", &result);
- let data = result.expect_tuple().data_map;
+ let data = result.expect_tuple().unwrap().data_map;
// bob had an invalid PoX address
let bob_delegation_info = data
.get("get-delegation-info-bob")
.cloned()
.unwrap()
- .expect_optional();
+ .expect_optional()
+ .unwrap();
assert!(bob_delegation_info.is_none());
// alice was valid
@@ -4367,23 +4389,28 @@ fn pox_3_delegate_stx_addr_validation() {
.unwrap()
.expect_optional()
.unwrap()
+ .unwrap()
.expect_tuple()
+ .unwrap()
.data_map;
let alice_delegation_addr = alice_delegation_info
.get("delegated-to")
.cloned()
.unwrap()
- .expect_principal();
+ .expect_principal()
+ .unwrap();
let alice_delegation_amt = alice_delegation_info
.get("amount-ustx")
.cloned()
.unwrap()
- .expect_u128();
+ .expect_u128()
+ .unwrap();
let alice_pox_addr_opt = alice_delegation_info
.get("pox-addr")
.cloned()
.unwrap()
- .expect_optional();
+ .expect_optional()
+ .unwrap();
assert_eq!(
alice_delegation_addr,
charlie_address.to_account_principal()
diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs
index 4e499c4a09..c95d8e715b 100644
--- a/stackslib/src/chainstate/stacks/db/accounts.rs
+++ b/stackslib/src/chainstate/stacks/db/accounts.rs
@@ -258,19 +258,25 @@ impl StacksChainState {
clarity_tx: &mut T,
principal: &PrincipalData,
) -> StacksAccount {
- clarity_tx.with_clarity_db_readonly(|ref mut db| {
- let stx_balance = db.get_account_stx_balance(principal);
- let nonce = db.get_account_nonce(principal);
- StacksAccount {
- principal: principal.clone(),
- stx_balance,
- nonce,
- }
- })
+ clarity_tx
+ .with_clarity_db_readonly(|ref mut db| {
+ let stx_balance = db.get_account_stx_balance(principal)?;
+ let nonce = db.get_account_nonce(principal)?;
+ Ok(StacksAccount {
+ principal: principal.clone(),
+ stx_balance,
+ nonce,
+ })
+ })
+ .map_err(Error::ClarityError)
+ .unwrap()
}
pub fn get_nonce<T: ClarityConnection>(clarity_tx: &mut T, principal: &PrincipalData) -> u64 {
- clarity_tx.with_clarity_db_readonly(|ref mut db| db.get_account_nonce(principal))
+ clarity_tx
+ .with_clarity_db_readonly(|ref mut db| db.get_account_nonce(principal))
+ .map_err(|x| Error::ClarityError(x.into()))
+ .unwrap()
}
pub fn get_account_ft(
@@ -316,21 +322,21 @@ impl StacksChainState {
) {
clarity_tx
.with_clarity_db(|ref mut db| {
- let mut snapshot =
db.get_stx_balance_snapshot(principal);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
// last line of defense: if we don't have sufficient funds, panic.
// This should be checked by the block validation logic.
- if !snapshot.can_transfer(amount as u128) {
+ if !snapshot.can_transfer(amount as u128)? {
panic!(
"Tried to debit {} from account {} (which only has {})",
amount,
principal,
- snapshot.get_available_balance()
+ snapshot.get_available_balance()?
);
}
- snapshot.debit(amount as u128);
- snapshot.save();
+ snapshot.debit(amount as u128)?;
+ snapshot.save()?;
Ok(())
})
.expect("FATAL: failed to debit account")
@@ -345,11 +351,11 @@ impl StacksChainState {
) {
clarity_tx
.with_clarity_db(|ref mut db| {
- let mut snapshot = db.get_stx_balance_snapshot(principal);
- snapshot.credit(amount as u128);
+ let mut snapshot = db.get_stx_balance_snapshot(principal)?;
+ snapshot.credit(amount as u128)?;
- let new_balance = snapshot.get_available_balance();
- snapshot.save();
+ let new_balance = snapshot.get_available_balance()?;
+ snapshot.save()?;
info!("{} credited: {} uSTX", principal, new_balance);
Ok(())
@@ -365,9 +371,9 @@ impl StacksChainState {
) {
clarity_tx
.with_clarity_db(|ref mut db| {
- let mut snapshot = db.get_stx_balance_snapshot_genesis(principal);
- snapshot.credit(amount);
- snapshot.save();
+ let mut snapshot = db.get_stx_balance_snapshot_genesis(principal)?;
+ snapshot.credit(amount)?;
+ snapshot.save()?;
Ok(())
})
.expect("FATAL: failed to credit account")
@@ -382,7 +388,7 @@ impl StacksChainState {
clarity_tx
.with_clarity_db(|ref mut db| {
let next_nonce = cur_nonce.checked_add(1).expect("OUT OF NONCES");
- db.set_account_nonce(&principal, next_nonce);
+ db.set_account_nonce(&principal, next_nonce)?;
Ok(())
})
.expect("FATAL: failed to set account nonce")
@@ -694,10 +700,8 @@ impl StacksChainState {
height: u64,
) -> Result<Option<(PrincipalData, u16)>, Error> {
let principal_seq_opt = clarity_tx
- .with_clarity_db_readonly(|ref mut db| {
- Ok(db.get_microblock_poison_report(height as u32))
- })
- .map_err(Error::ClarityError)?;
+ .with_clarity_db_readonly(|ref mut db| db.get_microblock_poison_report(height as u32))
+ .map_err(|e| Error::ClarityError(e.into()))?;
Ok(principal_seq_opt.map(|(principal, seq)| (principal.into(), seq)))
}
diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs
index 1bd8188815..b65f0423d5 100644
--- a/stackslib/src/chainstate/stacks/db/blocks.rs
+++ b/stackslib/src/chainstate/stacks/db/blocks.rs
@@ -314,6 +314,12 @@ impl From for MemPoolRejection {
}
}
+impl From<clarity::vm::errors::Error> for MemPoolRejection {
+ fn from(e: clarity::vm::errors::Error) -> MemPoolRejection {
+ MemPoolRejection::Other(e.to_string())
+ }
+}
+
// These constants are mempool acceptance heuristics, but
// not part of the protocol consensus (i.e., a block
// that includes a transaction that violates these won't
@@ -4119,12 +4125,13 @@ impl StacksChainState {
chain_tip_burn_header_height: u32,
) -> Result<(bool, Vec<StacksTransactionReceipt>), Error> {
// is this stacks block the first of a new epoch?
- let (stacks_parent_epoch, sortition_epoch) = clarity_tx.with_clarity_db_readonly(|db| { - ( - db.get_clarity_epoch_version(), - db.get_stacks_epoch(chain_tip_burn_header_height), - ) - }); + let (stacks_parent_epoch, sortition_epoch) = clarity_tx + .with_clarity_db_readonly::<_, Result<_, clarity::vm::errors::Error>>(|db| { + Ok(( + db.get_clarity_epoch_version()?, + db.get_stacks_epoch(chain_tip_burn_header_height), + )) + })?; let mut receipts = vec![]; let mut applied = false; @@ -4571,16 +4578,16 @@ impl StacksChainState { )) }; - let mut snapshot = db.get_stx_balance_snapshot(&recipient_principal); - snapshot.credit(miner_reward_total); + let mut snapshot = db.get_stx_balance_snapshot(&recipient_principal)?; + snapshot.credit(miner_reward_total)?; debug!( "Balance available for {} is {} uSTX (earned {} uSTX)", &recipient_principal, - snapshot.get_available_balance(), + snapshot.get_available_balance()?, miner_reward_total ); - snapshot.save(); + snapshot.save()?; Ok(()) }) @@ -4633,7 +4640,7 @@ impl StacksChainState { })?; let entries = match result { - Value::Optional(_) => match result.expect_optional() { + Value::Optional(_) => match result.expect_optional()? { Some(Value::Sequence(SequenceData::List(entries))) => entries.data, _ => return Ok((0, vec![])), }, @@ -4643,17 +4650,17 @@ impl StacksChainState { let mut total_minted = 0; let mut events = vec![]; for entry in entries.into_iter() { - let schedule: TupleData = entry.expect_tuple(); + let schedule: TupleData = entry.expect_tuple()?; let amount = schedule .get("amount") .expect("Lockup malformed") .to_owned() - .expect_u128(); + .expect_u128()?; let recipient = schedule .get("recipient") .expect("Lockup malformed") .to_owned() - .expect_principal(); + .expect_principal()?; total_minted += amount; StacksChainState::account_credit(tx_connection, &recipient, amount as u64); let event = STXEventType::STXMintEvent(STXMintEventData { recipient, amount }); @@ -6550,13 +6557,15 @@ impl StacksChainState { } let (block_height, v1_unlock_height, v2_unlock_height) = clarity_connection - .with_clarity_db_readonly(|ref mut db| { - ( - db.get_current_burnchain_block_height() as u64, - db.get_v1_unlock_height(), - db.get_v2_unlock_height(), - ) - }); + .with_clarity_db_readonly::<_, Result<_, clarity::vm::errors::Error>>( + |ref mut db| { + Ok(( + db.get_current_burnchain_block_height()? as u64, + db.get_v1_unlock_height(), + db.get_v2_unlock_height()?, + )) + }, + )?; // 5: the paying account must have enough funds if !payer.stx_balance.can_transfer_at_burn_block( @@ -6564,7 +6573,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, - ) { + )? { match &tx.payload { TransactionPayload::TokenTransfer(..) => { // pass: we'll return a total_spent failure below. @@ -6576,7 +6585,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, - ), + )?, )); } } @@ -6600,14 +6609,14 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, - ) { + )? { return Err(MemPoolRejection::NotEnoughFunds( total_spent, origin.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, v2_unlock_height, - ), + )?, )); } @@ -6618,14 +6627,14 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, - ) { + )? 
{ return Err(MemPoolRejection::NotEnoughFunds( fee as u128, payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, v2_unlock_height, - ), + )?, )); } } @@ -11062,7 +11071,7 @@ pub mod test { peer.sortdb.replace(sortdb); assert_eq!( - account.stx_balance.get_total_balance(), + account.stx_balance.get_total_balance().unwrap(), 1000000000 - (1000 + 2000 + 3000 + 4000 + 5000 + 6000 + 7000 + 8000 + 9000) ); @@ -11074,8 +11083,19 @@ pub mod test { &format!("(get-delegation-info '{})", &del_addr), ); - let data = result.expect_optional().unwrap().expect_tuple().data_map; - let delegation_amt = data.get("amount-ustx").cloned().unwrap().expect_u128(); + let data = result + .expect_optional() + .unwrap() + .unwrap() + .expect_tuple() + .unwrap() + .data_map; + let delegation_amt = data + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128() + .unwrap(); assert_eq!(delegation_amt, 1000 * (i as u128 + 1)); } @@ -11734,7 +11754,7 @@ pub mod test { // skipped tenure 6's TransferSTX assert_eq!( - account.stx_balance.get_total_balance(), + account.stx_balance.get_total_balance().unwrap(), 1000000000 - (1000 + 2000 @@ -11772,8 +11792,19 @@ pub mod test { ), ); - let data = result.expect_optional().unwrap().expect_tuple().data_map; - let delegation_amt = data.get("amount-ustx").cloned().unwrap().expect_u128(); + let data = result + .expect_optional() + .unwrap() + .unwrap() + .expect_tuple() + .unwrap() + .data_map; + let delegation_amt = data + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128() + .unwrap(); assert_eq!(delegation_amt, 1000 * (i as u128 + 1)); } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 5e1996f3bb..ba38c0378f 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -431,8 +431,11 @@ impl<'a, 'b> ClarityTx<'a, 'b> { self.block.commit_block(); } - pub fn commit_mined_block(self, block_hash: &StacksBlockId) -> ExecutionCost { - self.block.commit_mined_block(block_hash).get_total() + pub fn commit_mined_block( + self, + block_hash: &StacksBlockId, + ) -> Result { + Ok(self.block.commit_mined_block(block_hash)?.get_total()) } pub fn commit_to_block( @@ -2276,7 +2279,9 @@ impl StacksChainState { let height_opt = clarity_tx .connection() .with_clarity_db_readonly::<_, Result<_, ()>>(|ref mut db| { - let height_opt = db.get_microblock_pubkey_hash_height(mblock_pubkey_hash); + let height_opt = db + .get_microblock_pubkey_hash_height(mblock_pubkey_hash) + .expect("FATAL: failed to query microblock public key hash"); Ok(height_opt) }) .expect("FATAL: failed to query microblock public key hash"); diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 6348883d1a..e5e0bf19e9 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . 
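// [Editor's note] The commit_mined_block() change above is one instance of a
// pattern repeated throughout this diff: a formerly panicking method becomes
// `-> Result<T, E>`, production callers propagate the error with `?`, and
// tests call .unwrap() to keep their old panic-on-error behavior. A minimal,
// std-only sketch with illustrative names:
#[derive(Debug)]
struct ExecutionCostToy(u64);

#[derive(Debug)]
struct CommitError;

struct MinedBlock;

impl MinedBlock {
    // before this refactor: fn commit(self) -> ExecutionCostToy
    fn commit(self) -> Result<ExecutionCostToy, CommitError> {
        Ok(ExecutionCostToy(42))
    }
}

fn finish_epoch(block: MinedBlock) -> Result<ExecutionCostToy, CommitError> {
    let consumed = block.commit()?; // production code propagates the error
    Ok(consumed)
}

fn main() {
    // test code keeps its old semantics by unwrapping at the call site
    let consumed = finish_epoch(MinedBlock).unwrap();
    println!("consumed: {:?}", consumed);
}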
 use std::collections::{HashMap, HashSet};
+use std::convert::{TryFrom, TryInto};
 use std::io::prelude::*;
 use std::io::{Read, Write};
 use std::path::{Path, PathBuf};
@@ -32,6 +33,7 @@ use clarity::vm::costs::{cost_functions, runtime_cost, CostTracker, ExecutionCos
 use clarity::vm::database::ClarityDatabase;
 use clarity::vm::errors::Error as InterpreterError;
 use clarity::vm::representations::{ClarityName, ContractName};
+use clarity::vm::types::serialization::SerializationError as ClaritySerializationError;
 use clarity::vm::types::{
     AssetIdentifier, BuffData, PrincipalData, QualifiedContractIdentifier, SequenceData,
     StacksAddressExtensions as ClarityStacksAddressExt, StandardPrincipalData, TupleData,
@@ -50,6 +52,33 @@ use crate::net::Error as net_error;
 use crate::util_lib::db::{query_count, query_rows, DBConn, Error as db_error};
 use crate::util_lib::strings::{StacksString, VecDisplay};

+/// This is a safe-to-hash Clarity value
+#[derive(PartialEq, Eq)]
+struct HashableClarityValue(Value);
+
+impl TryFrom<Value> for HashableClarityValue {
+    type Error = InterpreterError;
+
+    fn try_from(value: Value) -> Result<Self, Self::Error> {
+        // check that serialization _will_ be successful when hashed
+        let _bytes = value.serialize_to_vec().map_err(|_| {
+            InterpreterError::Interpreter(clarity::vm::errors::InterpreterError::Expect(
+                "Failed to serialize asset in NFT during post-condition checks".into(),
+            ))
+        })?;
+        Ok(Self(value))
+    }
+}
+
+impl std::hash::Hash for HashableClarityValue {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        #[allow(clippy::unwrap_used)]
+        // this unwrap is safe _as long as_ TryFrom was used as a constructor
+        let bytes = self.0.serialize_to_vec().unwrap();
+        bytes.hash(state);
+    }
+}
+
 impl StacksTransactionReceipt {
     pub fn from_stx_transfer(
         tx: StacksTransaction,
@@ -343,11 +372,14 @@ pub fn handle_clarity_runtime_error(error: clarity_error) -> ClarityRuntimeTxErr
                 err_type: "short return/panic",
             }
         }
-        clarity_error::Interpreter(InterpreterError::Unchecked(CheckErrors::SupertypeTooLarge)) => {
-            ClarityRuntimeTxError::Rejectable(error)
-        }
         clarity_error::Interpreter(InterpreterError::Unchecked(check_error)) => {
-            ClarityRuntimeTxError::AnalysisError(check_error)
+            if check_error.rejectable() {
+                ClarityRuntimeTxError::Rejectable(clarity_error::Interpreter(
+                    InterpreterError::Unchecked(check_error),
+                ))
+            } else {
+                ClarityRuntimeTxError::AnalysisError(check_error)
+            }
         }
         clarity_error::AbortedByCallback(val, assets, events) => {
            ClarityRuntimeTxError::AbortedByCallback(val, assets, events)
        }
@@ -443,13 +475,13 @@ impl StacksChainState {
         payer_account: StacksAccount,
     ) -> Result<u64, Error> {
         let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht) = clarity_tx
-            .with_clarity_db_readonly(|ref mut db| {
-                (
-                    db.get_current_burnchain_block_height(),
+            .with_clarity_db_readonly::<_, Result<_, InterpreterError>>(|ref mut db| {
+                Ok((
+                    db.get_current_burnchain_block_height()?,
                     db.get_v1_unlock_height(),
-                    db.get_v2_unlock_height(),
-                )
-            });
+                    db.get_v2_unlock_height()?,
+                ))
+            })?;

         let consolidated_balance = payer_account
             .stx_balance
             .get_available_balance_at_burn_block(
                 cur_burn_block_height as u64,
                 v1_unlock_ht,
                 v2_unlock_ht,
-            );
+            )?;

         if consolidated_balance < fee as u128 {
             return Err(Error::InvalidFee);
         }
@@ -518,12 +550,12 @@ impl StacksChainState {
         post_condition_mode: &TransactionPostConditionMode,
         origin_account: &StacksAccount,
         asset_map: &AssetMap,
-    ) -> bool {
+    ) -> Result<bool, Error> {
         let mut checked_fungible_assets: HashMap<PrincipalData, HashSet<AssetIdentifier>> =
             HashMap::new();
         let mut checked_nonfungible_assets: HashMap<
             PrincipalData,
-            HashMap<AssetIdentifier, HashSet<Value>>,
+            HashMap<AssetIdentifier, HashSet<HashableClarityValue>>,
         > = HashMap::new();
         let allow_unchecked_assets = *post_condition_mode == TransactionPostConditionMode::Allow;
@@ -548,7 +580,7 @@ impl StacksChainState {
                         "Post-condition check failure on STX owned by {}: {:?} {:?} {}",
                         account_principal, amount_sent_condition, condition_code, amount_sent
                     );
-                    return false;
+                    return Ok(false);
                 }

                 if let Some(ref mut asset_ids) =
@@ -591,7 +623,7 @@ impl StacksChainState {
                         .unwrap_or(0);
                     if !condition_code.check(*amount_sent_condition as u128, amount_sent) {
                         info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent);
-                        return false;
+                        return Ok(false);
                     }

                     if let Some(ref mut asset_ids) =
@@ -625,23 +657,23 @@ impl StacksChainState {
                         .unwrap_or(&empty_assets);
                     if !condition_code.check(asset_value, assets_sent) {
                         info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code);
-                        return false;
+                        return Ok(false);
                     }

                     if let Some(ref mut asset_id_map) =
                         checked_nonfungible_assets.get_mut(&account_principal)
                     {
                         if let Some(ref mut asset_values) = asset_id_map.get_mut(&asset_id) {
-                            asset_values.insert(asset_value.clone());
+                            asset_values.insert(asset_value.clone().try_into()?);
                         } else {
                             let mut asset_set = HashSet::new();
-                            asset_set.insert(asset_value.clone());
+                            asset_set.insert(asset_value.clone().try_into()?);
                             asset_id_map.insert(asset_id, asset_set);
                         }
                     } else {
                         let mut asset_id_map = HashMap::new();
                         let mut asset_set = HashSet::new();
-                        asset_set.insert(asset_value.clone());
+                        asset_set.insert(asset_value.clone().try_into()?);
                         asset_id_map.insert(asset_id, asset_set);
                         checked_nonfungible_assets.insert(account_principal, asset_id_map);
                     }
@@ -665,20 +697,20 @@ impl StacksChainState {
                     {
                         // each value must be covered
                         for v in values {
-                            if !nfts.contains(&v) {
+                            if !nfts.contains(&v.clone().try_into()?) {
                                 info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal);
-                                return false;
+                                return Ok(false);
                             }
                         }
                     } else {
                         // no values covered
                         info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal);
-                        return false;
+                        return Ok(false);
                     }
                 } else {
                     // no NFT for this principal
                     info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal);
-                    return false;
+                    return Ok(false);
                 }
             }
             _ => {
@@ -688,18 +720,18 @@ impl StacksChainState {
                 {
                     if !checked_ft_asset_ids.contains(&asset_identifier) {
                         info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal);
-                        return false;
+                        return Ok(false);
                     }
                 } else {
                     info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal);
-                    return false;
+                    return Ok(false);
                 }
             }
                 }
             }
         }
-        return true;
+        return Ok(true);
     }

     /// Given two microblock headers, were they signed by the same key?
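// [Editor's note] A std-only illustration of the invariant behind the
// HashableClarityValue type introduced above: the TryFrom constructor proves
// once that the value serializes, so the Hash impl may unwrap the same
// serialization without risking a panic. `Payload` and `encode` are
// hypothetical stand-ins for the Clarity Value type and its serializer.
use std::convert::TryFrom;
use std::hash::{Hash, Hasher};

#[derive(PartialEq, Eq)]
struct Payload(Vec<u8>);

fn encode(p: &Payload) -> Result<Vec<u8>, &'static str> {
    if p.0.is_empty() {
        Err("cannot serialize an empty payload")
    } else {
        Ok(p.0.clone())
    }
}

#[derive(PartialEq, Eq)]
struct HashablePayload(Payload);

impl TryFrom<Payload> for HashablePayload {
    type Error = &'static str;

    fn try_from(p: Payload) -> Result<Self, Self::Error> {
        let _bytes = encode(&p)?; // reject anything that could not be hashed later
        Ok(Self(p))
    }
}

impl Hash for HashablePayload {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // safe as long as TryFrom is the only way to construct this type
        encode(&self.0).unwrap().hash(state);
    }
}

fn main() {
    assert!(HashablePayload::try_from(Payload(vec![1, 2, 3])).is_ok());
    // unserializable values never become hashable in the first place
    assert!(HashablePayload::try_from(Payload(vec![])).is_err());
}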
@@ -774,7 +806,7 @@ impl StacksChainState { let microblock_height_opt = env .global_context .database - .get_microblock_pubkey_hash_height(&pubkh); + .get_microblock_pubkey_hash_height(&pubkh)?; let current_height = env.global_context.database.get_current_block_height(); // for the microblock public key hash we had to process @@ -821,11 +853,15 @@ impl StacksChainState { let (reporter_principal, reported_seq) = if let Some((reporter, seq)) = env .global_context .database - .get_microblock_poison_report(mblock_pubk_height) + .get_microblock_poison_report(mblock_pubk_height)? { // account for report loaded - env.add_memory(TypeSignature::PrincipalType.size() as u64) - .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; + env.add_memory( + TypeSignature::PrincipalType + .size() + .map_err(InterpreterError::from)? as u64, + ) + .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; // u128 sequence env.add_memory(16) @@ -974,6 +1010,7 @@ impl StacksChainState { origin_account, asset_map, ) + .expect("FATAL: error while evaluating post-conditions") }, ); @@ -1010,7 +1047,7 @@ impl StacksChainState { tx.clone(), events, value.expect("BUG: Post condition contract call must provide would-have-been-returned value"), - assets.get_stx_burned_total(), + assets.get_stx_burned_total()?, total_cost); return Ok(receipt); } @@ -1062,7 +1099,7 @@ impl StacksChainState { tx.clone(), events, result, - asset_map.get_stx_burned_total(), + asset_map.get_stx_burned_total()?, total_cost, ); Ok(receipt) @@ -1132,8 +1169,14 @@ impl StacksChainState { } } } + if let clarity_error::Parse(err) = &other_error { + if err.rejectable() { + info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); + return Err(Error::ClarityError(other_error)); + } + } if let clarity_error::Analysis(err) = &other_error { - if let CheckErrors::SupertypeTooLarge = err.err { + if err.err.rejectable() { info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); return Err(Error::ClarityError(other_error)); } @@ -1184,6 +1227,7 @@ impl StacksChainState { origin_account, asset_map, ) + .expect("FATAL: error while evaluating post-conditions") }, ); @@ -1230,7 +1274,7 @@ impl StacksChainState { StacksTransactionReceipt::from_condition_aborted_smart_contract( tx.clone(), events, - assets.get_stx_burned_total(), + assets.get_stx_burned_total()?, contract_analysis, total_cost, ); @@ -1287,7 +1331,7 @@ impl StacksChainState { let receipt = StacksTransactionReceipt::from_smart_contract( tx.clone(), events, - asset_map.get_stx_burned_total(), + asset_map.get_stx_burned_total()?, contract_analysis, total_cost, ); @@ -1440,7 +1484,9 @@ impl StacksChainState { tx_receipt }; - transaction.commit(); + transaction + .commit() + .map_err(|e| Error::InvalidStacksTransaction(e.to_string(), false))?; Ok((fee, tx_receipt)) } @@ -1515,7 +1561,8 @@ pub mod test { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch21); + db.set_clarity_epoch_version(StacksEpochId::Epoch21) + .unwrap(); Ok(()) }) .unwrap(); @@ -6749,7 +6796,8 @@ pub mod test { mode, origin, &ft_transfer_2, - ); + ) + .unwrap(); if result != expected_result { eprintln!( "test failed:\nasset map: {:?}\nscenario: {:?}\n", @@ -7101,7 +7149,8 @@ pub mod test { mode, origin, &nft_transfer_2, - ); + ) + .unwrap(); if result != expected_result { eprintln!( "test failed:\nasset map: 
{:?}\nscenario: {:?}\n", @@ -7917,7 +7966,8 @@ pub mod test { post_condition_mode, origin_account, asset_map, - ); + ) + .unwrap(); if result != expected_result { eprintln!( "test failed:\nasset map: {:?}\nscenario: {:?}\n", @@ -8059,7 +8109,8 @@ pub mod test { assert_eq!( StacksChainState::get_account(&mut conn, &addr.into()) .stx_balance - .get_available_balance_at_burn_block(0, 0, 0), + .get_available_balance_at_burn_block(0, 0, 0) + .unwrap(), (1000000000 - fee) as u128 ); @@ -8207,28 +8258,32 @@ pub mod test { assert_eq!(report_opt.unwrap(), (reporter_addr, 123)); // result must encode poison information - let result_data = receipt.result.expect_tuple(); + let result_data = receipt.result.expect_tuple().unwrap(); let height = result_data .get("block_height") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); let mblock_pubkh = result_data .get("microblock_pubkey_hash") .unwrap() .to_owned() - .expect_buff(20); + .expect_buff(20) + .unwrap(); let reporter = result_data .get("reporter") .unwrap() .to_owned() - .expect_principal(); + .expect_principal() + .unwrap(); let seq = result_data .get("sequence") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(height, 1); assert_eq!(mblock_pubkh, block_pubkh.0.to_vec()); @@ -8457,28 +8512,32 @@ pub mod test { assert_eq!(report_opt.unwrap(), (reporter_addr_2, 122)); // result must encode poison information - let result_data = receipt.result.expect_tuple(); + let result_data = receipt.result.expect_tuple().unwrap(); let height = result_data .get("block_height") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); let mblock_pubkh = result_data .get("microblock_pubkey_hash") .unwrap() .to_owned() - .expect_buff(20); + .expect_buff(20) + .unwrap(); let reporter = result_data .get("reporter") .unwrap() .to_owned() - .expect_principal(); + .expect_principal() + .unwrap(); let seq = result_data .get("sequence") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); assert_eq!(height, 1); assert_eq!(mblock_pubkh, block_pubkh.0.to_vec()); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index c3d11c9627..66ee0e57cb 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -502,7 +502,8 @@ impl StacksChainState { ); unconfirmed .clarity_inst - .drop_unconfirmed_state(&unconfirmed.confirmed_chain_tip); + .drop_unconfirmed_state(&unconfirmed.confirmed_chain_tip) + .expect("FATAL: failed to drop unconfirmed state"); debug!( "Dropped unconfirmed state off of {} ({})", &unconfirmed.confirmed_chain_tip, &unconfirmed.unconfirmed_chain_tip @@ -856,7 +857,9 @@ mod test { .chainstate() .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db.get_account_stx_balance(&recv_addr.into()) + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() }) }) .unwrap() @@ -873,7 +876,9 @@ mod test { .chainstate() .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db.get_account_stx_balance(&recv_addr.into()) + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() }) }) .unwrap(); @@ -1081,7 +1086,9 @@ mod test { .chainstate() .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - 
clarity_db.get_account_stx_balance(&recv_addr.into()) + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() }) }) .unwrap() @@ -1101,7 +1108,9 @@ mod test { .chainstate() .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db.get_account_stx_balance(&recv_addr.into()) + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() }) }) .unwrap(); @@ -1378,7 +1387,9 @@ mod test { .chainstate() .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db.get_account_stx_balance(&recv_addr.into()) + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() }) }) .unwrap() diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 534f81f725..fd664a7dbf 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2132,7 +2132,7 @@ impl StacksBlockBuilder { } /// Finish up mining an epoch's transactions - pub fn epoch_finish(self, tx: ClarityTx) -> ExecutionCost { + pub fn epoch_finish(self, tx: ClarityTx) -> Result { let new_consensus_hash = MINER_BLOCK_CONSENSUS_HASH.clone(); let new_block_hash = MINER_BLOCK_HEADER_HASH.clone(); @@ -2144,7 +2144,7 @@ impl StacksBlockBuilder { // let moved_name = format!("{}.mined", index_block_hash); // write out the trie... - let consumed = tx.commit_mined_block(&index_block_hash); + let consumed = tx.commit_mined_block(&index_block_hash)?; test_debug!( "\n\nMiner {}: Finished mining child of {}/{}. Trie is in mined_blocks table.\n", @@ -2153,7 +2153,7 @@ impl StacksBlockBuilder { self.chain_tip.anchored_header.block_hash() ); - consumed + Ok(consumed) } /// Unconditionally build an anchored block from a list of transactions. 
/// Used in test cases @@ -2230,7 +2230,7 @@ impl StacksBlockBuilder { None }; - let cost = builder.epoch_finish(epoch_tx); + let cost = builder.epoch_finish(epoch_tx)?; Ok((block, size, cost, mblock_opt)) } @@ -2624,7 +2624,7 @@ impl StacksBlockBuilder { // save the block so we can build microblocks off of it let block = builder.mine_anchored_block(&mut epoch_tx); let size = builder.bytes_so_far; - let consumed = builder.epoch_finish(epoch_tx); + let consumed = builder.epoch_finish(epoch_tx)?; let ts_end = get_epoch_time_ms(); diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 1bd328d810..ab5a807217 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -1041,10 +1041,10 @@ fn test_get_block_info_v210() { ) .unwrap(); - let list = list_val.expect_list(); - let block_reward_opt = list.get(0).cloned().unwrap().expect_optional(); - let miner_spend_winner = list.get(1).cloned().unwrap().expect_optional().unwrap().expect_u128(); - let miner_spend_total = list.get(2).cloned().unwrap().expect_optional().unwrap().expect_u128(); + let list = list_val.expect_list().unwrap(); + let block_reward_opt = list.get(0).cloned().unwrap().expect_optional().unwrap(); + let miner_spend_winner = list.get(1).cloned().unwrap().expect_optional().unwrap().unwrap().expect_u128().unwrap(); + let miner_spend_total = list.get(2).cloned().unwrap().expect_optional().unwrap().unwrap().expect_u128().unwrap(); eprintln!("i = {}, block_reward = {:?}, miner_spend_winner = {:?}, miner_spend_total = {:?}", i, &block_reward_opt, &miner_spend_winner, &miner_spend_total); @@ -1083,7 +1083,7 @@ fn test_get_block_info_v210() { }; eprintln!("i = {}, {} + {} + {} + {}", i, coinbase, tx_fees_anchored, tx_fees_streamed_produced, tx_fees_streamed_confirmed); - assert_eq!(block_reward_opt.unwrap().expect_u128(), coinbase + tx_fees_anchored + tx_fees_streamed_produced + tx_fees_streamed_confirmed); + assert_eq!(block_reward_opt.unwrap().expect_u128().unwrap(), coinbase + tx_fees_anchored + tx_fees_streamed_produced + tx_fees_streamed_confirmed); } else { // genesis, or not yet mature @@ -1343,10 +1343,10 @@ fn test_get_block_info_v210_no_microblocks() { ) .unwrap(); - let list = list_val.expect_list(); - let block_reward_opt = list.get(0).cloned().unwrap().expect_optional(); - let miner_spend_winner = list.get(1).cloned().unwrap().expect_optional().unwrap().expect_u128(); - let miner_spend_total = list.get(2).cloned().unwrap().expect_optional().unwrap().expect_u128(); + let list = list_val.expect_list().unwrap(); + let block_reward_opt = list.get(0).cloned().unwrap().expect_optional().unwrap(); + let miner_spend_winner = list.get(1).cloned().unwrap().expect_optional().unwrap().unwrap().expect_u128().unwrap(); + let miner_spend_total = list.get(2).cloned().unwrap().expect_optional().unwrap().unwrap().expect_u128().unwrap(); eprintln!("i = {}, block_reward = {:?}, miner_spend_winner = {:?}, miner_spend_total = {:?}", i, &block_reward_opt, &miner_spend_winner, &miner_spend_total); @@ -1373,7 +1373,7 @@ fn test_get_block_info_v210_no_microblocks() { let tx_fees_streamed_confirmed = 0; eprintln!("i = {}, {} + {} + {} + {}", i, coinbase, tx_fees_anchored, tx_fees_streamed_produced, tx_fees_streamed_confirmed); - assert_eq!(block_reward_opt.unwrap().expect_u128(), coinbase + tx_fees_anchored + tx_fees_streamed_produced + tx_fees_streamed_confirmed); + 
assert_eq!(block_reward_opt.unwrap().expect_u128().unwrap(), coinbase + tx_fees_anchored + tx_fees_streamed_produced + tx_fees_streamed_confirmed); } else { // genesis, or not yet mature @@ -1810,10 +1810,10 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { ) .unwrap(); - let list = list_val.expect_list(); - let block_reward_opt = list.get(0).cloned().unwrap().expect_optional(); - let miner_spend_winner = list.get(1).cloned().unwrap().expect_optional().unwrap().expect_u128(); - let miner_spend_total = list.get(2).cloned().unwrap().expect_optional().unwrap().expect_u128(); + let list = list_val.expect_list().unwrap(); + let block_reward_opt = list.get(0).cloned().unwrap().expect_optional().unwrap(); + let miner_spend_winner = list.get(1).cloned().unwrap().expect_optional().unwrap().unwrap().expect_u128().unwrap(); + let miner_spend_total = list.get(2).cloned().unwrap().expect_optional().unwrap().unwrap().expect_u128().unwrap(); if i >= 1 { assert_eq!(miner_spend_winner, (1000 + i - 1) as u128); @@ -1836,7 +1836,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { |env| env.eval_raw(&format!("(get-block-info? miner-address u{})", i)) ) .unwrap(); - let miner_address = miner_val.expect_optional().unwrap().expect_principal(); + let miner_address = miner_val.expect_optional().unwrap().unwrap().expect_principal().unwrap(); eprintln!("i = {}, block_reward = {:?}, miner_spend_winner = {:?}, miner_spend_total = {:?}, miner address = {}", i, &block_reward_opt, &miner_spend_winner, &miner_spend_total, miner_address); assert_eq!(miner_address, coinbase_addresses[i - 1].to_account_principal()); @@ -1872,11 +1872,11 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { } eprintln!("i = {}, {} + {} + {} + {}", i, coinbase, tx_fees_anchored, tx_fees_streamed_produced, tx_fees_streamed_confirmed); - assert_eq!(block_reward_opt.clone().unwrap().expect_u128(), coinbase + tx_fees_anchored + tx_fees_streamed_produced + tx_fees_streamed_confirmed); + assert_eq!(block_reward_opt.clone().unwrap().expect_u128().unwrap(), coinbase + tx_fees_anchored + tx_fees_streamed_produced + tx_fees_streamed_confirmed); if i > 2 { - eprintln!("recipient_total_reward: {} = {} + {}", recipient_total_reward + block_reward_opt.clone().unwrap().expect_u128(), recipient_total_reward, block_reward_opt.clone().unwrap().expect_u128()); - recipient_total_reward += block_reward_opt.clone().unwrap().expect_u128(); + eprintln!("recipient_total_reward: {} = {} + {}", recipient_total_reward + block_reward_opt.clone().unwrap().expect_u128().unwrap(), recipient_total_reward, block_reward_opt.clone().unwrap().expect_u128().unwrap()); + recipient_total_reward += block_reward_opt.clone().unwrap().expect_u128().unwrap(); } } else { @@ -1921,7 +1921,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { }, ) .unwrap(); - recipient_balance_val.expect_u128() + recipient_balance_val.expect_u128().unwrap() }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index c81a57b098..8882bc8397 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -2675,17 +2675,17 @@ fn test_build_microblock_stream_forks() { test_debug!( "Test {}: {}", &account.principal.to_string(), - account.stx_balance.get_total_balance() + account.stx_balance.get_total_balance().unwrap() ); if (i as u64) < (num_blocks as u64) - 
MINER_REWARD_MATURITY - 1 { assert_eq!( - account.stx_balance.get_total_balance(), + account.stx_balance.get_total_balance().unwrap(), (initial_balance as u128) + (expected_coinbase * POISON_MICROBLOCK_COMMISSION_FRACTION) / 100 ); } else { assert_eq!( - account.stx_balance.get_total_balance(), + account.stx_balance.get_total_balance().unwrap(), initial_balance as u128 ); } @@ -3691,7 +3691,7 @@ fn test_contract_call_across_clarity_versions() { &addr_anchored, tenure_id )) .unwrap(); - let call_count = call_count_value.expect_u128(); + let call_count = call_count_value.expect_u128().unwrap(); assert_eq!(call_count, (num_blocks - tenure_id - 1) as u128); // contract-call transaction worked @@ -3701,7 +3701,7 @@ fn test_contract_call_across_clarity_versions() { &addr_anchored, tenure_id )) .unwrap(); - let call_count = call_count_value.expect_u128(); + let call_count = call_count_value.expect_u128().unwrap(); assert_eq!(call_count, (num_blocks - tenure_id - 1) as u128); // at-block transaction worked @@ -3711,7 +3711,7 @@ fn test_contract_call_across_clarity_versions() { &addr_anchored, tenure_id )) .unwrap(); - let call_count = at_block_count_value.expect_u128(); + let call_count = at_block_count_value.expect_u128().unwrap(); if tenure_id < num_blocks - 1 { assert_eq!(call_count, 1); @@ -4356,12 +4356,12 @@ fn mempool_incorporate_pox_unlocks() { let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_conn(), &parent_tip.index_block_hash(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { - let burn_block_height = db.get_current_burnchain_block_height() as u64; + let burn_block_height = db.get_current_burnchain_block_height().unwrap() as u64; let v1_unlock_height = db.get_v1_unlock_height(); - let v2_unlock_height = db.get_v2_unlock_height(); - let balance = db.get_account_stx_balance(&principal); + let v2_unlock_height = db.get_v2_unlock_height().unwrap(); + let balance = db.get_account_stx_balance(&principal).unwrap(); info!("Checking balance"; "v1_unlock_height" => v1_unlock_height, "burn_block_height" => burn_block_height); - balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height) + balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height).unwrap() }) }).unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 1ed4bbc425..8085f8a8cb 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -173,7 +173,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -359,7 +359,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -506,7 +506,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -554,7 +554,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -843,7 +843,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -891,7 +891,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, 
microblocks) }, ); @@ -1108,7 +1108,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -1157,7 +1157,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -1456,7 +1456,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -1501,7 +1501,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -1703,7 +1703,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -1751,7 +1751,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -2011,7 +2011,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -2056,7 +2056,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -2258,7 +2258,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); @@ -2306,7 +2306,7 @@ where &all_prev_mining_rewards )); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 0be3d75f6b..ee97e1602b 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -518,6 +518,7 @@ impl TestStacksNode { pub fn get_miner_balance(clarity_tx: &mut ClarityTx, addr: &StacksAddress) -> u128 { clarity_tx.with_clarity_db_readonly(|db| { db.get_account_stx_balance(&StandardPrincipalData::from(addr.clone()).into()) + .unwrap() .amount_unlocked() }) } @@ -845,6 +846,7 @@ pub fn check_mining_reward( // what was the miner's total spend? 
let miner_nonce = clarity_tx.with_clarity_db_readonly(|db| { db.get_account_nonce(&StandardPrincipalData::from(miner.origin_address().unwrap()).into()) + .unwrap() }); let mut spent_total = 0; diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 9c384a2316..77150c76d9 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -362,7 +362,9 @@ where let (headers_return, result) = { let marf_tx = marf_kv.begin(&from, &to); let (headers_return, marf_return, result) = f(headers_db, marf_tx); - marf_return.commit_to(&to); + marf_return + .commit_to(&to) + .expect("FATAL: failed to commit block"); (headers_return, result) }; (headers_return, marf_kv, result) @@ -844,6 +846,8 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) res }, ) + .unwrap() + .0 .unwrap(); } @@ -939,7 +943,7 @@ pub fn add_assets(result: &mut serde_json::Value, assets: bool, asset_map: Asset pub fn add_serialized_output(result: &mut serde_json::Value, value: Value) { let result_raw = { - let bytes = (&value).serialize_to_vec(); + let bytes = (&value).serialize_to_vec().unwrap(); bytes_to_hex(&bytes) }; result["output_serialized"] = serde_json::to_value(result_raw.as_str()).unwrap(); @@ -1030,15 +1034,15 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option (i32, Option = events .into_iter() - .map(|event| event.json_serialize(0, &Txid([0u8; 32]), true)) + .map(|event| event.json_serialize(0, &Txid([0u8; 32]), true).unwrap()) .collect(); result["events"] = serde_json::Value::Array(events_json); @@ -1823,7 +1829,9 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option = events .into_iter() - .map(|event| event.json_serialize(0, &Txid([0u8; 32]), true)) + .map(|event| { + event.json_serialize(0, &Txid([0u8; 32]), true).unwrap() + }) .collect(); result["events"] = serde_json::Value::Array(events_json); diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index fce78929f9..151fa09e5b 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -519,9 +519,10 @@ impl ClarityInstance { conn } - pub fn drop_unconfirmed_state(&mut self, block: &StacksBlockId) { + pub fn drop_unconfirmed_state(&mut self, block: &StacksBlockId) -> Result<(), Error> { let datastore = self.datastore.begin_unconfirmed(block); - datastore.rollback_unconfirmed() + datastore.rollback_unconfirmed()?; + Ok(()) } pub fn begin_unconfirmed<'a, 'b>( @@ -584,9 +585,9 @@ impl ClarityInstance { let mut db = datastore.as_clarity_db(header_db, burn_state_db); db.begin(); let result = db.get_clarity_epoch_version(); - db.roll_back(); + db.roll_back()?; result - }; + }?; Ok(ClarityReadOnlyConnection { datastore, @@ -617,9 +618,9 @@ impl ClarityInstance { let epoch_id = { clarity_db.begin(); let result = clarity_db.get_clarity_epoch_version(); - clarity_db.roll_back(); + clarity_db.roll_back()?; result - }; + }?; let mut env = OwnedEnvironment::new_free(self.mainnet, self.chain_id, clarity_db, epoch_id); env.eval_read_only_with_rules(contract, program, ast_rules) @@ -642,7 +643,8 @@ impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { ClarityDatabase::new(&mut self.datastore, &self.header_db, &self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); - db.roll_back(); + db.roll_back() + .expect("FATAL: failed to roll back from read-only context"); result } @@ -653,7 +655,8 @@ impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { let mut db = 
AnalysisDatabase::new(&mut self.datastore); db.begin(); let result = to_do(&mut db); - db.roll_back(); + db.roll_back() + .expect("FATAL: failed to roll back from read-only context"); result } @@ -673,7 +676,8 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { .as_clarity_db(&self.header_db, &self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); - db.roll_back(); + db.roll_back() + .expect("FATAL: failed to roll back changes in read-only context"); result } @@ -684,7 +688,8 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { let mut db = self.datastore.as_analysis_db(); db.begin(); let result = to_do(&mut db); - db.roll_back(); + db.roll_back() + .expect("FATAL: failed to roll back changes in read-only context"); result } @@ -696,7 +701,9 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { impl<'a> PreCommitClarityBlock<'a> { pub fn commit(self) { debug!("Committing Clarity block connection"; "index_block" => %self.commit_to); - self.datastore.commit_to(&self.commit_to); + self.datastore + .commit_to(&self.commit_to) + .expect("FATAL: failed to commit block"); } } @@ -718,7 +725,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // this is a "lower-level" rollback than the roll backs performed in // ClarityDatabase or AnalysisDatabase -- this is done at the backing store level. debug!("Rollback unconfirmed Clarity datastore"); - self.datastore.rollback_unconfirmed(); + self.datastore + .rollback_unconfirmed() + .expect("FATAL: failed to rollback block"); } /// Commits all changes in the current block by @@ -749,7 +758,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// time of opening). pub fn commit_to_block(self, final_bhh: &StacksBlockId) -> LimitedCostTracker { debug!("Commit Clarity datastore to {}", final_bhh); - self.datastore.commit_to(final_bhh); + self.datastore + .commit_to(final_bhh) + .expect("FATAL: failed to commit block"); self.cost_track.unwrap() } @@ -760,11 +771,11 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// before this saves, it updates the metadata headers in /// the sidestore so that they don't get stepped on after /// a miner re-executes a constructed block. 
- pub fn commit_mined_block(self, bhh: &StacksBlockId) -> LimitedCostTracker { + pub fn commit_mined_block(self, bhh: &StacksBlockId) -> Result { debug!("Commit mined Clarity datastore to {}", bhh); - self.datastore.commit_mined_block(bhh); + self.datastore.commit_mined_block(bhh)?; - self.cost_track.unwrap() + Ok(self.cost_track.unwrap()) } /// Save all unconfirmed state by @@ -800,6 +811,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let boot_code_auth = boot_code_tx_auth(boot_code_address); let boot_code_nonce = self.with_clarity_db_readonly(|db| { db.get_account_nonce(&boot_code_address.clone().into()) + .expect("FATAL: Failed to boot account nonce") }); let boot_code_account = boot_code_acc(boot_code_address, boot_code_nonce); @@ -827,7 +839,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch2_05); + db.set_clarity_epoch_version(StacksEpochId::Epoch2_05)?; Ok(()) }) .unwrap(); @@ -912,6 +924,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let boot_code_nonce = self.with_clarity_db_readonly(|db| { db.get_account_nonce(&boot_code_address.clone().into()) + .expect("FATAL: Failed to boot account nonce") }); let boot_code_account = StacksAccount { @@ -949,7 +962,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch21); + db.set_clarity_epoch_version(StacksEpochId::Epoch21)?; Ok(()) }) .unwrap(); @@ -1020,7 +1033,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch21); + db.set_clarity_epoch_version(StacksEpochId::Epoch21)?; Ok(()) }) .unwrap(); @@ -1073,7 +1086,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch22); + db.set_clarity_epoch_version(StacksEpochId::Epoch22)?; Ok(()) }) .unwrap(); @@ -1102,7 +1115,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch23); + db.set_clarity_epoch_version(StacksEpochId::Epoch23)?; Ok(()) }) .unwrap(); @@ -1129,7 +1142,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch24); + db.set_clarity_epoch_version(StacksEpochId::Epoch24)?; Ok(()) }) .unwrap(); @@ -1176,6 +1189,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let boot_code_nonce = self.with_clarity_db_readonly(|db| { db.get_account_nonce(&boot_code_address.clone().into()) + .expect("FATAL: Failed to boot account nonce") }); let boot_code_account = StacksAccount { @@ -1293,7 +1307,8 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let mut tx = self.start_transaction_processing(); let r = todo(&mut tx); - tx.commit(); + tx.commit() + .expect("FATAL: failed to commit unconditional free transaction"); (old_cost_tracker, r) }) } @@ -1308,7 +1323,8 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { { let mut tx = self.start_transaction_processing(); let r = todo(&mut tx); - tx.commit(); + tx.commit() + .expect("FATAL: failed to commit unconditional transaction"); r } @@ -1341,7 +1357,8 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { ); db.begin(); let (r, mut db) = 
to_do(db); - db.roll_back(); + db.roll_back() + .expect("FATAL: failed to rollback changes during read-only connection"); (db.destroy().into(), r) }) } @@ -1353,7 +1370,8 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { self.with_analysis_db(|mut db, cost_tracker| { db.begin(); let result = to_do(&mut db); - db.roll_back(); + db.roll_back() + .expect("FATAL: failed to rollback changes during read-only connection"); (cost_tracker, result) }) } @@ -1392,6 +1410,7 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { where A: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, F: FnOnce(&mut OwnedEnvironment) -> Result<(R, AssetMap, Vec), E>, + E: From, { using!(self.log, "log", |log| { using!(self.cost_track, "cost tracker", |cost_track| { @@ -1421,16 +1440,18 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { let result = match result { Ok((value, asset_map, events)) => { let aborted = abort_call_back(&asset_map, &mut db); - if aborted { - db.roll_back(); - } else { - db.commit(); + let db_result = if aborted { db.roll_back() } else { db.commit() }; + match db_result { + Ok(_) => Ok((value, asset_map, events, aborted)), + Err(e) => Err(e.into()), } - Ok((value, asset_map, events, aborted)) } Err(e) => { - db.roll_back(); - Err(e) + let db_result = db.roll_back(); + match db_result { + Ok(_) => Err(e), + Err(db_err) => Err(db_err.into()), + } } }; @@ -1470,11 +1491,16 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { db.begin(); let result = to_do(&mut db); - if result.is_ok() { - db.commit(); + let db_result = if result.is_ok() { + db.commit() } else { - db.roll_back(); - } + db.roll_back() + }; + + let result = match db_result { + Ok(_) => result, + Err(e) => Err(e.into()), + }; (db.destroy().into(), result) }) @@ -1520,7 +1546,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { /// Commit the changes from the edit log. 
/// panics if there is more than one open savepoint - pub fn commit(mut self) { + pub fn commit(mut self) -> Result<(), Error> { let log = self .log .take() @@ -1532,12 +1558,13 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { rollback_wrapper.depth() ); } - rollback_wrapper.commit(); + rollback_wrapper.commit().map_err(InterpreterError::from)?; // now we can reset the memory usage for the edit-log self.cost_track .as_mut() .expect("BUG: Transaction connection lost cost tracker connection.") .reset_memory(); + Ok(()) } /// Evaluate a raw Clarity snippit @@ -1770,7 +1797,7 @@ mod tests { tx.save_analysis(&contract_identifier, &ct_analysis) .unwrap(); - tx.commit(); + tx.commit().unwrap(); } // should fail since the prior contract @@ -1802,7 +1829,7 @@ mod tests { ) .contains("ContractAlreadyExists")); - tx.commit(); + tx.commit().unwrap(); } } } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index c4c869d119..25d28ded9b 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -385,26 +385,29 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .expect("Attempted to get the open chain tip from an unopened context.") } - fn get_with_proof(&mut self, key: &str) -> Option<(String, Vec)> { + fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> { self.marf .get_with_proof(&self.chain_tip, key) .or_else(|e| match e { Error::NotFoundError => Ok(None), _ => Err(e), }) - .expect("ERROR: Unexpected MARF Failure on GET") + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? .map(|(marf_value, proof)| { let side_key = marf_value.to_hex(); let data = - SqliteConnection::get(self.get_side_store(), &side_key).expect(&format!( - "ERROR: MARF contained value_hash not found in side storage: {}", - side_key - )); - (data, proof.serialize_to_vec()) + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) }) + .transpose() } - fn get(&mut self, key: &str) -> Option { + fn get(&mut self, key: &str) -> InterpreterResult> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf .get(&self.chain_tip, key) @@ -419,18 +422,22 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { } _ => Err(e), }) - .expect("ERROR: Unexpected MARF Failure on GET") + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
.map(|marf_value| { let side_key = marf_value.to_hex(); trace!("MarfedKV get side-key for {:?}: {:?}", key, &side_key); - SqliteConnection::get(self.get_side_store(), &side_key).expect(&format!( - "ERROR: MARF contained value_hash not found in side storage: {}", - side_key - )) + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) }) + .transpose() } - fn put_all(&mut self, _items: Vec<(String, String)>) { + fn put_all(&mut self, _items: Vec<(String, String)>) -> InterpreterResult<()> { error!("Attempted to commit changes to read-only MARF"); panic!("BUG: attempted commit to read-only MARF"); } @@ -453,26 +460,28 @@ impl<'a> WritableMarfStore<'a> { self.marf.drop_current(); } - pub fn rollback_unconfirmed(self) { + pub fn rollback_unconfirmed(self) -> InterpreterResult<()> { debug!("Drop unconfirmed MARF trie {}", &self.chain_tip); - SqliteConnection::drop_metadata(self.marf.sqlite_tx(), &self.chain_tip); + SqliteConnection::drop_metadata(self.marf.sqlite_tx(), &self.chain_tip)?; self.marf.drop_unconfirmed(); + Ok(()) } - pub fn commit_to(self, final_bhh: &StacksBlockId) { + pub fn commit_to(self, final_bhh: &StacksBlockId) -> InterpreterResult<()> { debug!("commit_to({})", final_bhh); - SqliteConnection::commit_metadata_to(self.marf.sqlite_tx(), &self.chain_tip, final_bhh); + SqliteConnection::commit_metadata_to(self.marf.sqlite_tx(), &self.chain_tip, final_bhh)?; let _ = self.marf.commit_to(final_bhh).map_err(|e| { error!("Failed to commit to MARF block {}: {:?}", &final_bhh, &e); - panic!(); - }); + InterpreterError::Expect("Failed to commit to MARF block".into()) + })?; + Ok(()) } #[cfg(test)] pub fn test_commit(self) { let bhh = self.chain_tip.clone(); - self.commit_to(&bhh); + self.commit_to(&bhh).unwrap(); } pub fn commit_unconfirmed(self) { @@ -487,7 +496,7 @@ impl<'a> WritableMarfStore<'a> { // This is used by miners // so that the block validation and processing logic doesn't // reprocess the same data as if it were already loaded - pub fn commit_mined_block(self, will_move_to: &StacksBlockId) { + pub fn commit_mined_block(self, will_move_to: &StacksBlockId) -> InterpreterResult<()> { debug!( "commit_mined_block: ({}->{})", &self.chain_tip, will_move_to @@ -497,14 +506,15 @@ impl<'a> WritableMarfStore<'a> { // included in the processed chainstate (like a block constructed during mining) // _if_ for some reason, we do want to be able to access that mined chain state in the future, // we should probably commit the data to a different table which does not have uniqueness constraints. 
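// [Editor's note] The get()/get_with_proof() rewrites above and below turn a
// panicking MARF lookup into `InterpreterResult<Option<T>>`. The key idiom is
// Option::transpose(): mapping a fallible conversion over an optional lookup
// yields Option<Result<T, E>>, and transpose() flips it into the
// Result<Option<T>, E> the new signatures need. A std-only sketch:
fn lookup(key: &str) -> Option<&'static str> {
    if key == "known" {
        Some("2a") // stands in for a hex-encoded side-store value
    } else {
        None
    }
}

fn get(key: &str) -> Result<Option<u32>, std::num::ParseIntError> {
    lookup(key)
        .map(|hex| u32::from_str_radix(hex, 16)) // Option<Result<u32, _>>
        .transpose() // Result<Option<u32>, _>
}

fn main() {
    assert_eq!(get("known").unwrap(), Some(42)); // found and decoded
    assert_eq!(get("missing").unwrap(), None); // absent, but not an error
}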
- SqliteConnection::drop_metadata(self.marf.sqlite_tx(), &self.chain_tip); + SqliteConnection::drop_metadata(self.marf.sqlite_tx(), &self.chain_tip)?; let _ = self.marf.commit_mined(will_move_to).map_err(|e| { error!( "Failed to commit to mined MARF block {}: {:?}", &will_move_to, &e ); - panic!(); - }); + InterpreterError::Expect("Failed to commit to MARF block".into()) + })?; + Ok(()) } pub fn seal(&mut self) -> TrieHash { @@ -543,7 +553,7 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { Some(&handle_contract_call_special_cases) } - fn get(&mut self, key: &str) -> Option { + fn get(&mut self, key: &str) -> InterpreterResult> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf .get(&self.chain_tip, key) @@ -558,34 +568,41 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { } _ => Err(e), }) - .expect("ERROR: Unexpected MARF Failure on GET") + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? .map(|marf_value| { let side_key = marf_value.to_hex(); trace!("MarfedKV get side-key for {:?}: {:?}", key, &side_key); - SqliteConnection::get(self.marf.sqlite_tx(), &side_key).expect(&format!( - "ERROR: MARF contained value_hash not found in side storage: {}", - side_key - )) + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) }) + .transpose() } - fn get_with_proof(&mut self, key: &str) -> Option<(String, Vec)> { + fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> { self.marf .get_with_proof(&self.chain_tip, key) .or_else(|e| match e { Error::NotFoundError => Ok(None), _ => Err(e), }) - .expect("ERROR: Unexpected MARF Failure on GET") + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
.map(|(marf_value, proof)| { let side_key = marf_value.to_hex(); let data = - SqliteConnection::get(self.marf.sqlite_tx(), &side_key).expect(&format!( - "ERROR: MARF contained value_hash not found in side storage: {}", - side_key - )); - (data, proof.serialize_to_vec()) + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) }) + .transpose() } fn get_side_store(&mut self) -> &Connection { @@ -649,18 +666,18 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { } } - fn put_all(&mut self, items: Vec<(String, String)>) { + fn put_all(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { let mut keys = Vec::new(); let mut values = Vec::new(); for (key, value) in items.into_iter() { trace!("MarfedKV put '{}' = '{}'", &key, &value); let marf_value = MARFValue::from_value(&value); - SqliteConnection::put(self.get_side_store(), &marf_value.to_hex(), &value); + SqliteConnection::put(self.get_side_store(), &marf_value.to_hex(), &value)?; keys.push(key); values.push(marf_value); } self.marf .insert_batch(&keys, values) - .expect("ERROR: Unexpected MARF Failure"); + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure".into()).into()) } } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 4e19f5ef2e..9a7bb43640 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -665,12 +665,12 @@ impl ClarityBackingStore for MemoryBackingStore { Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) } - fn get(&mut self, key: &str) -> Option { + fn get(&mut self, key: &str) -> InterpreterResult> { SqliteConnection::get(self.get_side_store(), key) } - fn get_with_proof(&mut self, key: &str) -> Option<(String, Vec)> { - SqliteConnection::get(self.get_side_store(), key).map(|x| (x, vec![])) + fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> { + Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } fn get_side_store(&mut self) -> &Connection { @@ -701,9 +701,10 @@ impl ClarityBackingStore for MemoryBackingStore { Some(&handle_contract_call_special_cases) } - fn put_all(&mut self, items: Vec<(String, String)>) { + fn put_all(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { for (key, value) in items.into_iter() { - SqliteConnection::put(self.get_side_store(), &key, &value); + SqliteConnection::put(self.get_side_store(), &key, &value)?; } + Ok(()) } } diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 87e1759290..321d4939a0 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -114,7 +114,7 @@ fn setup_tracked_cost_test( ); assert_eq!( - conn.with_clarity_db_readonly(|db| db.get_clarity_epoch_version()), + conn.with_clarity_db_readonly(|db| db.get_clarity_epoch_version().unwrap()), epoch ); diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index e07e06451f..a09eead2d9 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -1177,7 +1177,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity confirmed-height: u1 }}", intercepted, "\"intercepted-function\"", cost_definer, 
"\"cost-definition\"" ); - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.set_entry_unknown_descriptor( voting_contract_to_use, "confirmed-proposals", @@ -1186,7 +1186,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity &epoch, ) .unwrap(); - db.commit(); + db.commit().unwrap(); store.test_commit(); } @@ -1499,7 +1499,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi confirmed-height: u1 }}", intercepted_ct, intercepted_f, cost_ct, cost_f ); - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.set_entry_unknown_descriptor( voting_contract_to_use, "confirmed-proposals", @@ -1509,7 +1509,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi ) .unwrap(); } - db.commit(); + db.commit().unwrap(); store.test_commit(); } @@ -1599,7 +1599,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi confirmed-height: u1 }}", intercepted_ct, intercepted_f, cost_ct, cost_f ); - let epoch = db.get_clarity_epoch_version(); + let epoch = db.get_clarity_epoch_version().unwrap(); db.set_entry_unknown_descriptor( voting_contract_to_use, "confirmed-proposals", @@ -1609,7 +1609,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi ) .unwrap(); } - db.commit(); + db.commit().unwrap(); store.test_commit(); } diff --git a/stackslib/src/clarity_vm/tests/events.rs b/stackslib/src/clarity_vm/tests/events.rs index ef3c1f59e2..7037e8dcf3 100644 --- a/stackslib/src/clarity_vm/tests/events.rs +++ b/stackslib/src/clarity_vm/tests/events.rs @@ -46,7 +46,7 @@ fn helper_execute_epoch( ) -> (Value, Vec) { let contract_id = QualifiedContractIdentifier::local("contract").unwrap(); let address = "'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"; - let sender = execute(address).expect_principal(); + let sender = execute(address).expect_principal().unwrap(); let marf_kv = MarfedKV::temporary(); let chain_id = test_only_mainnet_to_chain_id(use_mainnet); @@ -72,7 +72,7 @@ fn helper_execute_epoch( // bump the epoch in the Clarity DB tx_conn .with_clarity_db(|db| { - db.set_clarity_epoch_version(epoch); + db.set_clarity_epoch_version(epoch).unwrap(); Ok(()) }) .unwrap(); diff --git a/stackslib/src/clarity_vm/tests/forking.rs b/stackslib/src/clarity_vm/tests/forking.rs index b56472a566..c74cb0c8b0 100644 --- a/stackslib/src/clarity_vm/tests/forking.rs +++ b/stackslib/src/clarity_vm/tests/forking.rs @@ -82,7 +82,7 @@ fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: Stack to_exec: &str, ) -> Result { let c = QualifiedContractIdentifier::local("contract").unwrap(); - let p1 = execute(p1_str).expect_principal(); + let p1 = execute(p1_str).expect_principal().unwrap(); let mut placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); eprintln!("Branched execution..."); @@ -161,7 +161,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc to_exec: &str, ) -> Result { let c = QualifiedContractIdentifier::local("contract").unwrap(); - let p1 = execute(p1_str).expect_principal(); + let p1 = execute(p1_str).expect_principal().unwrap(); let mut placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); eprintln!("Branched execution..."); diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 
9a626e8b04..4883a14eaa 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -114,7 +114,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac gb.as_transaction(|tx| { tx.with_clarity_db(|db| { - db.set_clarity_epoch_version(epoch); + db.set_clarity_epoch_version(epoch).unwrap(); Ok(()) }) .unwrap(); @@ -506,7 +506,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl { let mut env = owned_env.get_exec_environment( - Some(p2.clone().expect_principal()), + Some(p2.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -525,7 +525,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl { let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -553,7 +553,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl { // shouldn't be able to register a name you didn't preorder! let mut env = owned_env.get_exec_environment( - Some(p2.clone().expect_principal()), + Some(p2.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -572,7 +572,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl { // should work! let mut env = owned_env.get_exec_environment( - Some(p1.clone().expect_principal()), + Some(p1.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); @@ -590,7 +590,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl { // try to underpay! let mut env = owned_env.get_exec_environment( - Some(p2.clone().expect_principal()), + Some(p2.clone().expect_principal().unwrap()), None, &mut placeholder_context, ); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 03d2d2edfa..7117a5d6f3 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -68,13 +68,14 @@ use blockstack_lib::cost_estimates::UnitEstimator; use blockstack_lib::net::db::LocalPeer; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; +use blockstack_lib::net::StacksMessage; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; use libstackerdb::StackerDBChunkData; use rusqlite::types::ToSql; use rusqlite::{Connection, OpenFlags}; use serde_json::Value; -use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, }; @@ -232,6 +233,35 @@ fn main() { process::exit(0); } + if argv[1] == "decode-net-message" { + let data: String = argv[2].clone(); + let buf = if data == "-" { + let mut buffer = vec![]; + io::stdin().read_to_end(&mut buffer).unwrap(); + buffer + } else { + let data: serde_json::Value = serde_json::from_str(data.as_str()).unwrap(); + let data_array = data.as_array().unwrap(); + let mut buf = vec![]; + for elem in data_array { + buf.push(elem.as_u64().unwrap() as u8); + } + buf + }; + match read_next::<StacksMessage>(&mut &buf[..]) { + Ok(msg) => { + println!("{:#?}", &msg); + process::exit(0); + } + Err(_) => { + let ptr = &mut &buf[..]; + let mut debug_cursor = LogReader::from_reader(ptr); + let _ = read_next::<StacksMessage>(&mut debug_cursor); + process::exit(1); + } + } + } + if argv[1] == "get-tenure" { if argv.len() < 4 { eprintln!("Usage: {} get-tenure CHAIN_STATE_DIR
BLOCK_HASH", argv[0]); diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs index 7ab2a728c3..f1ec41a1af 100644 --- a/stackslib/src/net/api/callreadonly.rs +++ b/stackslib/src/net/api/callreadonly.rs @@ -280,11 +280,17 @@ impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { // decode the response let data_resp = match data_resp { - Ok(Some(Ok(data))) => CallReadOnlyResponse { - okay: true, - result: Some(format!("0x{}", data.serialize_to_hex())), - cause: None, - }, + Ok(Some(Ok(data))) => { + let hex_result = data + .serialize_to_hex() + .map_err(|e| NetError::SerializeError(format!("{:?}", &e)))?; + + CallReadOnlyResponse { + okay: true, + result: Some(format!("0x{}", hex_result)), + cause: None, + } + } Ok(Some(Err(e))) => match e { Unchecked(CheckErrors::CostBalanceExceeded(actual_cost, _)) if actual_cost.write_count > 0 => diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index 684096cd1f..3ed0f22113 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -146,17 +146,21 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { clarity_tx.with_clarity_db_readonly(|clarity_db| { let key = ClarityDatabase::make_key_for_account_balance(&account); let burn_block_height = - clarity_db.get_current_burnchain_block_height() as u64; + clarity_db.get_current_burnchain_block_height().ok()? as u64; let v1_unlock_height = clarity_db.get_v1_unlock_height(); - let v2_unlock_height = clarity_db.get_v2_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; let (balance, balance_proof) = if with_proof { clarity_db .get_with_proof::<STXBalance>(&key) + .ok() + .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) } else { clarity_db .get::<STXBalance>(&key) + .ok() + .flatten() .map(|a| (a, None)) .unwrap_or_else(|| (STXBalance::zero(), None)) }; @@ -165,20 +169,26 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let (nonce, nonce_proof) = if with_proof { clarity_db .get_with_proof(&key) + .ok() + .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| (0, Some("".into()))) } else { clarity_db .get(&key) + .ok() + .flatten() .map(|a| (a, None)) .unwrap_or_else(|| (0, None)) }; - let unlocked = balance.get_available_balance_at_burn_block( - burn_block_height, - v1_unlock_height, - v2_unlock_height, - ); + let unlocked = balance + .get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) + .ok()?; let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( burn_block_height, v1_unlock_height, @@ -188,14 +198,14 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); - AccountEntryResponse { + Some(AccountEntryResponse { balance, locked, unlock_height, nonce, balance_proof, nonce_proof, - } + }) }) }) }); diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs index a7845c7fec..0704d48073 100644 --- a/stackslib/src/net/api/getconstantval.rs +++ b/stackslib/src/net/api/getconstantval.rs @@ -147,7 +147,8 @@ impl RPCRequestHandler for RPCGetConstantValRequestHandler { let cst = contract .contract_context .lookup_variable(constant_name.as_str())?
- .serialize_to_hex(); + .serialize_to_hex() + .ok()?; let data = format!("0x{cst}"); Some(ConstantValResponse { data }) diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs index 38d614b1f4..ce821fac72 100644 --- a/stackslib/src/net/api/getcontractabi.rs +++ b/stackslib/src/net/api/getcontractabi.rs @@ -131,8 +131,9 @@ impl RPCRequestHandler for RPCGetContractAbiRequestHandler { chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { let epoch = clarity_tx.get_epoch(); clarity_tx.with_analysis_db_readonly(|db| { - let contract = db.load_contract(&contract_identifier, &epoch)?; - contract.contract_interface + db.load_contract(&contract_identifier, &epoch) + .ok()? + .map(|contract| contract.contract_interface) }) }) }); diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index f670d1020c..505299d769 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -142,12 +142,14 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let contract_commit_key = make_contract_hash_key(&contract_identifier); let (contract_commit, proof) = if with_proof { db.get_with_proof::<ContractCommitment>(&contract_commit_key) - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .expect("BUG: obtained source, but couldn't get contract commit") + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { db.get::<ContractCommitment>(&contract_commit_key) - .map(|a| (a, None)) - .expect("BUG: obtained source, but couldn't get contract commit") + .ok() + .flatten() + .map(|a| (a, None))? }; let publish_height = contract_commit.block_height; diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index 0594dc8639..aa1c1116af 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ b/stackslib/src/net/api/getdatavar.rs @@ -143,22 +143,23 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { }; let with_proof = contents.get_with_proof(); + let key = ClarityDatabase::make_key_for_trip( + &contract_identifier, + StoreType::Variable, + &var_name, + ); let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_trip( - &contract_identifier, - StoreType::Variable, - &var_name, - ); - let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db .get_with_proof(&key) + .ok() + .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { - clarity_db.get(&key).map(|a| (a, None))? + clarity_db.get(&key).ok().flatten().map(|a| (a, None))?
}; let data = format!("0x{}", value_hex); diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs index 8aa0a8fbef..bbc765c6a4 100644 --- a/stackslib/src/net/api/getistraitimplemented.rs +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -158,19 +158,24 @@ impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { - let analysis = db.load_contract_analysis(&contract_identifier)?; + let analysis = db + .load_contract_analysis(&contract_identifier) + .ok() + .flatten()?; if analysis.implemented_traits.contains(&trait_id) { Some(GetIsTraitImplementedResponse { is_implemented: true, }) } else { - let trait_defining_contract = - db.load_contract_analysis(&trait_id.contract_identifier)?; + let trait_defining_contract = db + .load_contract_analysis(&trait_id.contract_identifier) + .ok() + .flatten()?; let trait_definition = trait_defining_contract.get_defined_trait(&trait_id.name)?; let is_implemented = analysis .check_trait_compliance( - &db.get_clarity_epoch_version(), + &db.get_clarity_epoch_version().ok()?, &trait_id, trait_definition, ) diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index 9a5cc24e82..099ae260bd 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -170,29 +170,37 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { } }; let with_proof = contents.get_with_proof(); + let key = + ClarityDatabase::make_key_for_data_map_entry(&contract_identifier, &map_name, &key) + .map_err(|e| NetError::SerializeError(format!("{:?}", &e)))?; + let none_response = Value::none() + .serialize_to_hex() + .map_err(|e| NetError::SerializeError(format!("{:?}", &e)))?; let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_data_map_entry( - &contract_identifier, - &map_name, - &key, - ); let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db .get_with_proof(&key) + .ok() + .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| { test_debug!("No value for '{}' in {}", &key, tip); - (Value::none().serialize_to_hex(), Some("".into())) + (none_response, Some("".into())) }) } else { - clarity_db.get(&key).map(|a| (a, None)).unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (Value::none().serialize_to_hex(), None) - }) + clarity_db + .get(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, None) + }) }; let data = format!("0x{}", value_hex); @@ -253,7 +261,10 @@ impl StacksHttpRequest { HttpRequestContents::new() .for_tip(tip_req) .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()) - .payload_json(serde_json::Value::String(key.serialize_to_hex())), + .payload_json(serde_json::Value::String( + key.serialize_to_hex() + .expect("FATAL: invalid key could not be serialized"), + )), ) .expect("FATAL: failed to construct request from infallible data") } diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index 70d02fe198..0257b148c5 100644 --- 
a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -169,7 +169,7 @@ impl RPCPoxInfoData { .map_err(|_| NetError::NotFoundError)?; let res = match data { - Some(Ok(res)) => res.expect_result_ok().expect_tuple(), + Some(Ok(res)) => res.expect_result_ok()?.expect_tuple()?, _ => return Err(NetError::DBError(DBError::NotFoundError)), }; @@ -177,49 +177,49 @@ impl RPCPoxInfoData { .get("first-burnchain-block-height") .expect(&format!("FATAL: no 'first-burnchain-block-height'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let min_stacking_increment_ustx = res .get("min-amount-ustx") .expect(&format!("FATAL: no 'min-amount-ustx'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let prepare_cycle_length = res .get("prepare-cycle-length") .expect(&format!("FATAL: no 'prepare-cycle-length'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let rejection_fraction = res .get("rejection-fraction") .expect(&format!("FATAL: no 'rejection-fraction'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let reward_cycle_id = res .get("reward-cycle-id") .expect(&format!("FATAL: no 'reward-cycle-id'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let reward_cycle_length = res .get("reward-cycle-length") .expect(&format!("FATAL: no 'reward-cycle-length'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let current_rejection_votes = res .get("current-rejection-votes") .expect(&format!("FATAL: no 'current-rejection-votes'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let total_liquid_supply_ustx = res .get("total-liquid-supply-ustx") .expect(&format!("FATAL: no 'total-liquid-supply-ustx'")) .to_owned() - .expect_u128() as u64; + .expect_u128()? as u64; let total_required = (total_liquid_supply_ustx as u128 / 100) .checked_mul(rejection_fraction as u128) diff --git a/stackslib/src/net/api/liststackerdbreplicas.rs b/stackslib/src/net/api/liststackerdbreplicas.rs new file mode 100644 index 0000000000..0d4acb0f04 --- /dev/null +++ b/stackslib/src/net/api/liststackerdbreplicas.rs @@ -0,0 +1,219 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ContractName}; +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlock}; +use crate::net::db::PeerDB; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, NeighborAddress, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Largest number of replicas returned +pub const MAX_LIST_REPLICAS: usize = 64; + +#[derive(Clone)] +pub struct RPCListStackerDBReplicasRequestHandler { + pub contract_identifier: Option<QualifiedContractIdentifier>, +} + +impl RPCListStackerDBReplicasRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCListStackerDBReplicasRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P<address>{})/(?P<contract>{})/replicas$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result<HttpRequestContents, Error> { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + self.contract_identifier = Some(contract_identifier); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCListStackerDBReplicasRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + + let (replicas_resp, local_peer, allow_private) = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + let replicas_resp = PeerDB::find_stacker_db_replicas( + network.peerdb_conn(), + network.bound_neighbor_key().network_id, + &contract_identifier, + get_epoch_time_secs().saturating_sub(network.get_connection_opts().max_neighbor_age), + MAX_LIST_REPLICAS + ) + .map_err(|e| { + warn!("Failed to find stackerdb replicas"; "contract_id" => %contract_identifier, "error" => %e); + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new("Unable to list replicas of StackerDB".to_string()) + ) + }); + let local_peer_resp = network.get_local_peer().clone(); + (replicas_resp, local_peer_resp, network.get_connection_opts().private_neighbors) + }); + + let mut naddrs = match replicas_resp { + Ok(neighbors) => neighbors + .into_iter() + .map(|neighbor| NeighborAddress::from_neighbor(&neighbor)) + .filter(|naddr| { + if naddr.addrbytes.is_anynet() { + // don't expose 0.0.0.0 or ::1 + return false; + } + if !allow_private && naddr.addrbytes.is_in_private_range() { + // filter unroutable network addresses + return false; + } + true + }) + .collect::<Vec<_>>(), + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + if local_peer + .stacker_dbs + .iter() + .find(|contract_id| contract_id == &&contract_identifier) + .is_some() + { + naddrs.insert(0, local_peer.to_public_neighbor_addr()); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&naddrs)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCListStackerDBReplicasRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result<HttpResponsePayload, Error> { + let metadata: Vec<NeighborAddress> = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(metadata)?)
+ } +} + +impl StacksHttpRequest { + pub fn new_list_stackerdb_replicas( + host: PeerHost, + stackerdb_contract_id: QualifiedContractIdentifier, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/stackerdb/{}/{}/replicas", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name + ), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into a list of replicas + /// If it fails, return Self::Error(..) + pub fn decode_stackerdb_replicas(self) -> Result<Vec<NeighborAddress>, NetError> { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: Vec<NeighborAddress> = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 3eaa6148d2..d8b40b4680 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -55,6 +55,7 @@ pub mod getstackerdbchunk; pub mod getstackerdbmetadata; pub mod getstxtransfercost; pub mod gettransaction_unconfirmed; +pub mod liststackerdbreplicas; pub mod postblock; pub mod postfeerate; pub mod postmempoolquery; @@ -106,6 +107,9 @@ impl StacksHttp { self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), ); + self.register_rpc_endpoint( + liststackerdbreplicas::RPCListStackerDBReplicasRequestHandler::new(), + ); self.register_rpc_endpoint(postblock::RPCPostBlockRequestHandler::new()); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); diff --git a/stackslib/src/net/api/tests/liststackerdbreplicas.rs b/stackslib/src/net/api/tests/liststackerdbreplicas.rs new file mode 100644 index 0000000000..c26f29c520 --- /dev/null +++ b/stackslib/src/net/api/tests/liststackerdbreplicas.rs @@ -0,0 +1,132 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::Address; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::MessageSignature; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(); + let request = + StacksHttpRequest::new_list_stackerdb_replicas(addr.into(), contract_identifier.clone()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = liststackerdbreplicas::RPCListStackerDBReplicasRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some(contract_identifier.clone()) + ); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let contract_identifier = + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(); + let none_contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-ext", + ) + .unwrap(); + + let request = + StacksHttpRequest::new_list_stackerdb_replicas(addr.into(), contract_identifier.clone()); + requests.push(request); + + // no contract + let request = StacksHttpRequest::new_list_stackerdb_replicas( + addr.into(), + none_contract_identifier.clone(), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_replicas().unwrap(); + assert_eq!(resp.len(), 2); + + let naddr = resp.last().clone().unwrap(); + assert_eq!(naddr.addrbytes, PeerAddress::from_ipv4(127, 0, 0, 1)); + assert_eq!(naddr.port, 0); + assert_eq!( + naddr.public_key_hash, + Hash160::from_hex("9b92533ccc243e25eb6197bd03c9164642c7c8a8").unwrap() + ); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n",
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let resp = response.decode_stackerdb_replicas().unwrap(); + assert_eq!(resp.len(), 0); +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index cc52a80e6e..6476962d1f 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -24,6 +24,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::pipe::Pipe; @@ -38,6 +39,7 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, }; use crate::core::MemPoolDB; +use crate::net::db::PeerDB; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; use crate::net::rpc::ConversationHttp; @@ -68,6 +70,7 @@ mod getstackerdbchunk; mod getstackerdbmetadata; mod getstxtransfercost; mod gettransaction_unconfirmed; +mod liststackerdbreplicas; mod postblock; mod postfeerate; mod postmempoolquery; @@ -237,6 +240,9 @@ impl<'a> TestRPC<'a> { let mut peer_1_config = TestPeerConfig::new(&format!("{}-peer1", test_name), 0, 0); let mut peer_2_config = TestPeerConfig::new(&format!("{}-peer2", test_name), 0, 0); + peer_1_config.private_key = privk1.clone(); + peer_2_config.private_key = privk2.clone(); + peer_1_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, write_count: 0, @@ -370,6 +376,31 @@ impl<'a> TestRPC<'a> { bytes.len() as u64 }; + // force peer 2 to know about peer 1 + { + let tx = peer_2.network.peerdb.tx_begin().unwrap(); + let mut neighbor = peer_1.config.to_neighbor(); + neighbor.last_contact_time = get_epoch_time_secs(); + PeerDB::try_insert_peer( + &tx, + &neighbor, + &[QualifiedContractIdentifier::new( + addr1.clone().into(), + "hello-world".into(), + )], + ) + .unwrap(); + tx.commit().unwrap(); + } + // force peer 1 to know about peer 2 + { + let tx = peer_1.network.peerdb.tx_begin().unwrap(); + let mut neighbor = peer_2.config.to_neighbor(); + neighbor.last_contact_time = get_epoch_time_secs(); + PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap(); + tx.commit().unwrap(); + } + let tip = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 664ab52c30..223b7e1bbd 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -986,7 +986,8 @@ impl ConversationP2P { let _seq = msg.request_id(); let mut handle = self.connection.make_relay_handle(self.conn_id)?; - msg.consensus_serialize(&mut handle)?; + let buf = msg.serialize_to_vec(); + handle.write_all(&buf).map_err(net_error::WriteError)?; self.stats.msgs_tx += 1; @@ -1011,7 +1012,8 @@ impl ConversationP2P { let mut handle = self.connection .make_request_handle(msg.request_id(), ttl, self.conn_id)?; - msg.consensus_serialize(&mut handle)?; + let buf = msg.serialize_to_vec(); + handle.write_all(&buf).map_err(net_error::WriteError)?; self.stats.msgs_tx += 1; @@ -2485,7 +2487,7 @@ impl ConversationP2P { Ok(None) } _ => { - test_debug!( + debug!( "{:?}: Got unauthenticated message (type {}), will NACK", &self, msg.payload.get_message_name() diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 
522a0f6343..88f3fff39b 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -77,7 +77,7 @@ impl<P: ProtocolFamily> ReceiverNotify<P>
{ match self.receiver_input.send(msg) { Ok(_) => {} Err(e) => { - warn!( + debug!( "Failed to reply message {} ({} {}): {:?}", self.expected_seq, msg_name, msg_id, &e ); @@ -249,7 +249,11 @@ impl<P: ProtocolFamily> NetworkReplyHandle<P>
{ } }; self.request_pipe_write = fd_opt; - Ok(ret) + if drop_on_success { + Ok(self.request_pipe_write.is_none()) + } else { + Ok(ret) + } } /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe. @@ -387,6 +391,8 @@ pub struct ConnectionOptions { pub socket_recv_buffer_size: u32, /// socket write buffer size pub socket_send_buffer_size: u32, + /// whether or not to announce or accept neighbors that are behind private networks + pub private_neighbors: bool, // fault injection pub disable_neighbor_walk: bool, @@ -478,6 +484,7 @@ impl std::default::Default for ConnectionOptions { mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) socket_recv_buffer_size: 131072, // Linux default socket_send_buffer_size: 16384, // Linux default + private_neighbors: true, // no faults on by default disable_neighbor_walk: false, @@ -1055,9 +1062,8 @@ impl<P: ProtocolFamily> ConnectionOutbox<P>
{ let mut total_sent = 0; let mut blocked = false; let mut disconnected = false; - while !blocked && !disconnected { - let mut message_eof = false; - + let mut message_eof = false; + while !blocked && !disconnected && !message_eof { if self.pending_message_fd.is_none() { self.pending_message_fd = self.begin_next_message(); } @@ -1174,9 +1180,10 @@ impl<P: ProtocolFamily> ConnectionOutbox<P>
{ } test_debug!( - "Connection send_bytes finished: blocked = {}, disconnected = {}", + "Connection send_bytes finished: blocked = {}, disconnected = {}, eof = {}", blocked, - disconnected + disconnected, + message_eof, ); if total_sent == 0 { diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 25c4ed7e62..246210bb28 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -185,6 +185,21 @@ impl LocalPeer { )), } } + + /// Best-effort attempt to calculate a publicly-routable neighbor address for local peer + pub fn to_public_neighbor_addr(&self) -> NeighborAddress { + if let Some((peer_addr, peer_port)) = self.public_ip_address.as_ref() { + NeighborAddress { + addrbytes: peer_addr.clone(), + port: *peer_port, + public_key_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( + &self.private_key, + )), + } + } else { + self.to_neighbor_addr() + } + } } impl FromRow<LocalPeer> for LocalPeer { @@ -1770,29 +1785,27 @@ impl PeerDB { } /// Find out which peers replicate a particular stacker DB. - /// Return a randomized list of up to the given size. + /// Return a randomized list of up to the given size, where all + /// peers returned have a last-contact time greater than the given minimum age. pub fn find_stacker_db_replicas( conn: &DBConn, network_id: u32, smart_contract: &QualifiedContractIdentifier, + min_age: u64, max_count: usize, ) -> Result<Vec<Neighbor>, db_error> { if max_count == 0 { return Ok(vec![]); } - let mut slots = PeerDB::get_stacker_db_slots(conn, smart_contract)?; - slots.shuffle(&mut thread_rng()); - - let mut ret = vec![]; - for slot in slots { - if let Some(neighbor) = PeerDB::get_peer_at(conn, network_id, slot)? { - ret.push(neighbor); - if ret.len() >= max_count { - break; - } - } - } - Ok(ret) + let qry = "SELECT DISTINCT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; + let max_count_u32 = u32::try_from(max_count).unwrap_or(u32::MAX); + let args: &[&dyn ToSql] = &[ + &smart_contract.to_string(), + &network_id, + &u64_to_sql(min_age)?, + &max_count_u32, + ]; + query_rows(conn, qry, args) } } @@ -2479,21 +2492,21 @@ mod test { } let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 1).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 0, 1).unwrap(); assert_eq!(replicas.len(), 1); assert_eq!(replicas[0], neighbor); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 2).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 0, 2).unwrap(); assert_eq!(replicas.len(), 1); assert_eq!(replicas[0], neighbor); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 0).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 0, 0).unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef1, &stackerdbs[0], 1).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef1, &stackerdbs[0], 0, 1).unwrap(); assert_eq!(replicas.len(), 0); // insert new stacker DBs -- keep one the same, and add a different one @@ -2523,17 +2536,50 @@ mod test { assert_eq!(neighbor_stackerdbs, changed_stackerdbs); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[0], 1) + PeerDB::find_stacker_db_replicas(&db.conn,
0x9abcdef0, &changed_stackerdbs[0], 0, 1) .unwrap(); assert_eq!(replicas.len(), 1); assert_eq!(replicas[0], neighbor); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 0, 1) .unwrap(); assert_eq!(replicas.len(), 1); assert_eq!(replicas[0], neighbor); + // query stacker DBs filtering by last-contact time + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &changed_stackerdbs[1], + 1552509641, + 1, + ) + .unwrap(); + assert_eq!(replicas.len(), 1); + assert_eq!(replicas[0], neighbor); + + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &changed_stackerdbs[1], + 1552509642, + 1, + ) + .unwrap(); + assert_eq!(replicas.len(), 1); + assert_eq!(replicas[0], neighbor); + + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &changed_stackerdbs[1], + 1552509643, + 1, + ) + .unwrap(); + assert_eq!(replicas.len(), 0); + // clear stacker DBs { let tx = db.tx_begin().unwrap(); @@ -2549,12 +2595,12 @@ mod test { assert_eq!(neighbor_stackerdbs, []); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[0], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[0], 0, 1) .unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 0, 1) .unwrap(); assert_eq!(replicas.len(), 0); @@ -2587,32 +2633,54 @@ mod test { assert_eq!(neighbor_stackerdbs, replace_stackerdbs); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdee0, &stackerdbs[0], 1).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdee0, &stackerdbs[0], 0, 1) + .unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdee0, &stackerdbs[1], 1).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdee0, &stackerdbs[1], 0, 1) + .unwrap(); assert_eq!(replicas.len(), 0); - let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[0], 1) - .unwrap(); + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &changed_stackerdbs[0], + 0, + 1, + ) + .unwrap(); assert_eq!(replicas.len(), 0); - let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 1) - .unwrap(); + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &changed_stackerdbs[1], + 0, + 1, + ) + .unwrap(); assert_eq!(replicas.len(), 0); - let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &replace_stackerdbs[0], 1) - .unwrap(); + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &replace_stackerdbs[0], + 0, + 1, + ) + .unwrap(); assert_eq!(replicas.len(), 1); assert_eq!(replicas[0], neighbor); - let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &replace_stackerdbs[1], 1) - .unwrap(); + let replicas = PeerDB::find_stacker_db_replicas( + &db.conn, + 0x9abcdef0, + &replace_stackerdbs[1], + 0, + 1, + ) + .unwrap(); assert_eq!(replicas.len(), 1); assert_eq!(replicas[0], neighbor); } @@ -2631,30 +2699,30 @@ mod test { } let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 1).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[0], 0, 1).unwrap(); 
assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[1], 1).unwrap(); + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &stackerdbs[1], 0, 1).unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[0], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[0], 0, 1) .unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &changed_stackerdbs[1], 0, 1) .unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &replace_stackerdbs[0], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &replace_stackerdbs[0], 0, 1) .unwrap(); assert_eq!(replicas.len(), 0); let replicas = - PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &replace_stackerdbs[1], 1) + PeerDB::find_stacker_db_replicas(&db.conn, 0x9abcdef0, &replace_stackerdbs[1], 0, 1) .unwrap(); assert_eq!(replicas.len(), 0); } diff --git a/stackslib/src/net/download.rs b/stackslib/src/net/download.rs index 022152be54..a802767f01 100644 --- a/stackslib/src/net/download.rs +++ b/stackslib/src/net/download.rs @@ -3132,6 +3132,7 @@ pub mod test { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_nonce(&spending_account.origin_address().unwrap().into()) + .unwrap() }) }) .unwrap() diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 90698e126e..2d806e2866 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -28,6 +28,7 @@ use std::{error, fmt, io}; use clarity::vm::analysis::contract_interface_builder::ContractInterface; use clarity::vm::costs::ExecutionCost; +use clarity::vm::errors::Error as InterpreterError; use clarity::vm::types::{ PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, TraitIdentifier, }; @@ -550,6 +551,12 @@ impl From for Error { } } +impl From<InterpreterError> for Error { + fn from(e: InterpreterError) -> Self { + Error::ClarityError(e.into()) + } +} + #[cfg(test)] impl PartialEq for Error { /// (make I/O errors comparable for testing purposes) @@ -2138,6 +2145,7 @@ pub mod test { peerdb.conn(), local_peer.network_id, &contract_id, + 0, 10000000, ) .unwrap() @@ -2152,8 +2160,7 @@ pub mod test { &db_config, PeerNetworkComms::new(), stacker_dbs, - ) - .expect(&format!("FATAL: could not open '{}'", stackerdb_path)); + ); stacker_db_syncs.insert(contract_id.clone(), (db_config.clone(), stacker_db_sync)); } @@ -2386,6 +2393,7 @@ pub mod test { &config.stacker_dbs, &config.stacker_db_configs, ); + let stackerdb_contracts: Vec<_> = stacker_dbs.keys().map(|cid| cid.clone()).collect(); let mut peer_network = PeerNetwork::new( peerdb, @@ -2411,8 +2419,25 @@ pub mod test { let p2p_port = peer_network.bound_neighbor_key().port; let http_port = peer_network.http.as_ref().unwrap().http_server_addr.port(); - config.server_port = p2p_port; - config.http_port = http_port; + config.data_url = + UrlString::try_from(format!("http://127.0.0.1:{}", http_port).as_str()).unwrap(); + + peer_network + .peerdb + .update_local_peer( + config.network_id, + config.burnchain.network_id, + config.data_url.clone(), + p2p_port, + &stackerdb_contracts, + ) + .unwrap(); + + let local_peer = PeerDB::get_local_peer(peer_network.peerdb.conn()).unwrap(); + debug!( + "{:?}: initial neighbors: {:?}", + &local_peer,
&config.initial_neighbors + ); TestPeer { config: config, @@ -3370,7 +3395,7 @@ pub mod test { parent_microblock_header_opt.as_ref(), ); - builder.epoch_finish(epoch); + builder.epoch_finish(epoch).unwrap(); (stacks_block, microblocks) }, ); diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 75ea4c2ab6..38c59461fc 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -357,7 +357,7 @@ pub trait NeighborComms { } Err(Err(e)) => { // disconnected - test_debug!( + debug!( "{:?}: Failed to get reply: {:?}", network.get_local_peer(), &e ); @@ -395,11 +395,12 @@ pub trait NeighborComms { } } - /// Are we connected already to a neighbor? + /// Are we connected and handshake'd already to a neighbor? fn has_neighbor_session(&self, network: &PeerNetwork, nk: &NK) -> bool { - network - .get_neighbor_convo(&nk.to_neighbor_key(network)) - .is_some() + let Some(convo) = network.get_neighbor_convo(&nk.to_neighbor_key(network)) else { + return false; + }; + convo.is_authenticated() && convo.peer_version > 0 } /// Reset all comms diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index fd85f0ce1f..5a40ac9677 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -176,6 +176,34 @@ pub trait NeighborWalkDB { /// Get the number of peers in a given AS fn get_asn_count(&self, network: &PeerNetwork, asn: u32) -> u64; + /// Pick neighbors with a minimum age for a walk + fn pick_walk_neighbors( + network: &PeerNetwork, + num_neighbors: u64, + min_age: u64, + ) -> Result<Vec<Neighbor>, net_error> { + let block_height = network.get_chain_view().burn_block_height; + let cur_epoch = network.get_current_epoch(); + let neighbors = PeerDB::get_random_walk_neighbors( + &network.peerdb_conn(), + network.get_local_peer().network_id, + cur_epoch.network_epoch, + min_age, + num_neighbors as u32, + block_height, + ) + .map_err(net_error::DBError)?; + + if neighbors.len() == 0 { + debug!( + "{:?}: No neighbors available in the peer DB!", + network.get_local_peer() + ); + return Err(net_error::NoSuchNeighbor); + } + Ok(neighbors) + } + /// Get a random starting neighbor for an ongoing walk. /// Older but still fresh neighbors will be preferred -- a neighbor from the first 50th /// percentile of neighbors (by last contact time) will be selected at random. @@ -184,17 +212,46 @@ pub trait NeighborWalkDB { fn get_next_walk_neighbor(&self, network: &PeerNetwork) -> Result<Neighbor, net_error> { // pick a random neighbor as a walking point.
// favor neighbors with older last-contact times - let mut next_neighbors = self + let next_neighbors_res = self .get_fresh_random_neighbors(network, (NUM_NEIGHBORS as u64) * 2) .map_err(|e| { debug!( - "{:?}: Failed to load initial walk neighbors: {:?}", + "{:?}: Failed to load fresh initial walk neighbors: {:?}", + network.get_local_peer(), + &e + ); + e + }); + + let db_neighbors = if let Ok(neighbors) = next_neighbors_res { + neighbors + } else { + let any_neighbors = Self::pick_walk_neighbors(network, (NUM_NEIGHBORS as u64) * 2, 0) + .map_err(|e| { + info!( + "{:?}: Failed to load any initial walk neighbors: {:?}", network.get_local_peer(), &e ); e })?; + any_neighbors + }; + + let mut next_neighbors: Vec<_> = db_neighbors + .into_iter() + .filter_map(|neighbor| { + if !network.get_connection_opts().private_neighbors + && neighbor.addr.addrbytes.is_in_private_range() + { + None + } else { + Some(neighbor) + } + }) + .collect(); + if next_neighbors.len() == 0 { return Err(net_error::NoSuchNeighbor); } @@ -246,28 +303,9 @@ impl NeighborWalkDB for PeerDBNeighborWalk { network: &PeerNetwork, num_neighbors: u64, ) -> Result, net_error> { - let block_height = network.get_chain_view().burn_block_height; let min_age = get_epoch_time_secs().saturating_sub(network.connection_opts.max_neighbor_age); - let cur_epoch = network.get_current_epoch(); - let neighbors = PeerDB::get_random_walk_neighbors( - &network.peerdb_conn(), - network.get_local_peer().network_id, - cur_epoch.network_epoch, - min_age, - num_neighbors as u32, - block_height, - ) - .map_err(net_error::DBError)?; - - if neighbors.len() == 0 { - debug!( - "{:?}: No neighbors available in the peer DB!", - network.get_local_peer() - ); - return Err(net_error::NoSuchNeighbor); - } - Ok(neighbors) + Self::pick_walk_neighbors(network, num_neighbors, min_age) } fn lookup_stale_neighbors( diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 031e9f91a5..9f2e78151c 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -213,9 +213,14 @@ impl PeerNetwork { &self.local_peer, self.walk_attempts ); + let (num_always_connected, total_always_connected) = self + .count_connected_always_allowed_peers() + .unwrap_or((0, 0)); + // always ensure we're connected to always-allowed outbound peers - let walk_res = if ibd { - // always connect to bootstrap peers if in IBD + let walk_res = if ibd || (num_always_connected == 0 && total_always_connected > 0) { + // always connect to bootstrap peers if in IBD, or if we're not connected to an + // always-allowed peer already NeighborWalk::instantiate_walk_to_always_allowed( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -298,25 +303,10 @@ impl PeerNetwork { /// Returns true if we instantiated the walk. /// Returns false if not. fn setup_walk(&mut self, ibd: bool) -> bool { - // we unconditionally need to begin walking if we're not connected to any always-allowed - // peer - let mut need_new_peers = false; - let (num_always_connected, total_always_connected) = self - .count_connected_always_allowed_peers() - .unwrap_or((0, 0)); - if num_always_connected == 0 && total_always_connected > 0 { - // force a reset - debug!("{:?}: not connected to any always-allowed peers; forcing a walk reset to try and fix this", &self.local_peer); - self.reset_walk(); - - need_new_peers = true; - } - if self.walk.is_none() { // time to do a walk yet? 
- if !need_new_peers - && (self.walk_count > self.connection_opts.num_initial_walks || self.walk_retries > self.connection_opts.walk_retry_count) + if (self.walk_count > self.connection_opts.num_initial_walks + || self.walk_retries > self.connection_opts.walk_retry_count) && self.walk_deadline > get_epoch_time_secs() { // we've done enough walks for an initial mixing, or we can't connect to anyone, diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 5cbda3c774..642195a589 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -565,9 +565,14 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { } /// Select neighbors that are routable, and ignore ones that are not. - /// TODO: expand if we ever want to filter by unroutable network class or something - fn filter_sensible_neighbors(mut neighbors: Vec<NeighborAddress>) -> Vec<NeighborAddress> { + fn filter_sensible_neighbors( + mut neighbors: Vec<NeighborAddress>, + private_neighbors: bool, + ) -> Vec<NeighborAddress> { neighbors.retain(|neighbor| !neighbor.addrbytes.is_anynet()); + if !private_neighbors { + neighbors.retain(|neighbor| !neighbor.addrbytes.is_in_private_range()); + } neighbors } @@ -644,8 +649,6 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { } } - /// Determine if a peer is routable from us - /// Handle a HandshakeAcceptData. /// Update the PeerDB information from the handshake data, as well as `self.cur_neighbor`, if /// this neighbor was routable. If it's not routable (i.e. we walked to an inbound neighbor), @@ -670,7 +673,9 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { // just use the one we used to contact it. This can happen if the // node is behind a load-balancer, or is doing port-forwarding, // etc. - if neighbor_from_handshake.addr.addrbytes.is_in_private_range() { + if neighbor_from_handshake.addr.addrbytes.is_in_private_range() + || neighbor_from_handshake.addr.addrbytes.is_anynet() + { debug!( "{}: outbound neighbor gave private IP address {:?}; assuming it meant {:?}", local_peer_str, &neighbor_from_handshake.addr, &self.cur_neighbor.addr ); @@ -834,7 +839,10 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { &self.cur_neighbor.addr, data.neighbors ); - let neighbors = Self::filter_sensible_neighbors(data.neighbors.clone()); + let neighbors = Self::filter_sensible_neighbors( + data.neighbors.clone(), + network.get_connection_opts().private_neighbors, + ); let (mut found, to_resolve) = self .neighbor_db .lookup_stale_neighbors(network, &neighbors)?; @@ -1278,7 +1286,10 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { &nkey, &data.neighbors ); - let neighbors = Self::filter_sensible_neighbors(data.neighbors.clone()); + let neighbors = Self::filter_sensible_neighbors( + data.neighbors.clone(), + network.get_connection_opts().private_neighbors, + ); self.resolved_getneighbors_neighbors .insert(naddr, neighbors); } @@ -1861,7 +1872,7 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { &self.state, self.walk_state_timeout ); - return Ok(None); + return Err(net_error::StepTimeout); } can_continue = match self.state { diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 9c4492720b..3e182ddf3c 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -989,9 +989,9 @@ impl PeerNetwork { let num_allowed_peers = allowed_peers.len(); let mut count = 0; for allowed in allowed_peers { - if self.events.contains_key(&allowed.addr) { - count += 1; - } + let pubkh = Hash160::from_node_public_key(&allowed.public_key); + let events = self.get_pubkey_events(&pubkh); + count += events.len() as u64; } Ok((count, num_allowed_peers as u64)) } @@ -1509,6 +1515,15 @@ impl PeerNetwork { return Err(net_error::AlreadyConnected(event_id, neighbor_key.clone()));
} + // unroutable? + if !self.connection_opts.private_neighbors && neighbor_key.addrbytes.is_in_private_range() { + debug!("{:?}: Peer {:?} is in private range and we are configured to drop private neighbors", + &self.local_peer, + &neighbor_key + ); + return Err(net_error::Denied); + } + // consider rate-limits on in-bound peers let num_outbound = PeerNetwork::count_outbound_conversations(&self.peers); if !outbound && (self.peers.len() as u64) - num_outbound >= self.connection_opts.num_clients diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index eda4a543b9..4349de0674 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -3868,9 +3868,11 @@ pub mod test { .chainstate .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db.get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() }) }) .unwrap(); diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 8cd3147558..ee1d49cce8 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -168,10 +168,10 @@ impl StackerDBConfig { "(stackerdb-get-signer-slots)", )?; - let result = value.expect_result(); + let result = value.expect_result()?; let slot_list = match result { Err(err_val) => { - let err_code = err_val.expect_u128(); + let err_code = err_val.expect_u128()?; let reason = format!( "Contract {} failed to run `stackerdb-get-signer-slots`: error u{}", contract_id, &err_code @@ -182,23 +182,23 @@ impl StackerDBConfig { reason, )); } - Ok(ok_val) => ok_val.expect_list(), + Ok(ok_val) => ok_val.expect_list()?, }; let mut total_num_slots = 0u32; let mut ret = vec![]; for slot_value in slot_list.into_iter() { - let slot_data = slot_value.expect_tuple(); + let slot_data = slot_value.expect_tuple()?; let signer_principal = slot_data .get("signer") .expect("FATAL: no 'signer'") .clone() - .expect_principal(); + .expect_principal()?; let num_slots_uint = slot_data .get("num-slots") .expect("FATAL: no 'num-slots'") .clone() - .expect_u128(); + .expect_u128()?; if num_slots_uint > (STACKERDB_INV_MAX as u128) { let reason = format!( @@ -264,10 +264,10 @@ impl StackerDBConfig { let value = chainstate.eval_read_only(burn_dbconn, tip, contract_id, "(stackerdb-get-config)")?; - let result = value.expect_result(); + let result = value.expect_result()?; let config_tuple = match result { Err(err_val) => { - let err_code = err_val.expect_u128(); + let err_code = err_val.expect_u128()?; let reason = format!( "Contract {} failed to run `stackerdb-get-config`: err u{}", contract_id, &err_code @@ -278,14 +278,14 @@ impl StackerDBConfig { reason, )); } - Ok(ok_val) => ok_val.expect_tuple(), + Ok(ok_val) => ok_val.expect_tuple()?, }; let chunk_size = config_tuple .get("chunk-size") .expect("FATAL: missing 'chunk-size'") .clone() - .expect_u128(); + .expect_u128()?; if chunk_size > STACKERDB_MAX_CHUNK_SIZE as u128 { let reason = format!( @@ -303,7 +303,7 @@ impl StackerDBConfig { .get("write-freq") .expect("FATAL: missing 'write-freq'") .clone() - .expect_u128(); + .expect_u128()?; if write_freq > u64::MAX as u128 { let reason = format!( "Contract {} stipulates a write frequency beyond u64::MAX", @@ -320,7 +320,7 @@ impl StackerDBConfig { .get("max-writes") .expect("FATAL: missing 'max-writes'") .clone() - .expect_u128(); + .expect_u128()?; 
     if max_writes > u32::MAX as u128 {
         let reason = format!(
             "Contract {} stipulates a max-write bound beyond u32::MAX",
             contract_id
         );
@@ -337,7 +337,7 @@ impl StackerDBConfig {
             .get("max-neighbors")
             .expect("FATAL: missing 'max-neighbors'")
             .clone()
-            .expect_u128();
+            .expect_u128()?;
         if max_neighbors > usize::MAX as u128 {
             let reason = format!(
                 "Contract {} stipulates a maximum number of neighbors beyond usize::MAX",
@@ -354,30 +354,30 @@ impl StackerDBConfig {
             .get("hint-replicas")
             .expect("FATAL: missing 'hint-replicas'")
             .clone()
-            .expect_list();
+            .expect_list()?;
         let mut hint_replicas = vec![];
         for hint_replica_value in hint_replicas_list.into_iter() {
-            let hint_replica_data = hint_replica_value.expect_tuple();
+            let hint_replica_data = hint_replica_value.expect_tuple()?;
             let addr_byte_list = hint_replica_data
                 .get("addr")
                 .expect("FATAL: missing 'addr'")
                 .clone()
-                .expect_list();
+                .expect_list()?;
             let port = hint_replica_data
                 .get("port")
                 .expect("FATAL: missing 'port'")
                 .clone()
-                .expect_u128();
+                .expect_u128()?;
             let pubkey_hash_bytes = hint_replica_data
                 .get("public-key-hash")
                 .expect("FATAL: missing 'public-key-hash")
                 .clone()
-                .expect_buff_padded(20, 0);
+                .expect_buff_padded(20, 0)?;
             let mut addr_bytes = vec![];
             for byte_val in addr_byte_list.into_iter() {
-                let byte = byte_val.expect_u128();
+                let byte = byte_val.expect_u128()?;
                 if byte > (u8::MAX as u128) {
                     let reason = format!(
                         "Contract {} stipulates an addr byte above u8::MAX",
@@ -468,7 +468,7 @@ impl StackerDBConfig {
         clarity_tx.with_clarity_db_readonly(|db| {
             // contract must exist or this errors out
             let analysis = db
-                .load_contract_analysis(contract_id)
+                .load_contract_analysis(contract_id)?
                 .ok_or(net_error::NoSuchStackerDB(contract_id.clone()))?;

             // contract must be consistent with StackerDB control interface
diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs
index 8520cec1f0..b37fde4e10 100644
--- a/stackslib/src/net/stackerdb/mod.rs
+++ b/stackslib/src/net/stackerdb/mod.rs
@@ -306,10 +306,12 @@ impl PeerNetwork {
                 Ok(Some(result)) => {
                     // clear broken nodes
                     for broken in result.broken.iter() {
+                        debug!("StackerDB replica is broken: {:?}", broken);
                         self.deregister_and_ban_neighbor(broken);
                     }
                     // clear dead nodes
                     for dead in result.dead.iter() {
+                        debug!("StackerDB replica is dead: {:?}", dead);
                         self.deregister_neighbor(dead);
                     }
                     results.push(result);
@@ -320,12 +322,7 @@ impl PeerNetwork {
                         "Failed to run StackerDB state machine for {}: {:?}",
                         &sc, &e
                     );
-                    if let Err(e) = stacker_db_sync.reset(Some(self), config) {
-                        info!(
-                            "Failed to reset StackerDB state machine for {}: {:?}",
-                            &sc, &e
-                        );
-                    }
+                    stacker_db_sync.reset(Some(self), config);
                 }
             }
         } else {
diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs
index a5d875d86e..d01d4ff03f 100644
--- a/stackslib/src/net/stackerdb/sync.rs
+++ b/stackslib/src/net/stackerdb/sync.rs
@@ -48,7 +48,7 @@ impl StackerDBSync {
         config: &StackerDBConfig,
         comms: NC,
         stackerdbs: StackerDBs,
-    ) -> Result<StackerDBSync<NC>, net_error> {
+    ) -> StackerDBSync<NC> {
         let mut dbsync = StackerDBSync {
             state: StackerDBSyncState::ConnectBegin,
             smart_contract_id: smart_contract,
@@ -73,8 +73,61 @@ impl StackerDBSync {
             last_run_ts: 0,
             need_resync: false,
         };
-        dbsync.reset(None, config)?;
-        Ok(dbsync)
+        dbsync.reset(None, config);
+        dbsync
+    }
+
+    /// Find stackerdb replicas and apply filtering rules
+    fn find_qualified_replicas(
+        &self,
+        network: &PeerNetwork,
+    ) -> Result<HashSet<NeighborAddress>, net_error> {
+        let mut found = HashSet::new();
+        let mut min_age =
+            get_epoch_time_secs().saturating_sub(network.get_connection_opts().max_neighbor_age);
+        while found.len() < self.max_neighbors {
+            let peers_iter = PeerDB::find_stacker_db_replicas(
+                network.peerdb_conn(),
+                network.get_local_peer().network_id,
+                &self.smart_contract_id,
+                min_age,
+                self.max_neighbors,
+            )?
+            .into_iter()
+            .map(|neighbor| {
+                (
+                    NeighborAddress::from_neighbor(&neighbor),
+                    neighbor.last_contact_time,
+                )
+            })
+            .filter(|(naddr, _)| {
+                if naddr.addrbytes.is_anynet() {
+                    return false;
+                }
+                if !network.get_connection_opts().private_neighbors
+                    && naddr.addrbytes.is_in_private_range()
+                {
+                    return false;
+                }
+                true
+            });
+
+            for (peer, last_contact) in peers_iter {
+                found.insert(peer);
+                if found.len() >= self.max_neighbors {
+                    break;
+                }
+                min_age = min_age.min(last_contact);
+            }
+
+            // out of fresh neighbors, so widen the search to the full age range
+            // once, then stop
+            if min_age > 1 {
+                min_age = 1;
+            } else {
+                break;
+            }
+        }
+        Ok(found)
+    }

     /// Calculate the new set of replicas to contact.
@@ -89,18 +142,12 @@ impl StackerDBSync {
         // keep all connected replicas, and replenish from config hints and the DB as needed
         let mut peers = config.hint_replicas.clone();
         if let Some(network) = network {
-            let extra_peers: Vec<_> = PeerDB::find_stacker_db_replicas(
-                network.peerdb_conn(),
-                network.get_local_peer().network_id,
-                &self.smart_contract_id,
-                self.max_neighbors,
-            )?
-            .into_iter()
-            .map(|neighbor| NeighborAddress::from_neighbor(&neighbor))
-            .collect();
+            let extra_peers = self.find_qualified_replicas(network)?;
             peers.extend(extra_peers);
         }

+        peers.shuffle(&mut thread_rng());
+
         for peer in peers {
             if connected_replicas.len() >= config.max_neighbors {
                 break;
@@ -116,7 +163,8 @@ impl StackerDBSync {
         &mut self,
         network: Option<&PeerNetwork>,
         config: &StackerDBConfig,
-    ) -> Result<StackerDBSyncResult, net_error> {
+    ) -> StackerDBSyncResult {
+        debug!("Reset with config {:?}", config);
         let mut chunks = vec![];
         let downloaded_chunks = mem::replace(&mut self.downloaded_chunks, HashMap::new());
         for (_, mut data) in downloaded_chunks.into_iter() {
@@ -135,7 +183,12 @@ impl StackerDBSync {
         // keep all connected replicas, and replenish from config hints and the DB as needed
         let connected_replicas = mem::replace(&mut self.connected_replicas, HashSet::new());
         let next_connected_replicas =
-            self.find_new_replicas(connected_replicas, network, config)?;
+            if let Ok(new_replicas) = self.find_new_replicas(connected_replicas, network, config) {
+                new_replicas
+            } else {
+                self.replicas.clone()
+            };
+
         self.replicas = next_connected_replicas;

         self.chunk_fetch_priorities.clear();
@@ -154,8 +207,10 @@ impl StackerDBSync {
         self.write_freq = config.write_freq;

         self.need_resync = false;
+        self.last_run_ts = get_epoch_time_secs();

-        Ok(result)
+        self.state = StackerDBSyncState::ConnectBegin;
+        result
     }

     /// Get the set of connection IDs in use
@@ -201,7 +256,7 @@ impl StackerDBSync {
         for (i, local_version) in local_slot_versions.iter().enumerate() {
             let write_ts = local_write_timestamps[i];
             if write_ts + self.write_freq > now {
-                test_debug!(
+                debug!(
                     "{:?}: Chunk {} was written too frequently ({} + {} >= {}), so will not fetch chunk",
                     network.get_local_peer(),
                     i,
@@ -275,7 +330,7 @@ impl StackerDBSync {
         schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len()));
         schedule.reverse();

-        test_debug!(
+        debug!(
             "{:?}: Will request up to {} chunks for {}",
             network.get_local_peer(),
             &schedule.len(),
@@ -367,7 +422,7 @@ impl StackerDBSync {
             .collect();
         schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len()));

-        test_debug!(
+        debug!(
"{:?}: Will push up to {} chunks for {}", network.get_local_peer(), &schedule.len(), @@ -443,7 +498,7 @@ impl StackerDBSync { for (old_slot_id, old_version) in old_inv.slot_versions.iter().enumerate() { if *old_version < new_inv.slot_versions[old_slot_id] { // remote peer indicated that it has a newer version of this chunk. - test_debug!( + debug!( "{:?}: peer {:?} has a newer version of slot {} ({} < {})", _network.get_local_peer(), &naddr, @@ -501,6 +556,10 @@ impl StackerDBSync { } let naddr = convo.to_neighbor_address(); + if sent_naddr_set.contains(&naddr) { + continue; + } + let has_reciprocal_outbound = network .get_pubkey_events(&naddr.public_key_hash) .iter() @@ -529,7 +588,7 @@ impl StackerDBSync { } for (naddr, chunks_req) in to_send.into_iter() { - test_debug!("{:?}: send_getchunksinv_to_inbound_neighbors: Send StackerDBGetChunkInv to inbound {:?}", network.get_local_peer(), &naddr); + debug!("{:?}: send_getchunksinv_to_inbound_neighbors: Send StackerDBGetChunkInv to inbound {:?}", network.get_local_peer(), &naddr); if let Err(_e) = self.comms.neighbor_send(network, &naddr, chunks_req) { info!( "{:?}: Failed to send StackerDBGetChunkInv to inbound {:?}: {:?}", @@ -550,18 +609,10 @@ impl StackerDBSync { pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.replicas.len() == 0 { // find some from the peer Db - let replicas = PeerDB::find_stacker_db_replicas( - network.peerdb_conn(), - network.get_local_peer().network_id, - &self.smart_contract_id, - self.max_neighbors, - )? - .into_iter() - .map(|neighbor| NeighborAddress::from_neighbor(&neighbor)) - .collect(); + let replicas = self.find_qualified_replicas(network)?; self.replicas = replicas; } - test_debug!( + debug!( "{:?}: connect_begin: establish StackerDB sessions to {} neighbors", network.get_local_peer(), self.replicas.len() @@ -574,7 +625,7 @@ impl StackerDBSync { let naddrs = mem::replace(&mut self.replicas, HashSet::new()); for naddr in naddrs.into_iter() { if self.comms.has_neighbor_session(network, &naddr) { - test_debug!( + debug!( "{:?}: connect_begin: already connected to StackerDB peer {:?}", network.get_local_peer(), &naddr @@ -583,7 +634,7 @@ impl StackerDBSync { continue; } - test_debug!( + debug!( "{:?}: connect_begin: Send Handshake to StackerDB peer {:?}", network.get_local_peer(), &naddr @@ -591,7 +642,7 @@ impl StackerDBSync { match self.comms.neighbor_session_begin(network, &naddr) { Ok(true) => { // connected! 
-                    test_debug!(
+                    debug!(
                         "{:?}: connect_begin: connected to StackerDB peer {:?}",
                         network.get_local_peer(),
                         &naddr
@@ -632,7 +683,7 @@ impl StackerDBSync {
                 }
                 StacksMessageType::Nack(data) => {
                     debug!(
-                        "{:?}: remote peer {:?} NACK'ed us with code {}",
+                        "{:?}: remote peer {:?} NACK'ed our StackerDBHandshake with code {}",
                         &network.get_local_peer(),
                         &naddr,
                         data.error_code
@@ -662,7 +713,7 @@ impl StackerDBSync {
                 continue;
             }

-            test_debug!(
+            debug!(
                 "{:?}: connect_try_finish: Received StackerDBHandshakeAccept from {:?} for {:?}",
                 network.get_local_peer(),
                 &naddr,
@@ -680,7 +731,7 @@ impl StackerDBSync {
         if self.connected_replicas.len() == 0 {
             // no one to talk to
-            test_debug!(
+            debug!(
                 "{:?}: connect_try_finish: no valid replicas",
                 network.get_local_peer()
             );
@@ -698,13 +749,13 @@ impl StackerDBSync {
     pub fn getchunksinv_begin(&mut self, network: &mut PeerNetwork) {
         let naddrs = mem::replace(&mut self.connected_replicas, HashSet::new());
         let mut already_sent = vec![];
-        test_debug!(
+        debug!(
             "{:?}: getchunksinv_begin: Send StackerDBGetChunksInv to {} replicas",
             network.get_local_peer(),
             naddrs.len()
         );
         for naddr in naddrs.into_iter() {
-            test_debug!(
+            debug!(
                 "{:?}: getchunksinv_begin: Send StackerDBGetChunksInv to {:?}",
                 network.get_local_peer(),
                 &naddr
@@ -744,7 +795,7 @@ impl StackerDBSync {
                 }
                 StacksMessageType::Nack(data) => {
                     debug!(
-                        "{:?}: remote peer {:?} NACK'ed us with code {}",
+                        "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv with code {}",
                         &network.get_local_peer(),
                         &naddr,
                         data.error_code
@@ -756,7 +807,7 @@ impl StackerDBSync {
                     continue;
                 }
             };
-            test_debug!(
+            debug!(
                 "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}",
                 network.get_local_peer(),
                 &naddr
@@ -781,20 +832,22 @@ impl StackerDBSync {
     /// Ask each prioritized replica for some chunks we need.
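+    /// Return Err(net_error::PeerNotConnected) if we could not issue any
+    /// requests and have none in flight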
    /// Return Ok(true) if we processed all requested chunks
    /// Return Ok(false) if there are still some requests to make
-    pub fn getchunks_begin(&mut self, network: &mut PeerNetwork) -> bool {
+    pub fn getchunks_begin(&mut self, network: &mut PeerNetwork) -> Result<bool, net_error> {
         if self.chunk_fetch_priorities.len() == 0 {
             // done
-            return true;
+            return Ok(true);
         }
         let mut cur_priority = self.next_chunk_fetch_priority % self.chunk_fetch_priorities.len();

-        test_debug!(
+        debug!(
             "{:?}: getchunks_begin: Issue up to {} StackerDBGetChunk requests",
             &network.get_local_peer(),
             self.request_capacity
         );

+        let mut requested = 0;
+
         // fill up our comms with $capacity requests
         for _i in 0..self.request_capacity {
             if self.comms.count_inflight() >= self.request_capacity {
@@ -814,7 +867,7 @@ impl StackerDBSync {
                 continue;
             };

-            test_debug!(
+            debug!(
                 "{:?}: getchunks_begin: Send StackerDBGetChunk(db={},id={},ver={}) to {}",
                 &network.get_local_peer(),
                 &self.smart_contract_id,
@@ -840,15 +893,21 @@ impl StackerDBSync {
                 continue;
             }

+            requested += 1;
+
             // don't ask this neighbor again
             self.chunk_fetch_priorities[cur_priority].1.remove(idx);

             // next-prioritized chunk
             cur_priority = (cur_priority + 1) % self.chunk_fetch_priorities.len();
         }
+        if requested == 0 && self.comms.count_inflight() == 0 {
+            return Err(net_error::PeerNotConnected);
+        }
+
         self.next_chunk_fetch_priority = cur_priority;

-        self.chunk_fetch_priorities.len() == 0
+        Ok(self.chunk_fetch_priorities.len() == 0)
     }

     /// Collect chunk replies from neighbors
@@ -890,7 +949,7 @@ impl StackerDBSync {
             }

             // update bookkeeping
-            test_debug!(
+            debug!(
                 "{:?}: getchunks_try_finish: Received StackerDBChunk from {:?}",
                 network.get_local_peer(),
                 &naddr
@@ -916,12 +975,14 @@ impl StackerDBSync {
         let mut cur_priority = self.next_chunk_push_priority % self.chunk_push_priorities.len();

-        test_debug!(
+        debug!(
             "{:?}: pushchunks_begin: Send up to {} StackerDBChunk pushes",
             &network.get_local_peer(),
             self.chunk_push_priorities.len()
         );

+        let mut pushed = 0;
+
         // fill up our comms with $capacity requests
         for _i in 0..self.request_capacity {
             if self.comms.count_inflight() >= self.request_capacity {
@@ -938,7 +999,7 @@ impl StackerDBSync {
             let (idx, selected_neighbor) = if let Some(x) = selected_neighbor_opt {
                 x
             } else {
-                test_debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to",
+                debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to",
                     &network.get_local_peer(),
                     &self.smart_contract_id,
                     chunk_push.chunk_data.slot_id,
@@ -947,7 +1008,7 @@ impl StackerDBSync {
                 continue;
             };

-            test_debug!(
+            debug!(
                 "{:?}: pushchunks_begin: Send StackerDBChunk(db={},id={},ver={}) to {}",
                 &network.get_local_peer(),
                 &self.smart_contract_id,
@@ -975,6 +1036,8 @@ impl StackerDBSync {
                 continue;
             }

+            pushed += 1;
+
             // record what we just sent
             self.chunk_push_receipts
                 .insert(selected_neighbor.clone(), (slot_id, slot_version));
@@ -985,6 +1048,9 @@ impl StackerDBSync {
             // next-prioritized chunk
             cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len();
         }
+        if pushed == 0 {
+            return Err(net_error::PeerNotConnected);
+        }
         self.next_chunk_push_priority = cur_priority;
         Ok(self.chunk_push_priorities.len() == 0)
     }
@@ -1022,7 +1088,7 @@ impl StackerDBSync {
             }

             // update bookkeeping
-            test_debug!(
+            debug!(
                 "{:?}: pushchunks_try_finish: Received StackerDBChunkInv from {:?}",
                 network.get_local_peer(),
                 &naddr
@@ -1060,7 +1126,7 @@ impl StackerDBSync {
     /// Forcibly wake up the state machine if it is throttled
     pub fn wakeup(&mut self) {
-        test_debug!("wake up StackerDB sync for {}", &self.smart_contract_id);
+        debug!("wake up StackerDB sync for {}", &self.smart_contract_id);
         self.last_run_ts = 0;
     }

@@ -1073,8 +1139,8 @@ impl StackerDBSync {
         config: &StackerDBConfig,
     ) -> Result<Option<StackerDBSyncResult>, net_error> {
         // throttle to write_freq
-        if self.last_run_ts + config.write_freq > get_epoch_time_secs() {
-            test_debug!(
+        if self.last_run_ts + config.write_freq.max(1) > get_epoch_time_secs() {
+            debug!(
                 "{:?}: stacker DB sync for {} is throttled until {}",
                 network.get_local_peer(),
                 &self.smart_contract_id,
@@ -1084,7 +1150,7 @@ impl StackerDBSync {
         }

         loop {
-            test_debug!(
+            debug!(
                 "{:?}: stacker DB sync state is {:?}",
                 network.get_local_peer(),
                 &self.state
@@ -1126,7 +1192,7 @@ impl StackerDBSync {
                         continue;
                     }

-                    let requests_finished = self.getchunks_begin(network);
+                    let requests_finished = self.getchunks_begin(network)?;
                     let inflight_finished = self.getchunks_try_finish(network, config)?;
                     let done = requests_finished && inflight_finished;
                     if done {
@@ -1155,9 +1221,8 @@ impl StackerDBSync {
                     }
                 }
                 StackerDBSyncState::Finished => {
-                    let result = self.reset(Some(network), config)?;
+                    let result = self.reset(Some(network), config);
                     self.state = StackerDBSyncState::ConnectBegin;
-                    self.last_run_ts = get_epoch_time_secs();
                     return Ok(Some(result));
                 }
             };
diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs
index 92187820c0..7e1c5f15da 100644
--- a/stackslib/src/net/stackerdb/tests/sync.rs
+++ b/stackslib/src/net/stackerdb/tests/sync.rs
@@ -149,8 +149,7 @@ fn setup_stackerdb(peer: &mut TestPeer, idx: usize, fill: bool, num_slots: usize
         .unwrap()
         .get_mut(contract_id)
         .unwrap()
-        .reset(None, stackerdb_config)
-        .unwrap();
+        .reset(None, stackerdb_config);
 }

 /// Load up the entire stacker DB, including its metadata
@@ -228,8 +227,11 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() {
     let mut i = 0;
     loop {
         // run peer network state-machines
-        let res_1 = peer_1.step();
-        let res_2 = peer_2.step();
+        peer_1.network.stacker_db_configs = peer_1_db_configs.clone();
+        peer_2.network.stacker_db_configs = peer_2_db_configs.clone();
+
+        let res_1 = peer_1.step_with_ibd(false);
+        let res_2 = peer_2.step_with_ibd(false);

         if let Ok(mut res) = res_1 {
             Relayer::process_stacker_db_chunks(
@@ -347,8 +349,11 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port
     let mut i = 0;
     loop {
         // run peer network state-machines
-        let res_1 = peer_1.step();
-        let res_2 = peer_2.step();
+        peer_1.network.stacker_db_configs = peer_1_db_configs.clone();
+        peer_2.network.stacker_db_configs = peer_2_db_configs.clone();
+
+        let res_1 = peer_1.step_with_ibd(false);
+        let res_2 = peer_2.step_with_ibd(false);

         if let Ok(mut res) = res_1 {
             Relayer::process_stacker_db_chunks(
@@ -485,7 +490,8 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool,
     loop {
         // run peer network state-machines
         for i in 0..num_peers {
-            let res = peers[i].step();
+            peers[i].network.stacker_db_configs = peer_db_configs[i].clone();
+            let res = peers[i].step_with_ibd(false);

             if let Ok(mut res) = res {
                 let rc_consensus_hash = peers[i].network.get_chain_view().rc_consensus_hash.clone();
diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs
index 333543e7ca..f1937cb89b 100644
--- a/stackslib/src/net/tests/neighbors.rs
+++ b/stackslib/src/net/tests/neighbors.rs
@@ -2687,7 +2687,7 @@ where
     debug!("Random order = {:?}", &random_order);
     for i in
random_order.into_iter() { - let _ = peers[i].step(); + let _ = peers[i].step_with_ibd(false); let nk = peers[i].config.to_neighbor().addr; debug!("Step peer {:?}", &nk); diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index fb4d6d91b0..7dd30eea21 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1225,6 +1225,7 @@ impl Config { handshake_timeout: opts.handshake_timeout.unwrap_or(5), max_sockets: opts.max_sockets.unwrap_or(800) as usize, antientropy_public: opts.antientropy_public.unwrap_or(true), + private_neighbors: opts.private_neighbors.unwrap_or(true), ..ConnectionOptions::default() } } @@ -1941,7 +1942,7 @@ impl NodeConfig { let (pubkey_str, hostport) = (parts[0], parts[1]); let pubkey = Secp256k1PublicKey::from_hex(pubkey_str) .expect(&format!("Invalid public key '{}'", pubkey_str)); - info!("Resolve '{}'", &hostport); + debug!("Resolve '{}'", &hostport); let sockaddr = hostport.to_socket_addrs().unwrap().next().unwrap(); let neighbor = NodeConfig::default_neighbor(sockaddr, pubkey, chain_id, peer_version); self.bootstrap_node.push(neighbor); @@ -2049,7 +2050,7 @@ impl MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, - probability_pick_no_estimate_tx: 5, + probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, wait_for_block_download: true, @@ -2097,7 +2098,6 @@ pub struct ConnectionOptionsFile { pub max_inflight_attachments: Option, pub read_only_call_limit_write_length: Option, pub read_only_call_limit_read_length: Option, - pub read_only_call_limit_write_count: Option, pub read_only_call_limit_read_count: Option, pub read_only_call_limit_runtime: Option, @@ -2112,6 +2112,7 @@ pub struct ConnectionOptionsFile { pub disable_block_download: Option, pub force_disconnect_interval: Option, pub antientropy_public: Option, + pub private_neighbors: Option, } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 235627b063..2a09ff174f 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -232,12 +232,16 @@ impl EventObserver { }; let raw_result = { - let bytes = receipt.result.serialize_to_vec(); + let bytes = receipt + .result + .serialize_to_vec() + .expect("FATAL: failed to serialize transaction receipt"); bytes_to_hex(&bytes) }; let contract_interface_json = { match &receipt.contract_analysis { - Some(analysis) => json!(build_contract_interface(analysis)), + Some(analysis) => json!(build_contract_interface(analysis) + .expect("FATAL: failed to serialize contract publish receipt")), None => json!(null), } }; @@ -310,7 +314,9 @@ impl EventObserver { let serialized_events: Vec = filtered_events .iter() .map(|(event_index, (committed, txid, event))| { - event.json_serialize(*event_index, txid, *committed) + event + .json_serialize(*event_index, txid, *committed) + .unwrap() }) .collect(); @@ -366,7 +372,9 @@ impl EventObserver { let serialized_events: Vec = filtered_events .iter() .map(|(event_index, (committed, txid, event))| { - event.json_serialize(*event_index, txid, *committed) + event + .json_serialize(*event_index, txid, *committed) + .unwrap() }) .collect(); diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 6495beab74..a9b7a46e78 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -487,14 +487,14 
@@ start\t\tStart a node with a config of your own. Can be used for joining a netwo \t\tArguments: \t\t --config: path of the config (such as https://github.com/blockstack/stacks-blockchain/blob/master/testnet/stacks-node/conf/testnet-follower-conf.toml). \t\tExample: -\t\t stacks-node start --config=/path/to/config.toml +\t\t stacks-node start --config /path/to/config.toml check-config\t\tValidates the config file without starting up the node. Uses same arguments as start subcommand. version\t\tDisplay information about the current version and our release cycle. key-for-seed\tOutput the associated secret key for a burnchain signer created with a given seed. -\t\tCan be passed a config file for the seed via the `--config=` option *or* by supplying the hex seed on +\t\tCan be passed a config file for the seed via the `--config ` option *or* by supplying the hex seed on \t\tthe command line directly. help\t\tDisplay this help. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 56f777076e..c44fe9448b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4801,12 +4801,14 @@ impl StacksNode { tx.commit().unwrap(); } - // update services to indicate we can support mempool sync + // update services to indicate we can support mempool sync and stackerdb { let mut tx = peerdb.tx_begin().unwrap(); PeerDB::set_local_services( &mut tx, - (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), + (ServiceFlags::RPC as u16) + | (ServiceFlags::RELAY as u16) + | (ServiceFlags::STACKERDB as u16), ) .unwrap(); tx.commit().unwrap(); @@ -4893,21 +4895,12 @@ impl StacksNode { } } } - let stacker_db_sync = match StackerDBSync::new( + let stacker_db_sync = StackerDBSync::new( stackerdb_contract_id.clone(), &stacker_db_config, PeerNetworkComms::new(), stackerdbs, - ) { - Ok(s) => s, - Err(e) => { - warn!( - "Failed to instantiate StackerDB sync machine for {}: {:?}", - stackerdb_contract_id, &e - ); - continue; - } - }; + ); stackerdb_machines.insert( stackerdb_contract_id.clone(), diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 8be3edad0f..1c19e167cd 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -425,19 +425,21 @@ fn transition_adds_burn_block_height() { false, ) .unwrap(); - let pair = clarity_value.expect_tuple(); - let height = pair.get("height").unwrap().clone().expect_u128() as u64; - let bhh_opt = - pair.get("hash") - .unwrap() - .clone() - .expect_optional() - .map(|inner_buff| { - let buff_bytes_vec = inner_buff.expect_buff(32); - let mut buff_bytes = [0u8; 32]; - buff_bytes.copy_from_slice(&buff_bytes_vec[0..32]); - BurnchainHeaderHash(buff_bytes) - }); + let pair = clarity_value.expect_tuple().unwrap(); + let height = + pair.get("height").unwrap().clone().expect_u128().unwrap() as u64; + let bhh_opt = pair + .get("hash") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .map(|inner_buff| { + let buff_bytes_vec = inner_buff.expect_buff(32).unwrap(); + let mut buff_bytes = [0u8; 32]; + buff_bytes.copy_from_slice(&buff_bytes_vec[0..32]); + BurnchainHeaderHash(buff_bytes) + }); header_hashes.insert(height, bhh_opt); } @@ -1121,7 +1123,7 @@ fn transition_adds_get_pox_addr_recipients() { ); submit_tx(&http_origin, &tx); - expected_pox_addrs.insert(pox_addr_tuple); + expected_pox_addrs.insert(pox_addr_tuple.to_string()); } // stack some STX to segwit addressses @@ -1161,7 +1163,7 @@ fn 
transition_adds_get_pox_addr_recipients() { ); submit_tx(&http_origin, &tx); - expected_pox_addrs.insert(pox_addr_tuple); + expected_pox_addrs.insert(pox_addr_tuple.to_string()); } let contract = " @@ -1257,25 +1259,36 @@ fn transition_adds_get_pox_addr_recipients() { false, ) .unwrap(); - let pair = clarity_value.expect_tuple(); - let burn_block_height = - pair.get("burn-height").unwrap().clone().expect_u128() as u64; - let pox_addr_tuples_opt = - pair.get("pox-addrs").unwrap().clone().expect_optional(); + let pair = clarity_value.expect_tuple().unwrap(); + let burn_block_height = pair + .get("burn-height") + .unwrap() + .clone() + .expect_u128() + .unwrap() as u64; + let pox_addr_tuples_opt = pair + .get("pox-addrs") + .unwrap() + .clone() + .expect_optional() + .unwrap(); if let Some(pox_addr_tuples_list) = pox_addr_tuples_opt { - let pox_addrs_and_payout_tuple = pox_addr_tuples_list.expect_tuple(); + let pox_addrs_and_payout_tuple = + pox_addr_tuples_list.expect_tuple().unwrap(); let pox_addr_tuples = pox_addrs_and_payout_tuple .get("addrs") .unwrap() .to_owned() - .expect_list(); + .expect_list() + .unwrap(); let payout = pox_addrs_and_payout_tuple .get("payout") .unwrap() .to_owned() - .expect_u128(); + .expect_u128() + .unwrap(); // NOTE: there's an even number of payouts here, so this works eprintln!("payout at {} = {}", burn_block_height, &payout); @@ -1326,7 +1339,7 @@ fn transition_adds_get_pox_addr_recipients() { .map(|addr| Value::Tuple(addr.as_clarity_tuple().unwrap())) { eprintln!("Contains: {:?}", &addr); - assert!(expected_pox_addrs.contains(&addr)); + assert!(expected_pox_addrs.contains(&addr.to_string())); } } @@ -5126,10 +5139,13 @@ fn test_v1_unlock_height_with_current_stackers() { ) .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .get_owned("addrs") .unwrap() - .expect_list(); + .expect_list() + .unwrap(); if height < 215 { if !burnchain_config.is_in_prepare_phase(height) { @@ -5402,10 +5418,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ) .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .get_owned("addrs") .unwrap() - .expect_list(); + .expect_list() + .unwrap(); debug!("Test burnchain height {}", height); if !burnchain_config.is_in_prepare_phase(height) { diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 99863c95e0..223480f163 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -409,10 +409,13 @@ fn disable_pox() { ) .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .get_owned("addrs") .unwrap() - .expect_list(); + .expect_list() + .unwrap(); debug!("Test burnchain height {}", height); if !burnchain_config.is_in_prepare_phase(height) { @@ -1074,10 +1077,13 @@ fn pox_2_unlock_all() { ) .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .get_owned("addrs") .unwrap() - .expect_list(); + .expect_list() + .unwrap(); debug!("Test burnchain height {}", height); if !burnchain_config.is_in_prepare_phase(height) { diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 4376da2d41..90577d8c7b 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -502,10 +502,13 @@ fn fix_to_pox_contract() { ) .expect_optional() .unwrap() + .unwrap() .expect_tuple() + .unwrap() .get_owned("addrs") .unwrap() - .expect_list(); + .expect_list() + .unwrap(); debug!("Test burnchain height {}", 
height);

        if !burnchain_config.is_in_prepare_phase(height) {
@@ -1218,10 +1221,13 @@ fn verify_auto_unlock_behavior() {
             )
             .expect_optional()
             .unwrap()
+            .unwrap()
             .expect_tuple()
+            .unwrap()
             .get_owned("addrs")
             .unwrap()
-            .expect_list();
+            .expect_list()
+            .unwrap();

         if !burnchain_config.is_in_prepare_phase(height) {
             if pox_addrs.len() > 0 {
diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs
index ffc7873dfc..dbf987371b 100644
--- a/testnet/stacks-node/src/tests/integrations.rs
+++ b/testnet/stacks-node/src/tests/integrations.rs
@@ -485,7 +485,7 @@ fn integration_test_get_info() {
         eprintln!("Test: POST {}", path);
         let res = client.post(&path)
-            .json(&key.serialize_to_hex())
+            .json(&key.serialize_to_hex().unwrap())
             .send()
             .unwrap().json::<HashMap<String, String>>().unwrap();
         let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap();
@@ -500,7 +500,7 @@ fn integration_test_get_info() {
         eprintln!("Test: POST {}", path);
         let res = client.post(&path)
-            .json(&key.serialize_to_hex())
+            .json(&key.serialize_to_hex().unwrap())
             .send()
             .unwrap().json::<HashMap<String, String>>().unwrap();
         let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap();
@@ -517,7 +517,7 @@ fn integration_test_get_info() {
         eprintln!("Test: POST {}", path);
         let res = client.post(&path)
-            .json(&key.serialize_to_hex())
+            .json(&key.serialize_to_hex().unwrap())
             .send()
             .unwrap().json::<HashMap<String, String>>().unwrap();

@@ -538,7 +538,7 @@ fn integration_test_get_info() {
         eprintln!("Test: POST {}", path);
         let res = client.post(&path)
-            .json(&key.serialize_to_hex())
+            .json(&key.serialize_to_hex().unwrap())
             .send()
             .unwrap().json::<HashMap<String, String>>().unwrap();

@@ -621,7 +621,7 @@ fn integration_test_get_info() {
         let res = client.get(&path).send().unwrap().json::<serde_json::Value>().unwrap();
         let contract_analysis = mem_type_check(GET_INFO_CONTRACT, ClarityVersion::Clarity2, StacksEpochId::Epoch21).unwrap().1;
-        let expected_interface = build_contract_interface(&contract_analysis);
+        let expected_interface = build_contract_interface(&contract_analysis).unwrap();

         eprintln!("{}", serde_json::to_string(&expected_interface).unwrap());

@@ -666,7 +666,7 @@ fn integration_test_get_info() {
         let body = CallReadOnlyRequestBody {
             sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(),
             sponsor: None,
-            arguments: vec![Value::UInt(3).serialize_to_hex()]
+            arguments: vec![Value::UInt(3).serialize_to_hex().unwrap()]
         };

         let res = client.post(&path)
@@ -734,7 +734,7 @@ fn integration_test_get_info() {
         let body = CallReadOnlyRequestBody {
             sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(),
             sponsor: None,
-            arguments: vec![Value::UInt(3).serialize_to_hex()]
+            arguments: vec![Value::UInt(3).serialize_to_hex().unwrap()]
         };

         let res = client.post(&path)
@@ -757,7 +757,7 @@ fn integration_test_get_info() {
         let body = CallReadOnlyRequestBody {
             sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(),
             sponsor: None,
-            arguments: vec![Value::UInt(100).serialize_to_hex()]
+            arguments: vec![Value::UInt(100).serialize_to_hex().unwrap()]
         };

         let res = client.post(&path)
@@ -1269,6 +1269,7 @@ fn contract_stx_transfer() {
                             db.get_account_stx_balance(
                                 &contract_identifier.clone().into(),
                             )
+                            .unwrap()
                             .amount_unlocked()
                         })
                     }
@@ -1286,7 +1287,9 @@ fn contract_stx_transfer() {
                 &StacksBlockHeader::make_index_block_hash(&cur_tip.0, &cur_tip.1),
                 |conn| {
                     conn.with_clarity_db_readonly(|db| {
-                        db.get_account_stx_balance(&addr_3).amount_unlocked()
+                        db.get_account_stx_balance(&addr_3)
+                            .unwrap()
+                            .amount_unlocked()
                     })
                 }
             )
@@ -1320,7 +1323,9 @@ fn
contract_stx_transfer() { &StacksBlockHeader::make_index_block_hash(&cur_tip.0, &cur_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { - db.get_account_stx_balance(&addr_2).amount_unlocked() + db.get_account_stx_balance(&addr_2) + .unwrap() + .amount_unlocked() }) } ) @@ -1338,6 +1343,7 @@ fn contract_stx_transfer() { db.get_account_stx_balance( &contract_identifier.clone().into(), ) + .unwrap() .amount_unlocked() }) } @@ -1370,6 +1376,7 @@ fn contract_stx_transfer() { db.get_account_stx_balance( &contract_identifier.clone().into(), ) + .unwrap() .amount_unlocked() }) } @@ -1387,7 +1394,9 @@ fn contract_stx_transfer() { &StacksBlockHeader::make_index_block_hash(&cur_tip.0, &cur_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { - db.get_account_stx_balance(&addr_3).amount_unlocked() + db.get_account_stx_balance(&addr_3) + .unwrap() + .amount_unlocked() }) } ) @@ -1540,6 +1549,7 @@ fn mine_transactions_out_of_order() { db.get_account_stx_balance( &contract_identifier.clone().into(), ) + .unwrap() .amount_unlocked() }) } @@ -1796,6 +1806,7 @@ fn bad_contract_tx_rollback() { db.get_account_stx_balance( &contract_identifier.clone().into(), ) + .unwrap() .amount_unlocked() }) } @@ -1813,7 +1824,9 @@ fn bad_contract_tx_rollback() { &StacksBlockHeader::make_index_block_hash(&cur_tip.0, &cur_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { - db.get_account_stx_balance(&addr_3).amount_unlocked() + db.get_account_stx_balance(&addr_3) + .unwrap() + .amount_unlocked() }) } ) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 52a03b60ed..afe9d52a11 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1408,7 +1408,7 @@ fn liquid_ustx_integration() { if contract_call.function_name.as_str() == "execute" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - let liquid_ustx = parsed.expect_result_ok().expect_u128(); + let liquid_ustx = parsed.expect_result_ok().unwrap().expect_u128().unwrap(); assert!(liquid_ustx > 0, "Should be more liquid ustx than 0"); tested = true; } @@ -4938,7 +4938,15 @@ fn mining_events_integration_test() { execution_cost, .. }) => { - assert_eq!(result.clone().expect_result_ok().expect_bool(), true); + assert_eq!( + result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap(), + true + ); assert_eq!(fee, &620000); assert_eq!( execution_cost, @@ -4970,7 +4978,15 @@ fn mining_events_integration_test() { txid.to_string(), "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" ); - assert_eq!(result.clone().expect_result_ok().expect_bool(), true); + assert_eq!( + result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap(), + true + ); } _ => panic!("unexpected event type"), } @@ -4983,7 +4999,15 @@ fn mining_events_integration_test() { execution_cost, .. }) => { - assert_eq!(result.clone().expect_result_ok().expect_bool(), true); + assert_eq!( + result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap(), + true + ); assert_eq!(fee, &600000); assert_eq!( execution_cost,