diff --git a/Cargo.lock b/Cargo.lock index 1a4d604311d..90b0f3fbe40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2347,10 +2347,13 @@ dependencies = [ "iroha_crypto", "iroha_data_model", "iroha_logger", + "iroha_macro", "iroha_primitives", "iroha_schema", + "once_cell", "serde", "serde_json", + "thiserror", "tracing", ] diff --git a/cli/src/event.rs b/cli/src/event.rs index 601ab41bb6f..79d085f4a83 100644 --- a/cli/src/event.rs +++ b/cli/src/event.rs @@ -58,7 +58,8 @@ impl Consumer { #[iroha_futures::telemetry_future] pub async fn new(mut stream: WebSocket) -> Result { let subscription_request: VersionedEventSubscriptionRequest = stream.recv().await?; - let EventSubscriptionRequest(filter) = subscription_request.into_v1(); + let VersionedEventSubscriptionRequest::V1(EventSubscriptionRequest(filter)) = + subscription_request; Ok(Consumer { stream, filter }) } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index ba5b2a4eb58..d944b7bfeae 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -9,7 +9,7 @@ clippy::std_instead_of_core, clippy::std_instead_of_alloc )] -use core::sync::atomic::{AtomicBool, Ordering}; +use core::sync::atomic::AtomicBool; use std::sync::Arc; use color_eyre::eyre::{eyre, Result, WrapErr}; @@ -30,7 +30,7 @@ use iroha_core::{ IrohaNetwork, }; use iroha_data_model::prelude::*; -use iroha_genesis::GenesisNetwork; +use iroha_genesis::{GenesisNetwork, GENESIS_ACCOUNT_ID, GENESIS_DOMAIN_ID}; use iroha_logger::prelude::span; use tokio::{ signal, @@ -149,15 +149,22 @@ impl NetworkRelay { use iroha_core::NetworkMessage::*; #[cfg(debug_assertions)] - if self.freeze_status.load(Ordering::SeqCst) { + if self + .freeze_status + .load(core::sync::atomic::Ordering::SeqCst) + { return; } match msg { SumeragiPacket(data) => { - self.sumeragi.incoming_message(data.into_v1()); + let iroha_core::sumeragi::message::VersionedPacket::V1(data) = *data; + self.sumeragi.incoming_message(data); + } + BlockSync(data) => { + let iroha_core::block_sync::message::VersionedMessage::V1(data) = *data; + self.block_sync.message(data).await } - BlockSync(data) => self.block_sync.message(data.into_v1()).await, Health => {} } } @@ -250,7 +257,7 @@ impl Iroha { let wsv_clone = wsv.clone(); transaction_validator - .validate_every(genesis.iter().cloned(), &wsv_clone) + .validate_every::(genesis.iter().cloned(), &wsv_clone) .wrap_err("Transaction validation failed in genesis block")?; span.exit(); } @@ -374,6 +381,7 @@ impl Iroha { if let Some((substrate_telemetry, telemetry_future)) = telemetry { #[cfg(feature = "dev-telemetry")] { + // TODO: JoinHandle is just dropped here. 
It should be joined iroha_telemetry::dev::start(&config.telemetry, telemetry_future) .await .wrap_err("Failed to setup telemetry for futures")?; @@ -426,17 +434,15 @@ impl Iroha { } fn genesis_account(public_key: iroha_crypto::PublicKey) -> Account { - let genesis_account_id = AccountId::genesis(); - Account::new(genesis_account_id.clone(), [public_key]).build(genesis_account_id) + Account::new(GENESIS_ACCOUNT_ID.clone(), [public_key]).build(GENESIS_ACCOUNT_ID.clone()) } fn genesis_domain(configuration: &Configuration) -> Domain { - let genesis_account_id = AccountId::genesis(); let account_public_key = &configuration.genesis.account_public_key; - let mut domain = Domain::new(DomainId::genesis()).build(genesis_account_id); + let mut domain = Domain::new(GENESIS_DOMAIN_ID.clone()).build(GENESIS_ACCOUNT_ID.clone()); domain.accounts.insert( - ::Id::genesis(), + GENESIS_ACCOUNT_ID.clone(), genesis_account(account_public_key.clone()), ); diff --git a/cli/src/torii/mod.rs b/cli/src/torii/mod.rs index 777a6aa59f3..4b12a455c39 100644 --- a/cli/src/torii/mod.rs +++ b/cli/src/torii/mod.rs @@ -50,7 +50,7 @@ pub enum Error { Query(#[from] iroha_data_model::query::error::QueryExecutionFailure), /// Failed to accept transaction #[error("Failed to accept transaction: {0}")] - AcceptTransaction(#[from] iroha_data_model::transaction::error::AcceptTransactionFailure), + AcceptTransaction(#[from] iroha_genesis::AcceptTransactionFailure), /// Error while getting or setting configuration #[error("Configuration error: {0}")] Config(#[source] eyre::Report), diff --git a/cli/src/torii/routing.rs b/cli/src/torii/routing.rs index 89c75260eef..1287bf4a077 100644 --- a/cli/src/torii/routing.rs +++ b/cli/src/torii/routing.rs @@ -19,18 +19,16 @@ use iroha_config::{ use iroha_core::smartcontracts::isi::query::ValidQueryRequest; use iroha_crypto::SignatureOf; use iroha_data_model::{ - block::{ - stream::{ - BlockMessage, BlockSubscriptionRequest, VersionedBlockMessage, - VersionedBlockSubscriptionRequest, - }, - VersionedCommittedBlock, + block::stream::{ + BlockMessage, BlockSubscriptionRequest, VersionedBlockMessage, + VersionedBlockSubscriptionRequest, }, predicate::PredicateBox, prelude::*, query, query::error::QueryExecutionFailure, }; +use iroha_genesis::AcceptedTransaction; #[cfg(feature = "telemetry")] use iroha_telemetry::metrics::Status; use pagination::{paginate, Paginate}; @@ -75,7 +73,11 @@ impl VerifiedQuery { ))); } wsv.validator_view() - .validate(wsv, &self.payload.account_id, self.payload.query.clone()) + .validate( + wsv, + &self.payload.account_id, + self.payload.query.clone().into(), + ) .map_err(|err| QueryExecutionFailure::Permission(err.to_string()))?; Ok(( ValidQueryRequest::new(self.payload.query), @@ -106,12 +108,9 @@ pub(crate) async fn handle_instructions( sumeragi: Arc, transaction: VersionedSignedTransaction, ) -> Result { - let transaction: SignedTransaction = transaction.into_v1(); let transaction = AcceptedTransaction::accept::(transaction, &iroha_cfg.sumeragi.transaction_limits) - .map_err(Error::AcceptTransaction)? 
- .into(); - #[allow(clippy::map_err_ignore)] + .map_err(|(_tx, error)| Error::AcceptTransaction(error))?; queue .push(transaction, &sumeragi.wsv_mutex_access()) .map_err(|queue::Failure { tx, err }| { @@ -233,11 +232,9 @@ async fn handle_pending_transactions( queue .all_transactions(&sumeragi.wsv_mutex_access()) .into_iter() - .map(VersionedAcceptedTransaction::into_v1) - .map(SignedTransaction::from) + .map(VersionedSignedTransaction::from) .paginate(pagination) - .collect::() - .into(), + .collect::(), )) } @@ -283,7 +280,8 @@ async fn handle_post_configuration( #[iroha_futures::telemetry_future] async fn handle_blocks_stream(kura: Arc, mut stream: WebSocket) -> eyre::Result<()> { let subscription_request: VersionedBlockSubscriptionRequest = stream.recv().await?; - let BlockSubscriptionRequest(mut from_height) = subscription_request.into_v1(); + let VersionedBlockSubscriptionRequest::V1(BlockSubscriptionRequest(mut from_height)) = + subscription_request; let mut interval = tokio::time::interval(std::time::Duration::from_millis(10)); loop { @@ -313,7 +311,7 @@ async fn handle_blocks_stream(kura: Arc, mut stream: WebSocket) -> eyre::R stream // TODO: to avoid clone `VersionedBlockMessage` could be split into sending and receiving parts .send(VersionedBlockMessage::from( - BlockMessage(VersionedCommittedBlock::clone(&block)), + BlockMessage(VersionedSignedBlock::clone(&block)), )) .await?; from_height += 1; @@ -415,7 +413,6 @@ mod subscription { async fn handle_version(sumeragi: Arc) -> Json { use iroha_version::Version; - #[allow(clippy::expect_used)] let string = sumeragi .wsv_mutex_access() .latest_block_ref() @@ -490,7 +487,6 @@ impl Torii { } } - #[allow(opaque_hidden_inferred_bound)] #[cfg(feature = "telemetry")] /// Helper function to create router. This router can tested without starting up an HTTP server fn create_telemetry_router( @@ -528,7 +524,6 @@ impl Torii { } /// Helper function to create router. This router can tested without starting up an HTTP server - #[allow(opaque_hidden_inferred_bound)] pub(crate) fn create_api_router( &self, ) -> impl warp::Filter + Clone + Send { diff --git a/client/Cargo.toml b/client/Cargo.toml index 7d0ab4d6f96..9111770a7ae 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -46,7 +46,7 @@ futures-util = "0.3.25" [dev-dependencies] iroha_wasm_builder = { version = "=2.0.0-pre-rc.13", path = "../wasm_builder" } -# TODO: These three activate `transparent_api` but client should never activate this feature. +# TODO: These three activate `transparent-api` but client should never activate this feature. # Additionally there is a dependency on iroha_core in dev-dependencies in telemetry/derive # Hopefully, once the integration tests migration is finished these can be removed iroha = { path = "../cli", features = ["dev-telemetry", "telemetry"] } diff --git a/client/benches/tps/utils.rs b/client/benches/tps/utils.rs index 59aeaff31ca..a206227d036 100644 --- a/client/benches/tps/utils.rs +++ b/client/benches/tps/utils.rs @@ -126,8 +126,11 @@ impl Config { let block = blocks .next() .expect("The block is not yet in WSV. 
Need more sleep?"); - let block = block.as_v1(); - (block.transactions.len(), block.rejected_transactions.len()) + let payload = block.payload(); + ( + payload.transactions().len(), + payload.rejected_transactions.len(), + ) }) .fold((0, 0), |acc, pair| (acc.0 + pair.0, acc.1 + pair.1)); #[allow(clippy::float_arithmetic, clippy::cast_precision_loss)] @@ -182,7 +185,7 @@ impl MeasurerUnit { 100_000, ) .sign(keypair)?; - self.client.submit_transaction_blocking(grant_tx)?; + self.client.submit_transaction_blocking(&grant_tx)?; let mint_a_rose = MintBox::new(1_u32, asset_id); self.client.submit_blocking(mint_a_rose)?; @@ -236,7 +239,7 @@ impl MeasurerUnit { let transaction = submitter .sign_transaction(transaction) .expect("Failed to sign transaction"); - if let Err(error) = submitter.submit_transaction(transaction) { + if let Err(error) = submitter.submit_transaction(&transaction) { iroha_logger::error!(?error, "Failed to submit transaction"); } nonce = nonce.wrapping_add(1); diff --git a/client/examples/tutorial.rs b/client/examples/tutorial.rs index 2c66f458165..ee4d7ce79c1 100644 --- a/client/examples/tutorial.rs +++ b/client/examples/tutorial.rs @@ -83,7 +83,7 @@ fn domain_registration_test(config: &Configuration) -> Result<(), Error> { // #region domain_register_example_submit_tx // Submit a prepared domain registration transaction iroha_client - .submit_transaction(tx) + .submit_transaction(&tx) .wrap_err("Failed to submit transaction")?; // #endregion domain_register_example_submit_tx @@ -152,7 +152,7 @@ fn account_registration_test(config: &Configuration) -> Result<(), Error> { // #region register_account_submit_tx // Submit a prepared account registration transaction - iroha_client.submit_transaction(tx)?; + iroha_client.submit_transaction(&tx)?; // #endregion register_account_submit_tx // Finish the test successfully diff --git a/client/src/client.rs b/client/src/client.rs index eccce391a73..12580e5f53a 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -14,7 +14,7 @@ use http_default::{AsyncWebSocketStream, WebSocketStream}; use iroha_config::{client::Configuration, torii::uri, GetConfiguration, PostConfiguration}; use iroha_crypto::{HashOf, KeyPair}; use iroha_data_model::{ - block::VersionedCommittedBlock, predicate::PredicateBox, prelude::*, + block::VersionedSignedBlock, predicate::PredicateBox, prelude::*, query::error::QueryExecutionFailure, transaction::TransactionPayload, }; use iroha_logger::prelude::*; @@ -328,7 +328,7 @@ impl Client { &self, instructions: impl Into, metadata: UnlimitedMetadata, - ) -> Result { + ) -> Result { let transaction = TransactionBuilder::new( self.account_id.clone(), instructions, @@ -352,7 +352,10 @@ impl Client { /// /// # Errors /// Fails if signature generation fails - pub fn sign_transaction(&self, transaction: Tx) -> Result { + pub fn sign_transaction( + &self, + transaction: Tx, + ) -> Result { transaction .sign(self.key_pair.clone()) .wrap_err("Failed to sign transaction") @@ -376,7 +379,7 @@ impl Client { pub fn submit( &self, instruction: impl Into + Debug, - ) -> Result> { + ) -> Result> { let isi = instruction.into(); self.submit_all([isi]) } @@ -389,7 +392,7 @@ impl Client { pub fn submit_all( &self, instructions: impl IntoIterator, - ) -> Result> { + ) -> Result> { self.submit_all_with_metadata(instructions, UnlimitedMetadata::new()) } @@ -403,7 +406,7 @@ impl Client { &self, instruction: InstructionBox, metadata: UnlimitedMetadata, - ) -> Result> { + ) -> Result> { self.submit_all_with_metadata([instruction], 
metadata) } @@ -417,8 +420,8 @@ impl Client { &self, instructions: impl IntoIterator, metadata: UnlimitedMetadata, - ) -> Result> { - self.submit_transaction(self.build_transaction(instructions, metadata)?) + ) -> Result> { + self.submit_transaction(&self.build_transaction(instructions, metadata)?) } /// Submit a prebuilt transaction. @@ -428,8 +431,8 @@ impl Client { /// Fails if sending transaction to peer fails or if it response with error pub fn submit_transaction( &self, - transaction: SignedTransaction, - ) -> Result> { + transaction: &VersionedSignedTransaction, + ) -> Result> { iroha_logger::trace!(tx=?transaction, "Submitting"); let (req, hash, resp_handler) = self.prepare_transaction_request::(transaction)?; @@ -448,8 +451,8 @@ impl Client { /// Fails if sending a transaction to a peer fails or there is an error in the response pub fn submit_transaction_blocking( &self, - transaction: SignedTransaction, - ) -> Result> { + transaction: &VersionedSignedTransaction, + ) -> Result> { let (init_sender, init_receiver) = tokio::sync::oneshot::channel(); let hash = transaction.hash(); @@ -478,8 +481,8 @@ impl Client { fn listen_for_tx_confirmation( &self, init_sender: tokio::sync::oneshot::Sender, - hash: HashOf, - ) -> Result> { + hash: HashOf, + ) -> Result> { let deadline = tokio::time::Instant::now() + self.transaction_status_timeout; let rt = tokio::runtime::Builder::new_current_thread() .enable_all() @@ -501,22 +504,19 @@ impl Client { event_iterator_result? }; - let result = tokio::time::timeout_at( + tokio::time::timeout_at( deadline, - Self::listen_for_tx_confirmation_loop(&mut event_iterator, hash), + Self::listen_for_tx_confirmation_loop(&mut event_iterator), ) .await .map_err(Into::into) - .and_then(std::convert::identity); + .and_then(std::convert::identity)?; event_iterator.close().await; - result + Ok(hash) }) } - async fn listen_for_tx_confirmation_loop( - event_iterator: &mut AsyncEventStream, - hash: HashOf, - ) -> Result> { + async fn listen_for_tx_confirmation_loop(event_iterator: &mut AsyncEventStream) -> Result<()> { while let Some(event) = event_iterator.next().await { if let Event::Pipeline(this_event) = event? 
{ match this_event.status() { @@ -524,7 +524,7 @@ impl Client { PipelineStatus::Rejected(reason) => { return Err(reason.clone()).wrap_err("Transaction rejected") } - PipelineStatus::Committed => return Ok(hash.transmute()), + PipelineStatus::Committed => return Ok(()), } } } @@ -545,14 +545,9 @@ impl Client { /// Fails if transaction check fails pub fn prepare_transaction_request( &self, - transaction: SignedTransaction, - ) -> Result<( - B, - HashOf, - TransactionResponseHandler, - )> { + transaction: &VersionedSignedTransaction, + ) -> Result<(B, HashOf, TransactionResponseHandler)> { transaction.check_limits(&self.transaction_limits)?; - let transaction: VersionedSignedTransaction = transaction.into(); let hash = transaction.hash(); let transaction_bytes: Vec = transaction.encode_versioned(); @@ -576,7 +571,7 @@ impl Client { pub fn submit_blocking( &self, instruction: impl Into, - ) -> Result> { + ) -> Result> { self.submit_all_blocking(vec![instruction.into()]) } @@ -588,7 +583,7 @@ impl Client { pub fn submit_all_blocking( &self, instructions: impl IntoIterator, - ) -> Result> { + ) -> Result> { self.submit_all_blocking_with_metadata(instructions, UnlimitedMetadata::new()) } @@ -602,7 +597,7 @@ impl Client { &self, instruction: impl Into, metadata: UnlimitedMetadata, - ) -> Result> { + ) -> Result> { self.submit_all_blocking_with_metadata(vec![instruction.into()], metadata) } @@ -616,9 +611,9 @@ impl Client { &self, instructions: impl IntoIterator, metadata: UnlimitedMetadata, - ) -> Result> { + ) -> Result> { let transaction = self.build_transaction(instructions, metadata)?; - self.submit_transaction_blocking(transaction) + self.submit_transaction_blocking(&transaction) } /// Lower-level Query API entry point. Prepares an http-request and returns it with an http-response handler. @@ -914,7 +909,7 @@ impl Client { pub fn listen_for_blocks( &self, height: u64, - ) -> Result>> { + ) -> Result>> { blocks_api::BlockIterator::new(self.blocks_handler(height)?) 
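// Reviewer note (illustration, not part of the patch): a minimal caller-side sketch of the
// reworked client API, assuming a configured `client` and that the block iterator yields
// `Result<VersionedSignedBlock>`. Transactions are now passed by reference, and the returned
// hash is typed against `VersionedSignedTransaction`:
//
//     let tx = client.build_transaction(instructions, UnlimitedMetadata::new())?;
//     let hash = client.submit_transaction_blocking(&tx)?; // HashOf<VersionedSignedTransaction>
//     for block in client.listen_for_blocks(1)? {
//         let block: VersionedSignedBlock = block?;
//     }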
} @@ -959,11 +954,11 @@ impl Client { /// - if subscribing to websocket fails pub fn get_original_transaction_with_pagination( &self, - transaction: &SignedTransaction, + transaction: &VersionedSignedTransaction, retry_count: u32, retry_in: Duration, pagination: Pagination, - ) -> Result> { + ) -> Result> { let pagination: Vec<_> = pagination.into(); for _ in 0..retry_count { let response = DefaultRequestBuilder::new( @@ -1009,10 +1004,10 @@ impl Client { /// - if sending request fails pub fn get_original_transaction( &self, - transaction: &SignedTransaction, + transaction: &VersionedSignedTransaction, retry_count: u32, retry_in: Duration, - ) -> Result> { + ) -> Result> { self.get_original_transaction_with_pagination( transaction, retry_count, @@ -1347,8 +1342,8 @@ pub mod events_api { type Event = iroha_data_model::prelude::Event; fn message(&self, message: Vec) -> Result { - let event_socket_message = - VersionedEventMessage::decode_all_versioned(&message)?.into_v1(); + let VersionedEventMessage::V1(event_socket_message) = + VersionedEventMessage::decode_all_versioned(&message)?; Ok(event_socket_message.into()) } } @@ -1426,10 +1421,11 @@ mod blocks_api { pub struct Events; impl FlowEvents for Events { - type Event = iroha_data_model::block::VersionedCommittedBlock; + type Event = iroha_data_model::block::VersionedSignedBlock; fn message(&self, message: Vec) -> Result { - let block_msg = VersionedBlockMessage::decode_all_versioned(&message)?.into_v1(); + let VersionedBlockMessage::V1(block_msg) = + VersionedBlockMessage::decode_all_versioned(&message)?; Ok(block_msg.into()) } } @@ -1498,7 +1494,7 @@ pub mod asset { pub mod block { //! Module with queries related to blocks - use iroha_crypto::Hash; + use iroha_data_model::block::BlockPayload; use super::*; @@ -1513,7 +1509,9 @@ pub mod block { } /// Construct a query to find block header by hash - pub fn header_by_hash(hash: impl Into>) -> FindBlockHeaderByHash { + pub fn header_by_hash( + hash: impl Into>>, + ) -> FindBlockHeaderByHash { FindBlockHeaderByHash::new(hash) } } @@ -1535,7 +1533,6 @@ pub mod domain { pub mod transaction { //! 
Module with queries for transactions - use iroha_crypto::Hash; use super::*; @@ -1552,7 +1549,9 @@ pub mod transaction { } /// Construct a query to retrieve transaction by hash - pub fn by_hash(hash: impl Into>) -> FindTransactionByHash { + pub fn by_hash( + hash: impl Into>>, + ) -> FindTransactionByHash { FindTransactionByHash::new(hash) } } @@ -1664,11 +1663,11 @@ mod tests { .unwrap() }; let tx1 = build_transaction(); - let mut tx2 = build_transaction(); + let VersionedSignedTransaction::V1(mut tx2) = build_transaction(); - tx2.payload.creation_time = tx1.payload.creation_time; + tx2.payload.creation_time_ms = tx1.payload().creation_time_ms; assert_ne!(tx1.hash(), tx2.hash()); - tx2.payload.nonce = tx1.payload.nonce; + tx2.payload.nonce = tx1.payload().nonce; assert_eq!(tx1.hash(), tx2.hash()); } diff --git a/client/tests/integration/asset.rs b/client/tests/integration/asset.rs index 6c8c8f858d1..c9a085c02df 100644 --- a/client/tests/integration/asset.rs +++ b/client/tests/integration/asset.rs @@ -99,7 +99,7 @@ fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount() -> ); let instructions: Vec = vec![create_asset.into(), mint.into()]; let tx = test_client.build_transaction(instructions, metadata)?; - test_client.submit_transaction(tx)?; + test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id), |result| { result.iter().any(|asset| { asset.id().definition_id == asset_definition_id @@ -130,7 +130,7 @@ fn client_add_big_asset_quantity_to_existing_asset_should_increase_asset_amount( ); let instructions: Vec = vec![create_asset.into(), mint.into()]; let tx = test_client.build_transaction(instructions, metadata)?; - test_client.submit_transaction(tx)?; + test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id), |result| { result.iter().any(|asset| { asset.id().definition_id == asset_definition_id @@ -162,7 +162,7 @@ fn client_add_asset_with_decimal_should_increase_asset_amount() -> Result<()> { ); let instructions: Vec = vec![create_asset.into(), mint.into()]; let tx = test_client.build_transaction(instructions, metadata)?; - test_client.submit_transaction(tx)?; + test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id.clone()), |result| { result.iter().any(|asset| { asset.id().definition_id == asset_definition_id @@ -271,7 +271,7 @@ fn find_rate_and_make_exchange_isi_should_succeed() { .expect("Failed to sign seller transaction"); test_client - .submit_transaction_blocking(grant_asset_transfer_tx) + .submit_transaction_blocking(&grant_asset_transfer_tx) .expect(&format!( "Failed to grant permission alice to transfer {}.", asset_id diff --git a/client/tests/integration/burn_public_keys.rs b/client/tests/integration/burn_public_keys.rs index 6a1ce05546b..508213d6167 100644 --- a/client/tests/integration/burn_public_keys.rs +++ b/client/tests/integration/burn_public_keys.rs @@ -21,9 +21,8 @@ fn submit_and_get( }; let hash = tx.hash(); - let _ = client.submit_transaction_blocking(tx); - - client.request(transaction::by_hash(*hash)).unwrap() + let _ = client.submit_transaction_blocking(&tx); + client.request(transaction::by_hash(hash)).unwrap().tx_value } fn account_keys_count(client: &mut Client, account_id: AccountId) -> usize { diff --git a/client/tests/integration/events/data.rs b/client/tests/integration/events/data.rs index 03b2a43e7ac..516ef4f38aa 100644 --- a/client/tests/integration/events/data.rs +++ 
b/client/tests/integration/events/data.rs @@ -117,7 +117,7 @@ fn transaction_execution_should_produce_events( let transaction = client .build_transaction(executable, UnlimitedMetadata::new()) .unwrap(); - client.submit_transaction_blocking(transaction)?; + client.submit_transaction_blocking(&transaction)?; // assertion for i in 0..4_usize { diff --git a/client/tests/integration/events/pipeline.rs b/client/tests/integration/events/pipeline.rs index 6024150765b..e76409c8c25 100644 --- a/client/tests/integration/events/pipeline.rs +++ b/client/tests/integration/events/pipeline.rs @@ -54,7 +54,7 @@ fn test_with_instruction_and_status_and_port( handles.push(handle_validated); } // When - submitter.submit_transaction(transaction)?; + submitter.submit_transaction(&transaction)?; thread::sleep(pipeline_time * 2); // Then for handle in handles { @@ -66,7 +66,7 @@ fn test_with_instruction_and_status_and_port( #[derive(Clone)] struct Checker { listener: iroha_client::client::Client, - hash: iroha_crypto::HashOf, + hash: iroha_crypto::HashOf, } impl Checker { @@ -78,7 +78,7 @@ impl Checker { PipelineEventFilter::new() .entity_kind(PipelineEntityKind::Transaction) .status_kind(status_kind) - .hash(*self.hash), + .hash(self.hash.into()), )) .expect("Failed to create event iterator."); let event_result = event_iterator.next().expect("Stream closed"); diff --git a/client/tests/integration/multisignature_transaction.rs b/client/tests/integration/multisignature_transaction.rs index 317a3bd2e82..4cc21bb990d 100644 --- a/client/tests/integration/multisignature_transaction.rs +++ b/client/tests/integration/multisignature_transaction.rs @@ -67,7 +67,7 @@ fn multisignature_transactions_should_wait_for_all_signatures() { .expect("Failed to create transaction."); iroha_client .submit_transaction( - iroha_client + &iroha_client .sign_transaction(transaction) .expect("Failed to sign transaction."), ) @@ -101,7 +101,7 @@ fn multisignature_transactions_should_wait_for_all_signatures() { .expect("Found no pending transaction for this account."); iroha_client_2 .submit_transaction( - iroha_client_2 + &iroha_client_2 .sign_transaction(transaction) .expect("Failed to sign transaction."), ) diff --git a/client/tests/integration/non_mintable.rs b/client/tests/integration/non_mintable.rs index b74a18b831b..15a47b9178a 100644 --- a/client/tests/integration/non_mintable.rs +++ b/client/tests/integration/non_mintable.rs @@ -32,7 +32,7 @@ fn non_mintable_asset_can_be_minted_once_but_not_twice() -> Result<()> { let tx = test_client.build_transaction(instructions, metadata)?; // We can register and mint the non-mintable token - test_client.submit_transaction(tx)?; + test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id.clone()), |result| { result.iter().any(|asset| { asset.id().definition_id == asset_definition_id diff --git a/client/tests/integration/permissions.rs b/client/tests/integration/permissions.rs index b9c504c71c0..9c5cbe0dd5c 100644 --- a/client/tests/integration/permissions.rs +++ b/client/tests/integration/permissions.rs @@ -238,7 +238,7 @@ fn permissions_differ_not_only_by_names() { .sign(mouse_keypair.clone()) .expect("Failed to sign mouse transaction"); client - .submit_transaction_blocking(grant_hats_access_tx) + .submit_transaction_blocking(&grant_hats_access_tx) .expect("Failed grant permission to modify Mouse's hats"); // Checking that Alice can modify Mouse's hats ... 
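(Reviewer sketch, not part of the patch: the recurring pattern in these test updates is sign-then-submit-by-reference. Assuming the client API from this diff, with hypothetical variable names, a transaction built on behalf of another account is signed with that account's key pair and then submitted by reference:)

    let grant_tx = TransactionBuilder::new(mouse_id, vec![grant.into()], 100_000)
        .sign(mouse_keypair)?;
    client.submit_transaction_blocking(&grant_tx)?;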
@@ -278,7 +278,7 @@ fn permissions_differ_not_only_by_names() { .expect("Failed to sign mouse transaction"); client - .submit_transaction_blocking(grant_shoes_access_tx) + .submit_transaction_blocking(&grant_shoes_access_tx) .expect("Failed grant permission to modify Mouse's shoes"); // Checking that Alice can modify Mouse's shoes diff --git a/client/tests/integration/roles.rs b/client/tests/integration/roles.rs index 45ba5c1d2fa..205bcc0618d 100644 --- a/client/tests/integration/roles.rs +++ b/client/tests/integration/roles.rs @@ -76,7 +76,7 @@ fn register_and_grant_role_for_metadata_access() -> Result<()> { let grant_role = GrantBox::new(role_id.clone(), alice_id.clone()); let grant_role_tx = TransactionBuilder::new(mouse_id.clone(), vec![grant_role.into()], 100_000) .sign(mouse_key_pair)?; - test_client.submit_transaction_blocking(grant_role_tx)?; + test_client.submit_transaction_blocking(&grant_role_tx)?; // Alice modifies Mouse's metadata let set_key_value = SetKeyValueBox::new( diff --git a/client/tests/integration/triggers/by_call_trigger.rs b/client/tests/integration/triggers/by_call_trigger.rs index 768d287b98f..5407eabc8c1 100644 --- a/client/tests/integration/triggers/by_call_trigger.rs +++ b/client/tests/integration/triggers/by_call_trigger.rs @@ -357,7 +357,7 @@ fn trigger_in_genesis_using_base64() -> Result<()> { // Registering trigger in genesis let mut genesis = GenesisNetwork::test(true).expect("Expected genesis"); - match &mut genesis.transactions[0].as_mut_v1().payload.instructions { + match &mut genesis.transactions[0].payload.instructions { Executable::Instructions(instructions) => { instructions.push(RegisterBox::new(trigger).into()); } diff --git a/client/tests/integration/tx_history.rs b/client/tests/integration/tx_history.rs index fe840d55520..7d018183f16 100644 --- a/client/tests/integration/tx_history.rs +++ b/client/tests/integration/tx_history.rs @@ -51,7 +51,7 @@ fn client_has_rejected_and_acepted_txs_should_return_tx_history() { .build_transaction(instructions, UnlimitedMetadata::new()) .expect("Failed to create transaction"); iroha_client - .submit_transaction(transaction) + .submit_transaction(&transaction) .expect("Failed to submit transaction"); } thread::sleep(pipeline_time * 5); @@ -69,7 +69,7 @@ fn client_has_rejected_and_acepted_txs_should_return_tx_history() { for tx in &transactions { assert_eq!(&tx.payload().account_id, &account_id); //check sorted - assert!(tx.payload().creation_time >= prev_creation_time); - prev_creation_time = tx.payload().creation_time; + assert!(tx.payload().creation_time_ms >= prev_creation_time); + prev_creation_time = tx.payload().creation_time_ms; } } diff --git a/client/tests/integration/unstable_network.rs b/client/tests/integration/unstable_network.rs index df1ebaecd44..e11e087f4d4 100644 --- a/client/tests/integration/unstable_network.rs +++ b/client/tests/integration/unstable_network.rs @@ -1,4 +1,5 @@ #![allow(clippy::restriction)] +#![cfg(debug_assertions)] use core::sync::atomic::Ordering; use std::thread; diff --git a/client/tests/integration/upgrade.rs b/client/tests/integration/upgrade.rs index ec124b77a0b..c21890dc1bc 100644 --- a/client/tests/integration/upgrade.rs +++ b/client/tests/integration/upgrade.rs @@ -34,7 +34,7 @@ fn validator_upgrade_should_work() -> Result<()> { ) .sign(admin_keypair.clone())?; let _ = client - .submit_transaction_blocking(transfer_rose_tx) + .submit_transaction_blocking(&transfer_rose_tx) .expect_err("Should fail"); // Upgrade Validator @@ -66,7 +66,7 @@ fn 
validator_upgrade_should_work() -> Result<()> { TransactionBuilder::new(admin_id, vec![transfer_alice_rose.into()], 100_000) .sign(admin_keypair)?; client - .submit_transaction_blocking(transfer_rose_tx) + .submit_transaction_blocking(&transfer_rose_tx) .expect("Should succeed"); Ok(()) diff --git a/client_cli/src/main.rs b/client_cli/src/main.rs index db315ebea33..628b909def1 100644 --- a/client_cli/src/main.rs +++ b/client_cli/src/main.rs @@ -217,7 +217,7 @@ pub fn submit( #[cfg(not(debug_assertions))] let err_msg = "Failed to submit transaction."; let hash = iroha_client - .submit_transaction_blocking(tx) + .submit_transaction_blocking(&tx) .wrap_err(err_msg)?; Ok(Box::new(hash)) } @@ -542,7 +542,7 @@ mod account { impl RunArgs for ListPermissions { fn run(self, cfg: &ClientConfiguration) -> Result> { let client = Client::new(cfg)?; - let find_all_permissions = FindPermissionTokensByAccountId { id: self.id.into() }; + let find_all_permissions = FindPermissionTokensByAccountId::new(self.id); let permissions = client .request(find_all_permissions) .wrap_err("Failed to get all account permissions")?; diff --git a/config/src/client.rs b/config/src/client.rs index 6e95fec8c50..31ff6ef36ca 100644 --- a/config/src/client.rs +++ b/config/src/client.rs @@ -6,10 +6,12 @@ use derive_more::Display; use eyre::{Result, WrapErr}; use iroha_config_base::derive::{Documented, Error as ConfigError, Proxy}; use iroha_crypto::prelude::*; -use iroha_data_model::{prelude::*, transaction}; +use iroha_data_model::prelude::*; use iroha_primitives::small::SmallStr; use serde::{Deserialize, Serialize}; +use crate::sumeragi::{DEFAULT_MAX_INSTRUCTION_NUMBER, DEFAULT_MAX_WASM_SIZE_BYTES}; + const DEFAULT_TRANSACTION_TIME_TO_LIVE_MS: u64 = 100_000; const DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS: u64 = 15_000; const DEFAULT_ADD_TRANSACTION_NONCE: bool = false; @@ -99,8 +101,8 @@ impl Default for ConfigurationProxy { transaction_time_to_live_ms: Some(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS), transaction_status_timeout_ms: Some(DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS), transaction_limits: Some(TransactionLimits::new( - transaction::DEFAULT_MAX_INSTRUCTION_NUMBER, - transaction::DEFAULT_MAX_WASM_SIZE_BYTES, + DEFAULT_MAX_INSTRUCTION_NUMBER, + DEFAULT_MAX_WASM_SIZE_BYTES, )), add_transaction_nonce: Some(DEFAULT_ADD_TRANSACTION_NONCE), } @@ -271,8 +273,8 @@ mod tests { transaction_time_to_live_ms in prop::option::of(Just(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS)), transaction_status_timeout_ms in prop::option::of(Just(DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS)), transaction_limits in prop::option::of(Just(TransactionLimits::new( - transaction::DEFAULT_MAX_INSTRUCTION_NUMBER, - transaction::DEFAULT_MAX_WASM_SIZE_BYTES, + DEFAULT_MAX_INSTRUCTION_NUMBER, + DEFAULT_MAX_WASM_SIZE_BYTES, ))), add_transaction_nonce in prop::option::of(Just(DEFAULT_ADD_TRANSACTION_NONCE)), ) diff --git a/config/src/sumeragi.rs b/config/src/sumeragi.rs index b33f87f7d20..f712e53e7f3 100644 --- a/config/src/sumeragi.rs +++ b/config/src/sumeragi.rs @@ -5,7 +5,7 @@ use std::{collections::HashSet, fmt::Debug, fs::File, io::BufReader, path::Path} use eyre::{Result, WrapErr}; use iroha_config_base::derive::{view, Documented, Proxy}; use iroha_crypto::prelude::*; -use iroha_data_model::{prelude::*, transaction}; +use iroha_data_model::prelude::*; use serde::{Deserialize, Serialize}; /// Default Amount of time peer waits for transactions before creating a block. 
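(Reviewer sketch, not part of the patch: with the defaults moved out of `iroha_data_model::transaction`, downstream code now builds transaction limits from the config crate's constants; import paths assumed from this diff:)

    use iroha_config::sumeragi::{DEFAULT_MAX_INSTRUCTION_NUMBER, DEFAULT_MAX_WASM_SIZE_BYTES};

    let limits = TransactionLimits::new(DEFAULT_MAX_INSTRUCTION_NUMBER, DEFAULT_MAX_WASM_SIZE_BYTES);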
@@ -15,6 +15,10 @@ pub const DEFAULT_COMMIT_TIME_LIMIT_MS: u64 = 4000; const DEFAULT_ACTOR_CHANNEL_CAPACITY: u32 = 100; const DEFAULT_GOSSIP_PERIOD_MS: u64 = 1000; const DEFAULT_GOSSIP_BATCH_SIZE: u32 = 500; +/// Default maximum number of instructions and expressions per transaction +pub const DEFAULT_MAX_INSTRUCTION_NUMBER: u64 = 2_u64.pow(12); +/// Default maximum size of a WASM smart contract, in bytes +pub const DEFAULT_MAX_WASM_SIZE_BYTES: u64 = 2_u64.pow(22); // 4 MiB /// Default estimation of consensus duration #[allow(clippy::integer_division)] @@ -30,21 +34,24 @@ view! { #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "SUMERAGI_")] pub struct Configuration { + /// Current Peer Identification. + pub peer_id: PeerId, /// The key pair consisting of a private and a public key. //TODO: consider putting a `#[serde(skip)]` on the proxy struct here #[view(ignore)] pub key_pair: KeyPair, - /// Current Peer Identification. - pub peer_id: PeerId, - /// The period of time a peer waits for the `CreatedBlock` message after getting a `TransactionReceipt` - pub block_time_ms: u64, - /// Optional list of predefined trusted peers. + /// List of predefined trusted peers. pub trusted_peers: TrustedPeers, - /// The period of time a peer waits for `CommitMessage` from the proxy tail. + /// Time a peer waits to produce a new block since the beginning of the voting round + /// if it cannot satisfy `max_transactions_in_block`. Unless there are no transactions, + /// the block will be committed after this time has elapsed. + pub block_time_ms: u64, + /// Time a peer waits for the block to be committed since the beginning of the voting round pub commit_time_limit_ms: u64, /// The limits to which transactions must adhere pub transaction_limits: TransactionLimits, /// Buffer capacity of actor's MPSC channel + #[deprecated(since = "2.0.0-pre-rc.13", note = "Will be removed in future versions")] pub actor_channel_capacity: u32, /// Maximum number of transactions in tx gossip batch message. While configuring this, pay attention to `p2p` max message size. pub gossip_batch_size: u32, @@ -66,8 +73,8 @@ impl Default for ConfigurationProxy { block_time_ms: Some(DEFAULT_BLOCK_TIME_MS), commit_time_limit_ms: Some(DEFAULT_COMMIT_TIME_LIMIT_MS), transaction_limits: Some(TransactionLimits::new( - transaction::DEFAULT_MAX_INSTRUCTION_NUMBER, - transaction::DEFAULT_MAX_WASM_SIZE_BYTES, + DEFAULT_MAX_INSTRUCTION_NUMBER, + DEFAULT_MAX_WASM_SIZE_BYTES, )), actor_channel_capacity: Some(DEFAULT_ACTOR_CHANNEL_CAPACITY), gossip_batch_size: Some(DEFAULT_GOSSIP_BATCH_SIZE), @@ -106,8 +113,7 @@ impl Configuration { } } -/// `SumeragiConfiguration` provides an ability to define parameters -/// such as `BLOCK_TIME_MS` and a list of `TRUSTED_PEERS`.
+/// Trusted peers #[derive(Debug, Clone, Default, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "UPPERCASE")] #[serde(transparent)] @@ -197,8 +203,8 @@ pub mod tests { trusted_peers in Just(None), commit_time_limit_ms in prop::option::of(Just(DEFAULT_COMMIT_TIME_LIMIT_MS)), transaction_limits in prop::option::of(Just(TransactionLimits::new( - transaction::DEFAULT_MAX_INSTRUCTION_NUMBER, - transaction::DEFAULT_MAX_WASM_SIZE_BYTES, + DEFAULT_MAX_INSTRUCTION_NUMBER, + DEFAULT_MAX_WASM_SIZE_BYTES, ))), actor_channel_capacity in prop::option::of(Just(DEFAULT_ACTOR_CHANNEL_CAPACITY)), gossip_batch_size in prop::option::of(Just(DEFAULT_GOSSIP_BATCH_SIZE)), diff --git a/configs/peer/config.json b/configs/peer/config.json index 2798e25cdba..8cbabfab005 100644 --- a/configs/peer/config.json +++ b/configs/peer/config.json @@ -10,10 +10,10 @@ "DEBUG_OUTPUT_NEW_BLOCKS": false }, "SUMERAGI": { - "KEY_PAIR": null, "PEER_ID": null, - "BLOCK_TIME_MS": 2000, + "KEY_PAIR": null, "TRUSTED_PEERS": null, + "BLOCK_TIME_MS": 2000, "COMMIT_TIME_LIMIT_MS": 4000, "TRANSACTION_LIMITS": { "max_instruction_number": 4096, diff --git a/configs/peer/validator.wasm b/configs/peer/validator.wasm index d806ab45480..298e8635357 100644 Binary files a/configs/peer/validator.wasm and b/configs/peer/validator.wasm differ diff --git a/core/Cargo.toml b/core/Cargo.toml index c03d31667ca..195cc90d029 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -37,7 +37,7 @@ is-it-maintained-open-issues = { repository = "https://github.com/hyperledger/ir maintenance = { status = "actively-developed" } [dependencies] -iroha_data_model = { version = "=2.0.0-pre-rc.13", path = "../data_model", features = ["transparent_api"] } +iroha_data_model = { version = "=2.0.0-pre-rc.13", path = "../data_model", features = ["transparent-api"] } iroha_macro = { version = "=2.0.0-pre-rc.13", path = "../macro" } iroha_p2p = { version = "=2.0.0-pre-rc.13", path = "../p2p" } iroha_logger = { version = "=2.0.0-pre-rc.13", path = "../logger"} diff --git a/core/benches/kura.rs b/core/benches/kura.rs index 97402e59592..371c19e011c 100644 --- a/core/benches/kura.rs +++ b/core/benches/kura.rs @@ -4,9 +4,13 @@ use std::str::FromStr as _; use byte_unit::Byte; use criterion::{criterion_group, criterion_main, Criterion}; -use iroha_core::{block::*, kura::BlockStore, prelude::*, tx::TransactionValidator, wsv::World}; +use iroha_core::{ + block::*, kura::BlockStore, prelude::*, sumeragi::network_topology::Topology, + tx::TransactionValidator, wsv::World, +}; use iroha_crypto::KeyPair; -use iroha_data_model::{block::VersionedCommittedBlock, prelude::*}; +use iroha_data_model::prelude::*; +use iroha_genesis::AcceptedTransaction; use iroha_version::scale::EncodeVersioned; use tokio::{fs, runtime::Runtime}; @@ -35,43 +39,40 @@ async fn measure_block_size_for_n_validators(n_validators: u32) { max_wasm_size_bytes: 0, }; let tx = AcceptedTransaction::accept::(tx, &transaction_limits) - .expect("Failed to accept Transaction.") - .into(); + .expect("Failed to accept Transaction."); let dir = tempfile::tempdir().expect("Could not create tempfile."); - let kura = - iroha_core::kura::Kura::new(iroha_config::kura::Mode::Strict, dir.path(), false).unwrap(); + let kura = iroha_core::kura::Kura::new(iroha_config::kura::Mode::Strict, dir.path(), false) + .expect("Valid"); let _thread_handle = iroha_core::kura::Kura::start(kura.clone()); - let mut block = BlockBuilder { - transactions: vec![tx], - event_recommendations: Vec::new(), - height: 1, - previous_block_hash: None, 
- view_change_index: 0, - committed_with_topology: iroha_core::sumeragi::network_topology::Topology::new(Vec::new()), - key_pair: KeyPair::generate().expect("Failed to generate KeyPair"), - transaction_validator: &TransactionValidator::new(transaction_limits), - wsv: WorldStateView::new(World::new(), kura), - } - .build(); + let topology = Topology::new(Vec::new()); + let wsv = WorldStateView::new(World::new(), kura); + let transaction_validator = TransactionValidator::new(transaction_limits); + let mut block = BlockBuilder::new(vec![tx], topology.clone(), Vec::new()) + .chain_first(&transaction_validator, wsv) + .sign(KeyPair::generate().expect("Failed to generate KeyPair")) + .expect("Valid"); for _ in 1..n_validators { block = block .sign(KeyPair::generate().expect("Failed to generate KeyPair.")) - .unwrap(); + .expect("Valid"); } - let block: VersionedCommittedBlock = block.commit_unchecked().into(); let mut block_store = BlockStore::new(dir.path()) .lock() .expect("Failed to lock store"); - block_store.create_files_if_they_do_not_exist().unwrap(); + block_store + .create_files_if_they_do_not_exist() + .expect("Valid"); - let serialized_block: Vec = block.encode_versioned(); + let serialized_block: Vec = VersionedSignedBlock::from(block).encode_versioned(); block_store .append_block_to_chain(&serialized_block) - .unwrap(); + .expect("Valid"); - let metadata = fs::metadata(dir.path().join("blocks.data")).await.unwrap(); + let metadata = fs::metadata(dir.path().join("blocks.data")) + .await + .expect("Valid"); let file_size = Byte::from_bytes(u128::from(metadata.len())).get_appropriate_unit(false); println!("For {n_validators} validators: {file_size}"); } @@ -85,7 +86,9 @@ async fn measure_block_size_async() { } fn measure_block_size(_criterion: &mut Criterion) { - Runtime::new().unwrap().block_on(measure_block_size_async()); + Runtime::new() + .expect("Valid") + .block_on(measure_block_size_async()); } criterion_group!(kura, measure_block_size); diff --git a/core/benches/validation.rs b/core/benches/validation.rs index a47ad11d6d0..4aa4ac7b658 100644 --- a/core/benches/validation.rs +++ b/core/benches/validation.rs @@ -8,6 +8,7 @@ use iroha_core::{ sumeragi::network_topology::Topology, tx::TransactionValidator, wsv::World, }; use iroha_data_model::prelude::*; +use iroha_genesis::AcceptedTransaction; const TRANSACTION_TIME_TO_LIVE_MS: u64 = 100_000; @@ -19,7 +20,7 @@ const TRANSACTION_LIMITS: TransactionLimits = TransactionLimits { max_wasm_size_bytes: 0, }; -fn build_test_transaction(keys: KeyPair) -> SignedTransaction { +fn build_test_transaction(keys: KeyPair) -> VersionedSignedTransaction { let domain_name = "domain"; let domain_id = DomainId::from_str(domain_name).expect("does not panic"); let create_domain = RegisterBox::new(Domain::new(domain_id)); @@ -119,9 +120,8 @@ fn validate_transaction(criterion: &mut Criterion) { let _ = criterion.bench_function("validate", move |b| { let transaction_validator = TransactionValidator::new(TRANSACTION_LIMITS); b.iter(|| { - match transaction_validator.validate( + match transaction_validator.validate::( transaction.clone(), - false, &Arc::new(build_test_and_transient_wsv(keys.clone())), ) { Ok(_) => success_count += 1, @@ -132,46 +132,74 @@ fn validate_transaction(criterion: &mut Criterion) { println!("Success count: {success_count}, Failure count: {failure_count}"); } +fn chain_blocks(criterion: &mut Criterion) { + let keys = KeyPair::generate().expect("Failed to generate keys"); + let transaction = AcceptedTransaction::accept::( + 
build_test_transaction(keys.clone()), + &TRANSACTION_LIMITS, + ) + .expect("Failed to accept transaction."); + let transaction_validator = TransactionValidator::new(TRANSACTION_LIMITS); + let wsv = build_test_and_transient_wsv(keys); + let topology = Topology::new(Vec::new()); + let block = BlockBuilder::new(vec![transaction], topology, Vec::new()); + let previous_block = block + .clone() + .chain_first(&transaction_validator, wsv.clone()); + let mut previous_block_hash = previous_block.hash(); + + let mut success_count = 0; + let _ = criterion.bench_function("chain_block", |b| { + b.iter(|| { + success_count += 1; + + let new_block = block.clone().chain( + success_count, + Some(previous_block_hash), + 0, + &transaction_validator, + wsv.clone(), + ); + + previous_block_hash = new_block.hash(); + }); + }); + println!("Total count: {success_count}"); +} + fn sign_blocks(criterion: &mut Criterion) { let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = - AcceptedTransaction::accept::(build_test_transaction(keys), &TRANSACTION_LIMITS) - .expect("Failed to accept transaction."); + let transaction = AcceptedTransaction::accept::( + build_test_transaction(keys.clone()), + &TRANSACTION_LIMITS, + ) + .expect("Failed to accept transaction."); let transaction_validator = TransactionValidator::new(TRANSACTION_LIMITS); + let wsv = build_test_and_transient_wsv(keys); + let topology = Topology::new(Vec::new()); + let block = BlockBuilder::new(vec![transaction], topology, Vec::new()) + .chain_first(&transaction_validator, wsv); let key_pair = KeyPair::generate().expect("Failed to generate KeyPair."); - let kura = iroha_core::kura::Kura::blank_kura_for_testing(); - let mut success_count = 0; let mut failures_count = 0; let _ = criterion.bench_function("sign_block", |b| { - b.iter(|| { - let block = BlockBuilder { - transactions: vec![transaction.clone().into()], - event_recommendations: Vec::new(), - height: 1, - previous_block_hash: None, - view_change_index: 0, - committed_with_topology: Topology::new(Vec::new()), - key_pair: key_pair.clone(), - transaction_validator: &transaction_validator, - wsv: WorldStateView::new(World::new(), kura.clone()), - } - .build(); - - match block.sign(key_pair.clone()) { - Ok(_) => success_count += 1, - Err(_) => failures_count += 1, - } + b.iter(|| match block.clone().sign(key_pair.clone()) { + Ok(_) => success_count += 1, + Err(_) => failures_count += 1, }); }); println!("Success count: {success_count}, Failures count: {failures_count}"); } -criterion_group!( +criterion_group! { transactions, accept_transaction, sign_transaction, - validate_transaction -); -criterion_group!(blocks, sign_blocks); + validate_transaction, +} +criterion_group! { + blocks, + chain_blocks, + sign_blocks, +} criterion_main!(transactions, blocks); diff --git a/core/src/block.rs b/core/src/block.rs index 325b4562b8a..bad4c9e7184 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -1,8 +1,7 @@ //! This module contains [`Block`] structures for each state, it's -//! transitions, implementations and related traits -//! implementations. [`Block`]s are organised into a linear sequence -//! over time (also known as the block chain). A Block's life-cycle -//! starts from [`PendingBlock`]. +//! transitions, implementations and related trait implementations. +//! [`Block`]s are organised into a linear sequence over time (also known as the block chain). 
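// Reviewer note (illustration, not part of the patch): the rework below replaces the
// field-bag builder with a type-state pipeline whose stages are Pending, Chained, then
// ValidBlock. The kura bench in this diff drives it as:
//
//     let block = BlockBuilder::new(vec![tx], topology, Vec::new())
//         .chain_first(&transaction_validator, wsv)
//         .sign(key_pair)?;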
+ #![allow( clippy::module_name_repetitions, clippy::std_instead_of_core, clippy::std_instead_of_alloc, clippy::arithmetic_side_effects )] @@ -10,596 +9,696 @@ -use std::error::Error; +use std::error::Error as _; -use eyre::{bail, eyre, Context, Result}; +use eyre::{eyre, Context, Result}; use iroha_config::sumeragi::DEFAULT_CONSENSUS_ESTIMATION_MS; use iroha_crypto::{HashOf, KeyPair, MerkleTree, SignatureOf, SignaturesOf}; use iroha_data_model::{block::*, events::prelude::*, transaction::prelude::*}; -use parity_scale_codec::{Decode, Encode}; +use iroha_genesis::AcceptedTransaction; +pub use self::{chained::Chained, commit::CommittedBlock, pending::Pending, valid::ValidBlock}; use crate::{ prelude::*, sumeragi::network_topology::{Role, Topology}, tx::TransactionValidator, }; -/// Transaction data is permanently recorded in chunks called -/// blocks. -#[derive(Debug, Clone, Decode, Encode)] -pub struct PendingBlock { - /// Block header - pub header: BlockHeader, - /// Array of rejected transactions. - pub rejected_transactions: Vec, - /// Array of transactions, which successfully passed validation and consensus step. - pub transactions: Vec, - /// Event recommendations. - pub event_recommendations: Vec, - /// Signatures of peers which approved this block - pub signatures: SignaturesOf, -} +/// Builder for blocks +#[derive(Debug, Clone)] +pub struct BlockBuilder(B); -/// Builder for `PendingBlock` -pub struct BlockBuilder<'a> { - /// Block's transactions. - pub transactions: Vec, - /// Block's event recommendations. - pub event_recommendations: Vec, - /// The height of the block. - pub height: u64, - /// The hash of the previous block if there is one. - pub previous_block_hash: Option>, - /// The view change index this block was committed with. Produced by consensus. - pub view_change_index: u64, - /// The topology thihs block was committed with. Produced by consensus. - pub committed_with_topology: Topology, - /// The keypair used to sign this block. - pub key_pair: KeyPair, - /// The transaction validator to be used when validating the block. - pub transaction_validator: &'a TransactionValidator, - /// The world state to be used when validating the block. - pub wsv: WorldStateView, -} +mod pending { + use super::*; -impl BlockBuilder<'_> { - /// Create a new [`PendingBlock`] from transactions. - pub fn build(self) -> PendingBlock { - let timestamp = crate::current_time().as_millis(); - // TODO: Need to check if the `transactions` vector is empty. It shouldn't be allowed. - - let mut header = BlockHeader { - timestamp, - consensus_estimation: DEFAULT_CONSENSUS_ESTIMATION_MS, - height: self.height, - view_change_index: self.view_change_index, - previous_block_hash: self.previous_block_hash, - transactions_hash: None, - rejected_transactions_hash: None, - committed_with_topology: self.committed_with_topology.sorted_peers, - }; + /// First stage in the life-cycle of a [`Block`]. + /// In the beginning the block is assumed to be verified and to contain only accepted transactions. + /// Additionally the block must retain events emitted during the execution of on-chain logic during + /// the previous round, which might then be processed by the trigger system. + #[derive(Debug, Clone)] + pub struct Pending { + /// Unix timestamp + timestamp_ms: u128, + /// Collection of transactions which have been accepted. + /// Transactions will be validated when the block is chained. + transactions: Vec, + /// The topology at the time of block commit.
+ commit_topology: Topology, + /// Event recommendations for use in triggers and off-chain work + event_recommendations: Vec, + } - let mut txs = Vec::new(); - let mut rejected = Vec::new(); + impl BlockBuilder { + /// Create [`Self`] + /// + /// # Panics + /// + /// if the given list of transactions is empty + #[inline] + pub fn new( + transactions: Vec, + commit_topology: Topology, + event_recommendations: Vec, + ) -> Self { + assert!(!transactions.is_empty(), "Empty block created"); + + Self(Pending { + timestamp_ms: iroha_data_model::current_time().as_millis(), + transactions, + commit_topology, + event_recommendations, + }) + } - for tx in self.transactions { - match self - .transaction_validator - .validate(tx.into_v1(), header.is_genesis(), &self.wsv) - { - Ok(tx) => txs.push(tx), - Err(tx) => { - iroha_logger::warn!( - reason = %tx.as_v1().rejection_reason, - caused_by = ?tx.as_v1().rejection_reason.source(), - "Transaction validation failed", - ); - rejected.push(tx) + fn make_header( + timestamp_ms: u128, + height: u64, + previous_block_hash: Option>, + view_change_index: u64, + transactions: &[VersionedSignedTransaction], + rejected_transactions: &[(VersionedSignedTransaction, TransactionRejectionReason)], + commit_topology: Topology, + ) -> BlockHeader { + BlockHeader { + timestamp_ms, + consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION_MS, + height: height + 1, + view_change_index, + previous_block_hash, + transactions_hash: transactions + .iter() + .map(VersionedSignedTransaction::hash) + .collect::>() + .hash(), + rejected_transactions_hash: rejected_transactions + .iter() + .map(|(tx, _error)| tx.hash()) + .collect::>() + .hash(), + commit_topology: commit_topology.ordered_peers, + } + } + + // NOTE: Transactions are applied to WSV clone + #[allow(clippy::needless_pass_by_value)] + fn categorize_transactions( + transactions: Vec, + transaction_validator: &TransactionValidator, + wsv: WorldStateView, + ) -> ( + Vec, + Vec<(VersionedSignedTransaction, TransactionRejectionReason)>, + ) { + let (mut valid, mut rejected) = (Vec::new(), Vec::new()); + + for tx in transactions { + match transaction_validator.validate::(tx, &wsv) { + Ok(tx) => valid.push(tx), + Err(tx) => { + iroha_logger::warn!( + reason = %tx.1, + caused_by = ?tx.1.source(), + "Transaction validation failed", + ); + rejected.push(tx) + } } } + + (valid, rejected) + } + + /// Chain the block with the existing blockchain. + pub fn chain( + self, + height: u64, + previous_block_hash: Option>, + view_change_index: u64, + transaction_validator: &TransactionValidator, + wsv: WorldStateView, + ) -> BlockBuilder { + let (transactions, rejected_transactions) = Self::categorize_transactions::( + self.0.transactions, + transaction_validator, + wsv, + ); + + BlockBuilder(Chained(BlockPayload { + header: Self::make_header( + self.0.timestamp_ms, + height, + previous_block_hash, + view_change_index, + &transactions, + &rejected_transactions, + self.0.commit_topology, + ), + transactions: transactions.into_iter().map(Into::into).collect(), + rejected_transactions: rejected_transactions.into_iter().map(Into::into).collect(), + event_recommendations: self.0.event_recommendations, + })) } - header.transactions_hash = txs - .iter() - .map(VersionedValidTransaction::hash) - .collect::>() - .hash(); - header.rejected_transactions_hash = rejected - .iter() - .map(VersionedRejectedTransaction::hash) - .collect::>() - .hash(); - // TODO: Validate Event recommendations somehow?
- - let signature = SignatureOf::from_hash(self.key_pair, &HashOf::new(&header).transmute()) - .expect("Signing of new block failed."); - let signatures = SignaturesOf::from(signature); - - PendingBlock { - header, - rejected_transactions: rejected, - transactions: txs, - event_recommendations: self.event_recommendations, - signatures, + + /// Create a new blockchain with the current block as the first block. + pub fn chain_first( + self, + transaction_validator: &TransactionValidator, + wsv: WorldStateView, + ) -> BlockBuilder { + let (transactions, rejected_transactions) = Self::categorize_transactions::( + self.0.transactions, + transaction_validator, + wsv, + ); + + BlockBuilder(Chained(BlockPayload { + header: Self::make_header( + self.0.timestamp_ms, + 0, + None, + 0, + &transactions, + &rejected_transactions, + self.0.commit_topology, + ), + transactions: transactions.into_iter().map(Into::into).collect(), + rejected_transactions: rejected_transactions.into_iter().map(Into::into).collect(), + event_recommendations: self.0.event_recommendations, + })) } } } -impl PendingBlock { - /// Calculate the hash of the current block. - pub fn hash(&self) -> HashOf { - HashOf::new(&self.header).transmute() - } +mod chained { + use super::*; + + /// When a [`Pending`] block is chained with the blockchain it becomes a [`Chained`] block. + #[derive(Debug, Clone)] + pub struct Chained(pub(crate) BlockPayload); - /// Return signatures that are verified with the `hash` of this block, - /// removing all other signatures. - #[inline] - pub fn retain_verified_signatures(&mut self) -> impl Iterator> { - self.signatures.retain_verified_by_hash(self.hash()) + impl BlockBuilder { + /// Hash of the block being built + pub fn hash(&self) -> HashOf { + HashOf::new(&self.0 .0.header).transmute() + } + + /// Sign this block and get [`SignedBlock`]. + /// + /// # Errors + /// + /// Fails if signature generation fails + pub fn sign(self, key_pair: KeyPair) -> Result { + let hash = self.hash(); + + let signature = SignatureOf::from_hash(key_pair, &hash) + .wrap_err(format!("Failed to sign block with hash {}", hash))?; + let signatures = SignaturesOf::from(signature); + Ok(ValidBlock( + SignedBlock { + payload: self.0 .0, + signatures, + } + .into(), + )) + } } +} + +mod valid { + use eyre::bail; + + use super::*; + + /// Block that was validated and accepted + #[derive(Debug, Clone)] + #[repr(transparent)] + pub struct ValidBlock(pub(crate) VersionedSignedBlock); - /// Commit block to the store. - /// When calling this function, the user is responsible for the validity of the block signatures. - /// Preference should be given to [`Self::commit`], where signature verification is built in. - #[inline] - pub fn commit_unchecked(self) -> CommittedBlock { - let Self { - header, - rejected_transactions, - transactions, - event_recommendations, - signatures, - } = self; - - CommittedBlock { - event_recommendations, - header, - rejected_transactions, - transactions, - signatures: signatures.transmute(), + impl Block for ValidBlock { + fn payload(&self) -> &BlockPayload { + self.0.payload() + } + fn signatures(&self) -> &SignaturesOf { + self.0.signatures() + } } - /// Verify signatures and commit block to the store.
- /// - /// # Errors - /// - /// Not enough signatures - #[inline] - pub fn commit(mut self, topology: &Topology) -> Result { - let verified_signatures = self.retain_verified_signatures(); - - if topology - .filter_signatures_by_roles( - &[ - Role::ValidatingPeer, - Role::Leader, - Role::ProxyTail, - Role::ObservingPeer, - ], - verified_signatures, + impl ValidBlock { + /// Validate a block against the current state of the world. + /// + /// # Errors + /// + /// - Block is empty + /// - There is a mismatch between candidate block height and actual blockchain height + /// - There is a mismatch between candidate block previous block hash and actual latest block hash + /// - Block has committed transactions + /// - Block header transaction hashes don't match with computed transaction hashes + /// - Error during validation of individual transactions + /// - Topology field is incorrect + pub fn validate( + block: VersionedSignedBlock, + expected_block_height: u64, + expected_previous_block_hash: Option>, + topology: &Topology, + transaction_validator: &TransactionValidator, + wsv: WorldStateView, + ) -> Result { + let actual_commit_topology = &block.header().commit_topology; + let expected_commit_topology = &topology.ordered_peers; + + if actual_commit_topology != expected_commit_topology { + let msg = eyre!("Block topology incorrect. Expected: {expected_commit_topology:#?}, actual: {actual_commit_topology:#?}"); + return Err((block, msg)); + } + + if !block.header().is_genesis() + && topology + .filter_signatures_by_roles(&[Role::Leader], block.signatures()) + .is_empty() + { + return Err((block, eyre!("Block is not signed by the leader"))); + } + + Self::validate_without_validating_topology( + block, + expected_block_height, + expected_previous_block_hash, + transaction_validator, + wsv, ) - .len() - .lt(&topology.min_votes_for_commit()) - { - return Err(( - self, - eyre!("The block doesn't have enough valid signatures to be committed."), - )); } - Ok(self.commit_unchecked()) - } + /// Validate a block against the current state of the world. + /// + /// # Errors + /// + /// - Block is empty + /// - There is a mismatch between candidate block height and actual blockchain height + /// - There is a mismatch between candidate block previous block hash and actual latest block hash + /// - Block has committed transactions + /// - Block header transaction hashes don't match with computed transaction hashes + /// - Error during validation of individual transactions + #[deprecated( + since = "2.0.0-pre-rc.13", + note = "This method exists only because some tests are failing, but it shouldn't" + )] + // TODO: a committed block should always contain Leader and Proxy tail signatures + // TODO: Is it ok to not validate the topology field of the header in block_sync? + // NOTE: Transactions are applied to WSV clone + #[allow(clippy::needless_pass_by_value)] + pub fn validate_without_validating_topology( + block: VersionedSignedBlock, + expected_block_height: u64, + expected_previous_block_hash: Option>, + transaction_validator: &TransactionValidator, + wsv: WorldStateView, + ) -> Result { + let actual_height = block.header().height; + if expected_block_height != actual_height { + return Err((block, eyre!("Mismatch between the actual and expected heights of the block.
Expected: {expected_block_height}, actual: {actual_height}"))); + } + let actual_block_hash = block.header().previous_block_hash; + if expected_previous_block_hash != actual_block_hash { + return Err((block, eyre!("Mismatch between the actual and expected hashes of the latest block. Expected: {expected_previous_block_hash:?}, actual: {actual_block_hash:?}"))); + } - /// Add additional signatures for [`SignedBlock`]. - /// - /// # Errors - /// Fails if signature generation fails - pub fn sign(mut self, key_pair: KeyPair) -> Result { - SignatureOf::from_hash(key_pair, &self.hash()) - .wrap_err(format!("Failed to sign block with hash {}", self.hash())) - .map(|signature| { - self.signatures.insert(signature); - self - }) - } + if let Err(error) = Self::validate_transactions(&block, transaction_validator, &wsv) { + return Err((block, error)); + } + if let Err(error) = + Self::validate_rejected_transactions(&block, transaction_validator, &wsv) + { + return Err((block, error)); + } - /// Add additional signature for [`SignedBlock`] - /// - /// # Errors - /// Fails if given signature doesn't match block hash - pub fn add_signature(&mut self, signature: SignatureOf) -> Result<()> { - signature - .verify_hash(&self.hash()) - .map(|_| { - self.signatures.insert(signature); - }) - .wrap_err(format!( - "Provided signature doesn't match block with hash {}", - self.hash() + let VersionedSignedBlock::V1(block) = block; + Ok(ValidBlock( + SignedBlock { + payload: block.payload, + signatures: block.signatures, + } + .into(), )) - } + } - /// Create dummy [`ValidBlock`]. Used in tests - /// - /// # Panics - /// If generating keys or block signing fails. - #[allow(clippy::restriction)] - #[cfg(test)] - pub fn new_dummy() -> Self { - let timestamp = crate::current_time().as_millis(); - - let header = BlockHeader { - timestamp, - consensus_estimation: DEFAULT_CONSENSUS_ESTIMATION_MS, - height: 1, - view_change_index: 0, - previous_block_hash: None, - transactions_hash: None, - rejected_transactions_hash: None, - committed_with_topology: Vec::new(), - }; + fn validate_transactions( + block: &VersionedSignedBlock, + transaction_validator: &TransactionValidator, + wsv: &WorldStateView, + ) -> Result<(), eyre::Report> { + let committed_txns: Vec<_> = block + .payload() + .transactions + .iter() + .filter(|transaction| transaction.is_in_blockchain(wsv)) + .collect(); + if !committed_txns.is_empty() { + bail!("Found committed transactions: {committed_txns:?}"); + } + + // Check that valid transactions are still valid + block.payload() + .transactions + .iter() + // TODO: Unnecessary clone? 
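+ // NOTE: the clone looks avoidable, but `AcceptedTransaction::accept` takes its transaction by value, so each one has to be cloned out of the borrowed payload first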
+ .cloned() + .map(|tx| { + let limits = &transaction_validator.transaction_limits; + + let tx_result = if block.header().is_genesis() { + AcceptedTransaction::accept::(tx, limits) + } else { + AcceptedTransaction::accept::(tx, limits) + }; + + match tx_result { + Ok(tx) => Ok(tx), + Err((_tx, err)) => Err(err).wrap_err("Failed to accept transaction") + } + }) + .map(|accepted_tx| { + accepted_tx.and_then(|tx| { + let tx_result = if block.header().is_genesis() { + transaction_validator.validate::(tx, wsv) + } else { + transaction_validator.validate::(tx, wsv) + }; + + tx_result + .map_err(|tx| tx.1) + .wrap_err("Failed to validate transaction") + }) + }) + .try_fold(Vec::new(), |mut acc, tx| { + tx.map(|valid_tx| { + acc.push(valid_tx); + acc + }) + }) + .wrap_err("Error during transaction validation")?; - let key_pair = KeyPair::generate().unwrap(); - let signature = SignatureOf::from_hash(key_pair, &HashOf::new(&header).transmute()) - .expect("Signing of new block failed."); - let signatures = SignaturesOf::from(signature); - - Self { - header, - rejected_transactions: Vec::new(), - transactions: Vec::new(), - event_recommendations: Vec::new(), - signatures, + Ok(()) } - } -} -/// This trait represents the ability to revalidate a block. Should be -/// implemented for both `PendingBlock` and `VersionedCommittedBlock`. -pub trait Revalidate: Sized { - /// # Errors - /// - When the block is deemed invalid. - fn revalidate( - &self, - transaction_validator: &TransactionValidator, - wsv: WorldStateView, - latest_block: Option>, - block_height: u64, - ) -> Result<(), eyre::Report>; - - /// Return whether or not the block contains transactions already committed. - fn has_committed_transactions(&self, wsv: &WorldStateView) -> bool; -} + fn validate_rejected_transactions( + block: &VersionedSignedBlock, + transaction_validator: &TransactionValidator, + wsv: &WorldStateView, + ) -> Result<(), eyre::Report> { + let committed_rejected_txns: Vec<_> = block + .payload() + .rejected_transactions + .iter() + .filter(|(transaction, _)| transaction.is_in_blockchain(wsv)) + .collect(); + + if !committed_rejected_txns.is_empty() { + bail!("Found committed rejected transactions: {committed_rejected_txns:?}"); + } + + // Check that rejected transactions are indeed rejected + block.payload() + .rejected_transactions + .iter() + // TODO: Unnecessary clone? + .cloned() + .map(|tx| { + let limits = &transaction_validator.transaction_limits; + + let tx_result = if block.header().is_genesis() { + AcceptedTransaction::accept::(tx.0, limits) + } else { + AcceptedTransaction::accept::(tx.0, limits) + }; + + match tx_result { + Ok(tx) => Ok(tx), + Err((_tx, err)) => Err(err).wrap_err("Failed to accept transaction") + } + }) + .map(|accepted_tx| { + accepted_tx.and_then(|tx| { + let tx_result = if block.header().is_genesis() { + transaction_validator.validate::(tx, wsv) + } else { + transaction_validator.validate::(tx, wsv) + }; + + match tx_result { + Err(rejected_transaction) => Ok(rejected_transaction), + Ok(_) => Err(eyre!("Transactions that are supposed to be rejected are valid")), + } + }) + }) + .try_fold(Vec::new(), |mut acc, rejected_tx| { + rejected_tx.map(|tx| { + acc.push(tx); + acc + }) + }) + .wrap_err("Error during transaction validation")?; -impl Revalidate for PendingBlock { - /// Revalidate a block against the current state of the world.
- /// - /// # Errors - /// - Block is empty - /// - Block has committed transactions - /// - There is a mismatch between candidate block height and actual blockchain height - /// - There is a mismatch between candidate block previous block hash and actual latest block hash - /// - Block header transaction hashes don't match with computed transaction hashes - /// - Error during revalidation of individual transactions - #[allow(clippy::too_many_lines)] - fn revalidate( - &self, - transaction_validator: &TransactionValidator, - wsv: WorldStateView, - latest_block: Option>, - block_height: u64, - ) -> Result<(), eyre::Report> { - if self.transactions.is_empty() && self.rejected_transactions.is_empty() { - bail!("Block is empty"); + Ok(()) } - if self.has_committed_transactions(&wsv) { - bail!("Block has committed transactions"); + /// Verify signatures and commit block to the store. + /// + /// # Errors + /// + /// - Not enough signatures + /// - Not signed by proxy tail + pub fn commit( + self, + topology: &Topology, + ) -> Result<(CommittedBlock, Vec), (Self, eyre::Report)> { + // TODO: Should the peer that serves genesis have a fixed role of ProxyTail in topology? + if !self.header().is_genesis() + && topology.is_consensus_required() + && topology + .filter_signatures_by_roles(&[Role::ProxyTail], self.signatures()) + .is_empty() + { + return Err((self, eyre!("Block is not signed by the proxy tail"))); + } + + self.commit_without_proxy_tail_signature(topology) } - if latest_block != self.header.previous_block_hash { - bail!( - "Mismatch between the actual and expected hashes of the latest block. Expected: {:?}, actual: {:?}", - latest_block, - &self.header.previous_block_hash - ); + /// Verify signatures and commit block to the store. + /// The block doesn't have to be signed by the proxy tail. + /// + /// # Errors + /// + /// - Not enough signatures + // TODO: a committed block should always contain Leader and Proxy tail signatures + #[deprecated( + since = "2.0.0-pre-rc.13", + note = "This method exists only because some tests are failing, but it shouldn't" + )] + pub(crate) fn commit_without_proxy_tail_signature( + self, + topology: &Topology, + ) -> Result<(CommittedBlock, Vec), (Self, eyre::Report)> { + #[allow(clippy::collapsible_else_if)] + if self.header().is_genesis() { + // If we receive a committed genesis block that is valid, use it without question. + // At the genesis round we blindly take on the network topology from the genesis block. + } else { + // TODO: What is the point of filtering by all roles? + let roles = [ + Role::ValidatingPeer, + Role::Leader, + Role::ProxyTail, + Role::ObservingPeer, + ]; + + if topology + .filter_signatures_by_roles(&roles, self.signatures()) + .len() + .lt(&topology.min_votes_for_commit()) + { + return Err(( + self, + eyre!("The block doesn't have enough valid signatures to be committed."), + )); + } + } + + let block = CommittedBlock(self.0); + let events = block.produce_events(); + + Ok((block, events)) } - if block_height + 1 != self.header.height { - bail!( - "Mismatch between the actual and expected heights of the block.
Expected: {}, actual: {}", - block_height + 1, - self.header.height - ); + /// Replace the signatures in this block with the given ones + pub fn replace_signatures(&mut self, signatures: SignaturesOf) { + let VersionedSignedBlock::V1(block) = &mut self.0; + + block.signatures.clear(); + for signature in signatures { + if let Err(err) = self.add_signature(signature) { + // TODO: Is this something to be tolerated or should the block be rejected? + iroha_logger::warn!(?err, "Signature not valid"); + } + } } - // Validate that header transactions hashes are matched with actual hashes - self.transactions - .iter() - .map(VersionedValidTransaction::hash) - .collect::>() - .hash() - .eq(&self.header.transactions_hash) - .then_some(()) - .ok_or_else(|| { - eyre!("The transaction hash stored in the block header does not match the actual transaction hash.") - })?; - - self.rejected_transactions - .iter() - .map(VersionedRejectedTransaction::hash) - .collect::>() - .hash() - .eq(&self.header.rejected_transactions_hash) - .then_some(()) - .ok_or_else(|| eyre!("The hash of a rejected transaction stored in the block header does not match the actual hash or this transaction."))?; - - // Check that valid transactions are still valid - let _transactions = self - .transactions - .iter() - .cloned() - .map(VersionedValidTransaction::into_v1) - .map(|tx_v| { - let tx = SignedTransaction { - payload: tx_v.payload, - signatures: tx_v.signatures.into(), - }; - AcceptedTransaction::accept::( - tx, - &transaction_validator.transaction_limits, - ) - .wrap_err("Failed to accept transaction") - }) - .map(|accepted_tx| { - accepted_tx.and_then(|tx| { - transaction_validator - .validate(tx, self.header.is_genesis(), &wsv) - .map_err(|rejected_tx| rejected_tx.into_v1().rejection_reason) - .wrap_err("Failed to validate transaction") - }) - }) - .try_fold(Vec::new(), |mut acc, tx| { - tx.map(|valid_tx| { - acc.push(valid_tx); - acc - }) - }) - .wrap_err("Error during transaction revalidation")?; - - // Check that rejected transactions are indeed rejected - let _rejected_transactions = self - .rejected_transactions - .iter() - .cloned() - .map(VersionedRejectedTransaction::into_v1) - .map(|tx_r| { - let tx = SignedTransaction { - payload: tx_r.payload, - signatures: tx_r.signatures.into(), - }; - AcceptedTransaction::accept::( - tx, - &transaction_validator.transaction_limits, - ) - .wrap_err("Failed to accept transaction") - }) - .map(|accepted_tx| { - accepted_tx.and_then(|tx| { - match transaction_validator.validate(tx, self.header.is_genesis(), &wsv) { - Err(rejected_transaction) => Ok(rejected_transaction), - Ok(_) => Err(eyre!("Transactions which supposed to be rejected is valid")), - } - }) - }) - .try_fold(Vec::new(), |mut acc, rejected_tx| { - rejected_tx.map(|tx| { - acc.push(tx); - acc - }) - }) - .wrap_err("Error during transaction revalidation")?; - Ok(()) - } + #[cfg(test)] + #[deprecated( + since = "2.0.0-pre-rc.13", + note = "This method exists only because some tests are failing, but it shouldn't" + )] + pub(crate) fn commit_unchecked(self) -> (CommittedBlock, Vec) { + let block = CommittedBlock(self.0); + let events = block.produce_events(); - /// Check if a block has transactions that are already in the blockchain.
- fn has_committed_transactions(&self, wsv: &WorldStateView) -> bool { - self.transactions - .iter() - .any(|transaction| transaction.is_in_blockchain(wsv)) - || self - .rejected_transactions - .iter() - .any(|transaction| transaction.is_in_blockchain(wsv)) + (block, events) + } + + /// Add additional signatures for [`Self`]. + /// + /// # Errors + /// + /// If signature generation fails + pub fn sign(self, key_pair: KeyPair) -> Result { + Ok(ValidBlock( + SignatureOf::from_hash(key_pair, &self.hash()) + .wrap_err(format!("Failed to sign block with hash {}", self.hash())) + .map(|signature| { + let VersionedSignedBlock::V1(mut block) = self.0; + block.signatures.insert(signature); + VersionedSignedBlock::from(block) + })?, + )) + } + + /// Add additional signature for [`Self`] + /// + /// # Errors + /// + /// If given signature doesn't match block hash + pub fn add_signature(&mut self, signature: SignatureOf) -> Result<()> { + let VersionedSignedBlock::V1(block) = &mut self.0; + + signature + .verify_hash(&block.hash()) + .map(|_| block.signatures.insert(signature)) + .wrap_err(format!( + "Provided signature doesn't match block with hash {}", + self.hash() + )) + } + + #[cfg(test)] + pub(crate) fn new_dummy() -> Self { + BlockBuilder(Chained(BlockPayload { + header: BlockHeader { + timestamp_ms: 0, + consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION_MS, + height: 1, + view_change_index: 0, + previous_block_hash: None, + transactions_hash: None, + rejected_transactions_hash: None, + commit_topology: Vec::new(), + }, + transactions: Vec::new(), + rejected_transactions: Vec::new(), + event_recommendations: Vec::new(), + })) + .sign(KeyPair::generate().unwrap()) + .unwrap() + } } -} -impl Revalidate for VersionedCommittedBlock { - /// Revalidate a block against the current state of the world. - /// - /// # Errors - /// - Block is empty - /// - Block has committed transactions - /// - There is a mismatch between candidate block height and actual blockchain height - /// - There is a mismatch between candidate block previous block hash and actual latest block hash - /// - Block header transaction hashes don't match with computed transaction hashes - /// - Error during revalidation of individual transactions - #[allow(clippy::too_many_lines)] - fn revalidate( - &self, - transaction_validator: &TransactionValidator, - wsv: WorldStateView, - latest_block: Option>, - block_height: u64, - ) -> Result<(), eyre::Report> { - if self.has_committed_transactions(&wsv) { - bail!("Block has committed transactions"); + impl From for VersionedSignedBlock { + fn from(source: ValidBlock) -> Self { + source.0 } - match self { - VersionedCommittedBlock::V1(block) => { - if block.transactions.is_empty() && block.rejected_transactions.is_empty() { - bail!("Block is empty"); - } + } +} - if latest_block != block.header.previous_block_hash { - bail!( - "Mismatch between the actual and expected hashes of the latest block. Expected: {:?}, actual: {:?}", - latest_block, - &block.header.previous_block_hash - ); - } +mod commit { + use iroha_data_model::block::Block; - if block_height + 1 != block.header.height { - bail!( - "Mismatch between the actual and expected heights of the block. 
Expected: {}, actual: {}", - block_height + 1, - block.header.height - ); - } + use super::*; - // Validate that header transactions hashes are matched with actual hashes - block.transactions - .iter() - .map(VersionedValidTransaction::hash) - .collect::>() - .hash() - .eq(&block.header.transactions_hash) - .then_some(()) - .ok_or_else(|| { - eyre!("The transaction hash stored in the block header does not match the actual transaction hash.") - })?; - - block.rejected_transactions - .iter() - .map(VersionedRejectedTransaction::hash) - .collect::>() - .hash() - .eq(&block.header.rejected_transactions_hash) - .then_some(()) - .ok_or_else(|| eyre!("The hash of a rejected transaction stored in the block header does not match the actual hash or this transaction."))?; - - // Check that valid transactions are still valid - let _transactions = block - .transactions - .iter() - .cloned() - .map(VersionedValidTransaction::into_v1) - .map(|tx_v| { - let tx = SignedTransaction { - payload: tx_v.payload, - signatures: tx_v.signatures.into(), - }; - AcceptedTransaction::accept::( - tx, - &transaction_validator.transaction_limits, - ) - .wrap_err("Failed to accept transaction") - }) - .map(|accepted_tx| { - accepted_tx.and_then(|tx| { - transaction_validator - .validate(tx, block.header.is_genesis(), &wsv) - .map_err(|rejected_tx| rejected_tx.into_v1().rejection_reason) - .wrap_err("Failed to validate transaction") - }) - }) - .try_fold(Vec::new(), |mut acc, tx| { - tx.map(|valid_tx| { - acc.push(valid_tx); - acc - }) - }) - .wrap_err("Error during transaction revalidation")?; + /// Represents a block accepted by consensus. + /// Every [`Self`] will have a different height. + #[derive(Debug, Clone)] + pub struct CommittedBlock(pub(super) VersionedSignedBlock); - // Check that rejected transactions are indeed rejected - let _rejected_transactions = block - .rejected_transactions - .iter() - .cloned() - .map(VersionedRejectedTransaction::into_v1) - .map(|tx_r| { - let tx = SignedTransaction { - payload: tx_r.payload, - signatures: tx_r.signatures.into(), - }; - AcceptedTransaction::accept::( - tx, - &transaction_validator.transaction_limits, - ) - .wrap_err("Failed to accept transaction") - }) - .map(|accepted_tx| { - accepted_tx.and_then(|tx| { - match transaction_validator.validate( - tx, - block.header.is_genesis(), - &wsv, - ) { - Err(rejected_transaction) => Ok(rejected_transaction), - Ok(_) => Err(eyre!( - "Transactions which supposed to be rejected is valid" - )), - } - }) - }) - .try_fold(Vec::new(), |mut acc, rejected_tx| { - rejected_tx.map(|tx| { - acc.push(tx); - acc - }) - }) - .wrap_err("Error during transaction revalidation")?; + impl Block for CommittedBlock { + fn payload(&self) -> &BlockPayload { + self.0.payload() + } + fn signatures(&self) -> &SignaturesOf { + self.0.signatures() + } + } - Ok(()) - } + impl CommittedBlock { + #[deprecated( + since = "2.0.0-pre-rc.13", + note = "This method exists only because some tests are failing, but it shouldn't" + )] + pub(crate) fn commit_without_validation( + block: VersionedSignedBlock, + ) -> (CommittedBlock, Vec) { + let VersionedSignedBlock::V1(block) = block; + let block = CommittedBlock(block.into()); + let events = block.produce_events(); + + (block, events) } } - /// Check if a block has transactions that are already in the blockchain. 
- fn has_committed_transactions(&self, wsv: &WorldStateView) -> bool { - match self { - VersionedCommittedBlock::V1(block) => { - block - .transactions - .iter() - .any(|transaction| transaction.is_in_blockchain(wsv)) - || block - .rejected_transactions - .iter() - .any(|transaction| transaction.is_in_blockchain(wsv)) + impl From for VersionedSignedBlock { + fn from(source: CommittedBlock) -> Self { + let VersionedSignedBlock::V1(block) = source.0; + + SignedBlock { + payload: block.payload, + signatures: block.signatures, } + .into() } } -} -impl From<&PendingBlock> for Vec { - fn from(block: &PendingBlock) -> Self { - block - .transactions - .iter() - .map(|transaction| -> Event { + impl CommittedBlock { + pub(super) fn produce_events(&self) -> Vec { + let rejected_tx = self + .payload() + .rejected_transactions + .iter() + .map(|transaction| { + PipelineEvent { + entity_kind: PipelineEntityKind::Transaction, + status: PipelineStatus::Rejected(transaction.1.clone().into()), + hash: transaction.0.hash().into(), + } + .into() + }); + let tx = self.payload().transactions.iter().map(|transaction| { PipelineEvent { entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Validating, + status: PipelineStatus::Committed, hash: transaction.hash().into(), } .into() - }) - .chain(block.rejected_transactions.iter().map(|transaction| { + }); + let current_block = core::iter::once( PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Validating, - hash: transaction.hash().into(), + entity_kind: PipelineEntityKind::Block, + status: PipelineStatus::Committed, + hash: self.hash().into(), } - .into() - })) - .chain([PipelineEvent { - entity_kind: PipelineEntityKind::Block, - status: PipelineStatus::Validating, - hash: block.hash().into(), - } - .into()]) - .collect() + .into(), + ); + + tx.chain(rejected_tx).chain(current_block).collect() + } } } @@ -609,17 +708,18 @@ mod tests { use std::str::FromStr; - use iroha_data_model::prelude::*; + use iroha_data_model::{block::Block, prelude::*}; use super::*; use crate::{kura::Kura, smartcontracts::isi::Registrable as _}; #[test] pub fn committed_and_valid_block_hashes_are_equal() { - let valid_block = PendingBlock::new_dummy(); - let committed_block = valid_block.clone().commit_unchecked(); + let topology = Topology::new(Vec::new()); + let valid_block = ValidBlock::new_dummy(); + let committed_block = valid_block.clone().commit(&topology).expect("Valid"); - assert_eq!(*valid_block.hash(), *committed_block.hash()) + assert_eq!(valid_block.hash(), committed_block.0.hash()) } #[test] @@ -648,32 +748,22 @@ mod tests { }; let transaction_validator = TransactionValidator::new(transaction_limits); let tx = TransactionBuilder::new(alice_id, [create_asset_definition], 4000) - .sign(alice_keys.clone()) + .sign(alice_keys) + .expect("Valid"); + let tx: AcceptedTransaction = AcceptedTransaction::accept::(tx, &transaction_limits) + .map(Into::into) .expect("Valid"); - let tx: VersionedAcceptedTransaction = - AcceptedTransaction::accept::(tx, &transaction_limits) - .map(Into::into) - .expect("Valid"); // Creating a block of two identical transactions and validating it let transactions = vec![tx.clone(), tx]; - let valid_block = BlockBuilder { - transactions, - event_recommendations: Vec::new(), - height: 1, - previous_block_hash: None, - view_change_index: 0, - committed_with_topology: Topology::new(Vec::new()), - key_pair: alice_keys, - transaction_validator: &transaction_validator, - wsv, - } - .build(); + let topology = 
Topology::new(Vec::new()); + let block = BlockBuilder::new(transactions, topology, Vec::new()) + .chain_first(&transaction_validator, wsv); // The first transaction should be confirmed - assert_eq!(valid_block.transactions.len(), 1); + assert_eq!(block.0 .0.transactions.len(), 1); // The second transaction should be rejected - assert_eq!(valid_block.rejected_transactions.len(), 1); + assert_eq!(block.0 .0.rejected_transactions.len(), 1); } } diff --git a/core/src/block_sync.rs b/core/src/block_sync.rs index 10f8e0c43a1..fec1da7f32d 100644 --- a/core/src/block_sync.rs +++ b/core/src/block_sync.rs @@ -8,7 +8,7 @@ use std::{fmt::Debug, sync::Arc, time::Duration}; use iroha_config::block_sync::Configuration; use iroha_crypto::*; -use iroha_data_model::{block::VersionedCommittedBlock, prelude::*}; +use iroha_data_model::prelude::*; use iroha_logger::prelude::*; use iroha_macro::*; use iroha_p2p::Post; @@ -125,41 +125,20 @@ impl BlockSynchronizer { pub mod message { //! Module containing messages for [`BlockSynchronizer`](super::BlockSynchronizer). + use iroha_data_model::block::{Block, BlockPayload, VersionedSignedBlock}; + use super::*; use crate::sumeragi::view_change::ProofChain; declare_versioned_with_scale!(VersionedMessage 1..2, Debug, Clone, iroha_macro::FromVariant); - impl VersionedMessage { - /// Convert from `&VersionedMessage` to V1 reference - pub const fn as_v1(&self) -> &Message { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedMessage` to V1 mutable reference - pub fn as_mut_v1(&mut self) -> &mut Message { - match self { - Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedMessage` to V1 - pub fn into_v1(self) -> Message { - match self { - Self::V1(v1) => v1, - } - } - } - /// Get blocks after some block #[derive(Debug, Clone, Decode, Encode)] pub struct GetBlocksAfter { /// Hash of latest available block - pub latest_hash: Option>, + pub latest_hash: Option>, /// Hash of second to latest block - pub previous_hash: Option>, + pub previous_hash: Option>, /// Peer id pub peer_id: PeerId, } @@ -167,8 +146,8 @@ pub mod message { impl GetBlocksAfter { /// Construct [`GetBlocksAfter`]. pub const fn new( - latest_hash: Option>, - previous_hash: Option>, + latest_hash: Option>, + previous_hash: Option>, peer_id: PeerId, ) -> Self { Self { @@ -183,15 +162,20 @@ pub mod message { #[derive(Debug, Clone, Decode, Encode)] pub struct ShareBlocks { /// Blocks - pub blocks: Vec, + pub blocks: Vec, /// Peer id pub peer_id: PeerId, } impl ShareBlocks { /// Construct [`ShareBlocks`]. - pub const fn new(blocks: Vec, peer_id: PeerId) -> Self { - Self { blocks, peer_id } + fn new(blocks: Vec, peer_id: PeerId) -> Self { + Self { + // Converting into non-validated block because it's not possible + // to guarantee that the sending peer sent a valid committed block + blocks: blocks.into_iter().map(Into::into).collect(), + peer_id, + } } } @@ -242,7 +226,7 @@ pub mod message { .take(1 + block_sync.block_batch_size as usize) .map_while(|height| block_sync.kura.get_block_by_height(height)) .skip_while(|block| Some(block.hash()) == *latest_hash) - .map(|block| VersionedCommittedBlock::clone(&block)) + .map(|block| VersionedSignedBlock::clone(&block)) .collect::>(); if blocks.is_empty() { @@ -258,11 +242,12 @@ pub mod message { } } Message::ShareBlocks(ShareBlocks { blocks, .. 
}) => { - use crate::sumeragi::message::{Message, MessagePacket}; + use crate::sumeragi::message::{BlockSyncUpdate, Message, MessagePacket}; + for block in blocks.clone() { block_sync.sumeragi.incoming_message(MessagePacket::new( ProofChain::default(), - Message::BlockSyncUpdate(block.into()), + Message::BlockSyncUpdate(BlockSyncUpdate { block }), )); } } diff --git a/core/src/kura.rs b/core/src/kura.rs index c21514d7b04..68a015cc18b 100644 --- a/core/src/kura.rs +++ b/core/src/kura.rs @@ -1,6 +1,6 @@ //! Translates to warehouse. File-system and persistence-related //! logic. [`Kura`] is the main entity which should be used to store -//! new [`Block`](`crate::block::VersionedCommittedBlock`)s on the +//! new [`Block`](`crate::block::VersionedSignedBlock`)s on the //! blockchain. #![allow(clippy::std_instead_of_alloc, clippy::arithmetic_side_effects)] use std::{ @@ -14,12 +14,12 @@ use std::{ use derive_more::Deref; use iroha_config::kura::Mode; use iroha_crypto::HashOf; -use iroha_data_model::block::VersionedCommittedBlock; +use iroha_data_model::block::{Block, BlockPayload, VersionedSignedBlock}; use iroha_logger::prelude::*; use iroha_version::scale::{DecodeVersioned, EncodeVersioned}; use parking_lot::Mutex; -use crate::handler::ThreadHandler; +use crate::{block::CommittedBlock, handler::ThreadHandler}; const INDEX_FILE_NAME: &str = "blocks.index"; const DATA_FILE_NAME: &str = "blocks.data"; @@ -36,12 +36,7 @@ pub struct Kura { block_store: Mutex>, /// The array of block hashes and a slot for an arc of the block. This is normally recovered from the index file. #[allow(clippy::type_complexity)] - block_data: Mutex< - Vec<( - HashOf, - Option>, - )>, - >, + block_data: Mutex, Option>)>>, /// Path to file for plain text blocks. block_plain_text_path: Option, } @@ -100,7 +95,12 @@ impl Kura { }); let shutdown = move || { - let _result = shutdown_sender.send(()); + if let Err(error) = shutdown_sender.send(()) { + iroha_logger::error!( + ?error, + "Failed to send shutdown signal to kura. Thread might already be shut down." + ); + } }; ThreadHandler::new(Box::new(shutdown), thread_handle) @@ -113,7 +113,7 @@ impl Kura { /// - file storage is unavailable /// - data in file storage is invalid or corrupted #[iroha_logger::log(skip_all, name = "kura_init")] - pub fn init(&self) -> Result>> { + pub fn init(&self) -> Result>> { let block_store = self.block_store.lock(); let block_index_count: usize = block_store @@ -123,15 +123,15 @@ impl Kura { let mut block_indices = vec![BlockIndex::default(); block_index_count]; block_store.read_block_indices(0, &mut block_indices)?; - let mut block_hashes: Vec> = Vec::new(); + let mut block_hashes = Vec::new(); for block in block_indices { // This is re-allocated every iteration. This could cause a problem. let mut block_data_buffer = vec![0_u8; block.length.try_into()?]; match block_store.read_block_data(block.start, &mut block_data_buffer) { - Ok(_) => match VersionedCommittedBlock::decode_all_versioned(&block_data_buffer) { + Ok(_) => match VersionedSignedBlock::decode_all_versioned(&block_data_buffer) { Ok(decoded_block) => { - block_hashes.push(decoded_block.hash()); + block_hashes.push(Block::hash(&decoded_block)); } Err(error) => { error!(?error, "Encountered malformed block. Not reading any blocks beyond this height."); @@ -243,7 +243,7 @@ impl Kura { /// Get the hash of the block at the provided height.
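+ /// + /// Returns [`None`] if `block_height` is `0` or greater than the number of stored blocks (heights are 1-based).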
#[allow(clippy::expect_used)] - pub fn get_block_hash(&self, block_height: u64) -> Option> { + pub fn get_block_hash(&self, block_height: u64) -> Option> { let hash_data_guard = self.block_data.lock(); if block_height == 0 || block_height > hash_data_guard.len() as u64 { return None; @@ -255,7 +255,7 @@ impl Kura { } /// Search through blocks for the height of the block with the given hash. - pub fn get_block_height_by_hash(&self, hash: &HashOf) -> Option { + pub fn get_block_height_by_hash(&self, hash: &HashOf) -> Option { self.block_data .lock() .iter() @@ -267,7 +267,7 @@ impl Kura { #[allow(clippy::expect_used)] // The below lint suggests changing the code into something that does not compile due // to the borrow checker. - pub fn get_block_by_height(&self, block_height: u64) -> Option> { + pub fn get_block_by_height(&self, block_height: u64) -> Option> { let mut data_array_guard = self.block_data.lock(); if block_height == 0 || block_height > data_array_guard.len() as u64 { return None; @@ -290,8 +290,8 @@ impl Kura { block_store .read_block_data(start, &mut block_buf) .expect("Failed to read block data."); - let block = VersionedCommittedBlock::decode_all_versioned(&block_buf) - .expect("Failed to decode block"); + let block = + VersionedSignedBlock::decode_all_versioned(&block_buf).expect("Failed to decode block"); let block_arc = Arc::new(block); data_array_guard[block_number].1 = Some(Arc::clone(&block_arc)); @@ -305,8 +305,8 @@ impl Kura { /// call `get_block_by_height` directly. pub fn get_block_by_hash( &self, - block_hash: &HashOf, - ) -> Option> { + block_hash: &HashOf, + ) -> Option> { let index = self .block_data .lock() @@ -317,15 +317,19 @@ impl Kura { } /// Put a block in kura's in memory block store. - pub fn store_block(&self, block: VersionedCommittedBlock) { + pub fn store_block(&self, block: CommittedBlock) { + let block = VersionedSignedBlock::from(block); + self.block_data .lock() .push((block.hash(), Some(Arc::new(block)))); } /// Replace the block in `Kura`'s in memory block store. 
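+ /// + /// Pops the current top block and pushes the given one in its place. Used by sumeragi to roll back and replace the latest block when a soft fork is detected.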
- pub fn replace_top_block(&self, block: VersionedCommittedBlock) { + pub fn replace_top_block(&self, block: CommittedBlock) { + let block = VersionedSignedBlock::from(block); let mut data = self.block_data.lock(); + data.pop(); data.push((block.hash(), Some(Arc::new(block)))); } diff --git a/core/src/lib.rs b/core/src/lib.rs index 616de19fda3..f42c2f9b8b2 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -97,7 +97,10 @@ pub mod handler { fn drop(&mut self) { (self.shutdown.take().expect("Always some after init"))(); let handle = self.handle.take().expect("Always some after init"); - let _joined = handle.join(); + + if let Err(error) = handle.join() { + iroha_logger::error!(?error, "Fatal error: thread panicked"); + } } } } @@ -111,10 +114,6 @@ pub mod prelude { #[doc(inline)] pub use crate::{ smartcontracts::ValidQuery, - tx::{ - AcceptedTransaction, ValidTransaction, VersionedAcceptedTransaction, - VersionedValidTransaction, - }, wsv::{World, WorldStateView}, IsInBlockchain, }; diff --git a/core/src/queue.rs b/core/src/queue.rs index e592cc48469..9e497fa6ea7 100644 --- a/core/src/queue.rs +++ b/core/src/queue.rs @@ -15,6 +15,7 @@ use eyre::{Report, Result}; use iroha_config::queue::Configuration; use iroha_crypto::HashOf; use iroha_data_model::transaction::prelude::*; +use iroha_genesis::AcceptedTransaction; use iroha_logger::{debug, info, trace, warn}; use iroha_primitives::{must_use::MustUse, riffle_iter::RiffleIter}; use rand::seq::IteratorRandom; @@ -28,13 +29,13 @@ use crate::{prelude::*, tx::CheckSignatureCondition as _}; #[derive(Debug)] pub struct Queue { /// The queue for transactions that passed signature check - queue: ArrayQueue>, + queue: ArrayQueue>, /// The queue for transactions that didn't pass signature check and are waiting for additional signatures /// /// Second queue is needed to prevent situation when multisig transactions prevent ordinary transactions from being added into the queue - signature_buffer: ArrayQueue>, - /// [`VersionedAcceptedTransaction`]s addressed by `Hash`. - txs: DashMap, VersionedAcceptedTransaction>, + signature_buffer: ArrayQueue>, + /// [`AcceptedTransaction`]s addressed by `Hash`. + txs: DashMap, AcceptedTransaction>, /// The maximum number of transactions in the block pub txs_in_block: usize, /// The maximum number of transactions in the queue @@ -71,7 +72,7 @@ pub enum Error { #[error("Failure during signature condition execution, tx hash: {tx_hash}, reason: {reason}")] SignatureCondition { /// Transaction hash - tx_hash: HashOf, + tx_hash: HashOf, /// Failure reason reason: Report, }, @@ -81,7 +82,7 @@ pub enum Error { #[derive(Debug)] pub struct Failure { /// Transaction failed to be pushed into the queue - pub tx: VersionedAcceptedTransaction, + pub tx: AcceptedTransaction, /// Push failure reason pub err: Error, } @@ -103,12 +104,12 @@ impl Queue { } } - fn is_pending(&self, tx: &VersionedAcceptedTransaction, wsv: &WorldStateView) -> bool { + fn is_pending(&self, tx: &AcceptedTransaction, wsv: &WorldStateView) -> bool { !tx.is_expired(self.tx_time_to_live) && !tx.is_in_blockchain(wsv) } /// Returns all pending transactions. - pub fn all_transactions(&self, wsv: &WorldStateView) -> Vec { + pub fn all_transactions(&self, wsv: &WorldStateView) -> Vec { self.txs .iter() .filter(|e| self.is_pending(e.value(), wsv)) @@ -117,11 +118,7 @@ impl Queue { } /// Returns `n` randomly selected transaction from the queue. 
- pub fn n_random_transactions( - &self, - n: u32, - wsv: &WorldStateView, - ) -> Vec { + pub fn n_random_transactions(&self, n: u32, wsv: &WorldStateView) -> Vec { self.txs .iter() .filter(|e| self.is_pending(e.value(), wsv)) @@ -134,14 +131,14 @@ impl Queue { fn check_tx( &self, - tx: &VersionedAcceptedTransaction, + tx: &AcceptedTransaction, wsv: &WorldStateView, ) -> Result, Error> { if tx.is_in_future(self.future_threshold) { Err(Error::InFuture) } else if tx.is_expired(self.tx_time_to_live) { Err(Error::Expired { - time_to_live_ms: tx.payload().time_to_live_ms, + time_to_live_ms: tx.payload.time_to_live_ms, }) } else if tx.is_in_blockchain(wsv) { Err(Error::InBlockchain) @@ -158,11 +155,7 @@ impl Queue { /// /// # Errors /// See [`enum@Error`] - pub fn push( - &self, - tx: VersionedAcceptedTransaction, - wsv: &WorldStateView, - ) -> Result<(), Failure> { + pub fn push(&self, tx: AcceptedTransaction, wsv: &WorldStateView) -> Result<(), Failure> { trace!(?tx, "Pushing to the queue"); let signature_check_succeed = match self.check_tx(&tx, wsv) { Err(err) => { @@ -178,11 +171,7 @@ impl Queue { let entry = match self.txs.entry(hash) { Entry::Occupied(mut old_tx) => { // MST case - old_tx - .get_mut() - .as_mut_v1() - .signatures - .extend(tx.as_v1().signatures.clone()); + old_tx.get_mut().signatures.extend(tx.signatures); info!("Signature added to existing multisignature transaction"); return Ok(()); } @@ -229,10 +218,10 @@ impl Queue { /// Pop single transaction from the signature buffer. Record all visited and not removed transactions in `seen`. fn pop_from_signature_buffer( &self, - seen: &mut Vec>, + seen: &mut Vec>, wsv: &WorldStateView, - expired_transactions: &mut Vec, - ) -> Option { + expired_transactions: &mut Vec, + ) -> Option { // NOTE: `SKIP_SIGNATURE_CHECK=false` because `signature_buffer` contains transaction which signature check can be either `true` or `false`. self.pop_from::(&self.signature_buffer, seen, wsv, expired_transactions) } @@ -240,10 +229,10 @@ impl Queue { /// Pop single transaction from the queue. Record all visited and not removed transactions in `seen`. fn pop_from_queue( &self, - seen: &mut Vec>, + seen: &mut Vec>, wsv: &WorldStateView, - expired_transactions: &mut Vec, - ) -> Option { + expired_transactions: &mut Vec, + ) -> Option { // NOTE: `SKIP_SIGNATURE_CHECK=true` because `queue` contains only transactions for which signature check is `true`. self.pop_from::(&self.queue, seen, wsv, expired_transactions) } @@ -252,11 +241,11 @@ impl Queue { #[inline] fn pop_from( &self, - queue: &ArrayQueue>, - seen: &mut Vec>, + queue: &ArrayQueue>, + seen: &mut Vec>, wsv: &WorldStateView, - expired_transactions: &mut Vec, - ) -> Option { + expired_transactions: &mut Vec, + ) -> Option { loop { let Some(hash) = queue.pop() else { trace!("Queue is empty"); @@ -303,10 +292,7 @@ impl Queue { /// /// BEWARE: Shouldn't be called in parallel with itself. 
#[cfg(test)] - fn collect_transactions_for_block( - &self, - wsv: &WorldStateView, - ) -> Vec { + fn collect_transactions_for_block(&self, wsv: &WorldStateView) -> Vec { let mut transactions = Vec::with_capacity(self.txs_in_block); self.get_transactions_for_block(wsv, &mut transactions, &mut Vec::new()); transactions @@ -318,8 +304,8 @@ impl Queue { pub fn get_transactions_for_block( &self, wsv: &WorldStateView, - transactions: &mut Vec, - expired_transactions: &mut Vec, + transactions: &mut Vec, + expired_transactions: &mut Vec, ) { if transactions.len() >= self.txs_in_block { return; @@ -341,10 +327,8 @@ impl Queue { ) }); - let transactions_hashes: HashSet> = transactions - .iter() - .map(VersionedAcceptedTransaction::hash) - .collect(); + let transactions_hashes: HashSet<_> = + transactions.iter().map(AcceptedTransaction::hash).collect(); let txs = txs_from_queue .riffle(txs_from_waiting_buffer) .filter(|tx| !transactions_hashes.contains(&tx.hash())) @@ -386,11 +370,7 @@ mod tests { use super::*; use crate::{kura::Kura, smartcontracts::isi::Registrable as _, wsv::World, PeersIds}; - fn accepted_tx( - account_id: &str, - proposed_ttl_ms: u64, - key: KeyPair, - ) -> VersionedAcceptedTransaction { + fn accepted_tx(account_id: &str, proposed_ttl_ms: u64, key: KeyPair) -> AcceptedTransaction { let message = std::iter::repeat_with(rand::random::) .take(16) .collect(); @@ -755,7 +735,7 @@ mod tests { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; - let fully_signed_tx: VersionedAcceptedTransaction = { + let fully_signed_tx: AcceptedTransaction = { let mut signed_tx = tx .clone() .sign((&key_pairs[0]).clone()) @@ -782,7 +762,7 @@ mod tests { .into() }; for key_pair in key_pairs { - let partially_signed_tx: VersionedAcceptedTransaction = get_tx(key_pair); + let partially_signed_tx: AcceptedTransaction = get_tx(key_pair); // Check that non of partially signed pass signature check assert!(matches!( partially_signed_tx.check_signature_condition(&wsv), @@ -1059,7 +1039,7 @@ mod tests { let mut tx = accepted_tx("alice@wonderland", 100_000, alice_key); assert!(queue.push(tx.clone(), &wsv).is_ok()); // tamper timestamp - tx.as_mut_v1().payload.creation_time += 2 * future_threshold_ms; + tx.payload.creation_time_ms += 2 * future_threshold_ms; assert!(matches!( queue.push(tx, &wsv), Err(Failure { diff --git a/core/src/smartcontracts/isi/block.rs b/core/src/smartcontracts/isi/block.rs index 49301e4bed1..a4eeaafc402 100644 --- a/core/src/smartcontracts/isi/block.rs +++ b/core/src/smartcontracts/isi/block.rs @@ -1,8 +1,11 @@ //! This module contains trait implementations related to block queries use eyre::{Result, WrapErr}; -use iroha_data_model::query::{ - block::FindBlockHeaderByHash, - error::{FindError, QueryExecutionFailure}, +use iroha_data_model::{ + block::Block, + query::{ + block::FindBlockHeaderByHash, + error::{FindError, QueryExecutionFailure}, + }, }; use iroha_telemetry::metrics; @@ -22,7 +25,7 @@ impl ValidQuery for FindAllBlockHeaders { let block_headers = wsv .all_blocks_by_value() .rev() - .map(|block| block.into_v1().header) + .map(|VersionedSignedBlock::V1(block)| block.payload.header) .collect(); Ok(block_headers) } @@ -35,14 +38,11 @@ impl ValidQuery for FindBlockHeaderByHash { .hash .evaluate(&Context::new(wsv)) .wrap_err("Failed to evaluate hash") - .map_err(|e| QueryExecutionFailure::Evaluate(e.to_string()))? 
- .typed(); + .map_err(|e| QueryExecutionFailure::Evaluate(e.to_string()))?; - let block = wsv - .all_blocks_by_value() + wsv.all_blocks_by_value() .find(|block| block.hash() == hash) - .ok_or_else(|| QueryExecutionFailure::Find(Box::new(FindError::Block(hash))))?; - - Ok(block.into_v1().header) + .ok_or_else(|| QueryExecutionFailure::Find(Box::new(FindError::Block(hash)))) + .map(|VersionedSignedBlock::V1(block)| block.payload.header) } } diff --git a/core/src/smartcontracts/isi/query.rs b/core/src/smartcontracts/isi/query.rs index e7b77844965..b32a123539d 100644 --- a/core/src/smartcontracts/isi/query.rs +++ b/core/src/smartcontracts/isi/query.rs @@ -91,14 +91,15 @@ mod tests { use std::str::FromStr; - use iroha_crypto::{Hash, HashOf, KeyPair}; - use iroha_data_model::{block::VersionedCommittedBlock, transaction::TransactionLimits}; + use iroha_crypto::{HashOf, KeyPair}; + use iroha_data_model::{block::Block, transaction::TransactionLimits}; + use iroha_genesis::AcceptedTransaction; use once_cell::sync::Lazy; use super::*; use crate::{ - block::*, kura::Kura, smartcontracts::isi::Registrable as _, tx::TransactionValidator, - wsv::World, PeersIds, + block::*, kura::Kura, smartcontracts::isi::Registrable as _, + sumeragi::network_topology::Topology, tx::TransactionValidator, wsv::World, PeersIds, }; static ALICE_KEYS: Lazy = Lazy::new(|| KeyPair::generate().unwrap()); @@ -190,32 +191,25 @@ mod tests { let valid_tx = { let tx = TransactionBuilder::new(ALICE_ID.clone(), vec![], 4000).sign(ALICE_KEYS.clone())?; - VersionedAcceptedTransaction::from(AcceptedTransaction::accept::(tx, &limits)?) + AcceptedTransaction::accept::(tx, &limits).expect("Valid") }; let invalid_tx = { let isi: InstructionBox = FailBox::new("fail").into(); let tx = TransactionBuilder::new(ALICE_ID.clone(), vec![isi.clone(), isi], 4000) .sign(ALICE_KEYS.clone())?; - AcceptedTransaction::accept::(tx, &huge_limits)?.into() + AcceptedTransaction::accept::(tx, &huge_limits).expect("Valid") }; let mut transactions = vec![valid_tx; valid_tx_per_block]; transactions.append(&mut vec![invalid_tx; invalid_tx_per_block]); - let first_block: VersionedCommittedBlock = BlockBuilder { - transactions: transactions.clone(), - event_recommendations: Vec::new(), - height: 1, - previous_block_hash: None, - view_change_index: 0, - committed_with_topology: crate::sumeragi::network_topology::Topology::new(vec![]), - key_pair: ALICE_KEYS.clone(), - transaction_validator: &TransactionValidator::new(limits), - wsv: wsv.clone(), - } - .build() - .commit_unchecked() - .into(); + let topology = Topology::new(Vec::new()); + let (first_block, _): (CommittedBlock, _) = + BlockBuilder::new(transactions.clone(), topology.clone(), vec![]) + .chain_first(&TransactionValidator::new(limits), wsv.clone()) + .sign(ALICE_KEYS.clone()) + .expect("Failed to sign block") + .commit_unchecked(); let mut curr_hash = first_block.hash(); @@ -223,21 +217,18 @@ mod tests { kura.store_block(first_block); for height in 1u64..blocks { - let block: VersionedCommittedBlock = BlockBuilder { - transactions: transactions.clone(), - event_recommendations: Vec::new(), - height, - previous_block_hash: Some(curr_hash), - view_change_index: 0, - committed_with_topology: crate::sumeragi::network_topology::Topology::new(vec![]), - key_pair: ALICE_KEYS.clone(), - transaction_validator: &TransactionValidator::new(limits), - wsv: wsv.clone(), - } - .build() - .commit_unchecked() - .into(); - + let (block, _): (CommittedBlock, _) = + BlockBuilder::new(transactions.clone(), topology.clone(), 
vec![]) + .chain( + height, + Some(curr_hash), + 0, + &TransactionValidator::new(limits), + wsv.clone(), + ) + .sign(ALICE_KEYS.clone()) + .expect("Failed to sign block") + .commit_unchecked(); curr_hash = block.hash(); wsv.apply(&block)?; kura.store_block(block); @@ -315,11 +306,11 @@ mod tests { .expect("WSV is empty"); assert_eq!( - &FindBlockHeaderByHash::new(*block.hash()).execute(&wsv)?, + &FindBlockHeaderByHash::new(block.hash()).execute(&wsv)?, block.header() ); - assert!(FindBlockHeaderByHash::new(Hash::new([42])) + assert!(FindBlockHeaderByHash::new(HashOf::new(&42).transmute()) .execute(&wsv) .is_err()); @@ -365,33 +356,25 @@ mod tests { max_wasm_size_bytes: 0, }; - let va_tx: VersionedAcceptedTransaction = - AcceptedTransaction::accept::(signed_tx, &tx_limits)?.into(); - - let vcb: VersionedCommittedBlock = BlockBuilder { - transactions: vec![va_tx.clone()], - event_recommendations: Vec::new(), - height: 1, - previous_block_hash: None, - view_change_index: 0, - committed_with_topology: crate::sumeragi::network_topology::Topology::new(vec![]), - key_pair: ALICE_KEYS.clone(), - transaction_validator: &TransactionValidator::new(tx_limits), - wsv: wsv.clone(), - } - .build() - .commit_unchecked() - .into(); + let va_tx: AcceptedTransaction = + AcceptedTransaction::accept::(signed_tx, &tx_limits).expect("Valid"); - wsv.apply(&vcb)?; - kura.store_block(vcb); + let topology = Topology::new(Vec::new()); + let (block, _) = BlockBuilder::new(vec![va_tx.clone()], topology.clone(), Vec::new()) + .chain_first(&TransactionValidator::new(tx_limits), wsv.clone()) + .sign(ALICE_KEYS.clone()) + .expect("Failed to sign blocks.") + .commit(&topology) + .expect("Valid"); + wsv.apply(&block)?; + kura.store_block(block); - let wrong_hash: Hash = HashOf::new(&2_u8).into(); + let wrong_hash = HashOf::new(&2_u8).transmute(); let not_found = FindTransactionByHash::new(wrong_hash).execute(&wsv); assert!(matches!(not_found, Err(_))); - let found_accepted = FindTransactionByHash::new(Hash::from(va_tx.hash())).execute(&wsv)?; - match found_accepted { + let found_accepted = FindTransactionByHash::new(va_tx.hash()).execute(&wsv)?; + match found_accepted.tx_value { TransactionValue::Transaction(tx) => { assert_eq!(va_tx.hash().transmute(), tx.hash()) } diff --git a/core/src/smartcontracts/isi/tx.rs b/core/src/smartcontracts/isi/tx.rs index 3e89bcd94d6..7c8b6266070 100644 --- a/core/src/smartcontracts/isi/tx.rs +++ b/core/src/smartcontracts/isi/tx.rs @@ -39,8 +39,8 @@ impl ValidQuery for FindTransactionByHash { .evaluate(&Context::new(wsv)) .wrap_err("Failed to get hash") .map_err(|e| QueryExecutionFailure::Evaluate(e.to_string()))?; + iroha_logger::trace!(%hash); - let hash = hash.typed(); if !wsv.has_transaction(&hash) { return Err(FindError::Transaction(hash).into()); }; diff --git a/core/src/smartcontracts/wasm.rs b/core/src/smartcontracts/wasm.rs index d3cde4871a2..dbb75dbcfbf 100644 --- a/core/src/smartcontracts/wasm.rs +++ b/core/src/smartcontracts/wasm.rs @@ -98,10 +98,10 @@ pub type Result = core::result::Result; /// Panics if something is wrong with the configuration. /// Configuration is hardcoded and tested, so this function should never panic. pub fn create_engine() -> Engine { - create_config() - .and_then(|config| { - Engine::new(&config).map_err(|err| Error::Initialization(eyre!(Box::new(err)))) - }) + let config = create_config(); + + Engine::new(&config) + .map_err(|err| Error::Initialization(eyre!(Box::new(err)))) .expect("Failed to create WASM engine with a predefined configuration. 
This is a bug") } @@ -116,13 +116,18 @@ pub fn load_module(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result Result { +fn create_config() -> Config { let mut config = Config::new(); + config.consume_fuel(true); + + if let Err(error) = config.cache_config_load_default() { + iroha_logger::warn!( + ?error, + "Setting up Wasmtime cache failed. Performance might degrade" + ); + } + config - .consume_fuel(true) - .cache_config_load_default() - .map_err(|err| Error::Initialization(eyre!(Box::new(err))))?; - Ok(config) } #[derive(Clone)] @@ -269,7 +274,7 @@ impl<'wrld> Runtime<'wrld> { let wsv = caller.data().wsv; wsv.validator_view() - .validate(wsv, &caller.data().account_id, query.clone()) + .validate(wsv, &caller.data().account_id, query.clone().into()) .map_err(|error| NotPermittedFail { reason: error.to_string(), }) @@ -334,7 +339,7 @@ impl<'wrld> Runtime<'wrld> { .map_err(|error| Trap::new(error.to_string()))?; } wsv.validator_view() - .validate(wsv, account_id, instruction) + .validate(wsv, account_id, instruction.into()) .map_err(|error| NotPermittedFail { reason: error.to_string(), }) diff --git a/core/src/sumeragi/main_loop.rs b/core/src/sumeragi/main_loop.rs index f0ca7933b2c..49fc88e3823 100644 --- a/core/src/sumeragi/main_loop.rs +++ b/core/src/sumeragi/main_loop.rs @@ -1,18 +1,17 @@ //! The main event loop that powers sumeragi. -#![allow(clippy::cognitive_complexity)] + use iroha_crypto::HashOf; use iroha_data_model::{block::*, transaction::error::TransactionExpired}; +use iroha_genesis::AcceptedTransaction; use iroha_p2p::UpdateTopology; use parking_lot::Mutex; use tracing::{span, Level}; -use super::*; +use super::{view_change::ProofBuilder, *}; use crate::{block::*, sumeragi::tracing::instrument}; /// `Sumeragi` is the implementation of the consensus. /// -/// TODO: paraphrase -/// /// `sumeragi_state_data` is a [`Mutex`] instead of a `RWLock` /// because it communicates more clearly the correct use of the /// lock. The most frequent action on this lock is the main loop @@ -72,12 +71,12 @@ impl Debug for Sumeragi { /// Internal structure that retains the state. pub struct State { - /// The view change index of latest [`VersionedCommittedBlock`] + /// The view change index of latest [`CommittedBlock`] pub latest_block_view_change_index: u64, - /// The hash of the latest [`VersionedCommittedBlock`] - pub latest_block_hash: Option>, - /// Hash of the previous [`VersionedCommittedBlock`] - pub previous_block_hash: Option>, + /// The hash of the latest [`CommittedBlock`] + pub latest_block_hash: Option>, + /// Hash of the previous [`CommittedBlock`] + pub previous_block_hash: Option>, /// Current block height pub latest_block_height: u64, /// The current network topology. @@ -98,7 +97,7 @@ pub struct State { /// other subsystems where we can. This way the performance of /// sumeragi is more dependent on the code that is internal to the /// subsystem. - pub transaction_cache: Vec, + pub transaction_cache: Vec, } impl Sumeragi { @@ -163,7 +162,7 @@ impl Sumeragi { #[allow(clippy::needless_pass_by_value)] fn broadcast_packet(&self, msg: MessagePacket, topology: &Topology) { - self.broadcast_packet_to(msg, &topology.sorted_peers); + self.broadcast_packet_to(msg, &topology.ordered_peers); } fn gossip_transactions(&self, state: &State, view_change_proof_chain: &ProofChain) { @@ -188,7 +187,7 @@ impl Sumeragi { /// Connect or disconnect peers according to the current network topology. 
fn connect_peers(&self, topology: &Topology) { - let peers = topology.sorted_peers.clone().into_iter().collect(); + let peers = topology.ordered_peers.clone().into_iter().collect(); self.network.update_topology(UpdateTopology(peers)); } @@ -198,11 +197,11 @@ impl Sumeragi { self.block_time + self.commit_time } - fn send_events(&self, events: impl Into>) { + fn send_events(&self, events: Vec) { let addr = &self.peer_id.address; if self.events_sender.receiver_count() > 0 { - for event in events.into() { + for event in events { self.events_sender .send(event) .map_err(|err| warn!(%addr, ?err, "Event not sent")) @@ -222,7 +221,7 @@ impl Sumeragi { Ok(packet) => { if let Err(error) = view_change_proof_chain.merge( packet.view_change_proofs, - ¤t_topology.sorted_peers, + ¤t_topology.ordered_peers, current_topology.max_faults(), state.latest_block_hash, ) { @@ -247,64 +246,22 @@ impl Sumeragi { shutdown_receiver: &mut tokio::sync::oneshot::Receiver<()>, ) -> Result<(), EarlyReturn> { trace!("Listen for genesis"); - assert!( - state.current_topology.is_consensus_required(), - "Only peer in network, yet required to receive genesis topology. This is a configuration error." - ); + loop { + let addr = &self.peer_id.address; + std::thread::sleep(Duration::from_millis(50)); early_return(shutdown_receiver).map_err(|e| { debug!(?e, "Early return."); e })?; - // we must connect to peers so that our block_sync can find us - // the genesis block. + + // Connect to peers so that block_sync can find the genesis block. match self.message_receiver.lock().try_recv() { Ok(packet) => { let block = match packet.message { - Message::BlockCreated(block_created) => { - // If we receive a committed genesis block that is - // valid, use it without question. During the - // genesis round we blindly take on the network - // topology described in the provided genesis - // block. - let block = { - let span = span!( - Level::TRACE, - "Genesis Round Peer is revalidating the block." 
- ); - let _enter = span.enter(); - match block_created.validate_and_extract_block::( - &self.transaction_validator, - state.wsv.clone(), - state.latest_block_hash, - state.latest_block_height, - ) { - Ok(block) => block, - Err(error) => { - error!(?error); - continue; - } - } - }; - // Omit signature verification during genesis round - block.commit_unchecked().into() - } - Message::BlockSyncUpdate(block_sync_update) => { - // Omit signature verification during genesis round - match block_sync_update.validate_and_extract_block::( - &self.transaction_validator, - state.wsv.clone(), - state.latest_block_hash, - state.latest_block_height, - ) { - Ok(block) => block, - Err(error) => { - error!(?error); - continue; - } - } - } + Message::BlockCreated(BlockCreated { block }) + | Message::BlockSyncUpdate(BlockSyncUpdate { block }) => block, msg => { trace!(?msg, "Not handling the message, waiting for genesis..."); continue; @@ -312,9 +269,31 @@ impl Sumeragi { }; if block.header().is_genesis() { + let block = match ValidBlock::validate( + block, + state.latest_block_height + 1, + state.latest_block_hash, + &state.current_topology, + &self.transaction_validator, + state.wsv.clone(), + ) { + Ok(block) => match block.commit(&state.current_topology) { + Ok(block) => block, + Err((block, err)) => { + warn!(%addr, hash=%block.hash(), ?err, "Received invalid genesis block"); + continue; + } + }, + Err((block, err)) => { + warn!(%addr, hash=%block.hash(), ?err, "Failed to commit genesis block"); + continue; + } + }; + commit_block(self, state, block); return Err(EarlyReturn::GenesisBlockReceivedAndCommitted); } + debug!("Received a block that was not genesis."); } Err(mpsc::TryRecvError::Disconnected) => return Err(EarlyReturn::Disconnected), @@ -324,37 +303,56 @@ impl Sumeragi { } } -fn commit_block(sumeragi: &Sumeragi, state: &mut State, block: impl Into) { - let committed_block = block.into(); - +fn commit_block( + sumeragi: &Sumeragi, + state: &mut State, + (committed_block, events): (CommittedBlock, Vec), +) { + let block_hash = committed_block.hash(); state.finalized_wsv = state.wsv.clone(); - update_state(state, sumeragi, &committed_block); + update_state(state, sumeragi, &committed_block, events); state.previous_block_hash = state.latest_block_hash; info!( addr=%sumeragi.peer_id.address, role=%state.current_topology.role(&sumeragi.peer_id), block_height=%state.latest_block_height, - block_hash=%committed_block.hash(), + %block_hash, "Committing block" ); update_topology(state, sumeragi, &committed_block); - sumeragi.kura.store_block(committed_block); - - cache_transaction(state, sumeragi); + cache_transactions(state, sumeragi); } -fn replace_top_block( - sumeragi: &Sumeragi, - state: &mut State, - block: impl Into, -) { - let committed_block = block.into(); +fn replace_top_block(sumeragi: &Sumeragi, state: &mut State, block: ValidBlock) { + let role = state.current_topology.role(&sumeragi.peer_id); + let addr = &sumeragi.peer_id.address; + + let header = block.header(); + let block_hash = block.hash(); + + warn!( + %addr, %role, %block_hash, + peer_latest_block_hash=?state.latest_block_hash, + peer_latest_block_view_change_index=?state.latest_block_view_change_index, + consensus_latest_block_view_change_index=%header.view_change_index, + "Soft fork occurred: peer in inconsistent state. Rolling back and replacing top block." 
-fn replace_top_block( - sumeragi: &Sumeragi, - state: &mut State, - block: impl Into<VersionedCommittedBlock>, -) { - let committed_block = block.into(); +fn replace_top_block(sumeragi: &Sumeragi, state: &mut State, block: ValidBlock) { + let role = state.current_topology.role(&sumeragi.peer_id); + let addr = &sumeragi.peer_id.address; + + let header = block.header(); + let block_hash = block.hash(); + + warn!( + %addr, %role, %block_hash, + peer_latest_block_hash=?state.latest_block_hash, + peer_latest_block_view_change_index=?state.latest_block_view_change_index, + consensus_latest_block_view_change_index=%header.view_change_index, + "Soft fork occurred: peer in inconsistent state. Rolling back and replacing top block." + ); + + let (committed_block, events) = match block + .commit_without_proxy_tail_signature(&state.current_topology) + { + Ok(block) => block, + Err((_, err)) => { + error!(?err, %block_hash, "Failed to commit replacement block. Unable to resolve soft fork."); + return; + } + }; state.wsv = state.finalized_wsv.clone(); - update_state(state, sumeragi, &committed_block); + update_state(state, sumeragi, &committed_block, events); // state.previous_block_hash stays the same. info!( @@ -366,20 +364,12 @@ fn replace_top_block( ); update_topology(state, sumeragi, &committed_block); - sumeragi.kura.replace_top_block(committed_block); - - cache_transaction(state, sumeragi) + cache_transactions(state, sumeragi) } -fn update_topology( - state: &mut State, - sumeragi: &Sumeragi, - committed_block: &VersionedCommittedBlock, -) { - let mut topology = Topology { - sorted_peers: committed_block.header().committed_with_topology.clone(), - }; +fn update_topology(state: &mut State, sumeragi: &Sumeragi, committed_block: &CommittedBlock) { + let mut topology = Topology::new(committed_block.header().commit_topology.clone()); topology.lift_up_peers( &committed_block .signatures() @@ -400,7 +390,12 @@ fn update_topology( sumeragi.connect_peers(&state.current_topology); } -fn update_state(state: &mut State, sumeragi: &Sumeragi, committed_block: &VersionedCommittedBlock) { +fn update_state( + state: &mut State, + sumeragi: &Sumeragi, + committed_block: &CommittedBlock, + events: Vec<Event>, +) { state .wsv .apply(committed_block) @@ -413,14 +408,14 @@ fn update_state(state: &mut State, sumeragi: &Sumeragi, committed_block: &Versio // This sends "Block committed" event, so it should be done // AFTER public facing WSV update - sumeragi.send_events(committed_block); + sumeragi.send_events(events); state.latest_block_height = committed_block.header().height; state.latest_block_hash = Some(committed_block.hash()); state.latest_block_view_change_index = committed_block.header().view_change_index; } -fn cache_transaction(state: &mut State, sumeragi: &Sumeragi) { +fn cache_transactions(state: &mut State, sumeragi: &Sumeragi) { state.transaction_cache.retain(|tx| { !tx.is_in_blockchain(&state.wsv) && !tx.is_expired(sumeragi.queue.tx_time_to_live) }); @@ -432,32 +427,35 @@ fn suggest_view_change( view_change_proof_chain: &mut ProofChain, current_view_change_index: u64, ) { - let suspect_proof = { - let mut proof = Proof { - latest_block_hash: state.latest_block_hash, - view_change_index: current_view_change_index, - signatures: Vec::new(), - }; - proof - .sign(sumeragi.key_pair.clone()) - .expect("Proof signing failed"); - proof + let view_change_proof = { + let mut proof = ProofBuilder::new(current_view_change_index); + + if let Some(latest_block_hash) = state.latest_block_hash { + proof = proof.with_latest_block_hash(latest_block_hash); + } + + proof.sign(sumeragi.key_pair.clone()) }; - view_change_proof_chain - .insert_proof( - &state.current_topology.sorted_peers, - state.current_topology.max_faults(), - state.latest_block_hash, - suspect_proof, - ) - .unwrap_or_else(|err| error!("{err}")); + match view_change_proof { + Ok(proof) => { + view_change_proof_chain + .insert_proof( + &state.current_topology.ordered_peers, + state.current_topology.max_faults(), + state.latest_block_hash, + proof, + ) + .unwrap_or_else(|error| error!(?error, "View change proof incorrect")); - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - Message::ViewChangeSuggested, - ); - sumeragi.broadcast_packet(msg, &state.current_topology); + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + Message::ViewChangeSuggested, + ); + sumeragi.broadcast_packet(msg, &state.current_topology); + } + Err(error) => error!(?error, "Failed to sign view change proof"), + } }
fn prune_view_change_proofs_and_calculate_current_index( @@ -466,18 +464,17 @@ ) -> u64 { view_change_proof_chain.prune(state.latest_block_hash); view_change_proof_chain.verify_with_state( - &state.current_topology.sorted_peers, + &state.current_topology.ordered_peers, state.current_topology.max_faults(), state.latest_block_hash, ) as u64 } fn enqueue_transaction(sumeragi: &Sumeragi, wsv: &WorldStateView, tx: VersionedSignedTransaction) { - let tx = tx.into_v1(); - let addr = &sumeragi.peer_id.address; + match AcceptedTransaction::accept::<false>(tx, &sumeragi.transaction_limits) { - Ok(tx) => match sumeragi.queue.push(tx.into(), wsv) { + Ok(tx) => match sumeragi.queue.push(tx, wsv) { Ok(_) => {} Err(crate::queue::Failure { tx, @@ -489,19 +486,19 @@ fn enqueue_transaction(sumeragi: &Sumeragi, wsv: &WorldStateView, tx: VersionedS error!(%addr, ?err, tx_hash = %tx.hash(), "Failed to enqueue transaction.") } }, - Err(err) => error!(%addr, %err, "Transaction rejected"), + Err((_tx, err)) => error!(%addr, %err, "Transaction rejected"), } } #[allow(clippy::too_many_lines)] -fn handle_message( +fn handle_message( message: Message, sumeragi: &Sumeragi, state: &mut State, voting_block: &mut Option<VotingBlock>, current_view_change_index: u64, view_change_proof_chain: &mut ProofChain, - voting_signatures: &mut Vec<SignatureOf<PendingBlock>>, + voting_signatures: &mut Vec<SignatureOf<BlockPayload>>, ) { let current_topology = &state.current_topology; let role = current_topology.role(&sumeragi.peer_id); @@ -516,90 +513,83 @@ fn handle_message( (Message::ViewChangeSuggested, _) => { trace!("Received view change suggestion."); } - (Message::BlockSyncUpdate(block_sync_update), _) => { - let block_hash = block_sync_update.hash(); - info!(%addr, %role, hash=%block_hash, "Block sync update received"); - - let block = match block_sync_update - .clone() - .validate_and_extract_block::( - &sumeragi.transaction_validator, - state.wsv.clone(), - state.latest_block_hash, - state.latest_block_height, - ) - .or_else(|_| - /* If the block fails validation we must check again using the finalized wsv. When a soft-fork occurs the consensus-block may be valid on the previous wsv but not the current one. */ - block_sync_update.validate_and_extract_block::( - &sumeragi.transaction_validator, - state.finalized_wsv.clone(), - state.previous_block_hash, - state.latest_block_height.saturating_sub(1), - )) { + (Message::BlockSyncUpdate(BlockSyncUpdate { block }), _) => { + let block_hash = block.hash(); + debug!(%addr, %role, %block_hash, "Block sync update received"); + + let block = match ValidBlock::validate_without_validating_topology( + block, + state.latest_block_height + 1, + state.latest_block_hash, + &sumeragi.transaction_validator, + state.wsv.clone(), + ) { Ok(block) => block, - Err(error) => { - error!(%addr, %role, %block_hash, ?error, "Block not valid."); - return; + Err((block, error)) => { + let header = block.header(); + + // TODO: What is the correct condition for soft-fork?
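The TODO above asks what the correct soft-fork condition is. The check that follows requires all four properties to hold at once; factored into a standalone predicate it reads as follows (a hypothetical helper, field names as in this patch):

```rust
fn is_soft_fork_candidate(
    state: &State,
    header: &BlockHeader,
    block_hash: HashOf<BlockPayload>,
) -> bool {
    // Competes for the same height, on the same parent...
    state.latest_block_height == header.height
        && state.previous_block_hash == header.previous_block_hash
        // ...is a different block than the one we currently hold...
        && state.latest_block_hash != Some(block_hash)
        // ...and was produced under a strictly later view change.
        && state.latest_block_view_change_index < header.view_change_index
}
```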
+ #[allow(clippy::redundant_else)] + if state.latest_block_height == header.height + && state.previous_block_hash == header.previous_block_hash + && state.latest_block_hash != Some(block_hash) + && state.latest_block_view_change_index < header.view_change_index + { + debug!(%addr, %role, %block_hash, ?error, "Block appears to be invalid. Revalidating..."); + + /* If the block fails validation we must check again using the finalized wsv. + When a soft-fork occurs the consensus-block may be valid on the previous + wsv but not the current one. */ + match ValidBlock::validate_without_validating_topology( + block, + state.latest_block_height, + state.previous_block_hash, + &sumeragi.transaction_validator, + state.finalized_wsv.clone(), + ) { + Ok(block) => { + replace_top_block(sumeragi, state, block); + return; + } + Err((_, error)) => { + warn!(%addr, %role, %block_hash, ?error, "Block rejected"); + return; + } + } + } else { + warn!(%addr, %role, %block_hash, ?error, "Block rejected"); + return; + } } }; - if state.previous_block_hash == block.header().previous_block_hash - && state.latest_block_height == block.header().height - && state.latest_block_hash != Some(block.hash()) - && state.latest_block_view_change_index < block.header().view_change_index - { - error!( - %addr, %role, - peer_latest_block_hash=?state.latest_block_hash, - peer_latest_block_view_change_index=?state.latest_block_view_change_index, - consensus_latest_block_hash=%block.hash(), - consensus_latest_block_view_change_index=%block.header().view_change_index, - "Soft fork occurred: peer in inconsistent state. Rolling back and replacing top block." - ); - replace_top_block(sumeragi, state, block); - return; - } - if state.latest_block_hash != block.header().previous_block_hash { - error!( - %addr, %role, - actual = ?block.header().previous_block_hash, - expected = ?state.latest_block_hash, - "Mismatch between the actual and expected hashes of the latest block." - ); - return; - } - if state.latest_block_height + 1 != block.header().height { - error!( - %addr, %role, - actual = block.header().height, - expected = state.latest_block_height + 1, - "Mismatch between the actual and expected height of the block." - ); - return; - } - - commit_block(sumeragi, state, block); + match block.commit_without_proxy_tail_signature(current_topology) { + Ok(committed_block) => commit_block(sumeragi, state, committed_block), + Err((_, err)) => { + warn!(%addr, %role, %block_hash, ?err, "Failed to commit block") + } + }; } (Message::BlockCommitted(BlockCommitted { hash, signatures }), _) => { if role == Role::ProxyTail && current_topology.is_consensus_required() || role == Role::Leader && !current_topology.is_consensus_required() { - error!(%addr, %role, "Received BlockCommitted message, but shouldn't"); + error!(%addr, %role, "Received BlockCommitted message, but shouldn't have"); } else if let Some(mut voted_block) = voting_block.take() { let voting_block_hash = voted_block.block.hash(); - if hash == voting_block_hash.transmute() { + if hash == voting_block_hash { // The manipulation of the topology relies upon all peers seeing the same signature set. // Therefore we must clear the signatures and accept what the proxy tail giveth.
- voted_block.block.signatures.clear(); - add_signatures::<true>(&mut voted_block, signatures.transmute()); + voted_block.block.replace_signatures(signatures); - match voted_block.block.commit(current_topology) { + match voted_block + .block + .commit_without_proxy_tail_signature(current_topology) + { Ok(committed_block) => commit_block(sumeragi, state, committed_block), Err((_, err)) => { - error!(%addr, %role, %hash, ?err, "Block failed to be committed") + warn!(%addr, %role, %hash, ?err, "Failed to commit block") } }; } else { @@ -624,30 +614,33 @@ ); sumeragi.broadcast_packet_to(msg, [current_topology.proxy_tail()]); - info!(%addr, %block_hash, "Block validated, signed and forwarded"); + debug!(%addr, %role, %block_hash, "Block validated, signed and forwarded"); *voting_block = Some(block); } } (Message::BlockCreated(block_created), Role::ObservingPeer) => { if let Some(block) = vote_for_block(sumeragi, state, block_created) { - if current_view_change_index >= 1 { - let block_hash = block.block.hash(); + let block_hash = block.block.hash(); + if current_view_change_index >= 1 { let msg = MessagePacket::new( view_change_proof_chain.clone(), BlockSigned::from(block.block.clone()), ); sumeragi.broadcast_packet_to(msg, [current_topology.proxy_tail()]); - info!(%addr, %block_hash, "Block validated, signed and forwarded"); + debug!(%addr, %role, %block_hash, current_view_change_index, "Block validated, signed and forwarded"); + } else { + // NOTE: It is ok to sign the block because the signatures will be + // replaced with the ones from the proxy tail + debug!(%addr, %role, %block_hash, "Block validated and signed"); } + *voting_block = Some(block); } } (Message::BlockCreated(block_created), Role::ProxyTail) => { - // NOTE: False positive from nursery - #[allow(clippy::iter_with_drain)] if let Some(mut new_block) = vote_for_block(sumeragi, state, block_created) { // NOTE: Up until this point it was unknown which block is expected to be received, // therefore all the signatures (of any hash) were collected and will now be pruned @@ -692,13 +685,11 @@ fn process_message_independent( current_view_change_index: u64, view_change_proof_chain: &mut ProofChain, round_start_time: &Instant, - is_genesis_peer: bool, + #[cfg(debug_assertions)] is_soft_fork_peer: bool, ) { let current_topology = &state.current_topology; - let role = current_topology.role(&sumeragi.peer_id); - let addr = &sumeragi.peer_id.address; - match role { + match current_topology.role(&sumeragi.peer_id) { Role::Leader => { if voting_block.is_none() { let cache_full = state.transaction_cache.len() >= sumeragi.queue.txs_in_block; @@ -707,49 +698,30 @@ fn process_message_independent( if cache_full || (deadline_reached && cache_non_empty) { let transactions = state.transaction_cache.clone(); - info!(txns=%transactions.len(), "Creating block..."); + debug!(txns=%transactions.len(), "Creating block..."); // TODO: properly process triggers!
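The leader's trigger for cutting a block, used just above: either the transaction cache has filled to the configured block size, or the block deadline has passed with at least one pending transaction. As a standalone predicate (a sketch; parameter names are illustrative):

```rust
use std::time::Duration;

fn should_create_block(
    cache_len: usize,
    txs_in_block: usize,
    since_round_start: Duration,
    block_time: Duration,
) -> bool {
    let cache_full = cache_len >= txs_in_block;
    let deadline_reached = since_round_start > block_time;
    let cache_non_empty = cache_len > 0;
    cache_full || (deadline_reached && cache_non_empty)
}
```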
let event_recommendations = Vec::new(); - let new_block = BlockBuilder { + match BlockBuilder::new( transactions, + state.current_topology.clone(), event_recommendations, - height: state.latest_block_height + 1, - previous_block_hash: state.latest_block_hash, - view_change_index: current_view_change_index, - committed_with_topology: state.current_topology.clone(), - key_pair: sumeragi.key_pair.clone(), - transaction_validator: &sumeragi.transaction_validator, - wsv: state.wsv.clone(), - } - .build(); - - sumeragi.send_events(&new_block); - if current_topology.is_consensus_required() { - info!(%addr, hash=%new_block.hash(), "Block created"); - *voting_block = Some(VotingBlock::new(new_block.clone())); - - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - BlockCreated::from(new_block), - ); - sumeragi.broadcast_packet(msg, current_topology); - } else { - match new_block.commit(current_topology) { - Ok(committed_block) => { - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - BlockCommitted::from(Into::::into( - committed_block.clone(), - )), - ); - - sumeragi.broadcast_packet(msg, current_topology); - commit_block(sumeragi, state, committed_block); - } - Err(err) => error!(%addr, role=%Role::Leader, ?err), + ) + .chain( + state.latest_block_height, + state.latest_block_hash, + current_view_change_index, + &sumeragi.transaction_validator, + state.wsv.clone(), + ) + .sign(sumeragi.key_pair.clone()) + { + Ok(block) => { + *voting_block = + create_block(sumeragi, state, block, view_change_proof_chain); } - } + Err(error) => error!(?error, "Failed to sign block"), + }; } } } @@ -757,19 +729,20 @@ fn process_message_independent( if let Some(voted_block) = voting_block.take() { let voted_at = voted_block.voted_at; - match voted_block.block.commit(current_topology) { + match voted_block + .block + .commit_without_proxy_tail_signature(current_topology) + { Ok(committed_block) => { - info!(voting_block_hash = %committed_block.hash(), "Block reached required number of votes"); + debug!(block_hash=%committed_block.0.hash(), "Block reached required number of votes"); let msg = MessagePacket::new( view_change_proof_chain.clone(), - BlockCommitted::from(Into::::into( - committed_block.clone(), - )), + BlockCommitted::from(committed_block.0.clone()), ); #[cfg(debug_assertions)] - if is_genesis_peer && sumeragi.debug_force_soft_fork { + if is_soft_fork_peer && sumeragi.debug_force_soft_fork { std::thread::sleep(sumeragi.pipeline_time() * 2); } else { sumeragi.broadcast_packet(msg, current_topology); @@ -784,7 +757,7 @@ fn process_message_independent( Err((block, err)) => { // Restore the current voting block and continue the round *voting_block = Some(VotingBlock::voted_at(block, voted_at)); - trace!(?err, "Not enough signatures, waiting for more..."); + debug!(?err, "Not enough signatures, waiting for more..."); } } } @@ -793,8 +766,49 @@ fn process_message_independent( } } +fn create_block( + sumeragi: &Sumeragi, + state: &mut State, + block: ValidBlock, + view_change_proof_chain: &mut ProofChain, +) -> Option { + let current_topology = &state.current_topology; + let addr = &sumeragi.peer_id.address; + + if current_topology.is_consensus_required() { + debug!(%addr, hash=%block.hash(), "Block created"); + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + BlockCreated::from(VersionedSignedBlock::from(block.clone())), + ); + + sumeragi.broadcast_packet(msg, current_topology); + Some(VotingBlock::new(block)) + } else { + // TODO: Should we stop producing blocks 
if we cannot guarantee BFT anymore? Should we + // make the limit on the number of peers below which blocks will not be produced configurable? + // TODO: validator could also deny Unregister that lowers the number of peers below some limit + error!(%addr, ?current_topology, "Insufficient number of nodes to guarantee BFT"); + + match block.commit(current_topology) { + Ok(committed_block) => { + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + BlockCommitted::from(committed_block.0.clone()), + ); + + commit_block(sumeragi, state, committed_block); + sumeragi.broadcast_packet(msg, &state.current_topology); + } + Err(err) => error!(%addr, role=%Role::Leader, ?err), + } + + None + } +} // NOTE: False positive useless_let_if_seq from nursery -#[allow(clippy::too_many_arguments, clippy::useless_let_if_seq)] +#[allow(clippy::too_many_arguments)] fn reset_state( peer_id: &PeerId, pipeline_time: Duration, @@ -805,7 +819,7 @@ // below is the state that gets reset. current_topology: &mut Topology, voting_block: &mut Option<VotingBlock>, - voting_signatures: &mut Vec<SignatureOf<PendingBlock>>, + voting_signatures: &mut Vec<SignatureOf<BlockPayload>>, round_start_time: &mut Instant, last_view_change_time: &mut Instant, view_change_time: &mut Duration, @@ -862,7 +876,7 @@ pub(crate) fn run( sumeragi.connect_peers(&state.current_topology); let span = span!(tracing::Level::TRACE, "genesis").entered(); - let is_genesis_peer = if state.latest_block_height == 0 || state.latest_block_hash.is_none() { + let is_soft_fork_peer = if state.latest_block_height == 0 || state.latest_block_hash.is_none() { if let Some(genesis_network) = genesis_network { sumeragi_init_commit_genesis(sumeragi, &mut state, genesis_network); true @@ -992,7 +1006,7 @@ should_sleep = true; }, |message| { - handle_message( + handle_message::( message, sumeragi, &mut state, @@ -1011,21 +1025,22 @@ current_view_change_index, &mut view_change_proof_chain, &round_start_time, - is_genesis_peer, + #[cfg(debug_assertions)] + is_soft_fork_peer, ); } } fn add_signatures<const EXPECT_VALID: bool>( block: &mut VotingBlock, - signatures: impl IntoIterator<Item = SignatureOf<PendingBlock>>, + signatures: impl IntoIterator<Item = SignatureOf<BlockPayload>>, ) { for signature in signatures { if let Err(err) = block.block.add_signature(signature) { let err_msg = "Signature not valid"; if EXPECT_VALID { - error!(?err, err_msg); + warn!(?err, err_msg); } else { debug!(?err, err_msg); } @@ -1050,59 +1065,30 @@ fn expired_event(txn: &impl Transaction) -> Event { fn vote_for_block( sumeragi: &Sumeragi, state: &State, - block_created: BlockCreated, + BlockCreated { block }: BlockCreated, ) -> Option<VotingBlock> { - let block_hash = block_created.hash(); let addr = &sumeragi.peer_id.address; let role = state.current_topology.role(&sumeragi.peer_id); - trace!(%addr, %role, block_hash=%block_hash, "Block received, voting..."); - - let mut block = { - let span = span!(Level::TRACE, "block revalidation"); - let _enter = span.enter(); + trace!(%addr, %role, block_hash=%block.hash(), "Block received, voting..."); - match block_created.validate_and_extract_block::( - &sumeragi.transaction_validator, - state.wsv.clone(), - state.latest_block_hash, - state.latest_block_height, - ) { - Ok(block) => block, - Err(err) => { - warn!(%addr, %role, ?err); - return None; - } + let block = match ValidBlock::validate( + block, + state.latest_block_height + 1, + state.latest_block_hash, + &state.current_topology, + &sumeragi.transaction_validator, + state.wsv.clone(), + ) { + Ok(block) => block, + Err((block, err)) => { + warn!(%addr, %role, hash=%block.hash(),
?err, "Received invalid block"); + return None; } - }; - - if state - .current_topology - .filter_signatures_by_roles(&[Role::Leader], block.retain_verified_signatures()) - .is_empty() - { - error!( - %addr, %role, leader=%state.current_topology.leader().address, hash=%block.hash(), - "The block is rejected as it is not signed by the leader." - ); - - return None; } + .sign(sumeragi.key_pair.clone()) + .expect("Block signing failed"); - if block.header.committed_with_topology != state.current_topology.sorted_peers { - error!( - %addr, %role, block_topology=?block.header.committed_with_topology, my_topology=?state.current_topology, hash=%block.hash(), - "The block is rejected as because the topology field is incorrect." - ); - - return None; - } - - let signed_block = block - .sign(sumeragi.key_pair.clone()) - .expect("Block signing failed"); - - sumeragi.send_events(&signed_block); - Some(VotingBlock::new(signed_block)) + Some(VotingBlock::new(block)) } fn sumeragi_init_commit_genesis( @@ -1117,41 +1103,31 @@ fn sumeragi_init_commit_genesis( assert_eq!(state.latest_block_height, 0); assert_eq!(state.latest_block_hash, None); - let transactions = genesis_network.transactions; + let transactions: Vec<_> = genesis_network + .transactions + .into_iter() + .map(Into::into) + .collect(); // Don't start genesis round. Instead just commit the genesis block. - assert!( - !transactions.is_empty(), - "Genesis transaction set contains no valid transactions" - ); - let block = BlockBuilder { - transactions, - event_recommendations: Vec::new(), - height: 1, - previous_block_hash: None, - view_change_index: 0, - committed_with_topology: state.current_topology.clone(), - key_pair: sumeragi.key_pair.clone(), - transaction_validator: &sumeragi.transaction_validator, - wsv: state.wsv.clone(), - } - .build(); + let addr = &sumeragi.peer_id.address; + let (block, events): (CommittedBlock, _) = + BlockBuilder::new(transactions, state.current_topology.clone(), Vec::new()) + .chain_first(&sumeragi.transaction_validator, state.wsv.clone()) + .sign(sumeragi.key_pair.clone()) + .expect("Failed to sign genesis block") + .commit(&state.current_topology) + .expect("Topology should not be validated for genesis"); - { - info!(block_hash = %block.hash(), "Publishing genesis block."); + info!(%addr, hash=%block.hash(), "Genesis block created"); - info!( - role = ?state.current_topology.role(&sumeragi.peer_id), - block_hash = %block.hash(), - "Created a block to commit.", - ); + let msg = MessagePacket::new( + ProofChain::default(), + BlockCreated::from(VersionedSignedBlock::from(block.clone())), + ); - sumeragi.send_events(&block); - let msg = MessagePacket::new(ProofChain::default(), BlockCreated::from(block.clone())); - sumeragi.broadcast_packet(msg, &state.current_topology); - // Omit signature verification during genesis round - commit_block(sumeragi, state, block.commit_unchecked()); - } + commit_block(sumeragi, state, (block, events)); + sumeragi.broadcast_packet(msg, &state.current_topology); } /// Type enumerating early return types to reduce cyclomatic diff --git a/core/src/sumeragi/message.rs b/core/src/sumeragi/message.rs index 4b33d8131ed..bb5c91d88ef 100644 --- a/core/src/sumeragi/message.rs +++ b/core/src/sumeragi/message.rs @@ -7,43 +7,20 @@ )] use iroha_crypto::{HashOf, SignaturesOf}; -use iroha_data_model::{block::VersionedCommittedBlock, prelude::*}; +use iroha_data_model::{ + block::{Block, BlockPayload}, + prelude::*, +}; +use iroha_genesis::AcceptedTransaction; use iroha_macro::*; use 
iroha_version::prelude::*; use parity_scale_codec::{Decode, Encode}; use super::view_change; -use crate::{ - block::{PendingBlock, Revalidate}, - tx::TransactionValidator, - VersionedAcceptedTransaction, WorldStateView, -}; +use crate::block::{CommittedBlock, ValidBlock}; declare_versioned_with_scale!(VersionedPacket 1..2, Debug, Clone, iroha_macro::FromVariant); -impl VersionedPacket { - /// Convert `&`[`Self`] to V1 reference - pub const fn as_v1(&self) -> &MessagePacket { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert `&mut` [`Self`] to V1 mutable reference - pub fn as_mut_v1(&mut self) -> &mut MessagePacket { - match self { - Self::V1(v1) => v1, - } - } - - /// Perform the conversion from [`Self`] to V1 - pub fn into_v1(self) -> MessagePacket { - match self { - Self::V1(v1) => v1, - } - } -} - /// Helper structure, wrapping messages and view change proofs. #[version_with_scale(n = 1, versioned = "VersionedPacket")] #[derive(Debug, Clone, Decode, Encode)] @@ -87,53 +64,29 @@ pub enum Message { #[non_exhaustive] pub struct BlockCreated { /// The corresponding block. - pub block: PendingBlock, + pub block: VersionedSignedBlock, } -impl From for BlockCreated { - fn from(block: PendingBlock) -> Self { +impl From for BlockCreated { + fn from(block: VersionedSignedBlock) -> Self { Self { block } } } -impl BlockCreated { - /// Extract block from block created message. - /// - /// # Errors - /// - When the block is invalid. - pub fn validate_and_extract_block( - self, - transaction_validator: &TransactionValidator, - wsv: WorldStateView, - latest_block: Option>, - block_height: u64, - ) -> Result { - self.block.revalidate::( - transaction_validator, - wsv, - latest_block, - block_height, - )?; - Ok(self.block) - } - /// Get hash of block. - pub fn hash(&self) -> HashOf { - self.block.hash() - } -} - /// `BlockSigned` message structure. #[derive(Debug, Clone, Decode, Encode)] #[non_exhaustive] pub struct BlockSigned { /// Hash of the block being signed. - pub hash: HashOf, + pub hash: HashOf, /// Set of signatures. - pub signatures: SignaturesOf, + pub signatures: SignaturesOf, } -impl From for BlockSigned { - fn from(block: PendingBlock) -> Self { +impl From for BlockSigned { + fn from(block: ValidBlock) -> Self { + let VersionedSignedBlock::V1(block) = block.into(); + Self { hash: block.hash(), signatures: block.signatures, @@ -146,16 +99,18 @@ impl From for BlockSigned { #[non_exhaustive] pub struct BlockCommitted { /// Hash of the block being signed. - pub hash: HashOf, + pub hash: HashOf, /// Set of signatures. - pub signatures: SignaturesOf, + pub signatures: SignaturesOf, } -impl From for BlockCommitted { - fn from(block: VersionedCommittedBlock) -> Self { +impl From for BlockCommitted { + fn from(block: CommittedBlock) -> Self { + let VersionedSignedBlock::V1(block) = block.into(); + Self { - hash: block.hash().transmute(), - signatures: block.as_v1().signatures.clone().transmute(), + hash: block.hash(), + signatures: block.signatures, } } } @@ -165,39 +120,7 @@ impl From for BlockCommitted { #[non_exhaustive] pub struct BlockSyncUpdate { /// The corresponding block. - block: VersionedCommittedBlock, -} - -impl From for BlockSyncUpdate { - fn from(block: VersionedCommittedBlock) -> Self { - Self { block } - } -} - -impl BlockSyncUpdate { - /// Extract block from block sync update message. - /// - /// # Errors - /// - When the block is invalid. 
- pub fn validate_and_extract_block( - self, - transaction_validator: &TransactionValidator, - wsv: WorldStateView, - latest_block: Option>, - block_height: u64, - ) -> Result { - self.block.revalidate::( - transaction_validator, - wsv, - latest_block, - block_height, - )?; - Ok(self.block) - } - /// Get hash of block. - pub fn hash(&self) -> HashOf { - self.block.hash() - } + pub block: VersionedSignedBlock, } /// Message for gossiping batches of transactions. @@ -210,11 +133,14 @@ pub struct TransactionGossip { impl TransactionGossip { #![allow(clippy::unused_async)] /// Constructor. - pub fn new(txs: Vec<VersionedAcceptedTransaction>) -> Self { + pub fn new(txs: Vec<AcceptedTransaction>) -> Self { Self { // Converting into non-accepted transaction because it's not possible // to guarantee that the sending peer checked transaction limits - txs: txs.into_iter().map(Into::into).collect(), + txs: txs + .into_iter() + .map(VersionedSignedTransaction::from) + .collect(), } } }
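`TransactionGossip::new` above deliberately downgrades `AcceptedTransaction` back to `VersionedSignedTransaction` before gossiping: acceptance (the limit checks) is a local judgement, and the receiving peer must re-run its own checks rather than trust the sender's. The same idea in miniature, with stand-in types rather than the real ones:

```rust
struct Signed(String); // what travels over the wire
struct Accepted(Signed); // evidence that *this* peer ran its checks

impl From<Accepted> for Signed {
    // Drop the local "accepted" status before sending.
    fn from(tx: Accepted) -> Self {
        tx.0
    }
}

fn gossip_payload(txs: Vec<Accepted>) -> Vec<Signed> {
    txs.into_iter().map(Signed::from).collect()
}
```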
diff --git a/core/src/sumeragi/mod.rs index fb6bbbdc223..978d5806a4c 100644 --- a/core/src/sumeragi/mod.rs +++ b/core/src/sumeragi/mod.rs @@ -1,6 +1,4 @@ //! Translates to Emperor. Consensus-related logic of Iroha. -//! -//! `Consensus` trait is now implemented only by `Sumeragi` for now. #![allow( clippy::arithmetic_side_effects, clippy::std_instead_of_core, clippy::std_instead_of_alloc )] @@ -19,27 +17,29 @@ use iroha_data_model::{block::*, prelude::*}; use iroha_genesis::GenesisNetwork; use iroha_logger::prelude::*; use iroha_telemetry::metrics::Metrics; -use network_topology::{Role, Topology}; - -use crate::handler::ThreadHandler; - -pub mod main_loop; -pub mod message; -pub mod network_topology; -pub mod view_change; - use main_loop::State; +use network_topology::{Role, Topology}; use parking_lot::{Mutex, MutexGuard}; use self::{ message::{Message, *}, - view_change::{Proof, ProofChain}, + view_change::ProofChain, }; use crate::{ - block::*, kura::Kura, prelude::*, queue::Queue, tx::TransactionValidator, EventsSender, - IrohaNetwork, NetworkMessage, + block::{CommittedBlock, ValidBlock}, + handler::ThreadHandler, + kura::Kura, + prelude::*, + queue::Queue, + tx::TransactionValidator, + EventsSender, IrohaNetwork, NetworkMessage, }; +pub mod main_loop; +pub mod message; +pub mod network_topology; +pub mod view_change; + /* The values in the following struct are not atomics because the code that operates on them assumes their values do not change during the course of @@ -132,8 +132,8 @@ impl Sumeragi { break; }; block_index += 1; - let block_txs_accepted = block.as_v1().transactions.len() as u64; - let block_txs_rejected = block.as_v1().rejected_transactions.len() as u64; + let block_txs_accepted = block.payload().transactions.len() as u64; + let block_txs_rejected = block.payload().rejected_transactions.len() as u64; self.metrics .txs @@ -220,7 +220,7 @@ impl Sumeragi { pub fn initialize_and_start_thread( sumeragi: Arc, genesis_network: Option, - block_hashes: &[HashOf], + block_hashes: &[HashOf], ) -> ThreadHandler { let wsv = sumeragi.wsv_mutex_access().clone(); @@ -230,22 +230,26 @@ impl Sumeragi { .zip(1u64..) { let block_height: u64 = i; - let block_ref = sumeragi.internal.kura.get_block_by_height(block_height).expect("Sumeragi could not load block that was reported as present. Please check that the block storage was not disconnected."); + let (block, _) = CommittedBlock::commit_without_validation(VersionedSignedBlock::clone( + &sumeragi.internal.kura.get_block_by_height(block_height).expect("Sumeragi could not load block that was reported as present. Please check that the block storage was not disconnected."), + )); assert_eq!( - block_ref.hash(), + block.hash(), *block_hash, "Kura init correctly reported the block hash." ); - wsv.apply(&block_ref) + wsv.apply(&block) .expect("Failed to apply block to wsv in init."); } let finalized_wsv = wsv.clone(); if !block_hashes.is_empty() { - let block_ref = sumeragi.internal.kura.get_block_by_height(block_hashes.len() as u64).expect("Sumeragi could not load block that was reported as present. Please check that the block storage was not disconnected."); + let (block, _) = CommittedBlock::commit_without_validation(VersionedSignedBlock::clone( + &sumeragi.internal.kura.get_block_by_height(block_hashes.len() as u64).expect("Sumeragi could not load block that was reported as present. Please check that the block storage was not disconnected."), + )); - wsv.apply(&block_ref) + wsv.apply(&block) .expect("Failed to apply block to wsv in init."); } *sumeragi.wsv_mutex_access() = wsv.clone(); @@ -258,13 +262,18 @@ let previous_block_hash = wsv.previous_block_hash(); let current_topology = if latest_block_height == 0 { - assert!(!sumeragi.config.trusted_peers.peers.is_empty()); - Topology::new(sumeragi.config.trusted_peers.peers.clone()) + Topology::new( + sumeragi + .config + .trusted_peers + .peers + .iter() + .cloned() + .collect(), + ) } else { let block_ref = sumeragi.internal.kura.get_block_by_height(latest_block_height).expect("Sumeragi could not load block that was reported as present. Please check that the block storage was not disconnected."); - let mut topology = Topology { - sorted_peers: block_ref.header().committed_with_topology.clone(), - }; + let mut topology = Topology::new(block_ref.header().commit_topology.clone()); topology.rotate_set_a(); topology }; @@ -296,7 +305,9 @@ .expect("Sumeragi thread spawn should not fail."); let shutdown = move || { - let _result = shutdown_sender.send(()); + if let Err(error) = shutdown_sender.send(()) { + iroha_logger::error!(?error, "Failed to send shutdown signal to sumeragi. Thread might already be shut down."); + } }; ThreadHandler::new(Box::new(shutdown), thread_handle) @@ -328,13 +339,13 @@ pub struct VotingBlock { /// At what time has this peer voted for this block pub voted_at: Instant, /// Valid Block - pub block: PendingBlock, + pub block: ValidBlock, } impl VotingBlock { /// Construct new `VotingBlock` with current time. #[allow(clippy::expect_used)] - pub fn new(block: PendingBlock) -> VotingBlock { + pub fn new(block: ValidBlock) -> VotingBlock { VotingBlock { block, voted_at: Instant::now(), } } /// Construct new `VotingBlock` with the given time. #[allow(clippy::expect_used)] - pub(crate) fn voted_at(block: PendingBlock, voted_at: Instant) -> VotingBlock { + pub(crate) fn voted_at(block: ValidBlock, voted_at: Instant) -> VotingBlock { VotingBlock { block, voted_at } } } diff --git a/core/src/sumeragi/network_topology.rs index decd07c88d0..c78784a0eb2 100644 --- a/core/src/sumeragi/network_topology.rs +++ b/core/src/sumeragi/network_topology.rs @@ -26,14 +26,21 @@ use iroha_logger::trace; #[derive(Debug, Clone, PartialEq, Eq)] pub struct Topology { /// Current order of peers. The roles of peers are defined based on this order. - pub(crate) sorted_peers: Vec<PeerId>, + pub(crate) ordered_peers: Vec<PeerId>, }
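The fault tolerance behind this ordered-peers topology comes from `max_faults` just below: with `n` peers the network tolerates `f = (n - 1) / 3` Byzantine peers, using integer division. A few worked values:

```rust
fn max_faults(n_peers: usize) -> usize {
    n_peers.saturating_sub(1) / 3
}

fn main() {
    // (peers, tolerated faults): meaningful fault tolerance starts at 4 peers.
    for (n, f) in [(1, 0), (3, 0), (4, 1), (7, 2), (10, 3)] {
        assert_eq!(max_faults(n), f);
    }
}
```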
impl Topology { /// Create a new topology. - pub fn new(peers: impl IntoIterator<Item = PeerId>) -> Self { + /// + /// # Panics + /// + /// If the given list of peers is empty. + pub fn new(peers: Vec<PeerId>) -> Self { + // TODO: This assertion should be applied in tests as well + #[cfg(not(test))] + assert!(!peers.is_empty(), "Empty topology"); Topology { - sorted_peers: peers.into_iter().collect(), + ordered_peers: peers, } } /// Is consensus required, i.e. is there more than one peer. @@ -42,7 +49,7 @@ impl Topology { } /// How many faulty peers can this topology tolerate. pub fn max_faults(&self) -> usize { - (self.sorted_peers.len().saturating_sub(1)) / 3 + (self.ordered_peers.len().saturating_sub(1)) / 3 } /// The required amount of votes to commit a block with this topology. pub fn min_votes_for_commit(&self) -> usize { @@ -58,39 +65,39 @@ impl Topology { let mut public_keys = Vec::new(); for role in roles { let role_public_keys = match (role, self.max_faults()) { - (Role::Leader, _) => vec![self.sorted_peers[0].public_key.clone()], + (Role::Leader, _) => vec![self.ordered_peers[0].public_key.clone()], (Role::ValidatingPeer, 0) => { - if self.sorted_peers.len() > 2 { - vec![self.sorted_peers[1].public_key.clone()] + if self.ordered_peers.len() > 2 { + vec![self.ordered_peers[1].public_key.clone()] } else { vec![] } } (Role::ProxyTail, 0) => { - if self.sorted_peers.len() == 2 { - vec![self.sorted_peers[1].public_key.clone()] - } else if self.sorted_peers.len() > 2 { - vec![self.sorted_peers[2].public_key.clone()] + if self.ordered_peers.len() == 2 { + vec![self.ordered_peers[1].public_key.clone()] + } else if self.ordered_peers.len() > 2 { + vec![self.ordered_peers[2].public_key.clone()] } else { vec![] } } (Role::ObservingPeer, 0) => { - if self.sorted_peers.len() == 4 { - vec![self.sorted_peers[3].public_key.clone()] + if self.ordered_peers.len() == 4 { + vec![self.ordered_peers[3].public_key.clone()] } else { vec![] } } - (Role::ValidatingPeer, _) => self.sorted_peers + (Role::ValidatingPeer, _) => self.ordered_peers [1..(self.min_votes_for_commit() - 1)] .iter() .map(|peer_id| peer_id.public_key.clone()) .collect(), - (Role::ProxyTail, _) => vec![self.sorted_peers[self.min_votes_for_commit() - 1] + (Role::ProxyTail, _) => vec![self.ordered_peers[self.min_votes_for_commit() - 1] .public_key .clone()], - (Role::ObservingPeer, _) => self.sorted_peers[self.min_votes_for_commit()..] + (Role::ObservingPeer, _) => self.ordered_peers[self.min_votes_for_commit()..] .iter() .map(|peer_id| peer_id.public_key.clone()) .collect(), @@ -108,7 +115,7 @@ // This lint is a bad suggestion. #[allow(clippy::option_if_let_else)] pub fn role(&self, peer_id: &PeerId) -> Role { - match self.sorted_peers.iter().position(|p| p == peer_id) { + match self.ordered_peers.iter().position(|p| p == peer_id) { Some(index) if index == 0 => Role::Leader, Some(index) if index < self.min_votes_for_commit() => Role::ValidatingPeer, Some(index) if index == self.min_votes_for_commit() => Role::ProxyTail, @@ -121,39 +128,40 @@ } /// Get leader's peer id. pub fn leader(&self) -> &PeerId { - &self.sorted_peers[0] + &self.ordered_peers[0] } /// Get proxy tail's peer id. pub fn proxy_tail(&self) -> &PeerId { - &self.sorted_peers[self.min_votes_for_commit()] + &self.ordered_peers[self.min_votes_for_commit()] } - /// Add or remove peers from the topology.
+ /// Add or remove peers from the topology preserving the order pub fn update_peer_list(&mut self, new_peer_list: &[PeerId]) { let mut i = 0; - while i < self.sorted_peers.len() { - if new_peer_list.iter().any(|p| p == &self.sorted_peers[i]) { + while i < self.ordered_peers.len() { + if new_peer_list.contains(&self.ordered_peers[i]) { i += 1; } else { - self.sorted_peers.remove(i); + let p = self.ordered_peers.remove(i); + iroha_logger::debug!(%p, "Peer removed"); } } - self.sorted_peers.extend( + self.ordered_peers.extend( new_peer_list .iter() - .filter(|p| !self.sorted_peers.contains(p)) + .filter(|p| !self.ordered_peers.contains(p)) .cloned() .collect::>(), ); } /// Rotate peers after each failed attempt to create a block. pub fn rotate_all(&mut self) { - self.sorted_peers.rotate_left(1); + self.ordered_peers.rotate_left(1); } /// Re-arrange the set of peers after each successful block commit. pub fn rotate_set_a(&mut self) { - let top = self.sorted_peers.remove(0); - self.sorted_peers.insert( - self.min_votes_for_commit().min(self.sorted_peers.len()), + let top = self.ordered_peers.remove(0); + self.ordered_peers.insert( + self.min_votes_for_commit().min(self.ordered_peers.len()), top, ); } @@ -161,14 +169,19 @@ impl Topology { pub fn lift_up_peers(&mut self, to_lift_up: &[PublicKey]) { let mut observing = Vec::new(); let mut i = 0; - while i < self.sorted_peers.len() { - if to_lift_up.contains(&self.sorted_peers[i].public_key) { + while i < self.ordered_peers.len() { + if to_lift_up.contains(&self.ordered_peers[i].public_key) { i += 1; } else { - observing.insert(0, self.sorted_peers.remove(i)); // This has to be insert(0) and not push in order to preserve order. + // Has to be insert(0) and not push to preserve order. + observing.insert(0, self.ordered_peers.remove(i)); } } - self.sorted_peers.extend(observing); + + iroha_logger::debug!("Voting peers: {:#?}", self.ordered_peers); + iroha_logger::debug!("Observing peers: {observing:#?}"); + + self.ordered_peers.extend(observing); } } diff --git a/core/src/sumeragi/view_change.rs b/core/src/sumeragi/view_change.rs index a4f62c8de34..fb5bb308880 100644 --- a/core/src/sumeragi/view_change.rs +++ b/core/src/sumeragi/view_change.rs @@ -6,12 +6,12 @@ clippy::std_instead_of_alloc, single_use_lifetimes )] -use std::collections::HashSet; +use std::collections::{BTreeSet, HashSet}; use derive_more::{Deref, DerefMut}; use eyre::Result; -use iroha_crypto::{Hash, HashOf, KeyPair, PublicKey, Signature}; -use iroha_data_model::{block::VersionedCommittedBlock, prelude::PeerId}; +use iroha_crypto::{HashOf, KeyPair, PublicKey, SignatureOf}; +use iroha_data_model::{block::BlockPayload, prelude::PeerId}; use parity_scale_codec::{Decode, Encode}; use thiserror::Error; @@ -25,28 +25,46 @@ pub enum Error { ViewChangeNotFound, } -/// The proof of a view change. It needs to be signed by f+1 peers for proof to be valid and view change to happen. #[derive(Debug, Clone, Decode, Encode)] -pub struct Proof { +struct ProofPayload { /// Hash of the latest committed block. - pub latest_block_hash: Option>, + latest_block_hash: Option>, /// Within a round, what is the index of the view change this proof is trying to prove. - pub view_change_index: u64, + view_change_index: u64, +} + +/// The proof of a view change. It needs to be signed by f+1 peers for proof to be valid and view change to happen. +#[derive(Debug, Clone, Decode, Encode)] +pub struct SignedProof { + payload: ProofPayload, /// Collection of signatures from the different peers. 
- pub signatures: Vec, + signatures: BTreeSet>, } -impl Proof { - /// Produce a signature payload in the form of a [`Hash`] - pub fn signature_payload(&self) -> Hash { - let mut buf = [0_u8; Hash::LENGTH + std::mem::size_of::()]; - if let Some(hash) = self.latest_block_hash { - buf[..Hash::LENGTH].copy_from_slice(hash.as_ref()); - } - buf[Hash::LENGTH..].copy_from_slice(&self.view_change_index.to_le_bytes()); - // Now we hash the buffer to produce a payload that is completely - // different between view change proofs in the same sumeragi round. - Hash::new(buf) +/// Builder for proofs +#[repr(transparent)] +pub struct ProofBuilder(SignedProof); + +impl ProofBuilder { + /// Constructor from index. + pub fn new(view_change_index: u64) -> Self { + let proof = SignedProof { + payload: ProofPayload { + latest_block_hash: None, + view_change_index, + }, + signatures: BTreeSet::new(), + }; + + Self(proof) + } + + /// Add latest block hash to the proof. This function can be skipped + /// only if the genesis block has not yet been committed + #[must_use] + pub fn with_latest_block_hash(mut self, latest_block_hash: HashOf) -> Self { + self.0.payload.latest_block_hash = Some(latest_block_hash); + self } /// Sign this message with the peer's public and private key. @@ -54,35 +72,33 @@ impl Proof { /// /// # Errors /// Can fail during creation of signature - pub fn sign(&mut self, key_pair: KeyPair) -> Result<()> { - let signature = Signature::new(key_pair, self.signature_payload().as_ref())?; - self.signatures.push(signature); - Ok(()) + pub fn sign(mut self, key_pair: KeyPair) -> Result { + let signature = SignatureOf::from_hash(key_pair, &HashOf::new(&self.0.payload))?; + self.0.signatures.insert(signature); + Ok(self.0) } +} +impl SignedProof { /// Verify the signatures of `other` and add them to this proof. - pub fn merge_signatures(&mut self, other: Vec) { - let signature_payload = self.signature_payload(); + fn merge_signatures(&mut self, other: BTreeSet>) { for signature in other { - if signature.verify(signature_payload.as_ref()).is_ok() - && !self.signatures.contains(&signature) - { - self.signatures.push(signature); + if signature.verify(&self.payload).is_ok() && !self.signatures.contains(&signature) { + self.signatures.insert(signature); } } } /// Verify if the proof is valid, given the peers in `topology`. - pub fn verify(&self, peers: &[PeerId], max_faults: usize) -> bool { + fn verify(&self, peers: &[PeerId], max_faults: usize) -> bool { let peer_public_keys: HashSet<&PublicKey> = peers.iter().map(|peer_id| &peer_id.public_key).collect(); - let signature_payload = self.signature_payload(); let valid_count = self .signatures .iter() .filter(|signature| { - signature.verify(signature_payload.as_ref()).is_ok() + signature.verify(&self.payload).is_ok() && peer_public_keys.contains(signature.public_key()) }) .count(); @@ -97,7 +113,7 @@ impl Proof { /// Structure representing sequence of view change proofs. #[derive(Debug, Clone, Encode, Decode, Deref, DerefMut, Default)] -pub struct ProofChain(Vec); +pub struct ProofChain(Vec); impl ProofChain { /// Verify the view change proof chain. 
@@ -105,31 +121,32 @@ impl ProofChain { &self, peers: &[PeerId], max_faults: usize, - latest_block: Option>, + latest_block: Option>, ) -> usize { self.iter() .enumerate() .take_while(|(i, proof)| { - proof.latest_block_hash == latest_block - && proof.view_change_index == (*i as u64) + proof.payload.latest_block_hash == latest_block + && proof.payload.view_change_index == (*i as u64) && proof.verify(peers, max_faults) }) .count() } /// Remove invalid proofs from the chain. - pub fn prune(&mut self, latest_block: Option>) { + pub fn prune(&mut self, latest_block: Option>) { let valid_count = self .iter() .enumerate() .take_while(|(i, proof)| { - proof.latest_block_hash == latest_block && proof.view_change_index == (*i as u64) + proof.payload.latest_block_hash == latest_block + && proof.payload.view_change_index == (*i as u64) }) .count(); self.truncate(valid_count); } - /// Attempt to insert a view chain proof into this `ProofChain`. + /// Attempt to insert a view change proof into [`Self`]. /// /// # Errors /// - If proof latest block hash doesn't match peer latest block hash @@ -139,14 +156,14 @@ impl ProofChain { &mut self, peers: &[PeerId], max_faults: usize, - latest_block: Option>, - new_proof: Proof, + latest_block: Option>, + new_proof: SignedProof, ) -> Result<(), Error> { - if new_proof.latest_block_hash != latest_block { + if new_proof.payload.latest_block_hash != latest_block { return Err(Error::BlockHashMismatch); } let next_unfinished_view_change = self.verify_with_state(peers, max_faults, latest_block); - if new_proof.view_change_index != (next_unfinished_view_change as u64) { + if new_proof.payload.view_change_index != (next_unfinished_view_change as u64) { return Err(Error::ViewChangeNotFound); // We only care about the current view change that may or may not happen. } @@ -169,7 +186,7 @@ impl ProofChain { mut other: Self, peers: &[PeerId], max_faults: usize, - latest_block_hash: Option>, + latest_block_hash: Option>, ) -> Result<(), Error> { // Prune to exclude invalid proofs other.prune(latest_block_hash); diff --git a/core/src/tx.rs b/core/src/tx.rs index 96e9d86b375..b9dc3fb2b14 100644 --- a/core/src/tx.rs +++ b/core/src/tx.rs @@ -18,6 +18,11 @@ use std::str::FromStr; use eyre::Result; pub use iroha_data_model::prelude::*; +use iroha_data_model::{ + transaction::{error::TransactionRejectionReason, SignedTransaction}, + validator::NeedsValidationBox, +}; +use iroha_genesis::AcceptedTransaction; use iroha_logger::debug; use iroha_primitives::must_use::MustUse; @@ -48,22 +53,17 @@ impl TransactionValidator { /// /// # Errors /// Fails if validation of instruction fails (e.g. permissions mismatch). 
- pub fn validate( + pub fn validate<const IS_GENESIS: bool>( &self, tx: AcceptedTransaction, - is_genesis: bool, wsv: &WorldStateView, - ) -> Result<VersionedValidTransaction, VersionedRejectedTransaction> { - if let Err(rejection_reason) = self.validate_internal(tx.clone(), is_genesis, wsv) { - return Err(RejectedTransaction { - payload: tx.payload, - signatures: tx.signatures, - rejection_reason, - } - .into()); + ) -> Result<VersionedSignedTransaction, (VersionedSignedTransaction, TransactionRejectionReason)> + { + if let Err(rejection_reason) = self.validate_internal::<IS_GENESIS>(tx.clone(), wsv) { + return Err((tx.into(), rejection_reason)); } - Ok(ValidTransaction { + Ok(SignedTransaction { payload: tx.payload, signatures: tx.signatures, } @@ -77,27 +77,24 @@ impl TransactionValidator { /// /// # Errors /// Fails if validation of any transaction fails - // - // TODO (#2742): Accept `txs` by reference, not by value - pub fn validate_every( + pub fn validate_every<const IS_GENESIS: bool>( &self, - txs: impl IntoIterator<Item = VersionedAcceptedTransaction>, + txs: impl IntoIterator<Item = AcceptedTransaction>, wsv: &WorldStateView, ) -> Result<(), TransactionRejectionReason> { for tx in txs { - self.validate_internal(tx.into_v1(), true, wsv)?; + self.validate_internal::<IS_GENESIS>(tx, wsv)?; } Ok(()) } - fn validate_internal( + fn validate_internal<const IS_GENESIS: bool>( &self, tx: AcceptedTransaction, - is_genesis: bool, wsv: &WorldStateView, ) -> Result<(), TransactionRejectionReason> { let account_id = &tx.payload.account_id; - Self::validate_signatures(&tx, is_genesis, wsv)?; + Self::validate_signatures::<IS_GENESIS>(&tx, wsv)?; if !wsv .domain(&account_id.domain_id) @@ -114,7 +111,7 @@ })); } - if !is_genesis { + if !IS_GENESIS { debug!("Validating transaction: {:?}", tx); Self::validate_with_runtime_validator(account_id, tx.clone(), wsv)?; } @@ -122,7 +119,7 @@ match tx.payload.instructions { Executable::Instructions(instructions) => { // Non-genesis instructions have been executed in `validate_with_runtime_validator()`.
- if is_genesis { + if IS_GENESIS { for instruction in instructions { instruction .clone() @@ -138,17 +135,16 @@ Executable::Wasm(bytes) => self.validate_wasm(account_id.clone(), wsv, bytes)?, } - (!is_genesis).then(|| debug!("Validation successful")); + (!IS_GENESIS).then(|| debug!("Validation successful")); Ok(()) } /// Validate signatures for the given transaction - fn validate_signatures( + fn validate_signatures<const IS_GENESIS: bool>( tx: &AcceptedTransaction, - is_genesis: bool, wsv: &WorldStateView, ) -> Result<(), TransactionRejectionReason> { - if !is_genesis && tx.payload().account_id == AccountId::genesis() { + if !IS_GENESIS && tx.payload.account_id == *iroha_genesis::GENESIS_ACCOUNT_ID { return Err(TransactionRejectionReason::UnexpectedGenesisAccountSignature); } @@ -205,7 +201,6 @@ payload, signatures, } = tx; - let signatures = signatures.into_iter().collect(); let signed_tx = SignedTransaction { payload, @@ -213,7 +208,7 @@ signatures, }; wsv.validator_view() - .validate(wsv, authority, signed_tx) + .validate(wsv, authority, NeedsValidationBox::from(signed_tx)) .map_err(|err| { TransactionRejectionReason::NotPermitted(NotPermittedFail { reason: err.to_string(), @@ -236,10 +231,10 @@ pub trait CheckSignatureCondition: Sized { impl CheckSignatureCondition for AcceptedTransaction { fn check_signature_condition(&self, wsv: &WorldStateView) -> Result<MustUse<bool>> { - let account_id = &self.payload.account_id; + let account_id = &self.payload().account_id; let signatories = self - .signatures + .signatures() .iter() .map(|signature| signature.public_key()) .cloned(); @@ -253,12 +248,6 @@ } } -impl CheckSignatureCondition for VersionedAcceptedTransaction { - fn check_signature_condition(&self, wsv: &WorldStateView) -> Result<MustUse<bool>> { - self.as_v1().check_signature_condition(wsv) - } -} -
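Validation here is parameterized by a `const IS_GENESIS: bool` generic rather than the old runtime flag (consistent with the `if !IS_GENESIS` checks above), so each call site selects the genesis or non-genesis path at compile time. A self-contained illustration of the pattern, with string stand-ins for the real transaction types:

```rust
fn validate<const IS_GENESIS: bool>(account: &str) -> Result<(), String> {
    // Outside the genesis round, nothing may sign as the genesis account.
    if !IS_GENESIS && account == "genesis" {
        return Err("unexpected genesis account signature".to_owned());
    }
    Ok(())
}

fn main() {
    assert!(validate::<false>("alice").is_ok());
    assert!(validate::<false>("genesis").is_err());
    assert!(validate::<true>("genesis").is_ok()); // genesis round skips the check
}
```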
/// Returns a prebuilt expression that when executed /// returns if the needed signatures are gathered. fn check_signature_condition( @@ -287,21 +276,73 @@ impl IsInBlockchain for VersionedSignedTransaction { wsv.has_transaction(&self.hash()) } } -impl IsInBlockchain for VersionedAcceptedTransaction { +impl IsInBlockchain for AcceptedTransaction { #[inline] fn is_in_blockchain(&self, wsv: &WorldStateView) -> bool { wsv.has_transaction(&self.hash()) } } -impl IsInBlockchain for VersionedValidTransaction { - #[inline] - fn is_in_blockchain(&self, wsv: &WorldStateView) -> bool { - wsv.has_transaction(&self.hash()) + +#[cfg(test)] +mod tests { + #![allow(clippy::pedantic)] + + use iroha_config::sumeragi::DEFAULT_MAX_INSTRUCTION_NUMBER; + use iroha_data_model::prelude::*; + + use super::*; + + #[test] + fn transaction_not_accepted_max_instruction_number() { + let key_pair = iroha_crypto::KeyPair::generate().expect("Failed to generate key pair."); + let inst: InstructionBox = FailBox { + message: "Will fail".to_owned(), + } + .into(); + let tx = TransactionBuilder::new( + "root@global".parse().expect("Valid"), + vec![inst; DEFAULT_MAX_INSTRUCTION_NUMBER as usize + 1], + 1000, + ) + .sign(key_pair) + .expect("Valid"); + let tx_limits = TransactionLimits { + max_instruction_number: 4096, + max_wasm_size_bytes: 0, + }; + let result = AcceptedTransaction::accept::<false>(tx, &tx_limits); + assert!(result.is_err()); + + let err = result.unwrap_err(); + assert_eq!( + err.1.to_string(), + format!( + "Too many instructions in payload, max number is {}, but got {}", + tx_limits.max_instruction_number, + DEFAULT_MAX_INSTRUCTION_NUMBER + 1 + ) + ); } -} -impl IsInBlockchain for VersionedRejectedTransaction { - #[inline] - fn is_in_blockchain(&self, wsv: &WorldStateView) -> bool { - wsv.has_transaction(&self.hash()) + + #[test] + fn genesis_transaction_ignore_limits() { + let key_pair = iroha_crypto::KeyPair::generate().expect("Failed to generate key pair."); + let inst: InstructionBox = FailBox { + message: "Will fail".to_owned(), + } + .into(); + let tx = TransactionBuilder::new( + "root@global".parse().expect("Valid"), + vec![inst; DEFAULT_MAX_INSTRUCTION_NUMBER as usize + 1], + 1000, + ) + .sign(key_pair) + .expect("Valid"); + let tx_limits = TransactionLimits { + max_instruction_number: 4096, + max_wasm_size_bytes: 0, + }; + + assert!(AcceptedTransaction::accept::<true>(tx, &tx_limits).is_ok()); } } diff --git a/core/src/validator.rs index d667afc32ea..c7fd1c8b6bc 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -71,9 +71,9 @@ impl Validator { &self, wsv: &WorldStateView, authority: &::Id, - operation: impl Into, + operation: data_model_validator::NeedsValidationBox, ) -> Result<()> { - let operation = operation.into(); + let operation = operation; let runtime = wasm::RuntimeBuilder::new() .with_engine(self.engine.clone()) // Cloning engine is cheap, see [`wasmtime::Engine`] docs @@ -140,9 +140,9 @@ impl MockValidator { &self, wsv: &WorldStateView, authority: &::Id, - operation: impl Into, + operation: data_model_validator::NeedsValidationBox, ) -> Result<()> { - match operation.into() { + match operation { NeedsValidationBox::Instruction(isi) => { Self::execute_instruction(wsv, authority.clone(), isi) } diff --git a/core/src/wsv.rs index c59915c6b7c..17175b52183 100644 --- a/core/src/wsv.rs +++ b/core/src/wsv.rs @@ -20,7 +20,7 @@ use iroha_config::{ }; use iroha_crypto::HashOf; use iroha_data_model::{ - block::{CommittedBlock, VersionedCommittedBlock}, + block::{Block, BlockPayload, VersionedSignedBlock}, isi::error::{InstructionExecutionFailure as Error,
MathError}, prelude::*, query::error::{FindError, QueryExecutionFailure}, @@ -35,8 +35,8 @@ use crate::validator::MockValidator as Validator; #[cfg(not(test))] use crate::validator::Validator; use crate::{ + block::CommittedBlock, kura::Kura, - prelude::*, smartcontracts::{ triggers::{ self, @@ -121,9 +121,9 @@ pub struct WorldStateView { /// Configuration of World State View. pub config: Configuration, /// Blockchain. - pub block_hashes: std::cell::RefCell>>, + pub block_hashes: std::cell::RefCell>>, /// Hashes of transactions - pub transactions: DashSet>, + pub transactions: DashSet>, /// Buffer containing events generated during `WorldStateView::apply`. Renewed on every block commit. pub events_buffer: std::cell::RefCell>, /// Accumulated amount of any asset that has been transacted. @@ -318,10 +318,9 @@ impl WorldStateView { /// - If trigger execution fails /// - If timestamp conversion to `u64` fails #[iroha_logger::log(skip_all, fields(block_height))] - pub fn apply(&self, block: &VersionedCommittedBlock) -> Result<()> { + pub fn apply(&self, block: &CommittedBlock) -> Result<()> { let hash = block.hash(); - let block = block.as_v1(); - iroha_logger::prelude::Span::current().record("block_height", block.header.height); + iroha_logger::prelude::Span::current().record("block_height", block.header().height); trace!("Applying block"); let time_event = self.create_time_event(block)?; self.events_buffer @@ -354,27 +353,29 @@ impl WorldStateView { /// Get a reference to the latest block. Returns none if genesis is not committed. #[inline] - pub fn latest_block_ref(&self) -> Option> { + pub fn latest_block_ref(&self) -> Option> { self.kura .get_block_by_height(self.block_hashes.borrow().len() as u64) } /// Create time event using previous and current blocks fn create_time_event(&self, block: &CommittedBlock) -> Result { + let block_header = block.header(); + let prev_interval = self .latest_block_ref() .map(|latest_block| { let header = latest_block.header(); - header.timestamp.try_into().map(|since| TimeInterval { + header.timestamp_ms.try_into().map(|since| TimeInterval { since: Duration::from_millis(since), - length: Duration::from_millis(header.consensus_estimation), + length: Duration::from_millis(header.consensus_estimation_ms), }) }) .transpose()?; let interval = TimeInterval { - since: Duration::from_millis(block.header.timestamp.try_into()?), - length: Duration::from_millis(block.header.consensus_estimation), + since: Duration::from_millis(block_header.timestamp_ms.try_into()?), + length: Duration::from_millis(block_header.consensus_estimation_ms), }; Ok(TimeEvent { @@ -389,16 +390,15 @@ impl WorldStateView { /// # Errors /// Fails if transaction instruction execution fails fn execute_transactions(&self, block: &CommittedBlock) -> Result<()> { + let payload = block.payload(); + // TODO: Should this block panic instead? - for tx in &block.transactions { - self.process_executable( - &tx.as_v1().payload.instructions, - tx.payload().account_id.clone(), - )?; + for tx in &payload.transactions { + self.process_executable(&tx.payload().instructions, tx.payload().account_id.clone())?; self.transactions.insert(tx.hash()); } - for tx in &block.rejected_transactions { - self.transactions.insert(tx.hash()); + for tx in &payload.rejected_transactions { + self.transactions.insert(tx.0.hash()); } Ok(()) @@ -429,7 +429,9 @@ impl WorldStateView { /// /// # Errors /// - There is no account with such name. 
- #[allow(clippy::missing_panics_doc)] + /// + /// # Panics + /// - if [`Account::add_asset`] fails. pub fn asset_or_insert( &self, id: &::Id, @@ -458,7 +460,7 @@ impl WorldStateView { #[allow(clippy::expect_used)] pub fn all_blocks_by_value( &self, - ) -> impl DoubleEndedIterator + '_ { + ) -> impl DoubleEndedIterator + '_ { let block_count = self.block_hashes.borrow().len() as u64; (1..=block_count) .map(|height| { @@ -466,14 +468,14 @@ impl WorldStateView { .get_block_by_height(height) .expect("Failed to load block.") }) - .map(|block| VersionedCommittedBlock::clone(&block)) + .map(|block| VersionedSignedBlock::clone(&block)) } /// Return a vector of blockchain blocks after the block with the given `hash` pub fn block_hashes_after_hash( &self, - hash: Option>, - ) -> Vec> { + hash: Option>, + ) -> Vec> { hash.map_or_else( || self.block_hashes.borrow().clone(), |block_hash| { @@ -534,7 +536,7 @@ impl WorldStateView { } /// Return an iterator over blockchain block hashes starting with the block of the given `height` - pub fn block_hashes_from_height(&self, height: usize) -> Vec> { + pub fn block_hashes_from_height(&self, height: usize) -> Vec> { self.block_hashes .borrow() .iter() @@ -667,7 +669,7 @@ impl WorldStateView { let opt = self .kura .get_block_by_height(1) - .map(|genesis_block| genesis_block.as_v1().header.timestamp); + .map(|genesis_block| genesis_block.header().timestamp_ms); if opt.is_none() { error!("Failed to get genesis block from Kura."); @@ -678,7 +680,7 @@ impl WorldStateView { /// Check if this [`VersionedSignedTransaction`] is already committed or rejected. #[inline] - pub fn has_transaction(&self, hash: &HashOf) -> bool { + pub fn has_transaction(&self, hash: &HashOf) -> bool { self.transactions.contains(hash) } @@ -689,7 +691,7 @@ impl WorldStateView { } /// Return the hash of the latest block - pub fn latest_block_hash(&self) -> Option> { + pub fn latest_block_hash(&self) -> Option> { self.block_hashes.borrow().iter().nth_back(0).copied() } @@ -701,7 +703,7 @@ impl WorldStateView { } /// Return the hash of the block one before the latest block - pub fn previous_block_hash(&self) -> Option> { + pub fn previous_block_hash(&self) -> Option> { self.block_hashes.borrow().iter().nth_back(1).copied() } @@ -963,18 +965,19 @@ impl WorldStateView { let mut txs = self .all_blocks_by_value() .flat_map(|block| { - let block = block.as_v1(); - block + let payload = block.payload(); + + payload .rejected_transactions .iter() .cloned() .map(Box::new) .map(|versioned_rejected_tx| TransactionQueryResult { tx_value: TransactionValue::RejectedTransaction(versioned_rejected_tx), - block_hash: Hash::from(block.hash()), + block_hash: block.hash(), }) .chain( - block + payload .transactions .iter() .cloned() @@ -982,7 +985,7 @@ impl WorldStateView { .map(Box::new) .map(|versioned_tx| TransactionQueryResult { tx_value: TransactionValue::Transaction(versioned_tx), - block_hash: Hash::from(block.hash()), + block_hash: block.hash(), }), ) .collect::>() @@ -995,25 +998,33 @@ impl WorldStateView { /// Find a [`VersionedSignedTransaction`] by hash. 
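Throughout these query hunks, rejected transactions are stored as `(transaction, rejection reason)` pairs, which is why the lookups below reach through `.0` before hashing. A toy sketch of that access pattern; `Tx` and `Reason` are placeholders for `VersionedSignedTransaction` and `TransactionRejectionReason`:

```rust
// Placeholder types: the real pairs hold a signed transaction and the
// reason consensus rejected it, and the hashes are cryptographic.
struct Tx(u64);
type Reason = String;

impl Tx {
    fn hash(&self) -> u64 {
        self.0
    }
}

// Rejections are `(Tx, Reason)` pairs, so the transaction is `pair.0`.
fn find_rejected(rejected: &[(Tx, Reason)], hash: u64) -> Option<&Tx> {
    rejected.iter().find(|pair| pair.0.hash() == hash).map(|pair| &pair.0)
}
```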
pub fn transaction_value_by_hash( &self, - hash: &HashOf, - ) -> Option { + hash: &HashOf, + ) -> Option { self.all_blocks_by_value().find_map(|b| { - b.as_v1() + let payload = b.payload(); + + payload .rejected_transactions .iter() - .find(|e| e.hash() == *hash) + .find(|e| e.0.hash() == *hash) .cloned() .map(Box::new) - .map(TransactionValue::RejectedTransaction) + .map(|versioned_rejected_tx| TransactionQueryResult { + tx_value: TransactionValue::RejectedTransaction(versioned_rejected_tx), + block_hash: b.hash(), + }) .or_else(|| { - b.as_v1() + payload .transactions .iter() .find(|e| e.hash() == *hash) .cloned() .map(VersionedSignedTransaction::from) .map(Box::new) - .map(TransactionValue::Transaction) + .map(|versioned_tx| TransactionQueryResult { + tx_value: TransactionValue::Transaction(versioned_tx), + block_hash: b.hash(), + }) }) }) } @@ -1022,27 +1033,34 @@ impl WorldStateView { pub fn transactions_values_by_account_id( &self, account_id: &AccountId, - ) -> Vec { + ) -> Vec { let mut transactions = self .all_blocks_by_value() - .flat_map(|block_entry| { - let block = block_entry.as_v1(); - block + .flat_map(|b| { + let payload = b.payload(); + + payload .rejected_transactions .iter() - .filter(|transaction| &transaction.payload().account_id == account_id) + .filter(|transaction| &transaction.0.payload().account_id == account_id) .cloned() .map(Box::new) - .map(TransactionValue::RejectedTransaction) + .map(|versioned_rejected_tx| TransactionQueryResult { + tx_value: TransactionValue::RejectedTransaction(versioned_rejected_tx), + block_hash: b.hash(), + }) .chain( - block + payload .transactions .iter() .filter(|transaction| &transaction.payload().account_id == account_id) .cloned() .map(VersionedSignedTransaction::from) .map(Box::new) - .map(TransactionValue::Transaction), + .map(|versioned_tx| TransactionQueryResult { + tx_value: TransactionValue::Transaction(versioned_tx), + block_hash: b.hash(), + }), ) .collect::>() }) @@ -1145,21 +1163,24 @@ mod tests { #![allow(clippy::restriction)] use super::*; - use crate::block::PendingBlock; + use crate::block::ValidBlock; #[test] fn get_block_hashes_after_hash() { const BLOCK_CNT: usize = 10; - let mut block = PendingBlock::new_dummy().commit_unchecked(); let kura = Kura::blank_kura_for_testing(); let wsv = WorldStateView::new(World::default(), kura); + let mut valid_block = ValidBlock::new_dummy(); let mut block_hashes = vec![]; for i in 1..=BLOCK_CNT { - block.header.height = i as u64; - block.header.previous_block_hash = block_hashes.last().copied(); - let block: VersionedCommittedBlock = block.clone().into(); + let VersionedSignedBlock::V1(v1_block) = &mut valid_block.0; + v1_block.payload.header.height = i as u64; + v1_block.payload.header.previous_block_hash = block_hashes.last().copied(); + + let block = valid_block.clone().commit_unchecked(); + let block: CommittedBlock = block.0.clone(); block_hashes.push(block.hash()); wsv.apply(&block).unwrap(); } @@ -1174,15 +1195,16 @@ mod tests { fn get_blocks_from_height() { const BLOCK_CNT: usize = 10; - let mut block = PendingBlock::new_dummy().commit_unchecked(); let kura = Kura::blank_kura_for_testing(); let wsv = WorldStateView::new(World::default(), kura.clone()); + let mut valid_block = ValidBlock::new_dummy(); for i in 1..=BLOCK_CNT { - block.header.height = i as u64; - let block: VersionedCommittedBlock = block.clone().into(); - wsv.apply(&block).unwrap(); - kura.store_block(block); + let VersionedSignedBlock::V1(v1_block) = &mut valid_block.0; + v1_block.payload.header.height 
= i as u64; + let block = valid_block.clone().commit_unchecked(); + wsv.apply(&block.0).unwrap(); + kura.store_block(block.0); } assert_eq!( diff --git a/core/test_network/src/lib.rs b/core/test_network/src/lib.rs index 26f7260cb3a..8b317038583 100644 --- a/core/test_network/src/lib.rs +++ b/core/test_network/src/lib.rs @@ -177,7 +177,6 @@ impl Network { ) -> (Self, Client) { let mut configuration = Configuration::test(); configuration.queue.maximum_transactions_in_block = max_txs_in_block; - configuration.logger.max_log_level = iroha_logger::Level::INFO.into(); let network = Network::new_with_offline_peers( Some(configuration), n_peers, @@ -826,6 +825,9 @@ impl TestConfiguration for Configuration { iroha::samples::get_config_proxy(HashSet::new(), Some(get_key_pair())); let env_proxy = ConfigurationProxy::from_env(); let (public_key, private_key) = KeyPair::generate().unwrap().into(); + if let Some(logger) = sample_proxy.logger.as_mut() { + logger.max_log_level = Some(iroha_logger::Level::DEBUG.into()); + } sample_proxy.public_key = Some(public_key); sample_proxy.private_key = Some(private_key); sample_proxy.override_with(env_proxy) diff --git a/crypto/src/hash.rs b/crypto/src/hash.rs index 941159512b9..52010e9b2d5 100644 --- a/crypto/src/hash.rs +++ b/crypto/src/hash.rs @@ -2,7 +2,7 @@ use alloc::{borrow::ToOwned as _, format, string::String, vec, vec::Vec}; use core::{hash, marker::PhantomData, num::NonZeroU8, str::FromStr}; -use derive_more::{DebugCustom, Deref, DerefMut, Display}; +use derive_more::{DebugCustom, Display}; use iroha_ffi::FfiType; use iroha_schema::{IntoSchema, TypeId}; use parity_scale_codec::{Decode, Encode}; @@ -74,7 +74,7 @@ impl Hash { /// since it is not possible to validate the correctness of the conversion. /// Prefer creating new hashes with [`HashOf::new`] whenever possible #[must_use] - pub const fn typed(self) -> HashOf { + pub(crate) const fn typed_unchecked(self) -> HashOf { HashOf(self, PhantomData) } @@ -199,23 +199,15 @@ impl From> for Hash { } /// Represents hash of Iroha entities like `Block` or `Transaction`. Currently supports only blake2b-32. -// Lint triggers when expanding #[codec(skip)] -#[allow(clippy::default_trait_access)] -#[derive( - DebugCustom, Deref, DerefMut, Display, Decode, Encode, Deserialize, Serialize, FfiType, TypeId, -)] +#[allow(clippy::default_trait_access)] // NOTE: Caused by #[codec(skip)] +#[derive(DebugCustom, Display, Decode, Encode, Deserialize, Serialize, FfiType, TypeId)] #[debug(fmt = "{{ {} {_0} }}", "core::any::type_name::()")] #[display(fmt = "{_0}")] #[serde(transparent)] #[repr(transparent)] // TODO: Temporary until PRs are resolved #[ffi_type(opaque)] -pub struct HashOf( - #[deref] - #[deref_mut] - Hash, - #[codec(skip)] PhantomData, -); +pub struct HashOf(Hash, #[codec(skip)] PhantomData); impl Clone for HashOf { fn clone(&self) -> Self { diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index f5e42fabaff..93b3decd948 100755 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -336,7 +336,7 @@ impl From for (PublicKey, PrivateKey) { ffi::ffi_item! { /// Public Key used in signatures. 
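With `Deref`/`DerefMut` dropped from `HashOf` above and `Hash::typed` hidden behind the crate-private `typed_unchecked`, erasing the type tag outside the crate becomes an explicit conversion through the `From` impl that the hunk keeps. A sketch:

```rust
use iroha_crypto::{Hash, HashOf};

// `HashOf<T>` no longer auto-derefs to `Hash`: convert explicitly instead.
fn untyped<T>(hash: HashOf<T>) -> Hash {
    Hash::from(hash)
}
```

The inverse direction stays restricted on purpose: per the doc comment above, new typed hashes are meant to come from `HashOf::new` rather than from retagging an arbitrary `Hash`.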
#[derive(DebugCustom, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, DeserializeFromStr, SerializeDisplay, Decode, Encode, FfiType, IntoSchema)] - #[debug(fmt = "{{digest: {digest_function}, payload: {payload:X?}}}")] + #[debug(fmt = "digest: {digest_function}, payload: {}", "hex::encode_upper(&self.payload)")] pub struct PublicKey { /// Digest function digest_function: Algorithm, @@ -421,8 +421,9 @@ impl From for PublicKey { ffi::ffi_item! { /// Private Key used in signatures. - #[derive(DebugCustom, Clone, PartialEq, Eq, Serialize, FfiType)] - #[debug(fmt = "{{digest: {digest_function}, payload: {payload:X?}}}")] + #[derive(DebugCustom, Display, Clone, PartialEq, Eq, Serialize, FfiType)] + #[debug(fmt = "digest: {digest_function}, payload: {}", "hex::encode_upper(&self.payload)")] + #[display(fmt = "payload: {}", "hex::encode_upper(&self.payload)")] pub struct PrivateKey { /// Digest function digest_function: Algorithm, @@ -432,12 +433,6 @@ ffi::ffi_item! { } } -impl fmt::Display for PrivateKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", hex::encode_upper(&self.payload)) - } -} - #[cfg_attr( all(feature = "ffi_export", not(feature = "ffi_import")), iroha_ffi::ffi_export diff --git a/crypto/src/merkle.rs b/crypto/src/merkle.rs index ac9bbe0566e..d5081733d76 100644 --- a/crypto/src/merkle.rs +++ b/crypto/src/merkle.rs @@ -228,7 +228,7 @@ impl MerkleTree { .zip(r_hash.as_ref().iter()) .map(|(l, r)| l.wrapping_add(*r)) .collect(); - Some(crate::Hash::new(sum).typed()) + Some(crate::Hash::new(sum).typed_unchecked()) } } @@ -261,7 +261,7 @@ mod tests { fn test_hashes(n_hashes: u8) -> Vec> { (1..=n_hashes) - .map(|i| Hash::prehashed([i; Hash::LENGTH]).typed()) + .map(|i| Hash::prehashed([i; Hash::LENGTH]).typed_unchecked()) .collect() } diff --git a/crypto/src/signature.rs b/crypto/src/signature.rs index cd71b879869..a632008816b 100644 --- a/crypto/src/signature.rs +++ b/crypto/src/signature.rs @@ -8,7 +8,7 @@ use std::collections::btree_set; use derive_more::{DebugCustom, Deref, DerefMut}; use getset::Getters; use iroha_schema::{IntoSchema, TypeId}; -use parity_scale_codec::{Decode, Encode, Input}; +use parity_scale_codec::{Decode, Encode}; use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use ursa::{ @@ -35,12 +35,12 @@ pub type Payload = Vec; Eq, PartialOrd, Ord, + Hash, Getters, Decode, Encode, Deserialize, Serialize, - Hash, IntoSchema, )] #[getset(get = "pub")] @@ -62,7 +62,7 @@ impl Signature { /// # Errors /// Fails if signing fails #[cfg(feature = "std")] - pub fn new(key_pair: KeyPair, payload: &[u8]) -> Result { + fn new(key_pair: KeyPair, payload: &[u8]) -> Result { let (public_key, private_key) = key_pair.into(); let algorithm: Algorithm = private_key.digest_function(); @@ -86,7 +86,7 @@ impl Signature { /// Prefer creating new signatures with [`SignatureOf::new`] whenever possible #[inline] #[cfg_attr(not(feature = "std"), allow(dead_code))] - const fn typed(self) -> SignatureOf { + const fn typed_unchecked(self) -> SignatureOf { SignatureOf(self, PhantomData) } @@ -95,7 +95,7 @@ impl Signature { /// # Errors /// Fails if message didn't pass verification #[cfg(feature = "std")] - pub fn verify(&self, payload: &[u8]) -> Result<(), Error> { + fn verify(&self, payload: &[u8]) -> Result<(), Error> { let algorithm: Algorithm = self.public_key.digest_function(); let public_key = UrsaPublicKey(self.public_key.payload().to_owned()); @@ -130,13 +130,11 @@ impl From> for Signature { } /// Represents signature of the data (`Block` or 
`Transaction` for example). -// Lint triggers when expanding #[codec(skip)] #[allow( + // Caused by #[codec(skip)] clippy::default_trait_access, - clippy::unsafe_derive_deserialize, - clippy::derive_hash_xor_eq )] -#[derive(Deref, DerefMut, Hash, Decode, Encode, Deserialize, Serialize, TypeId)] +#[derive(Deref, DerefMut, Decode, Encode, Deserialize, Serialize, TypeId)] #[serde(transparent)] // Transmute guard #[repr(transparent)] @@ -179,6 +177,12 @@ impl Ord for SignatureOf { } } +impl core::hash::Hash for SignatureOf { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + impl IntoSchema for SignatureOf { fn type_name() -> String { format!("SignatureOf<{}>", T::type_name()) @@ -203,28 +207,7 @@ impl SignatureOf { /// Fails if signing fails #[cfg(feature = "std")] pub fn from_hash(key_pair: KeyPair, hash: &HashOf) -> Result { - Ok(Signature::new(key_pair, hash.as_ref())?.typed()) - } - - /// Transmutes signature to some specific type - pub fn transmute(self) -> SignatureOf { - SignatureOf(self.0, PhantomData) - } - - /// Transmutes signature to some specific type - /// - /// # Warning: - /// - /// This method uses [`core::mem::transmute`] internally - pub const fn transmute_ref(&self) -> &SignatureOf { - #[allow(unsafe_code, trivial_casts)] - // SAFETY: transmuting is safe, because we're casting a - // pointer of type `SignatureOf` into a pointer of type - // `SignatureOf`, where `` and `` type parameters are - // normally related types that have the exact same alignment. - unsafe { - &*((self as *const Self).cast::>()) - } + Signature::new(key_pair, hash.as_ref()).map(Signature::typed_unchecked) } /// Verify signature for this hash @@ -287,7 +270,7 @@ impl Eq for SignatureWrapperOf {} impl PartialOrd for SignatureWrapperOf { fn partial_cmp(&self, other: &Self) -> Option { - self.0.public_key.partial_cmp(&other.0.public_key) + Some(self.cmp(other)) } } impl Ord for SignatureWrapperOf { @@ -319,10 +302,7 @@ impl core::hash::Hash for SignatureWrapperOf { /// /// If the public key of the added signature is already in the set, /// the associated signature will be replaced with the new one. -/// -/// GUARANTEE 1: Each signature corresponds to a different public key -#[allow(clippy::derive_hash_xor_eq)] -#[derive(Hash, Encode, Serialize, IntoSchema)] +#[derive(Default, Decode, Encode, Deserialize, Serialize, IntoSchema)] #[serde(transparent)] // Transmute guard #[repr(transparent)] @@ -350,34 +330,20 @@ impl PartialEq for SignaturesOf { } } impl Eq for SignaturesOf {} - -impl<'de, T> Deserialize<'de> for SignaturesOf { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - use serde::de::Error as _; - - let signatures = >>::deserialize(deserializer)?; - - if signatures.is_empty() { - return Err(D::Error::custom( - "Could not deserialize SignaturesOf. Input contains 0 signatures", - )); - } - - Ok(Self { signatures }) +impl PartialOrd for SignaturesOf { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl Ord for SignaturesOf { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.signatures.cmp(&other.signatures) } } -impl Decode for SignaturesOf { - fn decode(input: &mut I) -> Result { - let signatures = >>::decode(input)?; - - if signatures.is_empty() { - return Err("Could not decode SignaturesOf. 
Input contains 0 signatures".into()); - } - Ok(Self { signatures }) +impl core::hash::Hash for SignaturesOf { + fn hash(&self, state: &mut H) { + self.signatures.hash(state); } } @@ -420,10 +386,8 @@ impl From> for btree_set::BTreeSet> { } } -impl TryFrom>> for SignaturesOf { - type Error = Error; - - fn try_from(signatures: btree_set::BTreeSet>) -> Result { +impl From>> for SignaturesOf { + fn from(signatures: btree_set::BTreeSet>) -> Self { signatures.into_iter().collect() } } @@ -436,31 +400,15 @@ impl From> for SignaturesOf { } } -impl FromIterator> for Result, Error> { +impl FromIterator> for SignaturesOf { fn from_iter>>(iter: T) -> Self { - let mut iter = iter.into_iter(); - iter.next() - .ok_or(Error::EmptySignatureIter) - .map(move |first_signature| core::iter::once(first_signature).chain(iter)) - .map(|signatures| signatures.map(SignatureWrapperOf).collect()) - .map(|signatures| SignaturesOf { signatures }) + Self { + signatures: iter.into_iter().map(SignatureWrapperOf).collect(), + } } } impl SignaturesOf { - /// Transmutes signature generic type - /// - /// # Warning: - /// - /// This method uses [`core::mem::transmute`] internally - #[allow(unsafe_code, clippy::transmute_undefined_repr)] - pub fn transmute(self) -> SignaturesOf { - // SAFETY: Safe because we are transmuting to a pointer of - // type `` which is related to type ``. - let signatures = unsafe { core::mem::transmute(self.signatures) }; - SignaturesOf { signatures } - } - /// Adds a signature. If the signature with this key was present, replaces it. pub fn insert(&mut self, signature: SignatureOf) { self.signatures.insert(SignatureWrapperOf(signature)); @@ -581,8 +529,6 @@ impl std::error::Error for SignatureVerificationFail {} mod tests { #![allow(clippy::restriction)] - use parity_scale_codec::DecodeAll; - #[cfg(feature = "std")] use super::*; #[cfg(feature = "std")] @@ -644,32 +590,6 @@ mod tests { assert!(signature.verify(message).is_ok()) } - #[test] - #[cfg(feature = "std")] - fn decode_signatures_of() { - let no_signatures: SignaturesOf = SignaturesOf { - signatures: btree_set::BTreeSet::new(), - }; - let bytes = no_signatures.encode(); - - let signatures = SignaturesOf::::decode_all(&mut &bytes[..]); - assert!(signatures.is_err()); - } - - #[test] - #[cfg(feature = "std")] - fn deserialize_signatures_of() -> Result<(), serde_json::Error> { - let no_signatures: SignaturesOf = SignaturesOf { - signatures: btree_set::BTreeSet::new(), - }; - let serialized = serde_json::to_string(&no_signatures)?; - - let signatures = serde_json::from_str::>(serialized.as_str()); - assert!(signatures.is_err()); - - Ok(()) - } - #[test] #[cfg(feature = "std")] fn signatures_of_deduplication_by_public_key() { @@ -679,10 +599,7 @@ mod tests { SignatureOf::new(key_pair.clone(), &2).expect("Failed to sign"), SignatureOf::new(key_pair, &3).expect("Failed to sign"), ]; - let signatures = signatures - .into_iter() - .collect::, Error>>() - .expect("One signature must stay"); + let signatures: SignaturesOf<_> = signatures.into_iter().collect(); // Signatures with the same public key was deduplicated assert_eq!(signatures.len(), 1); } @@ -707,6 +624,8 @@ mod tests { .collect::>(); let hash_set: HashSet<_> = signatures.clone().into_iter().collect(); let btree_set: BTreeSet<_> = signatures.into_iter().collect(); + println!("{:#?}", &hash_set); + println!("{:#?}", &btree_set); // Check that `hash_set` is subset of `btree_set` for signature in &hash_set { diff --git a/data_model/Cargo.toml b/data_model/Cargo.toml index 
d67d17de9ae..1ded40aebfb 100644 --- a/data_model/Cargo.toml +++ b/data_model/Cargo.toml @@ -27,7 +27,7 @@ http = ["std", "warp", "iroha_version/http"] # Expose FFI API for dynamic linking (Internal use only) ffi_export = ["std", "iroha_ffi", "iroha_primitives/ffi_export", "iroha_crypto/ffi_export"] # Expose API for mutating structures (Internal use only) -transparent_api = [] +transparent-api = [] [dependencies] iroha_primitives = { path = "../primitives", version = "=2.0.0-pre-rc.13", default-features = false } diff --git a/data_model/derive/src/filter.rs b/data_model/derive/src/filter.rs index 7ee0d6b40f2..a4058300ad9 100644 --- a/data_model/derive/src/filter.rs +++ b/data_model/derive/src/filter.rs @@ -209,7 +209,7 @@ pub fn impl_filter(event: &EventEnum) -> TokenStream { } } - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] impl #import_path::Filter for #filter_ident { type Event = #imp_event; @@ -256,7 +256,7 @@ fn impl_event_filter(event: &EventEnum) -> proc_macro2::TokenStream { } } - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] impl #import_path::Filter for #event_filter_ident { type Event = #imp_event; diff --git a/data_model/derive/src/lib.rs b/data_model/derive/src/lib.rs index 74ded26ca9a..62f5b3dbcb1 100644 --- a/data_model/derive/src/lib.rs +++ b/data_model/derive/src/lib.rs @@ -10,13 +10,13 @@ mod partially_tagged; use proc_macro::TokenStream; use syn::parse_macro_input; -/// Macro which controls how to export item's API. The behaviour is controlled with `transparent_api` +/// Macro which controls how to export item's API. The behaviour is controlled with `transparent-api` /// feature flag. If the flag is active, item's public fields will be exposed as public, however, if /// it's not active, item will be exposed as opaque, i.e. no fields will be visible. This enables /// internal libraries of Iroha to see and destructure data model items. On the other hand, /// client libraries will only see opaque items and can be dynamically linked. /// -/// Additionally, this macro will rewrite private items as public when `transparent_api` is active. +/// Additionally, this macro will rewrite private items as public when `transparent-api` is active. /// If an item should remain private regardless of consumer library, just don't wrap it in this macro. /// /// Should be used only on public module named `model`. @@ -43,20 +43,20 @@ use syn::parse_macro_input; /// /* will produce: /// pub mod model { /// pub struct DataModel1 { -/// #[cfg(feature = "transparent_api")] +/// #[cfg(feature = "transparent-api")] /// pub item1: u32, -/// #[cfg(not(feature = "transparent_api"))] +/// #[cfg(not(feature = "transparent-api"))] /// pub(crate) item1: u32, /// pub(super) item2: u64 /// } /// -/// #[cfg(not(feature = "transparent_api"))] +/// #[cfg(not(feature = "transparent-api"))] /// pub struct DataModel2 { /// pub item1: u32, /// pub(super) item2: u64 /// } /// -/// #[cfg(feature = "transparent_api")] +/// #[cfg(feature = "transparent-api")] /// struct DataModel2 { /// pub item1: u32, /// pub(super) item2: u64 diff --git a/data_model/derive/src/model.rs b/data_model/derive/src/model.rs index db879181b41..6139021c29f 100644 --- a/data_model/derive/src/model.rs +++ b/data_model/derive/src/model.rs @@ -51,13 +51,13 @@ pub fn process_item(item: syn::Item) -> TokenStream { } let non_transparent_item = quote! { - #[cfg(not(feature = "transparent_api"))] + #[cfg(not(feature = "transparent-api"))] #input }; input.vis = parse_quote! 
{pub}; let transparent_item = quote! { - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] #input }; @@ -86,11 +86,11 @@ fn process_pub_item(input: syn::DeriveInput) -> TokenStream { } quote! { - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] #(#field_attrs)* pub #field_name: #field_ty, - #[cfg(not(feature = "transparent_api"))] + #[cfg(not(feature = "transparent-api"))] #(#field_attrs)* pub(crate) #field_name: #field_ty, } @@ -114,11 +114,11 @@ fn process_pub_item(input: syn::DeriveInput) -> TokenStream { } quote! { - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] #(#field_attrs)* pub #field_ty, - #[cfg(not(feature = "transparent_api"))] + #[cfg(not(feature = "transparent-api"))] #(#field_attrs)* pub(crate) #field_ty, } @@ -163,11 +163,11 @@ fn process_pub_item(input: syn::DeriveInput) -> TokenStream { quote! { #(#field_attrs)* - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] pub #field_name: #field_ty, #(#field_attrs)* - #[cfg(not(feature = "transparent_api"))] + #[cfg(not(feature = "transparent-api"))] pub(crate) #field_name: #field_ty, } }); diff --git a/data_model/src/account.rs b/data_model/src/account.rs index 789f0845351..0e435983d9e 100644 --- a/data_model/src/account.rs +++ b/data_model/src/account.rs @@ -163,22 +163,6 @@ pub mod model { pub struct SignatureCheckCondition(pub EvaluatesTo); } -impl AccountId { - #[cfg(feature = "transparent_api")] - const GENESIS_ACCOUNT_NAME: &str = "genesis"; - - /// Construct [`Id`] of the genesis account. - #[inline] - #[must_use] - #[cfg(feature = "transparent_api")] - pub fn genesis() -> Self { - Self { - name: Self::GENESIS_ACCOUNT_NAME.parse().expect("Valid"), - domain_id: DomainId::genesis(), - } - } -} - impl Account { /// Construct builder for [`Account`] identifiable by [`Id`] containing the given signatories. 
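Just above, the `AccountId::genesis()` constructor disappears (its `DomainId` counterpart is removed further down in `domain.rs`); both are superseded by the shared `GENESIS_ACCOUNT_ID` and `GENESIS_DOMAIN_ID` constants that this PR adds to `iroha_genesis`. A sketch of the new call-site shape, assuming the constants are `once_cell`-backed and deref to the id types:

```rust
use iroha_data_model::prelude::*;
use iroha_genesis::{GENESIS_ACCOUNT_ID, GENESIS_DOMAIN_ID};

// Compare through deref; clone when an owned id is required.
fn is_genesis(account: &AccountId, domain: &DomainId) -> bool {
    *account == *GENESIS_ACCOUNT_ID && *domain == *GENESIS_DOMAIN_ID
}
```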
#[inline] @@ -227,7 +211,7 @@ impl Account { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Account { /// Add [`Asset`] into the [`Account`] returning previous asset stored under the same id #[inline] diff --git a/data_model/src/block.rs b/data_model/src/block.rs index b871d2baeb9..e220c71d303 100644 --- a/data_model/src/block.rs +++ b/data_model/src/block.rs @@ -10,274 +10,281 @@ use core::{cmp::Ordering, fmt::Display}; use derive_more::Display; use getset::Getters; -#[cfg(feature = "std")] -use iroha_crypto::SignatureOf; -use iroha_crypto::{HashOf, MerkleTree, SignaturesOf}; +use iroha_crypto::{HashOf, MerkleTree, SignatureOf, SignaturesOf}; +use iroha_data_model_derive::model; +use iroha_macro::FromVariant; use iroha_schema::IntoSchema; -use iroha_version::{declare_versioned_with_scale, version_with_scale}; -use parity_scale_codec::{Decode, Encode}; +use iroha_version::{declare_versioned, version_with_scale}; +pub use model::*; +use parity_scale_codec::{Decode, Encode, Input}; use serde::{Deserialize, Serialize}; -pub use self::{ - committed::{CommittedBlock, VersionedCommittedBlock}, - header::BlockHeader, -}; -use crate::{events::prelude::*, model, peer, transaction::prelude::*}; +use crate::{events::prelude::*, peer, transaction::prelude::*}; -mod header { - pub use self::model::*; +/// Trait for basic block operations +pub trait Block { + /// Calculate block hash + #[cfg(feature = "std")] + fn hash(&self) -> HashOf { + HashOf::new(self.header()).transmute() + } + /// Return block header + fn header(&self) -> &BlockHeader { + &self.payload().header + } + + /// Return block payload + fn payload(&self) -> &BlockPayload; + /// Return block signatures + fn signatures(&self) -> &SignaturesOf; +} + +#[model] +pub mod model { use super::*; + use crate::transaction::error::TransactionRejectionReason; + + #[derive( + Debug, + Display, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Getters, + Decode, + Encode, + Deserialize, + Serialize, + IntoSchema, + )] + #[cfg_attr( + feature = "std", + display(fmt = "Block №{height} (hash: {});", "HashOf::new(&self)") + )] + #[cfg_attr(not(feature = "std"), display(fmt = "Block №{height}"))] + #[getset(get = "pub")] + #[allow(missing_docs)] + #[ffi_type] + // TODO: Do we need both BlockPayload and BlockHeader? + // If yes, what data goes into which structure? + pub struct BlockHeader { + /// Number of blocks in the chain including this block. + pub height: u64, + /// Creation timestamp (unix time in milliseconds). + pub timestamp_ms: u128, + /// Hash of the previous block in the chain. + pub previous_block_hash: Option>, + /// Hash of merkle tree root of valid transactions' hashes. + pub transactions_hash: Option>>, + /// Hash of merkle tree root of rejected transactions' hashes. + pub rejected_transactions_hash: Option>>, + /// Topology of the network at the time of block commit. + pub commit_topology: Vec, + /// Value of view change index. Used to resolve soft forks. + // NOTE: This field used to be required to rotate topology. After merging + // https://github.com/hyperledger/iroha/pull/3250 only commit_topology is used + #[deprecated(since = "2.0.0-pre-rc.13", note = "Will be removed in future versions")] + pub view_change_index: u64, + /// Estimation of consensus duration (in milliseconds). 
+ pub consensus_estimation_ms: u64, + } - #[model] - pub mod model { - use super::*; + #[derive( + Debug, Display, Clone, Eq, Getters, Decode, Encode, Deserialize, Serialize, IntoSchema, + )] + #[display(fmt = "({header})")] + #[getset(get = "pub")] + #[allow(missing_docs)] + #[ffi_type] + pub struct BlockPayload { + /// Block header + pub header: BlockHeader, + /// array of transactions, which successfully passed validation and consensus step. + pub transactions: Vec, + /// Array of rejected transactions. + pub rejected_transactions: Vec<(VersionedSignedTransaction, TransactionRejectionReason)>, + /// Event recommendations. + #[getset(skip)] // NOTE: Unused ATM + pub event_recommendations: Vec, + } - /// Header of the block. The hash should be taken from its byte representation. - #[derive( - Debug, - Display, - Clone, - PartialEq, - Eq, - Hash, - Getters, - Decode, - Encode, - Deserialize, - Serialize, - IntoSchema, - )] - #[cfg_attr( - feature = "std", - display(fmt = "Block №{height} (hash: {});", "HashOf::new(&self)") - )] - #[cfg_attr(not(feature = "std"), display(fmt = "Block №{height}"))] - #[getset(get = "pub")] - #[ffi_type] - pub struct BlockHeader { - /// Unix time (in milliseconds) of block forming by a peer. - pub timestamp: u128, - /// Estimation of consensus duration in milliseconds - pub consensus_estimation: u64, - /// A number of blocks in the chain up to the block. - pub height: u64, - /// Value of view change index used to resolve soft forks - pub view_change_index: u64, - /// Hash of a previous block in the chain. - /// Is an array of zeros for the first block. - pub previous_block_hash: Option>, - /// Hash of merkle tree root of the tree of valid transactions' hashes. - pub transactions_hash: Option>>, - /// Hash of merkle tree root of the tree of rejected transactions' hashes. - pub rejected_transactions_hash: Option>>, - /// Network topology when the block was committed. - pub committed_with_topology: Vec, - } + /// Signed block + #[version_with_scale(n = 1, versioned = "VersionedSignedBlock")] + #[derive( + Debug, + Display, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Getters, + Encode, + Serialize, + IntoSchema, + )] + #[display(fmt = "({payload})")] + #[getset(get = "pub")] + #[ffi_type] + pub struct SignedBlock { + /// Block payload + pub payload: BlockPayload, + /// Signatures of peers which approved this block. + pub signatures: SignaturesOf, } +} - impl BlockHeader { - /// Checks if it's a header of a genesis block. - #[inline] - pub const fn is_genesis(&self) -> bool { - self.height == 1 - } +#[cfg(any(feature = "ffi_import", feature = "ffi_export"))] +declare_versioned!(VersionedSignedBlock 1..2, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, FromVariant, iroha_ffi::FfiType, IntoSchema); +#[cfg(all(not(feature = "ffi_import"), not(feature = "ffi_export")))] +declare_versioned!(VersionedSignedBlock 1..2, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, FromVariant, IntoSchema); + +// TODO: Think about how should BlockPayload implement Eq, Ord, Hash? 
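The new `Block` trait gives `SignedBlock` and the `VersionedSignedBlock` wrapper one read-only surface, and the impls that follow answer the TODO above by keying `BlockPayload` comparisons on the header alone. A consumer-side sketch (direct field access on the payload assumes the `transparent-api` feature):

```rust
use iroha_data_model::block::{Block, VersionedSignedBlock};

// Generic over anything implementing `Block`, wrapper or inner type alike.
fn height(block: &impl Block) -> u64 {
    block.header().height
}

// Reaches through the trait's `payload()` accessor instead of matching
// on the version wrapper by hand.
fn committed_tx_count(block: &VersionedSignedBlock) -> usize {
    block.payload().transactions.len()
}
```

One consequence of the header-keyed impls below: two payloads with identical headers but different transaction lists compare equal, which is presumably what the TODO wants revisited.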
+impl PartialEq for BlockPayload { + fn eq(&self, other: &Self) -> bool { + self.header == other.header } +} +impl PartialOrd for BlockPayload { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl Ord for BlockPayload { + fn cmp(&self, other: &Self) -> Ordering { + self.header.cmp(&other.header) + } +} +impl core::hash::Hash for BlockPayload { + fn hash(&self, state: &mut H) { + self.header.hash(state) + } +} - impl PartialOrd for BlockHeader { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } +impl BlockHeader { + /// Checks if it's a header of a genesis block. + #[inline] + pub const fn is_genesis(&self) -> bool { + self.height == 1 } +} - impl Ord for BlockHeader { - fn cmp(&self, other: &Self) -> Ordering { - self.timestamp.cmp(&other.timestamp) - } +impl Block for SignedBlock { + fn payload(&self) -> &BlockPayload { + &self.payload + } + fn signatures(&self) -> &SignaturesOf { + &self.signatures } } -mod committed { - use iroha_macro::FromVariant; +impl Block for VersionedSignedBlock { + fn payload(&self) -> &BlockPayload { + let VersionedSignedBlock::V1(block) = self; + block.payload() + } + fn signatures(&self) -> &SignaturesOf { + let VersionedSignedBlock::V1(block) = self; + block.signatures() + } +} - pub use self::model::*; +mod candidate { use super::*; - #[cfg(any(feature = "ffi_import", feature = "ffi_export"))] - declare_versioned_with_scale!(VersionedCommittedBlock 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, Deserialize, Serialize, iroha_ffi::FfiType, IntoSchema); - #[cfg(all(not(feature = "ffi_import"), not(feature = "ffi_export")))] - declare_versioned_with_scale!(VersionedCommittedBlock 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, Deserialize, Serialize, IntoSchema); + #[derive(Decode, Deserialize)] + #[serde(transparent)] + struct SignedBlockCandidate(SignedBlock); - #[model] - pub mod model { - use super::*; + impl SignedBlockCandidate { + fn validate(mut self) -> Result { + #[cfg(feature = "std")] + self.validate_header()?; - /// The `CommittedBlock` struct represents a block accepted by consensus - #[version_with_scale(n = 1, versioned = "VersionedCommittedBlock")] - #[derive( - Debug, - Display, - Clone, - PartialEq, - Eq, - Hash, - Getters, - Decode, - Encode, - Deserialize, - Serialize, - IntoSchema, - )] - #[display(fmt = "({header})")] - #[getset(get = "pub")] - #[ffi_type] - pub struct CommittedBlock { - /// Block header - pub header: BlockHeader, - /// Array of rejected transactions. - pub rejected_transactions: Vec, - /// array of transactions, which successfully passed validation and consensus step. - pub transactions: Vec, - /// Event recommendations. 
- pub event_recommendations: Vec, - /// Signatures of peers which approved this block - pub signatures: SignaturesOf, - } - } - - impl VersionedCommittedBlock { - /// Convert from `&VersionedCommittedBlock` to V1 reference - #[inline] - pub const fn as_v1(&self) -> &CommittedBlock { - match self { - Self::V1(v1) => v1, + #[cfg(feature = "std")] + if self.retain_verified_signatures().is_empty() { + return Err("Block contains no signatures"); } - } - /// Convert from `&mut VersionedCommittedBlock` to V1 mutable reference - #[inline] - pub fn as_mut_v1(&mut self) -> &mut CommittedBlock { - match self { - Self::V1(v1) => v1, + let payload = &self.0.payload; + if payload.transactions.is_empty() && payload.rejected_transactions.is_empty() { + return Err("Block is empty"); } - } - /// Performs the conversion from `VersionedCommittedBlock` to V1 - #[inline] - pub fn into_v1(self) -> CommittedBlock { - match self { - Self::V1(v1) => v1, - } + Ok(self.0) } - /// Calculate the hash of the current block. - /// `VersionedCommitedBlock` should have the same hash as `VersionedCommitedBlock`. #[cfg(feature = "std")] - #[inline] - pub fn hash(&self) -> HashOf { - self.as_v1().hash().transmute() - } - - /// Returns the header of a valid block - #[inline] - pub const fn header(&self) -> &BlockHeader { - &self.as_v1().header + fn retain_verified_signatures(&mut self) -> Vec<&SignatureOf> { + self.0 + .signatures + .retain_verified_by_hash(self.0.hash()) + .collect() } - /// Return signatures that are verified with the `hash` of this block #[cfg(feature = "std")] - #[inline] - pub fn signatures(&self) -> impl IntoIterator> { - self.as_v1() - .signatures + fn validate_header(&self) -> Result<(), &'static str> { + let actual_txs_hash = self.0.header().transactions_hash; + let actual_rejected_txs_hash = self.0.header().rejected_transactions_hash; + + let expected_txs_hash = self + .0 + .payload + .transactions .iter() - .map(SignatureOf::transmute_ref) - } - } - - impl Display for VersionedCommittedBlock { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - self.as_v1().fmt(f) - } - } - - impl PartialOrd for VersionedCommittedBlock { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } - } - - impl Ord for VersionedCommittedBlock { - fn cmp(&self, other: &Self) -> Ordering { - self.as_v1().cmp(other.as_v1()) - } - } + .map(VersionedSignedTransaction::hash) + .collect::>() + .hash(); + let expected_rejected_txs_hash = self + .0 + .payload + .rejected_transactions + .iter() + .map(|(rejected_transaction, _)| rejected_transaction.hash()) + .collect::>() + .hash(); - impl CommittedBlock { - /// Calculate the hash of the current block. - /// `CommitedBlock` should have the same hash as `ValidBlock`. - #[cfg(feature = "std")] - #[inline] - pub fn hash(&self) -> HashOf { - HashOf::new(&self.header).transmute() - } - } + if expected_txs_hash != actual_txs_hash { + return Err("Transactions' hash incorrect. Expected: {expected_txs_hash:?}, actual: {actual_txs_hash:?}"); + } + if expected_rejected_txs_hash != actual_rejected_txs_hash { + return Err("Rejected transactions' hash incorrect. Expected: {expected_rejected_txs_hash:?}, actual: {actual_rejected_txs_hash:?}"); + } + // TODO: Validate Event recommendations somehow? 
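The `validate_header` check above boils down to recomputing two Merkle roots from the payload and comparing them with what the header claims. A sketch of that comparison, assuming the `FromIterator` and `hash` APIs the hunk itself uses:

```rust
use iroha_crypto::{HashOf, MerkleTree};

// Fold per-transaction hashes into a Merkle tree; `hash()` yields `None`
// for an empty tree, which matches the Option-typed header fields.
fn roots_match<T>(
    tx_hashes: impl IntoIterator<Item = HashOf<T>>,
    header_root: Option<HashOf<MerkleTree<T>>>,
) -> bool {
    tx_hashes.into_iter().collect::<MerkleTree<T>>().hash() == header_root
}
```

Note that the `&'static str` errors above embed `{expected_txs_hash:?}`-style placeholders that are never interpolated; surfacing the actual hashes would need an owned error type.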
- impl PartialOrd for CommittedBlock { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) + Ok(()) } } - impl Ord for CommittedBlock { - fn cmp(&self, other: &Self) -> Ordering { - self.header.cmp(&other.header) + impl Decode for SignedBlock { + fn decode(input: &mut I) -> Result { + SignedBlockCandidate::decode(input)? + .validate() + .map_err(Into::into) } } - - #[cfg(feature = "std")] - impl From<&CommittedBlock> for Vec { - fn from(block: &CommittedBlock) -> Self { - let rejected_tx = block - .rejected_transactions - .iter() - .cloned() - .map(|transaction| { - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Rejected( - transaction.as_v1().rejection_reason.clone().into(), - ), - hash: transaction.hash().into(), - } - .into() - }); - let tx = block.transactions.iter().cloned().map(|transaction| { - PipelineEvent { - entity_kind: PipelineEntityKind::Transaction, - status: PipelineStatus::Committed, - hash: transaction.hash().into(), - } - .into() - }); - let current_block = core::iter::once( - PipelineEvent { - entity_kind: PipelineEntityKind::Block, - status: PipelineStatus::Committed, - hash: block.hash().into(), - } - .into(), - ); - - tx.chain(rejected_tx).chain(current_block).collect() + impl<'de> Deserialize<'de> for SignedBlock { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error as _; + + SignedBlockCandidate::deserialize(deserializer)? + .validate() + .map_err(D::Error::custom) } } +} - #[cfg(feature = "std")] - impl From<&VersionedCommittedBlock> for Vec { - #[inline] - fn from(block: &VersionedCommittedBlock) -> Self { - block.as_v1().into() - } +impl Display for VersionedSignedBlock { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let VersionedSignedBlock::V1(block) = self; + block.fmt(f) } } @@ -296,29 +303,6 @@ pub mod stream { declare_versioned_with_scale!(VersionedBlockMessage 1..2, Debug, Clone, FromVariant, IntoSchema); - impl VersionedBlockMessage { - /// Convert from `&VersionedBlockPublisherMessage` to V1 reference - pub const fn as_v1(&self) -> &BlockMessage { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedBlockPublisherMessage` to V1 mutable reference - pub fn as_mut_v1(&mut self) -> &mut BlockMessage { - match self { - Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedBlockPublisherMessage` to V1 - pub fn into_v1(self) -> BlockMessage { - match self { - Self::V1(v1) => v1, - } - } - } - #[model] pub mod model { use super::*; @@ -334,10 +318,10 @@ pub mod stream { #[version_with_scale(n = 1, versioned = "VersionedBlockMessage")] #[derive(Debug, Clone, Decode, Encode, IntoSchema)] #[repr(transparent)] - pub struct BlockMessage(pub VersionedCommittedBlock); + pub struct BlockMessage(pub VersionedSignedBlock); } - impl From for VersionedCommittedBlock { + impl From for VersionedSignedBlock { fn from(source: BlockMessage) -> Self { source.0 } @@ -345,38 +329,19 @@ pub mod stream { declare_versioned_with_scale!(VersionedBlockSubscriptionRequest 1..2, Debug, Clone, FromVariant, IntoSchema); - impl VersionedBlockSubscriptionRequest { - /// Convert from `&VersionedBlockSubscriberMessage` to V1 reference - pub const fn as_v1(&self) -> &BlockSubscriptionRequest { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedBlockSubscriberMessage` to V1 mutable reference - pub fn as_mut_v1(&mut self) -> &mut BlockSubscriptionRequest { - match self { 
- Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedBlockSubscriberMessage` to V1 - pub fn into_v1(self) -> BlockSubscriptionRequest { - match self { - Self::V1(v1) => v1, - } - } - } - /// Exports common structs and enums from this module. pub mod prelude { - pub use super::{ - BlockMessage, BlockSubscriptionRequest, VersionedBlockMessage, - VersionedBlockSubscriptionRequest, - }; + pub use super::{VersionedBlockMessage, VersionedBlockSubscriptionRequest}; } } +/// Exports common structs and enums from this module. +pub mod prelude { + #[cfg(feature = "http")] + pub use super::stream::prelude::*; + pub use super::{Block, VersionedSignedBlock}; +} + pub mod error { //! Module containing errors that can occur during instruction evaluation diff --git a/data_model/src/domain.rs b/data_model/src/domain.rs index 89a51fc1d07..c4337e321d9 100644 --- a/data_model/src/domain.rs +++ b/data_model/src/domain.rs @@ -198,7 +198,7 @@ impl Domain { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Domain { /// Add [`Account`] into the [`Domain`] returning previous account stored under the same id #[inline] @@ -266,21 +266,6 @@ impl FromIterator for crate::Value { } } -impl DomainId { - #[cfg(feature = "transparent_api")] - const GENESIS_DOMAIN_NAME: &str = "genesis"; - - /// Construct [`Id`] of the genesis domain. - #[inline] - #[must_use] - #[cfg(feature = "transparent_api")] - pub fn genesis() -> Self { - Self { - name: Self::GENESIS_DOMAIN_NAME.parse().expect("Valid"), - } - } -} - /// The prelude re-exports most commonly used traits, structs and macros from this crate. pub mod prelude { pub use super::{Domain, DomainId}; diff --git a/data_model/src/events/data/events.rs b/data_model/src/events/data/events.rs index 03d44955524..6ee1173e313 100644 --- a/data_model/src/events/data/events.rs +++ b/data_model/src/events/data/events.rs @@ -526,7 +526,7 @@ mod validator { } } - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] impl super::Filter for ValidatorFilter { type Event = ValidatorEvent; diff --git a/data_model/src/events/data/filters.rs b/data_model/src/events/data/filters.rs index 49776ea76fb..58e20c582c9 100644 --- a/data_model/src/events/data/filters.rs +++ b/data_model/src/events/data/filters.rs @@ -136,7 +136,7 @@ mod accept_all_as_string { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Filter for FilterOpt { type Event = F::Event; @@ -148,7 +148,7 @@ impl Filter for FilterOpt { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Filter for DataEntityFilter { type Event = DataEvent; @@ -180,7 +180,7 @@ where } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Filter for OriginFilter where ::Id: @@ -222,7 +222,7 @@ pub mod prelude { } #[cfg(test)] -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] mod tests { #[cfg(not(feature = "std"))] use alloc::collections::BTreeSet; @@ -237,7 +237,7 @@ mod tests { }; #[test] - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] fn entity_scope() { let domain_name = "wonderland".parse().expect("Valid"); let account_name = "alice".parse().expect("Valid"); diff --git a/data_model/src/events/data/mod.rs b/data_model/src/events/data/mod.rs index 1c1ab8494f8..0153d217ef6 100644 --- a/data_model/src/events/data/mod.rs +++ b/data_model/src/events/data/mod.rs @@ -10,7 +10,7 @@ use iroha_schema::IntoSchema; use parity_scale_codec::{Decode, 
Encode}; use serde::{Deserialize, Serialize}; -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] use super::Filter; use crate::prelude::*; pub use crate::Registered; diff --git a/data_model/src/events/execute_trigger.rs b/data_model/src/events/execute_trigger.rs index b9222c16387..0f95fccb991 100644 --- a/data_model/src/events/execute_trigger.rs +++ b/data_model/src/events/execute_trigger.rs @@ -59,7 +59,7 @@ pub mod model { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Filter for ExecuteTriggerEventFilter { type Event = ExecuteTriggerEvent; diff --git a/data_model/src/events/mod.rs b/data_model/src/events/mod.rs index dd94c4d1a20..e4425dab704 100644 --- a/data_model/src/events/mod.rs +++ b/data_model/src/events/mod.rs @@ -87,7 +87,7 @@ pub mod model { } /// Trait for filters -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] pub trait Filter { /// Type of event that can be filtered type Event; @@ -114,7 +114,7 @@ pub trait Filter { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Filter for FilterBox { type Event = Event; @@ -145,29 +145,6 @@ pub mod stream { declare_versioned_with_scale!(VersionedEventMessage 1..2, Debug, Clone, FromVariant, IntoSchema); - impl VersionedEventMessage { - #[allow(missing_docs)] - pub const fn as_v1(&self) -> &EventMessage { - match self { - Self::V1(v1) => v1, - } - } - - #[allow(missing_docs)] - pub fn as_mut_v1(&mut self) -> &mut EventMessage { - match self { - Self::V1(v1) => v1, - } - } - - #[allow(missing_docs)] - pub fn into_v1(self) -> EventMessage { - match self { - Self::V1(v1) => v1, - } - } - } - #[model] pub mod model { use super::*; @@ -194,29 +171,6 @@ pub mod stream { } declare_versioned_with_scale!(VersionedEventSubscriptionRequest 1..2, Debug, Clone, FromVariant, IntoSchema); - - impl VersionedEventSubscriptionRequest { - #[allow(missing_docs)] - pub const fn as_v1(&self) -> &EventSubscriptionRequest { - match self { - Self::V1(v1) => v1, - } - } - - #[allow(missing_docs)] - pub fn as_mut_v1(&mut self) -> &mut EventSubscriptionRequest { - match self { - Self::V1(v1) => v1, - } - } - - #[allow(missing_docs)] - pub fn into_v1(self) -> EventSubscriptionRequest { - match self { - Self::V1(v1) => v1, - } - } - } } /// Exports common structs and enums from this module. 
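All of these `as_v1`/`as_mut_v1`/`into_v1` accessors can be deleted because each versioned enum currently has a single variant, so call sites destructure it with an irrefutable `let`, as other hunks in this diff already do for `VersionedSignedBlock`. A sketch of the idiom (destructuring the request's field assumes the `transparent-api` feature where the field is gated):

```rust
use iroha_data_model::events::{prelude::*, FilterBox};

// One irrefutable `let` replaces the removed accessor methods.
fn filter_of(request: VersionedEventSubscriptionRequest) -> FilterBox {
    let VersionedEventSubscriptionRequest::V1(EventSubscriptionRequest(filter)) = request;
    filter
}
```

If a second version is ever declared, every such `let` becomes a compile error, which is arguably the point: each call site must then decide explicitly how to handle the new variant.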
@@ -226,7 +180,7 @@ pub mod prelude { EventMessage, EventSubscriptionRequest, VersionedEventMessage, VersionedEventSubscriptionRequest, }; - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] pub use super::Filter; pub use super::{ data::prelude::*, execute_trigger::prelude::*, pipeline::prelude::*, time::prelude::*, diff --git a/data_model/src/events/pipeline.rs b/data_model/src/events/pipeline.rs index 67fe381af4c..a821db663ac 100644 --- a/data_model/src/events/pipeline.rs +++ b/data_model/src/events/pipeline.rs @@ -196,13 +196,13 @@ impl PipelineEventFilter { } #[inline] - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] fn field_matches(filter: Option<&T>, event: &T) -> bool { filter.map_or(true, |field| field == event) } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl super::Filter for PipelineEventFilter { type Event = PipelineEvent; @@ -219,7 +219,7 @@ impl super::Filter for PipelineEventFilter { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl PipelineStatus { fn kind(&self) -> PipelineStatusKind { PipelineStatusKind::from(self) @@ -235,7 +235,7 @@ pub mod prelude { } #[cfg(test)] -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] mod tests { #![allow(clippy::restriction)] diff --git a/data_model/src/events/time.rs b/data_model/src/events/time.rs index 654a100921c..7a52134a53e 100644 --- a/data_model/src/events/time.rs +++ b/data_model/src/events/time.rs @@ -135,7 +135,7 @@ pub mod model { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Filter for TimeEventFilter { type Event = TimeEvent; @@ -172,7 +172,7 @@ impl Filter for TimeEventFilter { } /// Count something with the `schedule` within the `interval` -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] fn count_matches_in_interval(schedule: &Schedule, interval: &TimeInterval) -> u32 { schedule.period.map_or_else( || u32::from(Range::from(*interval).contains(&schedule.start)), @@ -199,7 +199,7 @@ fn count_matches_in_interval(schedule: &Schedule, interval: &TimeInterval) -> u3 /// /// # Panics /// Panics if resulting number in seconds can't be represented as `u64` -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] fn multiply_duration_by_u128(duration: Duration, n: u128) -> Duration { if let Ok(n) = u32::try_from(n) { return duration * n; @@ -251,7 +251,7 @@ pub mod prelude { } #[cfg(test)] -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] mod tests { use super::*; diff --git a/data_model/src/expression.rs b/data_model/src/expression.rs index 9bd41133c72..fe475ac0d6b 100644 --- a/data_model/src/expression.rs +++ b/data_model/src/expression.rs @@ -1,7 +1,7 @@ //! Expressions to use inside of ISIs. 
#![allow( - // Because of `codec(skip)` + // Caused by #[codec(skip)] clippy::default_trait_access, // Because of length on instructions and expressions (can't be 0) clippy::len_without_is_empty, diff --git a/data_model/src/lib.rs b/data_model/src/lib.rs index c87ccad4b16..98577684de9 100644 --- a/data_model/src/lib.rs +++ b/data_model/src/lib.rs @@ -32,14 +32,14 @@ use core::{ #[cfg(feature = "std")] use std::borrow::Cow; -use block::VersionedCommittedBlock; +use block::{BlockPayload, VersionedSignedBlock}; #[cfg(not(target_arch = "aarch64"))] use derive_more::Into; use derive_more::{AsRef, DebugCustom, Deref, Display, From, FromStr}; use events::FilterBox; use getset::Getters; pub use iroha_crypto::SignatureOf; -use iroha_crypto::{Hash, PublicKey}; +use iroha_crypto::{HashOf, PublicKey}; use iroha_data_model_derive::{ model, IdEqOrdHash, PartiallyTaggedDeserialize, PartiallyTaggedSerialize, }; @@ -49,14 +49,14 @@ use iroha_primitives::{ small::{Array as SmallArray, SmallVec}, }; use iroha_schema::IntoSchema; +pub use model::*; use parity_scale_codec::{Decode, Encode}; -use prelude::{Executable, TransactionQueryResult}; +use prelude::{Executable, TransactionPayload, TransactionQueryResult}; use serde::{Deserialize, Serialize}; use serde_with::{DeserializeFromStr, SerializeDisplay}; use strum::EnumDiscriminants; -pub use self::model::*; -use crate::{account::SignatureCheckCondition, name::Name, transaction::TransactionValue}; +use crate::{account::SignatureCheckCondition, name::Name}; pub mod account; pub mod asset; @@ -303,7 +303,7 @@ pub mod parameter { reason: "Failed to parse the `val` part of the `Parameter` as `LengthLimits`. Invalid upper `u32` bound.", })?; - Value::LengthLimits(LengthLimits::new(lower, upper)) + Value::Limits(LimitsValue::Length(LengthLimits::new(lower, upper))) } // Shorthand for `TransactionLimits` "TL" => { @@ -319,10 +319,10 @@ pub mod parameter { reason: "Failed to parse the `val` part of the `Parameter` as `TransactionLimits`. `max_wasm_size_bytes` field should be a valid `u64`.", })?; - Value::TransactionLimits(transaction::TransactionLimits::new( + Value::Limits(LimitsValue::Transaction(transaction::TransactionLimits::new( max_instr, max_wasm_size, - )) + ))) } // Shorthand for `MetadataLimits` "ML" => { @@ -338,7 +338,7 @@ pub mod parameter { reason: "Failed to parse the `val` part of the `Parameter` as `MetadataLimits`. 
Invalid `u32` in `max_entry_byte_size` field.", })?; - Value::MetadataLimits(metadata::Limits::new(lower, upper)) + Value::Limits(LimitsValue::Metadata(metadata::Limits::new(lower, upper))) } _ => return Err(ParseError { reason: @@ -407,19 +407,19 @@ pub mod parameter { ParameterId { name: Name::from_str("TransactionLimits").expect("Failed to parse `Name`"), }, - Value::TransactionLimits(TransactionLimits::new(42, 24)), + Value::Limits(LimitsValue::Transaction(TransactionLimits::new(42, 24))), ), Parameter::new( ParameterId { name: Name::from_str("MetadataLimits").expect("Failed to parse `Name`"), }, - Value::MetadataLimits(MetadataLimits::new(42, 24)), + Value::Limits(LimitsValue::Metadata(MetadataLimits::new(42, 24))), ), Parameter::new( ParameterId { name: Name::from_str("LengthLimits").expect("Failed to parse `Name`"), }, - Value::LengthLimits(LengthLimits::new(24, 42)), + Value::Limits(LimitsValue::Length(LengthLimits::new(24, 42))), ), Parameter::new( ParameterId { @@ -677,20 +677,17 @@ pub mod model { Vec, ), LimitedMetadata(metadata::Metadata), - MetadataLimits(metadata::Limits), - TransactionLimits(transaction::TransactionLimits), - LengthLimits(LengthLimits), + Limits(LimitsValue), #[serde_partially_tagged(untagged)] Id(IdBox), #[serde_partially_tagged(untagged)] Identifiable(IdentifiableBox), PublicKey(PublicKey), SignatureCheckCondition(SignatureCheckCondition), - TransactionValue(TransactionValue), TransactionQueryResult(TransactionQueryResult), PermissionToken(permission::PermissionToken), - Hash(Hash), - Block(VersionedCommittedBlockWrapper), + Hash(HashValue), + Block(VersionedSignedBlockWrapper), BlockHeader(block::BlockHeader), Ipv4Addr(iroha_primitives::addr::Ipv4Addr), Ipv6Addr(iroha_primitives::addr::Ipv6Addr), @@ -734,7 +731,61 @@ pub mod model { Fixed(fixed::Fixed), } - /// Cross-platform wrapper for [`VersionedCommittedBlock`]. + /// Enum for all supported hash types + #[derive( + DebugCustom, + Display, + Copy, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + FromVariant, + Decode, + Encode, + Deserialize, + Serialize, + IntoSchema, + )] + #[ffi_type] + pub enum HashValue { + /// Block hash + Block(HashOf), + /// Transaction hash + Transaction(HashOf), + } + + /// Enum for all supported limit types + #[derive( + DebugCustom, + Display, + Copy, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + FromVariant, + Decode, + Encode, + Deserialize, + Serialize, + IntoSchema, + )] + #[ffi_type] + pub enum LimitsValue { + /// Metadata limits + Metadata(metadata::Limits), + /// Transaction limits + Transaction(transaction::TransactionLimits), + /// Length limits + Length(LengthLimits), + } + + /// Cross-platform wrapper for [`VersionedSignedBlock`]. #[cfg(not(target_arch = "aarch64"))] #[derive( Debug, @@ -754,12 +805,12 @@ pub mod model { Serialize, IntoSchema, )] - // SAFETY: VersionedCommittedBlockWrapper has no trap representations in VersionedCommittedBlock - #[schema(transparent = "VersionedCommittedBlock")] + // SAFETY: VersionedSignedBlockWrapper has no trap representations in VersionedSignedBlock + #[schema(transparent = "VersionedSignedBlock")] #[ffi_type(unsafe {robust})] #[serde(transparent)] #[repr(transparent)] - pub struct VersionedCommittedBlockWrapper(VersionedCommittedBlock); + pub struct VersionedSignedBlockWrapper(pub VersionedSignedBlock); /// Cross-platform wrapper for `BlockValue`. 
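The three former limit variants of `Value` collapse into a single `Limits` arm here, discriminated by the new `LimitsValue` enum, and hashes likewise move under `HashValue`. A sketch of matching the consolidated shape (paths assume the root `pub use model::*;` re-export added in this diff):

```rust
use iroha_data_model::{LimitsValue, Value};

// The old MetadataLimits/TransactionLimits/LengthLimits arms are now
// nested one level deeper under `Value::Limits`.
fn describe(value: &Value) -> &'static str {
    match value {
        Value::Limits(LimitsValue::Metadata(_)) => "metadata limits",
        Value::Limits(LimitsValue::Transaction(_)) => "transaction limits",
        Value::Limits(LimitsValue::Length(_)) => "length limits",
        _ => "something else",
    }
}
```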
#[cfg(target_arch = "aarch64")] @@ -780,15 +831,15 @@ pub mod model { Serialize, IntoSchema, )] - #[schema(transparent = "Box")] + #[schema(transparent = "Box")] #[as_ref(forward)] #[deref(forward)] #[from(forward)] - // SAFETY: VersionedCommittedBlockWrapper has no trap representations in Box + // SAFETY: VersionedSignedBlockWrapper has no trap representations in Box #[ffi_type(unsafe {robust})] #[serde(transparent)] #[repr(transparent)] - pub struct VersionedCommittedBlockWrapper(pub(super) Box); + pub struct VersionedSignedBlockWrapper(pub Box); /// Limits of length of the identifiers (e.g. in [`domain::Domain`], [`account::Account`], [`asset::AssetDefinition`]) in number of chars #[derive( @@ -907,8 +958,8 @@ impl TryFrom for NumericValue { } #[cfg(target_arch = "aarch64")] -impl From for VersionedCommittedBlock { - fn from(block_value: VersionedCommittedBlockWrapper) -> Self { +impl From for VersionedSignedBlock { + fn from(block_value: VersionedSignedBlockWrapper) -> Self { *block_value.0 } } @@ -934,7 +985,7 @@ impl fmt::Display for Value { Value::Identifiable(v) => fmt::Display::fmt(&v, f), Value::PublicKey(v) => fmt::Display::fmt(&v, f), Value::SignatureCheckCondition(v) => fmt::Display::fmt(&v, f), - Value::TransactionValue(_) => write!(f, "TransactionValue"), + // TODO: display hash of the transaction Value::TransactionQueryResult(_) => write!(f, "TransactionQueryResult"), Value::PermissionToken(v) => fmt::Display::fmt(&v, f), Value::Hash(v) => fmt::Display::fmt(&v, f), @@ -942,10 +993,8 @@ impl fmt::Display for Value { Value::BlockHeader(v) => fmt::Display::fmt(&v, f), Value::Ipv4Addr(v) => fmt::Display::fmt(&v, f), Value::Ipv6Addr(v) => fmt::Display::fmt(&v, f), + Value::Limits(v) => fmt::Display::fmt(&v, f), Value::Numeric(v) => fmt::Display::fmt(&v, f), - Value::MetadataLimits(v) => fmt::Display::fmt(&v, f), - Value::TransactionLimits(v) => fmt::Display::fmt(&v, f), - Value::LengthLimits(v) => fmt::Display::fmt(&v, f), Value::Validator(v) => write!(f, "Validator({} bytes)", v.wasm.as_ref().len()), } } @@ -964,7 +1013,6 @@ impl Value { | Identifiable(_) | String(_) | Name(_) - | TransactionValue(_) | TransactionQueryResult(_) | PermissionToken(_) | Hash(_) @@ -972,9 +1020,7 @@ impl Value { | Ipv4Addr(_) | Ipv6Addr(_) | BlockHeader(_) - | MetadataLimits(_) - | TransactionLimits(_) - | LengthLimits(_) + | Limits(_) | Numeric(_) | Validator(_) => 1_usize, Vec(v) => v.iter().map(Self::len).sum::() + 1_usize, @@ -984,8 +1030,8 @@ impl Value { } } -impl From for Value { - fn from(block_value: VersionedCommittedBlock) -> Self { +impl From for Value { + fn from(block_value: VersionedSignedBlock) -> Self { Value::Block(block_value.into()) } } @@ -1003,11 +1049,9 @@ where } } -// TODO: This macro looks very similar to `from_and_try_from_value_identifiable` -// and `from_and_try_from_value_identifiablebox` macros. It should be possible to -// generalize them under one macro +// TODO: The following macros looks very similar. Try to generalize them under one macro macro_rules! from_and_try_from_value_idbox { - ( $($variant:ident( $ty:ty ),)* $(,)? ) => { + ( $($variant:ident( $ty:ty )),+ $(,)? ) => { $( impl TryFrom for $ty { type Error = ErrorTryFromEnum; @@ -1026,26 +1070,12 @@ macro_rules! 
from_and_try_from_value_idbox { Value::Id(IdBox::$variant(id)) } } - )* + )+ }; } -from_and_try_from_value_idbox!( - PeerId(peer::PeerId), - DomainId(domain::DomainId), - AccountId(account::AccountId), - AssetId(asset::AssetId), - AssetDefinitionId(asset::AssetDefinitionId), - TriggerId(trigger::TriggerId), - RoleId(role::RoleId), - ParameterId(parameter::ParameterId), -); - -// TODO: Should we wrap String with new type in order to convert like here? -//from_and_try_from_value_idbox!((DomainName(Name), ErrorValueTryFromDomainName),); - macro_rules! from_and_try_from_value_identifiablebox { - ( $( $variant:ident( Box< $ty:ty > ),)* $(,)? ) => { + ( $( $variant:ident( Box< $ty:ty > )),+ $(,)? ) => { $( impl TryFrom for $ty { type Error = ErrorTryFromEnum; @@ -1064,11 +1094,11 @@ macro_rules! from_and_try_from_value_identifiablebox { Value::Identifiable(IdentifiableBox::$variant(Box::new(id))) } } - )* + )+ }; } macro_rules! from_and_try_from_value_identifiable { - ( $( $variant:ident( $ty:ty ), )* $(,)? ) => { + ( $( $variant:ident( $ty:ty ) ),+ $(,)? ) => { $( impl TryFrom for $ty { type Error = ErrorTryFromEnum; @@ -1087,11 +1117,124 @@ macro_rules! from_and_try_from_value_identifiable { Value::Identifiable(IdentifiableBox::$variant(id)) } } - )* + )+ }; } -from_and_try_from_value_identifiablebox!( +macro_rules! from_and_try_from_and_try_as_value_hash { + ( $( $variant:ident($ty:ty)),+ $(,)? ) => { $( + impl TryFrom for $ty { + type Error = ErrorTryFromEnum; + + #[inline] + fn try_from(value: Value) -> Result { + if let Value::Hash(HashValue::$variant(value)) = value { + Ok(value) + } else { + Err(Self::Error::default()) + } + } + } + + impl From<$ty> for Value { + #[inline] + fn from(value: $ty) -> Self { + Value::Hash(HashValue::$variant(value)) + } + } + + impl TryAsMut<$ty> for HashValue { + type Error = crate::EnumTryAsError<$ty, HashValue>; + + #[inline] + fn try_as_mut(&mut self) -> Result<&mut $ty, Self::Error> { + if let HashValue::$variant (value) = self { + Ok(value) + } else { + Err(crate::EnumTryAsError::got(*self)) + } + } + } + + impl TryAsRef<$ty> for HashValue { + type Error = crate::EnumTryAsError<$ty, HashValue>; + + #[inline] + fn try_as_ref(&self) -> Result<& $ty, Self::Error> { + if let HashValue::$variant (value) = self { + Ok(value) + } else { + Err(crate::EnumTryAsError::got(*self)) + } + } + })+ + }; +} + +macro_rules! from_and_try_from_and_try_as_value_numeric { + ( $( $variant:ident($ty:ty)),+ $(,)? 
) => { $( + impl TryFrom for $ty { + type Error = ErrorTryFromEnum; + + #[inline] + fn try_from(value: Value) -> Result { + if let Value::Numeric(NumericValue::$variant(value)) = value { + Ok(value) + } else { + Err(Self::Error::default()) + } + } + } + + impl From<$ty> for Value { + #[inline] + fn from(value: $ty) -> Self { + Value::Numeric(NumericValue::$variant(value)) + } + } + + impl TryAsMut<$ty> for NumericValue { + type Error = crate::EnumTryAsError<$ty, NumericValue>; + + #[inline] + fn try_as_mut(&mut self) -> Result<&mut $ty, Self::Error> { + if let NumericValue:: $variant (value) = self { + Ok(value) + } else { + Err(crate::EnumTryAsError::got(*self)) + } + } + } + + impl TryAsRef<$ty> for NumericValue { + type Error = crate::EnumTryAsError<$ty, NumericValue>; + + #[inline] + fn try_as_ref(&self) -> Result<& $ty, Self::Error> { + if let NumericValue:: $variant (value) = self { + Ok(value) + } else { + Err(crate::EnumTryAsError::got(*self)) + } + } + })+ + }; +} + +from_and_try_from_value_idbox!( + PeerId(peer::PeerId), + DomainId(domain::DomainId), + AccountId(account::AccountId), + AssetId(asset::AssetId), + AssetDefinitionId(asset::AssetDefinitionId), + TriggerId(trigger::TriggerId), + RoleId(role::RoleId), + ParameterId(parameter::ParameterId), + // TODO: Should we wrap String with new type in order to convert like here? + //from_and_try_from_value_idbox!((DomainName(Name), ErrorValueTryFromDomainName),); +); + +from_and_try_from_value_identifiablebox! { NewDomain(Box), NewAccount(Box), NewAssetDefinition(Box), @@ -1103,10 +1246,10 @@ from_and_try_from_value_identifiablebox!( Asset(Box), Role(Box), PermissionTokenDefinition(Box), - Parameter(Box), -); + Parameter(Box) +} -from_and_try_from_value_identifiable!( +from_and_try_from_value_identifiable! { NewDomain(Box), NewAccount(Box), NewAssetDefinition(Box), @@ -1118,8 +1261,20 @@ from_and_try_from_value_identifiable!( Trigger(TriggerBox), Role(Box), PermissionTokenDefinition(Box), - Parameter(Box), -); + Parameter(Box) +} + +from_and_try_from_and_try_as_value_numeric! { + U32(u32), + U64(u64), + U128(u128), + Fixed(fixed::Fixed) +} + +from_and_try_from_and_try_as_value_hash! { + Block(HashOf), + Transaction(HashOf) +} impl TryFrom for RegistrableBox { type Error = ErrorTryFromEnum; @@ -1217,7 +1372,7 @@ where } } -impl TryFrom for VersionedCommittedBlock { +impl TryFrom for VersionedSignedBlock { type Error = ErrorTryFromEnum; fn try_from(value: Value) -> Result { @@ -1247,65 +1402,6 @@ where } } -macro_rules! from_and_try_from_and_try_as_value_numeric { - ( $( $variant:ident($ty:ty),)+ $(,)? 
) => { - $( - impl TryFrom for $ty { - type Error = ErrorTryFromEnum; - - #[inline] - fn try_from(value: Value) -> Result { - if let Value::Numeric(NumericValue::$variant(value)) = value { - Ok(value) - } else { - Err(Self::Error::default()) - } - } - } - - impl From<$ty> for Value { - #[inline] - fn from(value: $ty) -> Self { - Value::Numeric(NumericValue::$variant(value)) - } - } - - impl TryAsMut<$ty> for NumericValue { - type Error = crate::EnumTryAsError<$ty, NumericValue>; - - #[inline] - fn try_as_mut(&mut self) -> Result<&mut $ty, Self::Error> { - if let NumericValue:: $variant (value) = self { - Ok(value) - } else { - Err(crate::EnumTryAsError::got(*self)) - } - } - } - - impl TryAsRef<$ty> for NumericValue { - type Error = crate::EnumTryAsError<$ty, NumericValue>; - - #[inline] - fn try_as_ref(&self) -> Result<& $ty, Self::Error> { - if let NumericValue:: $variant (value) = self { - Ok(value) - } else { - Err(crate::EnumTryAsError::got(*self)) - } - } - } - )* - }; -} - -from_and_try_from_and_try_as_value_numeric! { - U32(u32), - U64(u64), - U128(u128), - Fixed(fixed::Fixed), -} - impl TryFrom for Value { type Error = >::Error; fn try_from(value: f64) -> Result { @@ -1771,13 +1867,13 @@ pub mod prelude { #[cfg(feature = "std")] pub use super::current_time; pub use super::{ - account::prelude::*, asset::prelude::*, domain::prelude::*, evaluate::prelude::*, - events::prelude::*, expression::prelude::*, isi::prelude::*, metadata::prelude::*, - name::prelude::*, parameter::prelude::*, peer::prelude::*, permission::prelude::*, - query::prelude::*, role::prelude::*, transaction::prelude::*, trigger::prelude::*, - validator::prelude::*, EnumTryAsError, HasMetadata, IdBox, Identifiable, IdentifiableBox, - LengthLimits, NumericValue, PredicateTrait, RegistrableBox, ToValue, TriggerBox, TryAsMut, - TryAsRef, TryToValue, UpgradableBox, ValidationError, Value, + account::prelude::*, asset::prelude::*, block::prelude::*, domain::prelude::*, + evaluate::prelude::*, events::prelude::*, expression::prelude::*, isi::prelude::*, + metadata::prelude::*, name::prelude::*, parameter::prelude::*, peer::prelude::*, + permission::prelude::*, query::prelude::*, role::prelude::*, transaction::prelude::*, + trigger::prelude::*, validator::prelude::*, EnumTryAsError, HasMetadata, IdBox, + Identifiable, IdentifiableBox, LengthLimits, NumericValue, PredicateTrait, RegistrableBox, + ToValue, TriggerBox, TryAsMut, TryAsRef, TryToValue, UpgradableBox, ValidationError, Value, }; #[cfg(feature = "http")] pub use super::{pagination::prelude::*, sorting::prelude::*}; diff --git a/data_model/src/metadata.rs b/data_model/src/metadata.rs index da0e3311c9f..4662542650e 100644 --- a/data_model/src/metadata.rs +++ b/data_model/src/metadata.rs @@ -228,7 +228,7 @@ impl Metadata { } } -#[cfg(feature = "transparent_api")] +#[cfg(feature = "transparent-api")] impl Metadata { /// Removes a key from the map, returning the owned /// `Some(value)` at the key if the key was previously in the @@ -296,7 +296,7 @@ mod tests { } #[test] - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] fn nested_fns_ignore_empty_path() { let mut metadata = Metadata::new(); let empty_path = vec![]; @@ -308,7 +308,7 @@ mod tests { } #[test] - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] fn nesting_inserts_removes() -> Result<(), TestError> { let mut metadata = Metadata::new(); let limits = Limits::new(1024, 1024); @@ -342,7 +342,7 @@ mod tests { } #[test] - #[cfg(feature = "transparent_api")] + 
#[cfg(feature = "transparent-api")] fn non_existent_path_segment_fails() -> Result<(), TestError> { let mut metadata = Metadata::new(); let limits = Limits::new(10, 15); diff --git a/data_model/src/predicate.rs b/data_model/src/predicate.rs index 5ff434f5840..52096fa73b1 100644 --- a/data_model/src/predicate.rs +++ b/data_model/src/predicate.rs @@ -5,7 +5,7 @@ use alloc::vec; use core::{fmt::Display, ops::Not}; use super::*; -use crate::{IdBox, Name, Value}; +use crate::{block::Block, IdBox, Name, Value}; mod nontrivial { use super::*; @@ -1000,7 +1000,7 @@ pub mod value { Display(string::StringPredicate), /// Apply predicate to the numerical value. Numerical(numerical::SemiRange), - /// Timestamp (currently for [`VersionedCommittedBlock`] only). + /// Timestamp (currently for [`VersionedSignedBlock`] only). TimeStamp(numerical::SemiInterval), /// IpAddress enumerable by `u32` Ipv4Addr(ip_addr::Ipv4Predicate), @@ -1066,7 +1066,7 @@ pub mod value { ValuePredicate::Numerical(pred) => pred.applies(input), ValuePredicate::Display(pred) => pred.applies(&input.to_string()), ValuePredicate::TimeStamp(pred) => match input { - Value::Block(block) => pred.applies(block.header().timestamp), + Value::Block(block) => pred.applies(block.header().timestamp_ms), _ => false, }, ValuePredicate::Ipv4Addr(pred) => match input { diff --git a/data_model/src/query.rs b/data_model/src/query.rs index d10ce12c782..72e2dab453c 100644 --- a/data_model/src/query.rs +++ b/data_model/src/query.rs @@ -1085,13 +1085,13 @@ pub mod transaction { use alloc::{format, string::String, vec::Vec}; use derive_more::Display; - use iroha_crypto::Hash; + use iroha_crypto::HashOf; use super::Query; use crate::{ expression::EvaluatesTo, prelude::Account, - transaction::{TransactionQueryResult, TransactionValue}, + transaction::{TransactionPayload, TransactionQueryResult}, Identifiable, }; @@ -1119,11 +1119,11 @@ pub mod transaction { #[derive(Display)] #[display(fmt = "Find transaction with `{hash}` hash")] #[repr(transparent)] - // SAFETY: `FindTransactionByHash` has no trap representation in `EvaluatesTo` + // SAFETY: `FindTransactionByHash` has no trap representation in `EvaluatesTo>` #[ffi_type(unsafe {robust})] pub struct FindTransactionByHash { /// Transaction hash. - pub hash: EvaluatesTo, + pub hash: EvaluatesTo>, } } @@ -1132,11 +1132,11 @@ pub mod transaction { } impl Query for FindTransactionsByAccountId { - type Output = Vec; + type Output = Vec; } impl Query for FindTransactionByHash { - type Output = TransactionValue; + type Output = TransactionQueryResult; } impl FindTransactionsByAccountId { @@ -1150,7 +1150,7 @@ pub mod transaction { impl FindTransactionByHash { ///Construct [`FindTransactionByHash`]. 
- pub fn new(hash: impl Into>) -> Self { + pub fn new(hash: impl Into>>) -> Self { Self { hash: hash.into() } } } @@ -1170,11 +1170,11 @@ pub mod block { use alloc::{boxed::Box, format, string::String, vec::Vec}; use derive_more::Display; - use iroha_crypto::Hash; + use iroha_crypto::HashOf; use super::Query; use crate::{ - block::{BlockHeader, VersionedCommittedBlock}, + block::{BlockHeader, BlockPayload, VersionedSignedBlock}, prelude::EvaluatesTo, }; @@ -1197,16 +1197,16 @@ pub mod block { #[derive(Display)] #[display(fmt = "Find block header with `{hash}` hash")] #[repr(transparent)] - // SAFETY: `FindBlockHeaderByHash` has no trap representation in `EvaluatesTo` + // SAFETY: `FindBlockHeaderByHash` has no trap representation in `EvaluatesTo>` #[ffi_type(unsafe {robust})] pub struct FindBlockHeaderByHash { /// Block hash. - pub hash: EvaluatesTo, + pub hash: EvaluatesTo>, } } impl Query for FindAllBlocks { - type Output = Vec; + type Output = Vec; } impl Query for FindAllBlockHeaders { @@ -1219,7 +1219,7 @@ pub mod block { impl FindBlockHeaderByHash { /// Construct [`FindBlockHeaderByHash`]. - pub fn new(hash: impl Into>) -> Self { + pub fn new(hash: impl Into>>) -> Self { Self { hash: hash.into() } } } @@ -1287,9 +1287,6 @@ pub mod http { #[version_with_scale(n = 1, versioned = "VersionedQueryResult")] #[serde(transparent)] #[repr(transparent)] - // TODO: This should be a separate type, not just wrap Value because it infects Value - // with variants that can only ever be returned, i.e. can't be used in instructions - // enum QueryResult { ... } pub struct QueryResult(pub Value); } @@ -1377,7 +1374,12 @@ pub mod error { pub use self::model::*; use super::*; - use crate::{block::VersionedCommittedBlock, permission, prelude::*, validator}; + use crate::{ + block::{BlockPayload, VersionedSignedBlock}, + permission, + prelude::*, + validator, + }; #[model] pub mod model { @@ -1441,10 +1443,10 @@ pub mod error { MetadataKey(Name), /// Block with supplied parent hash not found. More description in a string. #[display(fmt = "Block with hash {_0} not found")] - Block(HashOf), + Block(HashOf), /// Transaction with given hash not found. #[display(fmt = "Transaction not found")] - Transaction(HashOf), + Transaction(HashOf), /// Peer not found. #[display(fmt = "Peer {_0} not found")] Peer(PeerId), @@ -1473,7 +1475,7 @@ pub mod prelude { pub use super::http::*; pub use super::{ account::prelude::*, asset::prelude::*, block::prelude::*, domain::prelude::*, - peer::prelude::*, permission::prelude::*, role::prelude::*, transaction::*, - trigger::prelude::*, Query, QueryBox, + error::QueryExecutionFailure, peer::prelude::*, permission::prelude::*, role::prelude::*, + transaction::*, trigger::prelude::*, Query, QueryBox, }; } diff --git a/data_model/src/transaction.rs b/data_model/src/transaction.rs index 8d429f15514..cd6531bf4a5 100644 --- a/data_model/src/transaction.rs +++ b/data_model/src/transaction.rs @@ -1,56 +1,46 @@ //! [`Transaction`] structures and related implementations. 
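Since these queries now evaluate to `HashOf<TransactionPayload>` and `HashOf<BlockPayload>` instead of a bare `Hash`, a lookup is type-checked end to end. A minimal sketch, assuming the `std` feature (for `Transaction::hash`) and the `From` impls generated by the hash conversion macro above:

```rust
use iroha_data_model::prelude::*;

// `hash()` now returns `HashOf<TransactionPayload>` (see transaction.rs
// below), which is exactly what `FindTransactionByHash::new` expects;
// a hash of any other payload type no longer compiles here.
fn lookup(tx: &VersionedSignedTransaction) -> FindTransactionByHash {
    FindTransactionByHash::new(tx.hash())
}
```

Note that the output types changed as well: `FindTransactionByHash` now yields `TransactionQueryResult` rather than `TransactionValue`, and `FindAllBlocks` yields `VersionedSignedBlock`s.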
#![allow(clippy::std_instead_of_core)] -// TODO: Remove when a proper `Display` will be implemented for `Transaction` -#![allow(clippy::use_debug)] #[cfg(not(feature = "std"))] -use alloc::{boxed::Box, collections::btree_set, format, string::String, vec::Vec}; +use alloc::{boxed::Box, format, string::String, vec::Vec}; use core::{ cmp::Ordering, fmt::{Display, Formatter, Result as FmtResult}, iter::IntoIterator, }; #[cfg(feature = "std")] -use std::{collections::btree_set, time::Duration}; +use std::time::Duration; use derive_more::{Constructor, DebugCustom, Display}; use getset::Getters; -use iroha_crypto::{Hash, SignatureOf, SignatureVerificationFail, SignaturesOf}; +use iroha_crypto::{HashOf, SignaturesOf}; use iroha_data_model_derive::model; use iroha_macro::FromVariant; use iroha_schema::IntoSchema; -#[cfg(feature = "transparent_api")] -use iroha_version::declare_versioned_with_scale; use iroha_version::{declare_versioned, version, version_with_scale}; use parity_scale_codec::{Decode, Encode}; use serde::{Deserialize, Serialize}; pub use self::model::*; -use crate::{account::Account, isi::InstructionBox, metadata::UnlimitedMetadata, Identifiable}; - -/// Default maximum number of instructions and expressions per transaction -pub const DEFAULT_MAX_INSTRUCTION_NUMBER: u64 = 2_u64.pow(12); - -/// Default maximum number of instructions and expressions per transaction -pub const DEFAULT_MAX_WASM_SIZE_BYTES: u64 = 2_u64.pow(22); // 4 MiB +use crate::{ + account::Account, block::BlockPayload, isi::InstructionBox, metadata::UnlimitedMetadata, + Identifiable, +}; /// Trait for basic transaction operations pub trait Transaction { - /// Result of hashing - type HashOf: Transaction; - /// Returns payload of a transaction fn payload(&self) -> &TransactionPayload; /// Calculate transaction [`Hash`](`iroha_crypto::Hash`). #[inline] #[cfg(feature = "std")] - fn hash(&self) -> iroha_crypto::HashOf - where - Self: Sized, - { - iroha_crypto::HashOf::new(self.payload()).transmute() + fn hash(&self) -> iroha_crypto::HashOf { + iroha_crypto::HashOf::new(self.payload()) } + /// Return signatures + fn signatures(&self) -> &SignaturesOf; + /// Checks if number of instructions in payload or wasm size exceeds maximum /// /// # Errors @@ -103,7 +93,7 @@ pub trait Transaction { /// specified TTLs was reached. #[cfg(feature = "std")] fn is_expired(&self, transaction_time_to_live: Duration) -> bool { - let tx_timestamp = Duration::from_millis(self.payload().creation_time); + let tx_timestamp = Duration::from_millis(self.payload().creation_time_ms); crate::current_time().saturating_sub(tx_timestamp) > core::cmp::min( transaction_time_to_live, @@ -115,7 +105,7 @@ pub trait Transaction { /// to have a future timestamp. #[cfg(feature = "std")] fn is_in_future(&self, threshold: Duration) -> bool { - let tx_timestamp = Duration::from_millis(self.payload().creation_time); + let tx_timestamp = Duration::from_millis(self.payload().creation_time_ms); tx_timestamp.saturating_sub(crate::current_time()) > threshold } } @@ -131,12 +121,12 @@ pub trait Sign { fn sign( self, key_pair: iroha_crypto::KeyPair, - ) -> Result; + ) -> Result; } #[model] pub mod model { - use super::*; + use super::{error::TransactionRejectionReason, *}; /// Either ISI or Wasm binary #[derive( @@ -205,17 +195,17 @@ pub mod model { #[getset(get = "pub")] #[ffi_type] pub struct TransactionPayload { + /// Creation timestamp (unix time in milliseconds). 
+ pub creation_time_ms: u64, + /// Random value to make different hashes for transactions which occur repeatedly and simultaneously. + pub nonce: Option, /// Account ID of transaction creator. pub account_id: ::Id, - /// Instructions or WebAssembly smartcontract + /// ISI or a `WebAssembly` smartcontract. pub instructions: Executable, - /// Time of creation (unix time, in milliseconds). - pub creation_time: u64, - /// The transaction will be dropped after this time if it is still in a `Queue`. + /// If transaction is not committed by this time it will be dropped. pub time_to_live_ms: u64, - /// Random value to make different hashes for transactions which occur repeatedly and simultaneously - pub nonce: Option, - /// Metadata. + /// Store for additional information. pub metadata: UnlimitedMetadata, } @@ -256,9 +246,9 @@ pub mod model { pub payload: TransactionPayload, } - /// Structure that represents the second state of the transaction after receiving at least one signature. + /// Transaction that contains at least one signature /// - /// `Iroha` and its clients use [`Transaction`] to send transactions over the network. + /// `Iroha` and its clients use [`Self`] to send transactions over the network. /// After a transaction is signed and before it can be processed any further, /// the transaction must be accepted by the `Iroha` peer. /// The peer verifies the signatures and checks the limits. @@ -276,13 +266,14 @@ pub mod model { Serialize, IntoSchema, )] - #[display(fmt = "{self:?}")] // TODO ? + #[cfg_attr(not(feature = "std"), display(fmt = "Signed transaction"))] + #[cfg_attr(feature = "std", display(fmt = "{}", "self.hash()"))] #[ffi_type] pub struct SignedTransaction { /// [`Transaction`] payload. pub payload: TransactionPayload, - /// [`SignatureOf`]<[`TransactionPayload`]>. - pub signatures: btree_set::BTreeSet>, + /// [`iroha_crypto::SignatureOf`]<[`TransactionPayload`]>. + pub signatures: SignaturesOf, } /// Transaction Value used in Instructions and Queries @@ -294,7 +285,7 @@ pub mod model { /// Committed transaction Transaction(Box), /// Rejected transaction with reason of rejection - RejectedTransaction(Box), + RejectedTransaction(Box<(VersionedSignedTransaction, TransactionRejectionReason)>), } /// `TransactionQueryResult` is used in `FindAllTransactions` query @@ -317,56 +308,7 @@ pub mod model { /// Transaction pub tx_value: TransactionValue, /// The hash of the block to which `tx` belongs to - pub block_hash: Hash, - } - - /// `ValidTransaction` represents trustfull Transaction state. - #[version_with_scale(n = 1, versioned = "VersionedValidTransaction")] - #[derive( - Debug, Clone, PartialEq, Eq, Hash, Decode, Encode, Deserialize, Serialize, IntoSchema, - )] - #[ffi_type] - pub struct ValidTransaction { - /// The [`Transaction`]'s payload. - pub payload: TransactionPayload, - /// [`SignatureOf`]<[`TransactionPayload`]>. - pub signatures: SignaturesOf, - } - - /// [`RejectedTransaction`] represents transaction rejected by some validator at some stage of the pipeline. - #[version(n = 1, versioned = "VersionedRejectedTransaction")] - #[derive( - Debug, - Clone, - PartialEq, - Eq, - Hash, - Getters, - Decode, - Encode, - Deserialize, - Serialize, - IntoSchema, - )] - #[ffi_type] - pub struct RejectedTransaction { - /// The [`Transaction`]'s payload. - pub payload: TransactionPayload, - /// [`SignatureOf`] [`Transaction`]. - pub signatures: SignaturesOf, - /// The reason for rejecting this transaction during the validation pipeline. 
- #[getset(get = "pub")] - pub rejection_reason: error::TransactionRejectionReason, - } - - /// `AcceptedTransaction` — a transaction accepted by iroha peer. - #[version_with_scale(n = 1, versioned = "VersionedAcceptedTransaction")] - #[derive(Debug, Clone, Decode, Encode, Serialize)] - pub(crate) struct AcceptedTransaction { - /// Payload of this transaction. - pub payload: TransactionPayload, - /// Signatures for this transaction. - pub signatures: SignaturesOf, + pub block_hash: HashOf, } } @@ -419,7 +361,7 @@ impl TransactionBuilder { payload: TransactionPayload { account_id, instructions: instructions.into(), - creation_time, + creation_time_ms: creation_time, time_to_live_ms: proposed_ttl_ms, nonce: None, metadata: UnlimitedMetadata::new(), @@ -449,14 +391,14 @@ impl Sign for TransactionBuilder { fn sign( self, key_pair: iroha_crypto::KeyPair, - ) -> Result { - let signature = SignatureOf::new(key_pair, &self.payload)?; - let signatures = btree_set::BTreeSet::from([signature]); + ) -> Result { + let signatures = SignaturesOf::new(key_pair, &self.payload)?; Ok(SignedTransaction { payload: self.payload, signatures, - }) + } + .into()) } } @@ -465,87 +407,48 @@ declare_versioned!(VersionedSignedTransaction 1..2, Debug, Clone, PartialEq, Eq, #[cfg(all(not(feature = "ffi_import"), not(feature = "ffi_export")))] declare_versioned!(VersionedSignedTransaction 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, IntoSchema); -impl VersionedSignedTransaction { - /// Convert from `&VersionedSignedTransaction` to V1 reference - pub const fn as_v1(&self) -> &SignedTransaction { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedSignedTransaction` to V1 mutable reference - #[inline] - pub fn as_mut_v1(&mut self) -> &mut SignedTransaction { - match self { - Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedSignedTransaction` to V1 - #[inline] - pub fn into_v1(self) -> SignedTransaction { - match self { - Self::V1(v1) => v1, - } - } -} - impl Transaction for VersionedSignedTransaction { - type HashOf = Self; - #[inline] fn payload(&self) -> &TransactionPayload { match self { Self::V1(v1) => &v1.payload, } } -} - -impl From for VersionedSignedTransaction { - fn from(transaction: VersionedValidTransaction) -> Self { - match transaction { - VersionedValidTransaction::V1(transaction) => { - let signatures = transaction.signatures.into(); - - SignedTransaction { - payload: transaction.payload, - signatures, - } - .into() - } - } - } -} -impl SignedTransaction { - /// Return signatures - pub fn signatures(&self) -> impl ExactSizeIterator> { - self.signatures.iter() + #[inline] + fn signatures(&self) -> &SignaturesOf { + let VersionedSignedTransaction::V1(tx) = self; + tx.signatures() } } impl Transaction for SignedTransaction { - type HashOf = Self; - #[inline] fn payload(&self) -> &TransactionPayload { &self.payload } + + #[inline] + fn signatures(&self) -> &SignaturesOf { + &self.signatures + } } #[cfg(feature = "std")] -impl Sign for SignedTransaction { +impl Sign for VersionedSignedTransaction { fn sign( - mut self, + self, key_pair: iroha_crypto::KeyPair, - ) -> Result { - let signature = SignatureOf::new(key_pair, &self.payload)?; - self.signatures.insert(signature); + ) -> Result { + let VersionedSignedTransaction::V1(mut tx) = self; + let signature = iroha_crypto::SignatureOf::new(key_pair, &tx.payload)?; + tx.signatures.insert(signature); Ok(SignedTransaction { - payload: self.payload, - signatures: self.signatures, - }) + payload: 
tx.payload, + signatures: tx.signatures, + } + .into()) } } @@ -555,7 +458,7 @@ impl TransactionValue { pub fn payload(&self) -> &TransactionPayload { match self { TransactionValue::Transaction(tx) => tx.payload(), - TransactionValue::RejectedTransaction(tx) => tx.payload(), + TransactionValue::RejectedTransaction(tx) => tx.0.payload(), } } } @@ -571,8 +474,8 @@ impl Ord for TransactionValue { #[inline] fn cmp(&self, other: &Self) -> Ordering { self.payload() - .creation_time - .cmp(&other.payload().creation_time) + .creation_time_ms + .cmp(&other.payload().creation_time_ms) } } @@ -595,233 +498,8 @@ impl Ord for TransactionQueryResult { #[inline] fn cmp(&self, other: &Self) -> Ordering { self.payload() - .creation_time - .cmp(&other.payload().creation_time) - } -} - -#[cfg(any(feature = "ffi_import", feature = "ffi_export"))] -declare_versioned!(VersionedValidTransaction 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, iroha_ffi::FfiType, IntoSchema); -#[cfg(all(not(feature = "ffi_import"), not(feature = "ffi_export")))] -declare_versioned!(VersionedValidTransaction 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, IntoSchema); - -impl VersionedValidTransaction { - /// Convert from `&VersionedValidTransaction` to V1 reference - #[inline] - pub const fn as_v1(&self) -> &ValidTransaction { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedValidTransaction` to V1 mutable reference - #[inline] - pub fn as_mut_v1(&mut self) -> &mut ValidTransaction { - match self { - Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedValidTransaction` to V1 - #[inline] - pub fn into_v1(self) -> ValidTransaction { - match self { - Self::V1(v1) => v1, - } - } -} - -impl Transaction for VersionedValidTransaction { - type HashOf = VersionedSignedTransaction; - - #[inline] - fn payload(&self) -> &TransactionPayload { - &self.as_v1().payload - } -} - -impl ValidTransaction { - /// Return signatures - pub fn signatures(&self) -> impl ExactSizeIterator> { - self.signatures.iter() - } -} - -impl Transaction for ValidTransaction { - type HashOf = SignedTransaction; - - #[inline] - fn payload(&self) -> &TransactionPayload { - &self.payload - } -} - -#[cfg(any(feature = "ffi_import", feature = "ffi_export"))] -declare_versioned!(VersionedRejectedTransaction 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, iroha_ffi::FfiType, IntoSchema); -#[cfg(all(not(feature = "ffi_import"), not(feature = "ffi_export")))] -declare_versioned!(VersionedRejectedTransaction 1..2, Debug, Clone, PartialEq, Eq, Hash, FromVariant, IntoSchema); - -impl VersionedRejectedTransaction { - /// Convert from `&VersionedRejectedTransaction` to V1 reference - #[inline] - pub const fn as_v1(&self) -> &RejectedTransaction { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedRejectedTransaction` to V1 mutable reference - #[inline] - pub fn as_mut_v1(&mut self) -> &mut RejectedTransaction { - match self { - Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedRejectedTransaction` to V1 - #[inline] - pub fn into_v1(self) -> RejectedTransaction { - match self { - Self::V1(v1) => v1, - } - } -} - -impl Transaction for VersionedRejectedTransaction { - type HashOf = VersionedSignedTransaction; - - #[inline] - fn payload(&self) -> &TransactionPayload { - match self { - Self::V1(v1) => &v1.payload, - } - } -} - -impl RejectedTransaction { - /// Return signatures - pub fn signatures(&self) -> impl ExactSizeIterator> { - self.signatures.iter() - } 
-} - -impl Transaction for RejectedTransaction { - type HashOf = SignedTransaction; - - #[inline] - fn payload(&self) -> &TransactionPayload { - &self.payload - } -} - -impl From for VersionedSignedTransaction { - fn from(transaction: VersionedRejectedTransaction) -> Self { - match transaction { - VersionedRejectedTransaction::V1(transaction) => { - let signatures = transaction.signatures.into(); - - SignedTransaction { - payload: transaction.payload, - signatures, - } - .into() - } - } - } -} - -#[cfg(feature = "transparent_api")] -declare_versioned_with_scale!(VersionedAcceptedTransaction 1..2, Debug, Clone, iroha_macro::FromVariant, Serialize); - -#[cfg(feature = "transparent_api")] -impl VersionedAcceptedTransaction { - /// Convert from `&VersionedAcceptedTransaction` to V1 reference - pub const fn as_v1(&self) -> &AcceptedTransaction { - match self { - VersionedAcceptedTransaction::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedAcceptedTransaction` to V1 mutable reference - pub fn as_mut_v1(&mut self) -> &mut AcceptedTransaction { - match self { - VersionedAcceptedTransaction::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedAcceptedTransaction` to V1 - pub fn into_v1(self) -> AcceptedTransaction { - match self { - VersionedAcceptedTransaction::V1(v1) => v1, - } - } -} - -#[cfg(feature = "transparent_api")] -impl Transaction for VersionedAcceptedTransaction { - type HashOf = VersionedSignedTransaction; - - #[inline] - fn payload(&self) -> &TransactionPayload { - &self.as_v1().payload - } -} - -#[cfg(feature = "transparent_api")] -impl Transaction for AcceptedTransaction { - type HashOf = SignedTransaction; - - #[inline] - fn payload(&self) -> &TransactionPayload { - &self.payload - } -} - -#[cfg(feature = "transparent_api")] -impl AcceptedTransaction { - /// Accept transaction. Transition from [`Transaction`] to [`AcceptedTransaction`]. - /// - /// # Errors - /// - /// - if it does not adhere to limits - /// - if signature verification fails - #[cfg(feature = "std")] - pub fn accept( - transaction: SignedTransaction, - limits: &TransactionLimits, - ) -> Result { - if !IS_GENESIS { - transaction.check_limits(limits)? 
- } - let signatures: SignaturesOf<_> = transaction - .signatures - .try_into() - .expect("Transaction should have at least one signature"); - signatures.verify(&transaction.payload)?; - - Ok(Self { - payload: transaction.payload, - signatures, - }) - } -} - -#[cfg(feature = "transparent_api")] -impl From for VersionedSignedTransaction { - fn from(tx: VersionedAcceptedTransaction) -> Self { - let tx: AcceptedTransaction = tx.into_v1(); - let tx: SignedTransaction = tx.into(); - tx.into() - } -} - -#[cfg(feature = "transparent_api")] -impl From for SignedTransaction { - fn from(transaction: AcceptedTransaction) -> Self { - SignedTransaction { - payload: transaction.payload, - signatures: transaction.signatures.into_iter().collect(), - } + .creation_time_ms + .cmp(&other.payload().creation_time_ms) } } @@ -872,18 +550,6 @@ pub mod error { pub mod model { use super::*; - /// Error type for transaction from [`Transaction`] to [`AcceptedTransaction`] - #[derive(Debug, Display, FromVariant)] - #[cfg_attr(feature = "std", derive(thiserror::Error))] - pub(crate) enum AcceptTransactionFailure { - /// Failure during limits check - TransactionLimit(#[cfg_attr(feature = "std", source)] TransactionLimitError), - /// Failure during signature verification - SignatureVerification( - #[cfg_attr(feature = "std", source)] SignatureVerificationFail, - ), - } - /// Error which indicates max instruction count was reached #[derive( Debug, @@ -1159,43 +825,17 @@ mod http { #[repr(transparent)] // SAFETY: `PendingTransactions` has no trap representation in `Vec` #[ffi_type(unsafe {robust})] - pub struct PendingTransactions(pub(super) Vec); - } - - impl VersionedPendingTransactions { - /// Convert from `&VersionedPendingTransactions` to V1 reference - #[inline] - pub const fn as_v1(&self) -> &PendingTransactions { - match self { - Self::V1(v1) => v1, - } - } - - /// Convert from `&mut VersionedPendingTransactions` to V1 mutable reference - #[inline] - pub fn as_mut_v1(&mut self) -> &mut PendingTransactions { - match self { - Self::V1(v1) => v1, - } - } - - /// Performs the conversion from `VersionedPendingTransactions` to V1 - #[inline] - pub fn into_v1(self) -> PendingTransactions { - match self { - Self::V1(v1) => v1, - } - } + pub struct PendingTransactions(pub(super) Vec); } - impl FromIterator for PendingTransactions { - fn from_iter>(iter: T) -> Self { - Self(iter.into_iter().collect()) + impl FromIterator for VersionedPendingTransactions { + fn from_iter>(iter: T) -> Self { + PendingTransactions(iter.into_iter().collect::>()).into() } } impl IntoIterator for PendingTransactions { - type Item = SignedTransaction; + type Item = VersionedSignedTransaction; type IntoIter = vec::IntoIter; @@ -1221,13 +861,10 @@ pub mod prelude { #[cfg(feature = "std")] pub use super::Sign; pub use super::{ - error::prelude::*, Executable, RejectedTransaction, SignedTransaction, Transaction, - TransactionBuilder, TransactionLimits, TransactionPayload, TransactionQueryResult, - TransactionValue, ValidTransaction, VersionedRejectedTransaction, - VersionedSignedTransaction, VersionedValidTransaction, WasmSmartContract, + error::prelude::*, Executable, Transaction, TransactionBuilder, TransactionLimits, + TransactionPayload, TransactionQueryResult, TransactionValue, VersionedSignedTransaction, + WasmSmartContract, }; - #[cfg(feature = "transparent_api")] - pub use super::{AcceptedTransaction, VersionedAcceptedTransaction}; } #[cfg(test)] @@ -1235,64 +872,6 @@ mod tests { #![allow(clippy::pedantic, clippy::restriction)] use super::*; - 
#[cfg(feature = "transparent_api")] - use crate::prelude::FailBox; - - #[test] - #[cfg(feature = "transparent_api")] - fn transaction_not_accepted_max_instruction_number() { - let key_pair = iroha_crypto::KeyPair::generate().expect("Failed to generate key pair."); - let inst: InstructionBox = FailBox { - message: "Will fail".to_owned(), - } - .into(); - let tx = TransactionBuilder::new( - "root@global".parse().expect("Valid"), - vec![inst; DEFAULT_MAX_INSTRUCTION_NUMBER as usize + 1], - 1000, - ) - .sign(key_pair) - .expect("Valid"); - let tx_limits = TransactionLimits { - max_instruction_number: 4096, - max_wasm_size_bytes: 0, - }; - let result = AcceptedTransaction::accept::(tx, &tx_limits); - assert!(result.is_err()); - - let err = result.unwrap_err(); - assert_eq!( - err.to_string(), - format!( - "Too many instructions in payload, max number is {}, but got {}", - tx_limits.max_instruction_number, - DEFAULT_MAX_INSTRUCTION_NUMBER + 1 - ) - ); - } - - #[test] - #[cfg(feature = "transparent_api")] - fn genesis_transaction_ignore_limits() { - let key_pair = iroha_crypto::KeyPair::generate().expect("Failed to generate key pair."); - let inst: InstructionBox = FailBox { - message: "Will fail".to_owned(), - } - .into(); - let tx = TransactionBuilder::new( - "root@global".parse().expect("Valid"), - vec![inst; DEFAULT_MAX_INSTRUCTION_NUMBER as usize + 1], - 1000, - ) - .sign(key_pair) - .expect("Valid"); - let tx_limits = TransactionLimits { - max_instruction_number: 4096, - max_wasm_size_bytes: 0, - }; - - assert!(AcceptedTransaction::accept::(tx, &tx_limits).is_ok()); - } #[test] fn wasm_smart_contract_debug_repr_should_contain_just_len() { diff --git a/data_model/src/trigger.rs b/data_model/src/trigger.rs index d0e26ce567e..a704488b9c7 100644 --- a/data_model/src/trigger.rs +++ b/data_model/src/trigger.rs @@ -180,7 +180,7 @@ pub mod action { pub use self::model::*; use super::*; - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] use crate::prelude::Account; #[model] @@ -232,7 +232,7 @@ pub mod action { } } - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] impl crate::HasMetadata for Action { fn metadata(&self) -> &crate::metadata::Metadata { &self.metadata @@ -287,7 +287,7 @@ pub mod action { } /// Trait for common methods for all [`Action`]'s - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] pub trait ActionTrait { /// Type of action executable type Executable; @@ -317,7 +317,7 @@ pub mod action { fn clone_and_box(&self) -> Action; } - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] impl + Clone, E: Clone> ActionTrait for Action { type Executable = E; @@ -398,7 +398,7 @@ pub mod action { pub mod prelude { //! Re-exports of commonly used types. - #[cfg(feature = "transparent_api")] + #[cfg(feature = "transparent-api")] pub use super::action::ActionTrait; pub use super::{action::prelude::*, Trigger, TriggerId}; } diff --git a/data_model/src/validator.rs b/data_model/src/validator.rs index 63d00d4111a..3a737ddb4c1 100644 --- a/data_model/src/validator.rs +++ b/data_model/src/validator.rs @@ -53,7 +53,7 @@ pub mod model { } // TODO: Client doesn't need structures defined inside this macro. 
When dynamic linking is - // implemented use: #[cfg(any(feature = "transparent_api", feature = "ffi_import"))] + // implemented use: #[cfg(any(feature = "transparent-api", feature = "ffi_import"))] /// Boxed version of [`NeedsPermission`] #[derive( @@ -62,6 +62,7 @@ pub mod model { #[ffi_type] pub enum NeedsValidationBox { /// [`Transaction`] application operation + // TODO: Should it not be `VersionedSignedTransaction`? Transaction(SignedTransaction), /// [`InstructionBox`] execution operation Instruction(InstructionBox), diff --git a/default_validator/src/lib.rs b/default_validator/src/lib.rs index f3543f81f21..86783753b4b 100644 --- a/default_validator/src/lib.rs +++ b/default_validator/src/lib.rs @@ -9,7 +9,7 @@ extern crate panic_halt; pub mod isi; -use iroha_validator::{pass_conditions, prelude::*}; +use iroha_validator::{data_model::transaction::SignedTransaction, pass_conditions, prelude::*}; /// Apply `callback` macro for all token types from this crate. /// diff --git a/docs/source/references/config.md b/docs/source/references/config.md index c11cad1447b..9416bbde2ff 100644 --- a/docs/source/references/config.md +++ b/docs/source/references/config.md @@ -41,10 +41,10 @@ The following is the default configuration used by Iroha. "DEBUG_OUTPUT_NEW_BLOCKS": false }, "SUMERAGI": { - "KEY_PAIR": null, "PEER_ID": null, - "BLOCK_TIME_MS": 2000, + "KEY_PAIR": null, "TRUSTED_PEERS": null, + "BLOCK_TIME_MS": 2000, "COMMIT_TIME_LIMIT_MS": 4000, "TRANSACTION_LIMITS": { "max_instruction_number": 4096, @@ -485,7 +485,7 @@ Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI ### `sumeragi.block_time_ms` -The period of time a peer waits for the `CreatedBlock` message after getting a `TransactionReceipt` +Time a peer waits to produce a new block since the beginning of the voting round Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_BLOCK_TIME_MS` @@ -495,7 +495,7 @@ Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI ### `sumeragi.commit_time_limit_ms` -The period of time a peer waits for `CommitMessage` from the proxy tail. +Time a peer waits for the block to be committed since the beginning of the voting round Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_COMMIT_TIME_LIMIT_MS` @@ -558,7 +558,7 @@ Has type `Option`[^1]. Can be configured via environment vari ### `sumeragi.trusted_peers` -Optional list of predefined trusted peers. +List of predefined trusted peers. Has type `Option`[^1]. 
Can be configured via environment variable `SUMERAGI_TRUSTED_PEERS` diff --git a/docs/source/references/schema.json b/docs/source/references/schema.json index 8bd35ff7fec..ca1779383c6 100644 --- a/docs/source/references/schema.json +++ b/docs/source/references/schema.json @@ -561,40 +561,60 @@ "BlockHeader": { "Struct": [ { - "name": "timestamp", + "name": "height", + "type": "u64" + }, + { + "name": "timestamp_ms", "type": "u128" }, { - "name": "consensus_estimation", - "type": "u64" + "name": "previous_block_hash", + "type": "Option>" }, { - "name": "height", - "type": "u64" + "name": "transactions_hash", + "type": "Option>>" + }, + { + "name": "rejected_transactions_hash", + "type": "Option>>" + }, + { + "name": "commit_topology", + "type": "Vec" }, { "name": "view_change_index", "type": "u64" }, { - "name": "previous_block_hash", - "type": "Option>" + "name": "consensus_estimation_ms", + "type": "u64" + } + ] + }, + "BlockMessage": "VersionedSignedBlock", + "BlockPayload": { + "Struct": [ + { + "name": "header", + "type": "BlockHeader" }, { - "name": "transactions_hash", - "type": "Option>>" + "name": "transactions", + "type": "Vec" }, { - "name": "rejected_transactions_hash", - "type": "Option>>" + "name": "rejected_transactions", + "type": "Vec>" }, { - "name": "committed_with_topology", - "type": "Vec" + "name": "event_recommendations", + "type": "Vec" } ] }, - "BlockMessage": "VersionedCommittedBlock", "BlockRejectionReason": { "Enum": [ { @@ -616,30 +636,6 @@ } ] }, - "CommittedBlock": { - "Struct": [ - { - "name": "header", - "type": "BlockHeader" - }, - { - "name": "rejected_transactions", - "type": "Vec" - }, - { - "name": "transactions", - "type": "Vec" - }, - { - "name": "event_recommendations", - "type": "Vec" - }, - { - "name": "signatures", - "type": "SignaturesOf" - } - ] - }, "Compact": { "Int": "Compact" }, @@ -1001,7 +997,15 @@ } ] }, - "EvaluatesTo": { + "EvaluatesTo>": { + "Struct": [ + { + "name": "expression", + "type": "Expression" + } + ] + }, + "EvaluatesTo>": { "Struct": [ { "name": "expression", @@ -1673,7 +1677,7 @@ "Struct": [ { "name": "hash", - "type": "EvaluatesTo" + "type": "EvaluatesTo>" } ] }, @@ -1721,11 +1725,11 @@ }, { "name": "Block", - "type": "HashOf" + "type": "HashOf" }, { "name": "Transaction", - "type": "HashOf" + "type": "HashOf" }, { "name": "Peer", @@ -1785,7 +1789,7 @@ "Struct": [ { "name": "hash", - "type": "EvaluatesTo" + "type": "EvaluatesTo>" } ] }, @@ -1885,9 +1889,21 @@ ] }, "Hash": "Array", - "HashOf>": "Hash", - "HashOf": "Hash", - "HashOf": "Hash", + "HashOf": "Hash", + "HashOf>": "Hash", + "HashOf": "Hash", + "HashValue": { + "Enum": [ + { + "name": "Block", + "type": "HashOf" + }, + { + "name": "Transaction", + "type": "HashOf" + } + ] + }, "IdBox": { "Enum": [ { @@ -2161,6 +2177,22 @@ } ] }, + "LimitsValue": { + "Enum": [ + { + "name": "Metadata", + "type": "Limits" + }, + { + "name": "Transaction", + "type": "TransactionLimits" + }, + { + "name": "Length", + "type": "LengthLimits" + } + ] + }, "Metadata": { "Struct": [ { @@ -2413,11 +2445,11 @@ "Option": { "Option": "Hash" }, - "Option>>": { - "Option": "HashOf>" + "Option>": { + "Option": "HashOf" }, - "Option>": { - "Option": "HashOf" + "Option>>": { + "Option": "HashOf>" }, "Option": { "Option": "InstructionBox" @@ -2581,7 +2613,7 @@ } ] }, - "PendingTransactions": "Vec", + "PendingTransactions": "Vec", "PermissionRemoved": { "Struct": [ { @@ -2983,22 +3015,6 @@ } ] }, - "RejectedTransaction": { - "Struct": [ - { - "name": "payload", - "type": "TransactionPayload" - }, - { 
- "name": "signatures", - "type": "SignaturesOf" - }, - { - "name": "rejection_reason", - "type": "TransactionRejectionReason" - } - ] - }, "RemoveKeyValueBox": { "Struct": [ { @@ -3204,14 +3220,14 @@ ] }, "SignatureCheckCondition": "EvaluatesTo", - "SignatureOf": "Signature", + "SignatureOf": "Signature", "SignatureOf": "Signature", "SignatureOf": "Signature", - "SignaturesOf": { + "SignaturesOf": { "Struct": [ { "name": "signatures", - "type": "SortedVec>" + "type": "SortedVec>" } ] }, @@ -3223,6 +3239,18 @@ } ] }, + "SignedBlock": { + "Struct": [ + { + "name": "payload", + "type": "BlockPayload" + }, + { + "name": "signatures", + "type": "SignaturesOf" + } + ] + }, "SignedQuery": { "Struct": [ { @@ -3243,7 +3271,7 @@ }, { "name": "signatures", - "type": "SortedVec>" + "type": "SignaturesOf" } ] }, @@ -3298,8 +3326,8 @@ "SortedVec": { "Vec": "RoleId" }, - "SortedVec>": { - "Vec": "SignatureOf" + "SortedVec>": { + "Vec": "SignatureOf" }, "SortedVec>": { "Vec": "SignatureOf" @@ -3400,6 +3428,14 @@ }, "TransactionPayload": { "Struct": [ + { + "name": "creation_time_ms", + "type": "u64" + }, + { + "name": "nonce", + "type": "Option" + }, { "name": "account_id", "type": "AccountId" @@ -3408,18 +3444,10 @@ "name": "instructions", "type": "Executable" }, - { - "name": "creation_time", - "type": "u64" - }, { "name": "time_to_live_ms", "type": "u64" }, - { - "name": "nonce", - "type": "Option" - }, { "name": "metadata", "type": "SortedMap" @@ -3434,7 +3462,7 @@ }, { "name": "block_hash", - "type": "Hash" + "type": "HashOf" } ] }, @@ -3477,7 +3505,7 @@ }, { "name": "RejectedTransaction", - "type": "VersionedRejectedTransaction" + "type": "Tuple2" } ] }, @@ -3605,6 +3633,12 @@ } ] }, + "Tuple2": { + "Tuple": [ + "VersionedSignedTransaction", + "TransactionRejectionReason" + ] + }, "UnregisterBox": { "Struct": [ { @@ -3637,18 +3671,6 @@ } ] }, - "ValidTransaction": { - "Struct": [ - { - "name": "payload", - "type": "TransactionPayload" - }, - { - "name": "signatures", - "type": "SignaturesOf" - } - ] - }, "Validator": { "Struct": [ { @@ -3707,16 +3729,8 @@ "type": "Metadata" }, { - "name": "MetadataLimits", - "type": "Limits" - }, - { - "name": "TransactionLimits", - "type": "TransactionLimits" - }, - { - "name": "LengthLimits", - "type": "LengthLimits" + "name": "Limits", + "type": "LimitsValue" }, { "name": "Id", @@ -3734,10 +3748,6 @@ "name": "SignatureCheckCondition", "type": "SignatureCheckCondition" }, - { - "name": "TransactionValue", - "type": "TransactionValue" - }, { "name": "TransactionQueryResult", "type": "TransactionQueryResult" @@ -3748,11 +3758,11 @@ }, { "name": "Hash", - "type": "Hash" + "type": "HashValue" }, { "name": "Block", - "type": "VersionedCommittedBlock" + "type": "VersionedSignedBlock" }, { "name": "BlockHeader", @@ -3794,13 +3804,7 @@ "name": "LimitedMetadata" }, { - "name": "MetadataLimits" - }, - { - "name": "TransactionLimits" - }, - { - "name": "LengthLimits" + "name": "Limits" }, { "name": "Id" @@ -3814,9 +3818,6 @@ { "name": "SignatureCheckCondition" }, - { - "name": "TransactionValue" - }, { "name": "TransactionQueryResult" }, @@ -3908,17 +3909,14 @@ "Vec": { "Vec": "PeerId" }, - "Vec": { - "Vec": "SignedTransaction" + "Vec>": { + "Vec": "Tuple2" }, "Vec": { "Vec": "Value" }, - "Vec": { - "Vec": "VersionedRejectedTransaction" - }, - "Vec": { - "Vec": "VersionedValidTransaction" + "Vec": { + "Vec": "VersionedSignedTransaction" }, "Vec": { "Vec": "u8" @@ -3939,14 +3937,6 @@ } ] }, - "VersionedCommittedBlock": { - "Enum": [ - { - "name": "V1", - "type": 
"CommittedBlock" - } - ] - }, "VersionedEventMessage": { "Enum": [ { @@ -3979,11 +3969,11 @@ } ] }, - "VersionedRejectedTransaction": { + "VersionedSignedBlock": { "Enum": [ { "name": "V1", - "type": "RejectedTransaction" + "type": "SignedBlock" } ] }, @@ -4003,14 +3993,6 @@ } ] }, - "VersionedValidTransaction": { - "Enum": [ - { - "name": "V1", - "type": "ValidTransaction" - } - ] - }, "WasmExecutionFail": { "Struct": [ { diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index 76e6999fb30..9b2acc7b4eb 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -6,8 +6,9 @@ edition = "2021" [dependencies] iroha_config = { version = "=2.0.0-pre-rc.13", path = "../config" } +iroha_macro = { version = "2.0.0-pre-rc.13", path = "../macro", default-features = false } iroha_crypto = { version = "=2.0.0-pre-rc.13", path = "../crypto" } -iroha_data_model = { version = "=2.0.0-pre-rc.13", path = "../data_model", features = ["transparent_api"] } +iroha_data_model = { version = "=2.0.0-pre-rc.13", path = "../data_model", features = ["transparent-api"] } iroha_logger = { version = "=2.0.0-pre-rc.13", path = "../logger" } iroha_primitives = { version = "=2.0.0-pre-rc.13", path = "../primitives" } iroha_schema = { version = "=2.0.0-pre-rc.13", path = "../schema" } @@ -15,5 +16,7 @@ iroha_schema = { version = "=2.0.0-pre-rc.13", path = "../schema" } derive_more = { version = "0.99.17", default-features = false, features = ["deref"]} serde = { version = "1.0.151", features = ["derive"] } serde_json = "1.0.91" -eyre = "0.6.8" +once_cell = "1.16.0" +thiserror = "1.0.38" tracing = "0.1.37" +eyre = "0.6.8" diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index d5f1a6ffa4f..7d2154ed9d1 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -16,15 +16,17 @@ use std::{ path::{Path, PathBuf}, }; -use derive_more::{Deref, From}; +use derive_more::{Deref, Display, From}; use eyre::{bail, eyre, ErrReport, Result, WrapErr}; use iroha_config::genesis::Configuration; -use iroha_crypto::{KeyPair, PublicKey}; +use iroha_crypto::{KeyPair, PublicKey, SignatureVerificationFail, SignaturesOf}; use iroha_data_model::{ asset::AssetDefinition, prelude::{Metadata, *}, + transaction::{error::TransactionLimitError, SignedTransaction}, validator::Validator, }; +use iroha_macro::FromVariant; use iroha_primitives::small::{smallvec, SmallVec}; use iroha_schema::IntoSchema; use serde::{Deserialize, Serialize}; @@ -32,9 +34,85 @@ use serde::{Deserialize, Serialize}; /// Time to live for genesis transactions. const GENESIS_TRANSACTIONS_TTL_MS: u64 = 100_000; +/// `AcceptedTransaction` — a transaction accepted by iroha peer. +#[derive(Debug, Clone)] +pub struct AcceptedTransaction { + /// Transaction payload. + pub payload: TransactionPayload, + /// Transaction signatures. + pub signatures: SignaturesOf, +} + +/// Error type for transaction from [`Transaction`] to [`AcceptedTransaction`] +#[derive(Debug, Display, FromVariant, thiserror::Error)] +pub enum AcceptTransactionFailure { + /// Failure during limits check + TransactionLimit(#[source] TransactionLimitError), + /// Failure during signature verification + SignatureVerification(#[source] SignatureVerificationFail), +} + +/// [`Id`] of the genesis domain. +pub static GENESIS_DOMAIN_ID: once_cell::sync::Lazy = + once_cell::sync::Lazy::new(|| "genesis".parse().expect("Valid")); +/// [`Id`] of the genesis account. 
+pub static GENESIS_ACCOUNT_ID: once_cell::sync::Lazy = + once_cell::sync::Lazy::new(|| "genesis@genesis".parse().expect("Valid")); + +impl Transaction for AcceptedTransaction { + #[inline] + fn payload(&self) -> &TransactionPayload { + &self.payload + } + + #[inline] + fn signatures(&self) -> &SignaturesOf { + &self.signatures + } +} + +impl AcceptedTransaction { + /// Accept transaction. Transition from [`Transaction`] to [`AcceptedTransaction`]. + /// + /// # Errors + /// + /// - if it does not adhere to limits + /// - if signature verification fails + pub fn accept( + transaction: VersionedSignedTransaction, + limits: &TransactionLimits, + ) -> Result { + if let Err(error) = transaction.signatures().verify(transaction.payload()) { + return Err((transaction, error.into())); + } + + if !IS_GENESIS { + if let Err(error) = transaction.check_limits(limits) { + return Err((transaction, error.into())); + } + } + + let VersionedSignedTransaction::V1(v1_transaction) = transaction; + Ok(Self { + payload: v1_transaction.payload, + signatures: v1_transaction.signatures, + }) + } +} + +impl From for VersionedSignedTransaction { + fn from(source: AcceptedTransaction) -> Self { + SignedTransaction { + payload: source.payload, + signatures: source.signatures, + } + .into() + } +} + /// Genesis network trait for mocking pub trait GenesisNetworkTrait: - Deref> + Sync + Send + 'static + Sized + Debug + Deref> + Sync + Send + 'static + Sized + Debug { /// Construct [`GenesisNetwork`] from configuration. /// @@ -53,7 +131,7 @@ pub trait GenesisNetworkTrait: pub struct GenesisNetwork { /// transactions from `GenesisBlock`, any transaction is accepted #[deref] - pub transactions: Vec, + pub transactions: Vec, } impl GenesisNetworkTrait for GenesisNetwork { @@ -240,14 +318,18 @@ impl GenesisTransaction { self, genesis_key_pair: KeyPair, limits: &TransactionLimits, - ) -> Result { - let transaction = - TransactionBuilder::new(AccountId::genesis(), self.isi, GENESIS_TRANSACTIONS_TTL_MS) - .sign(genesis_key_pair)?; + ) -> Result { + let transaction = TransactionBuilder::new( + GENESIS_ACCOUNT_ID.clone(), + self.isi, + GENESIS_TRANSACTIONS_TTL_MS, + ) + .sign(genesis_key_pair)?; AcceptedTransaction::accept::(transaction, limits) - .wrap_err("Failed to accept transaction") .map(Into::into) + .map_err(|(_block, error)| error) + .wrap_err("Failed to accept transaction") } } @@ -357,7 +439,7 @@ impl RawGenesisDomainBuilder { /// Add an account to this domain without a public key. #[cfg(test)] - pub fn account_without_public_key(mut self, account_name: Name) -> Self { + fn account_without_public_key(mut self, account_name: Name) -> Self { let account_id = AccountId::new(account_name, self.domain_id.clone()); self.transaction .isi diff --git a/schema/gen/Cargo.toml b/schema/gen/Cargo.toml index 930f8780c97..244b8ab1b2d 100644 --- a/schema/gen/Cargo.toml +++ b/schema/gen/Cargo.toml @@ -11,6 +11,6 @@ license.workspace = true # TODO: Should genesis belong to schema? 
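With acceptance moved into `iroha_genesis`, a failed `accept` now hands the transaction back to the caller along with the failure, and the genesis distinction is the `IS_GENESIS` const generic rather than a separate code path. A hedged end-to-end sketch; the function and its arguments are illustrative, and only `TransactionBuilder`, `Sign`, and `AcceptedTransaction::accept` come from this patch:

```rust
use iroha_data_model::{prelude::*, transaction::TransactionLimits};
use iroha_genesis::{AcceptTransactionFailure, AcceptedTransaction};

fn sign_then_accept(
    account_id: AccountId,
    instructions: Vec<InstructionBox>,
    key_pair: iroha_crypto::KeyPair,
    limits: &TransactionLimits,
) -> Result<AcceptedTransaction, AcceptTransactionFailure> {
    // `Sign` is implemented for the builder and now yields the versioned form.
    let signed: VersionedSignedTransaction =
        TransactionBuilder::new(account_id, instructions, 100_000)
            .sign(key_pair)
            .expect("signing with a valid key pair");

    // `<false>` enforces the limits check; `<true>` (genesis) verifies
    // signatures only. On failure the original transaction is returned
    // too, e.g. for logging or re-queueing; this sketch keeps the error.
    AcceptedTransaction::accept::<false>(signed, limits).map_err(|(_rejected, error)| error)
}
```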
#3284 iroha_genesis = { version = "=2.0.0-pre-rc.13", path = "../../genesis"} iroha_primitives = { version = "=2.0.0-pre-rc.13", path = "../../primitives" } -iroha_data_model = { version = "=2.0.0-pre-rc.13", path = "../../data_model", features = ["http"] } +iroha_data_model = { version = "=2.0.0-pre-rc.13", path = "../../data_model", features = ["http", "transparent-api"] } iroha_crypto = { version = "=2.0.0-pre-rc.13", path = "../../crypto" } iroha_schema = { version = "=2.0.0-pre-rc.13", path = "../../schema" } diff --git a/schema/gen/src/lib.rs b/schema/gen/src/lib.rs index 0514434a071..92238387e97 100644 --- a/schema/gen/src/lib.rs +++ b/schema/gen/src/lib.rs @@ -3,11 +3,11 @@ //! types are included in the schema. #![allow(clippy::arithmetic_side_effects)] -use iroha_data_model::{block::stream::prelude::*, query::error::QueryExecutionFailure}; use iroha_genesis::RawGenesisBlock; use iroha_schema::prelude::*; macro_rules! types { + ($($t:ty),+ $(,)?) => { /// Generate map holding all schema types #[macro_export] @@ -17,7 +17,7 @@ macro_rules! types { $( $insert_entry!(map, $t); )+ #[cfg(target_arch = "aarch64")] - $insert_entry!(map, Box); + $insert_entry!(map, Box); map }} @@ -95,7 +95,7 @@ types!( BTreeSet, BTreeSet, BTreeSet, - BTreeSet>, + BTreeSet>, BlockHeader, BlockMessage, BlockRejectionReason, @@ -119,10 +119,8 @@ types!( Box>, Box, Box, - Box, Box, BurnBox, - CommittedBlock, Conditional, ConfigurationEvent, ConstString, @@ -147,7 +145,8 @@ types!( EvaluatesTo, EvaluatesTo, EvaluatesTo, - EvaluatesTo, + EvaluatesTo>, + EvaluatesTo>, EvaluatesTo, EvaluatesTo, EvaluatesTo, @@ -237,9 +236,9 @@ types!( GrantBox, Greater, Hash, - HashOf>, - HashOf, - HashOf, + HashOf, + HashOf>, + HashOf, IdBox, IdentifiableBox, If, @@ -278,8 +277,8 @@ types!( Option, Option, Option, - Option>>, - Option>, + Option>, + Option>>, Option, Option, Option, @@ -325,7 +324,6 @@ types!( QueryResult, RaiseTo, RegisterBox, - RejectedTransaction, RemoveKeyValueBox, Repeats, RevokeBox, @@ -343,12 +341,12 @@ types!( SetParameterBox, Signature, SignatureCheckCondition, - SignatureOf, + SignatureOf, SignatureOf, SignatureOf, - SignatureWrapperOf, + SignatureWrapperOf, SignatureWrapperOf, - SignaturesOf, + SignaturesOf, SignaturesOf, SignedQuery, SignedTransaction, @@ -377,7 +375,6 @@ types!( UnregisterBox, UnsatisfiedSignatureConditionFail, UpgradableBox, - ValidTransaction, Validator, ValidatorEvent, Value, @@ -390,21 +387,17 @@ types!( Vec, Vec, Vec, - Vec, - Vec, Vec, VersionedBlockMessage, VersionedBlockSubscriptionRequest, - VersionedCommittedBlock, - VersionedCommittedBlockWrapper, VersionedEventMessage, VersionedEventSubscriptionRequest, VersionedPaginatedQueryResult, VersionedPendingTransactions, - VersionedRejectedTransaction, + VersionedSignedBlock, + VersionedSignedBlockWrapper, VersionedSignedQuery, VersionedSignedTransaction, - VersionedValidTransaction, WasmExecutionFail, WasmSmartContract, Where, @@ -439,7 +432,7 @@ mod tests { BlockMessage, BlockSubscriptionRequest, VersionedBlockMessage, VersionedBlockSubscriptionRequest, }, - BlockHeader, CommittedBlock, VersionedCommittedBlock, + BlockHeader, BlockPayload, }, domain::NewDomain, ipfs::IpfsPath, @@ -452,9 +445,12 @@ mod tests { }, prelude::*, query::error::{FindError, QueryExecutionFailure}, - transaction::error::{TransactionExpired, TransactionLimitError}, + transaction::{ + error::{TransactionExpired, TransactionLimitError}, + SignedTransaction, + }, validator::Validator, - ValueKind, VersionedCommittedBlockWrapper, + ValueKind, 
VersionedSignedBlockWrapper, }; use iroha_genesis::RawGenesisBlock; use iroha_primitives::{ @@ -468,8 +464,7 @@ mod tests { // NOTE: These type parameters should not be have their schema exposed // By default `PhantomData` wrapped types schema will not be included - const SCHEMALESS_TYPES: [&str; 2] = - ["MerkleTree", "RegistrableBox"]; + const SCHEMALESS_TYPES: [&str; 2] = ["MerkleTree", "RegistrableBox"]; fn is_const_generic(generic: &str) -> bool { generic.parse::().is_ok() diff --git a/schema/src/lib.rs b/schema/src/lib.rs index cb01440e0ac..38171fefc84 100644 --- a/schema/src/lib.rs +++ b/schema/src/lib.rs @@ -489,6 +489,28 @@ impl IntoSchema for [T; L] { } } +// TODO: Implement for all tuples? +impl TypeId for (F, S) { + fn id() -> String { + format!("Tuple2<{}, {}>", F::id(), S::id()) + } +} +impl IntoSchema for (F, S) { + fn type_name() -> String { + format!("Tuple2<{}, {}>", F::type_name(), S::type_name()) + } + fn update_schema_map(map: &mut MetaMap) { + if !map.contains_key::() { + map.insert::(Metadata::Tuple(UnnamedFieldsMeta { + types: vec![core::any::TypeId::of::(), core::any::TypeId::of::()], + })); + + F::update_schema_map(map); + S::update_schema_map(map); + } + } +} + pub mod prelude { //! Exports common types. diff --git a/tools/kura_inspector/src/main.rs b/tools/kura_inspector/src/main.rs index 0ff0a89f3c7..9516cb6d478 100644 --- a/tools/kura_inspector/src/main.rs +++ b/tools/kura_inspector/src/main.rs @@ -8,7 +8,6 @@ use std::path::{Path, PathBuf}; use clap::{Parser, Subcommand}; use iroha_core::kura::{BlockIndex, BlockStore}; -use iroha_data_model::block::VersionedCommittedBlock; use iroha_version::scale::DecodeVersioned; /// Kura inspector @@ -136,7 +135,7 @@ fn print_blockchain(block_store_path: &Path, from_height: u64, block_count: u64) block_store .read_block_data(idx.start, &mut block_buf) .expect(&format!("Failed to read block № {} data.", meta_index + 1)); - let block = VersionedCommittedBlock::decode_all_versioned(&block_buf) + let block = iroha_data_model::block::VersionedSignedBlock::decode_all_versioned(&block_buf) .expect(&format!("Failed to decode block № {}", meta_index + 1)); println!("Block#{} :", meta_index + 1); println!("{block:#?}"); diff --git a/tools/parity_scale_decoder/samples/trigger.bin b/tools/parity_scale_decoder/samples/trigger.bin index 99b44b46404..bdbf97f7b88 100644 Binary files a/tools/parity_scale_decoder/samples/trigger.bin and b/tools/parity_scale_decoder/samples/trigger.bin differ diff --git a/tools/parity_scale_decoder/src/main.rs b/tools/parity_scale_decoder/src/main.rs index 626f1519c72..78ae8c51852 100644 --- a/tools/parity_scale_decoder/src/main.rs +++ b/tools/parity_scale_decoder/src/main.rs @@ -27,7 +27,7 @@ use iroha_data_model::{ BlockMessage, BlockSubscriptionRequest, VersionedBlockMessage, VersionedBlockSubscriptionRequest, }, - BlockHeader, CommittedBlock, VersionedCommittedBlock, + BlockHeader, BlockPayload, }, domain::NewDomain, ipfs::IpfsPath, @@ -40,9 +40,12 @@ use iroha_data_model::{ }, prelude::*, query::error::{FindError, QueryExecutionFailure}, - transaction::error::{TransactionExpired, TransactionLimitError}, + transaction::{ + error::{TransactionExpired, TransactionLimitError}, + SignedTransaction, + }, validator::Validator, - ValueKind, VersionedCommittedBlockWrapper, + ValueKind, VersionedSignedBlockWrapper, }; use iroha_primitives::{ addr::{Ipv4Addr, Ipv6Addr},
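The `Tuple2` entries that appeared in schema.json above come from this new two-element tuple support in `iroha_schema`. A small illustration of the naming rule; the element types are chosen arbitrarily, and any pair of `IntoSchema` types should behave the same:

```rust
use iroha_schema::IntoSchema;

fn tuple_schema_name() {
    // A 2-tuple names itself "Tuple2<F, S>" and, when registered, inserts
    // one `Metadata::Tuple` entry before recursing into the element schemas.
    assert_eq!(<(u32, u64) as IntoSchema>::type_name(), "Tuple2<u32, u64>");
}
```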