diff --git a/.config/lingua.dic b/.config/lingua.dic new file mode 100644 index 00000000000..a31ecf2c7bd --- /dev/null +++ b/.config/lingua.dic @@ -0,0 +1,176 @@ +90 +annualised/MS +Apache-2.0/M +AccountId/MS +api/SM +auth +auths/SM +API/SM +APIs +arg +args +aren +async +Best/MS +BlockId +BFT/M +bitfield/MS +blake2/MS +blockchain/MS +borked +BridgeStorage +BlockNumber +BTC/S +CLI/MS +Chain1 +Chain2 +ChainSpec +ChainTime +chain_getBlock +choosen +config/MS +crypto/MS +customizable/B +debian/M +decodable/MS +DOT/S +doesn +dispatchables +ed25519 +enum/MS +ERC-20 +ethereum/MS +externality/MS +extrinsic/MS +extrinsics +fedora/M +FN +FinalizationError +GiB/S +GPL/M +GPLv3/M +Handler/MS +HeaderA +HeaderId +https +implementers +inherent/MS +initialize/RG +instantiate/B +intrinsic/MS +intrinsics +InitiateChange +isn +io +js +keccak256/M +KSM/S +Lane1 +Lane2 +Lane3 +LaneId +kusama/S +KYC/M +keccak +Kovan +merkle/MS +MessageNonce +MessageNonces +Merklized +MaybeOrphan +MaybeExtra +MetricsParams +MessagePayload +misbehavior/SM +misbehaviors +MIN_SIZE +MIT/M +max_value +multivalidator/SM +natively +OldHeader +nonces +number +no_std +ok +oneshot/MS +others' +OutboundMessages +parablock/MS +parachain/MS +parameterize/D +pallet_message_lane +polkadot/MS +pov-block/MS +PoA +PoV/MS +precommit +promethius +promethius' +prune_end +prune_depth +provisioner/MS +redhat/M +repo/MS +receival +RPC/MS +RLP +runtime/MS +Runtime1 +Runtime2 +rustc/MS +ServiceFactory/MS +SignedExtension +SIZE_FACTOR +sr25519 +SS58 +SS58Prefix +src +S|N +SURI +source +struct/MS +Submitter1 +submitters/MS +subsystem/MS +subsystems' +shouldn +synchronizer +taskmanager/MS +teleport/RG +teleportation/SM +teleporter/SM +teleporters +testnet/MS +trie/MS +trustless/Y +ThisChain +TCP +ubuntu/M +union/MSG +undeliverable +unfinalized +unpruned +unservable/B +unsynced +ve +vec +Vec +validator/SM +verifier +w3f/MS +wasm/M +WND/S +XCM/S +XCMP/M +include/BG +isolate/BG +Instance1 +Instance2 +Instance42 +Pre +Rialto +stringified +Stringified +millau +Millau diff --git a/.config/spellcheck.toml b/.config/spellcheck.toml new file mode 100644 index 00000000000..956077a81cc --- /dev/null +++ b/.config/spellcheck.toml @@ -0,0 +1,11 @@ +[hunspell] +lang = "en_US" +search_dirs = ["."] +extra_dictionaries = ["lingua.dic"] + +[hunspell.quirks] +# `Type`'s +# 5x +transform_regex = ["^'([^\\s])'$", "^[0-9]+(?:\\.[0-9]*)?x$", "^'s$", "^\\+$", "."] +allow_concatenation = true +allow_dashes = true diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4ebd12e0d6f..0f858e24f6f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -41,3 +41,28 @@ jobs: with: command: fmt args: --all -- --check + check-spellcheck: + name: Check For Spelling and/or Grammar Mistakes + runs-on: ubuntu-latest + env: + RUST_BACKTRACE: full + steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@master + with: + fetch-depth: 5 + submodules: recursive + + - name: Add cargo-spellcheck + run: cargo install cargo-spellcheck + + - name: Run spellcheck + uses: actions-rs/cargo@master + with: + command: spellcheck + args: check -m 1 -vv diff --git a/bin/millau/node/src/chain_spec.rs b/bin/millau/node/src/chain_spec.rs index 340b8fd9dff..1ab9e32ae8b 100644 --- a/bin/millau/node/src/chain_spec.rs +++ b/bin/millau/node/src/chain_spec.rs @@ -24,7 +24,7 @@ use sp_core::{sr25519, Pair, Public}; use 
sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{IdentifyAccount, Verify}; -/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. +/// Specialized `ChainSpec`. This is a specialization of the general Substrate Chain-spec type. pub type ChainSpec = sc_service::GenericChainSpec; /// The chain specification option. This is expected to come in from the CLI and diff --git a/bin/millau/node/src/cli.rs b/bin/millau/node/src/cli.rs index 46323ed25c9..49f4fc092fa 100644 --- a/bin/millau/node/src/cli.rs +++ b/bin/millau/node/src/cli.rs @@ -29,7 +29,7 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, StructOpt)] pub enum Subcommand { - /// Key management cli utilities + /// Key management CLI utilities Key(sc_cli::KeySubcommand), /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. diff --git a/bin/millau/runtime/src/lib.rs b/bin/millau/runtime/src/lib.rs index 30cf1bd87cd..e6bb5827941 100644 --- a/bin/millau/runtime/src/lib.rs +++ b/bin/millau/runtime/src/lib.rs @@ -237,7 +237,7 @@ parameter_types! { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. + /// A timestamp: milliseconds since the UNIX epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; diff --git a/bin/rialto/node/src/cli.rs b/bin/rialto/node/src/cli.rs index 46323ed25c9..49f4fc092fa 100644 --- a/bin/rialto/node/src/cli.rs +++ b/bin/rialto/node/src/cli.rs @@ -29,7 +29,7 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, StructOpt)] pub enum Subcommand { - /// Key management cli utilities + /// Key management CLI utilities Key(sc_cli::KeySubcommand), /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. diff --git a/bin/rialto/runtime/src/exchange.rs b/bin/rialto/runtime/src/exchange.rs index a054962a79c..e259856f375 100644 --- a/bin/rialto/runtime/src/exchange.rs +++ b/bin/rialto/runtime/src/exchange.rs @@ -55,7 +55,7 @@ pub struct EthereumTransactionInclusionProof { /// /// The assumption is that this pair will never appear more than once in /// transactions included into finalized blocks. This is obviously true -/// for any existing eth-like chain (that keep current tx format), because +/// for any existing eth-like chain (that keeps the current TX format), because /// otherwise a transaction can be replayed over and over. #[derive(Encode, Decode, PartialEq, RuntimeDebug)] pub struct EthereumTransactionTag { @@ -65,7 +65,7 @@ pub struct EthereumTransactionTag { pub nonce: sp_core::U256, } -/// Eth transaction from runtime perspective. +/// Ethereum transaction from the runtime perspective. pub struct EthTransaction; impl MaybeLockFundsTransaction for EthTransaction { diff --git a/bin/rialto/runtime/src/lib.rs b/bin/rialto/runtime/src/lib.rs index 69fff6b10c3..49a931199d6 100644 --- a/bin/rialto/runtime/src/lib.rs +++ b/bin/rialto/runtime/src/lib.rs @@ -344,7 +344,7 @@ parameter_types! { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. + /// A timestamp: milliseconds since the UNIX epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; @@ -409,30 +409,16 @@ impl pallet_session::Config for Runtime { } parameter_types! { - /// This is a pretty unscientific cap.
- /// - /// Note that once this is hit the pallet will essentially throttle incoming requests down to one - /// call per block. + // This is a pretty unscientific cap. + // + // Note that once this is hit the pallet will essentially throttle incoming requests down to one + // call per block. pub const MaxRequests: u32 = 50; -} - -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - /// Number of headers to keep in benchmarks. - /// - /// In benchmarks we always populate with full number of `HeadersToKeep` to make sure that - /// pruning is taken into account. - /// - /// Note: This is lower than regular value, to speed up benchmarking setup. - pub const HeadersToKeep: u32 = 1024; -} -#[cfg(not(feature = "runtime-benchmarks"))] -parameter_types! { - /// Number of headers to keep. - /// - /// Assuming the worst case of every header being finalized, we will keep headers at least for a - /// week. + // Number of headers to keep. + // + // Assuming the worst case of every header being finalized, we will keep headers at least for a + // week. pub const HeadersToKeep: u32 = 7 * bp_rialto::DAYS as u32; } diff --git a/bin/runtime-common/src/messages.rs b/bin/runtime-common/src/messages.rs index 8e83c0f94ad..6f627a19869 100644 --- a/bin/runtime-common/src/messages.rs +++ b/bin/runtime-common/src/messages.rs @@ -502,7 +502,7 @@ pub mod target { /// Verify proof of Bridged -> This chain messages. /// /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly + /// outside this function. This function only verifies that the proof declares exactly /// `messages_count` messages. pub fn verify_messages_proof( proof: FromBridgedChainMessagesProof>>, diff --git a/docs/high-level-overview.md b/docs/high-level-overview.md index 14b1eee6d45..f5f0b3dcbb6 100644 --- a/docs/high-level-overview.md +++ b/docs/high-level-overview.md @@ -110,7 +110,7 @@ Users of the pallet add their messages to an "outbound lane" on the source chain finalized message relayers are responsible for reading the current queue of messages and submitting some (or all) of them to the "inbound lane" of the target chain. Each message has a `nonce` associated with it, which serves as the ordering of messages. The inbound lane stores the last -delivered nonce to prevent replaying messages. To succesfuly deliver the message to the inbound lane +delivered nonce to prevent replaying messages. To successfully deliver the message to the inbound lane on the target chain the relayer has to present a storage proof which shows that the message was part of the outbound lane on the source chain. diff --git a/docs/testing-scenarios.md b/docs/testing-scenarios.md index 343720524ec..fd55022d98a 100644 --- a/docs/testing-scenarios.md +++ b/docs/testing-scenarios.md @@ -91,7 +91,7 @@ kCharlie. 1. Relayer prepares transaction which delivers `B1` with all of the missing ancestors to the target chain (one header per transaction). -1. After the transaction is succesfully dispatched the Polkadot on-chain light client of the Kusama +1. After the transaction is successfully dispatched the Polkadot on-chain light client of the Kusama chain learns about block `B1` - it is stored in the on-chain storage.
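A toy model of the ancestry requirement in the steps above (illustrative only: `LightClient` and its `import` method are hypothetical stand-ins, not the real pallet API):

```rust
use std::collections::HashSet;

/// Toy on-chain light client: a header is accepted only if its parent is
/// already known, which is why `B1`'s missing ancestors must go first.
struct LightClient {
    imported: HashSet<u64>, // stand-in for imported header hashes
}

impl LightClient {
    fn import(&mut self, parent: u64, hash: u64) -> Result<(), &'static str> {
        if !self.imported.contains(&parent) {
            return Err("unknown parent - deliver the missing ancestor first");
        }
        self.imported.insert(hash);
        Ok(())
    }
}

fn main() {
    let mut client = LightClient { imported: HashSet::from([0]) };
    assert!(client.import(0, 1).is_ok()); // a missing ancestor of `B1`
    assert!(client.import(1, 2).is_ok()); // `B1` itself: its parent is now known
    assert!(client.import(7, 8).is_err()); // gap: ancestor 7 was never delivered
}
```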
### Syncing finality loop diff --git a/modules/currency-exchange/src/lib.rs b/modules/currency-exchange/src/lib.rs index 542082f85ab..9a8af5ba501 100644 --- a/modules/currency-exchange/src/lib.rs +++ b/modules/currency-exchange/src/lib.rs @@ -70,7 +70,7 @@ decl_error! { InvalidRecipient, /// Cannot map from peer recipient to this blockchain recipient. FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockhain currency. + /// Failed to convert from peer blockchain currency to this blockchain currency. FailedToConvertCurrency, /// Deposit has failed. DepositFailed, diff --git a/modules/dispatch/src/lib.rs b/modules/dispatch/src/lib.rs index 416d080b0c1..e95dc1cdfe9 100644 --- a/modules/dispatch/src/lib.rs +++ b/modules/dispatch/src/lib.rs @@ -19,7 +19,7 @@ //! The messages are interpreted directly as runtime `Call`. We attempt to decode //! them and then dispatch as usual. To prevent compatibility issues, the Calls have //! to include a `spec_version`. This will be checked before dispatch. In the case of -//! a succesful dispatch an event is emitted. +//! a successful dispatch an event is emitted. #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] @@ -57,7 +57,7 @@ pub enum CallOrigin { /// best finalized. pub stopped_at_finalized_sibling: bool, /// Header ancestors that were read while we have been searching for - /// cached votes entry. Newest header has index 0. + /// cached votes entry. The newest header has index 0. pub unaccounted_ancestry: VecDeque<(HeaderId, Option, AuraHeader)>, /// Cached finality votes, if they have been found. The associated /// header is not included into `unaccounted_ancestry`. @@ -59,7 +59,7 @@ pub struct FinalityEffects { pub struct FinalityVotes { /// Number of votes per each validator. pub votes: BTreeMap, - /// Ancestry blocks with oldest ancestors at the beginning and newest at the + /// Ancestry blocks with the oldest ancestors at the beginning and newest at the /// end of the queue. pub ancestry: VecDeque>, } diff --git a/modules/ethereum/src/import.rs b/modules/ethereum/src/import.rs index 8cd4c8a17c7..89ee39f7b3c 100644 --- a/modules/ethereum/src/import.rs +++ b/modules/ethereum/src/import.rs @@ -22,7 +22,7 @@ use crate::{AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storag use bp_eth_poa::{AuraHeader, HeaderId, Receipt}; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; -/// Imports bunch of headers and updates blocks finality. +/// Imports a bunch of headers and updates blocks' finality. /// /// Transaction receipts must be provided if `header_import_requires_receipts()` /// has returned true. diff --git a/modules/ethereum/src/lib.rs b/modules/ethereum/src/lib.rs index aeb7d69f763..135668ed412 100644 --- a/modules/ethereum/src/lib.rs +++ b/modules/ethereum/src/lib.rs @@ -169,7 +169,7 @@ struct PruningRange { /// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has /// scheduled validators set change). pub oldest_unpruned_block: u64, - /// Number of oldest block(s) that we want to keep. We want to prune blocks in range + /// Number of the oldest block(s) that we want to keep. We want to prune blocks in range /// [`oldest_unpruned_block`; `oldest_block_to_keep`). pub oldest_block_to_keep: u64, } diff --git a/modules/ethereum/src/test_utils.rs b/modules/ethereum/src/test_utils.rs index 18ad6876d68..8b07ebe937a 100644 --- a/modules/ethereum/src/test_utils.rs +++ b/modules/ethereum/src/test_utils.rs @@ -19,7 +19,7 @@ //!
Although the name implies that it is used by tests, it shouldn't be used _directly_ by tests. //! Instead these utilities should be used by the Mock runtime, which in turn is used by tests. //! -//! On the other hand, they may be used directly by the bechmarking module. +//! On the other hand, they may be used directly by the benchmarking module. // Since this is test code it's fine that not everything is used #![allow(dead_code)] diff --git a/modules/ethereum/src/verification.rs b/modules/ethereum/src/verification.rs index c79242d1d4d..0a7e1e51f7a 100644 --- a/modules/ethereum/src/verification.rs +++ b/modules/ethereum/src/verification.rs @@ -43,7 +43,7 @@ pub fn is_importable_header(storage: &S, header: &AuraHeader) -> Res Ok((id, finalized_id)) } -/// Try accept unsigned aura header into transaction pool. +/// Try to accept unsigned aura header into transaction pool. /// /// Returns required and provided tags. pub fn accept_aura_header_into_pool( diff --git a/modules/grandpa/src/benchmarking.rs b/modules/grandpa/src/benchmarking.rs index b7294e91800..cb170fdc8b1 100644 --- a/modules/grandpa/src/benchmarking.rs +++ b/modules/grandpa/src/benchmarking.rs @@ -51,10 +51,9 @@ use bp_test_utils::{ TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID, }; use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; -use frame_support::traits::Get; use frame_system::RawOrigin; use sp_finality_grandpa::AuthorityId; -use sp_runtime::traits::Zero; +use sp_runtime::traits::{One, Zero}; use sp_std::{vec, vec::Vec}; // The maximum number of vote ancestries to include in a justification. @@ -67,14 +66,6 @@ const MAX_VOTE_ANCESTRIES: u32 = 1000; // number of validators. const MAX_VALIDATOR_SET_SIZE: u32 = 1024; -/// Returns number of first header to be imported. -/// -/// Since we boostrap the pallet with `HeadersToKeep` already imported headers, -/// this function computes the next expected header number to import. -fn header_number, I: 'static, N: From>() -> N { - (T::HeadersToKeep::get() + 1).into() -} - benchmarks_instance_pallet! { // This is the "gold standard" benchmark for this extrinsic, and it's what should be used to // annotate the weight in the pallet. @@ -99,9 +90,9 @@ benchmarks_instance_pallet! { is_halted: false, }; - bootstrap_bridge::(init_data); + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); let params = JustificationGeneratorParams { header: header.clone(), round: TEST_GRANDPA_ROUND, @@ -115,7 +106,7 @@ }: _(RawOrigin::Signed(caller), header, justification) verify { - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); let expected_hash = header.hash(); assert_eq!(>::get(), expected_hash); @@ -136,8 +127,8 @@ benchmarks_instance_pallet! { is_halted: false, }; - bootstrap_bridge::(init_data); - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); let params = JustificationGeneratorParams { header: header.clone(), @@ -152,7 +143,7 @@ benchmarks_instance_pallet!
{ }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) verify { - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); let expected_hash = header.hash(); assert_eq!(>::get(), expected_hash); @@ -179,8 +170,8 @@ benchmarks_instance_pallet! { is_halted: false, }; - bootstrap_bridge::(init_data); - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); let params = JustificationGeneratorParams { header: header.clone(), @@ -195,7 +186,7 @@ benchmarks_instance_pallet! { }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) verify { - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); let expected_hash = header.hash(); assert_eq!(>::get(), expected_hash); diff --git a/modules/grandpa/src/lib.rs b/modules/grandpa/src/lib.rs index 80bc8e951ac..358e99ef2d6 100644 --- a/modules/grandpa/src/lib.rs +++ b/modules/grandpa/src/lib.rs @@ -28,7 +28,7 @@ //! //! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only //! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe -//! bug causing resulting in an equivocation. Such events are outside of the scope of this pallet. +//! bug resulting in an equivocation. Such events are outside the scope of this pallet. //! Should the fork occur on the bridged chain, governance intervention will be required to //! re-initialize the bridge and track the right fork. @@ -158,8 +158,20 @@ pub mod pallet { verify_justification::(&justification, hash, *number, authority_set)?; let _enacted = try_enact_authority_change::(&finality_target, set_id)?; + let index = >::get(); + let pruning = >::try_get(index); + >::put(hash); + >::insert(hash, finality_target); + >::insert(index, hash); >::mutate(|count| *count += 1); - insert_header::(finality_target, hash); + + // Update ring buffer pointer and remove old header. + >::put((index + 1) % T::HeadersToKeep::get()); + if let Ok(hash) = pruning { + log::debug!(target: "runtime::bridge-grandpa", "Pruning old header: {:?}.", hash); + >::remove(hash); + } + log::info!(target: "runtime::bridge-grandpa", "Successfully imported finalized header with hash {:?}!", hash); Ok(().into()) @@ -168,7 +180,7 @@ pub mod pallet { /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. /// /// The initial configuration provided does not need to be the genesis header of the bridged - /// chain, it can be any arbirary header. You can also provide the next scheduled set change + /// chain, it can be any arbitrary header. You can also provide the next scheduled set change /// if it is already known. /// /// This function is only allowed to be called from a trusted origin and writes to storage @@ -415,25 +427,6 @@ pub mod pallet { ) } - /// Import a previously verified header to the storage. - /// - /// Note this function solely takes care of updating the storage and pruning old entries, - /// but does not verify the validaty of such import.
- pub(crate) fn insert_header, I: 'static>(header: BridgedHeader, hash: BridgedBlockHash) { - let index = >::get(); - let pruning = >::try_get(index); - >::put(hash); - >::insert(hash, header); - >::insert(index, hash); - - // Update ring buffer pointer and remove old header. - >::put((index + 1) % T::HeadersToKeep::get()); - if let Ok(hash) = pruning { - log::debug!(target: "runtime::bridge-grandpa", "Pruning old header: {:?}.", hash); - >::remove(hash); - } - } - /// Since this writes to storage with no real checks this should only be used in functions that /// were called by a trusted origin. pub(crate) fn initialize_bridge, I: 'static>( @@ -448,8 +441,8 @@ pub mod pallet { let initial_hash = header.hash(); >::put(initial_hash); - >::put(0); - insert_header::(header, initial_hash); + >::put(initial_hash); + >::insert(initial_hash, header); let authority_set = bp_header_chain::AuthoritySet::new(authority_list, set_id); >::put(authority_set); @@ -457,29 +450,6 @@ pub mod pallet { >::put(is_halted); } - #[cfg(feature = "runtime-benchmarks")] - pub(crate) fn bootstrap_bridge, I: 'static>( - init_params: super::InitializationData>, - ) { - let start_number = *init_params.header.number(); - let end_number = start_number + T::HeadersToKeep::get().into(); - initialize_bridge::(init_params); - - let mut number = start_number; - while number < end_number { - number = number + sp_runtime::traits::One::one(); - let header = >::new( - number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - let hash = header.hash(); - insert_header::(header, hash); - } - } - /// Ensure that the origin is either root, or `PalletOwner`. fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { match origin.into() { diff --git a/modules/grandpa/src/weights.rs b/modules/grandpa/src/weights.rs index 9e7c2ebc087..a548534a20b 100644 --- a/modules/grandpa/src/weights.rs +++ b/modules/grandpa/src/weights.rs @@ -17,7 +17,7 @@ //! Autogenerated weights for pallet_bridge_grandpa //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-04-21, STEPS: [50, ], REPEAT: 20 +//! DATE: 2021-04-14, STEPS: [50, ], REPEAT: 20 //! LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled //! 
CHAIN: Some("dev"), DB CACHE: 128 @@ -60,29 +60,29 @@ pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { fn submit_finality_proof(v: u32, p: u32) -> Weight { (0 as Weight) - .saturating_add((756_462_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((791_236_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((837_084_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((874_929_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn submit_finality_proof_on_single_fork(v: u32) -> Weight { - (280_121_000 as Weight) - .saturating_add((14_098_000 as Weight).saturating_mul(v as Weight)) + (276_463_000 as Weight) + .saturating_add((14_149_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn submit_finality_proof_on_many_forks(p: u32) -> Weight { - (10_370_940_000 as Weight) - .saturating_add((96_902_000 as Weight).saturating_mul(p as Weight)) + (10_676_019_000 as Weight) + .saturating_add((97_598_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn find_scheduled_change(n: u32) -> Weight { - (479_000 as Weight).saturating_add((11_000 as Weight).saturating_mul(n as Weight)) + (618_000 as Weight).saturating_add((8_000 as Weight).saturating_mul(n as Weight)) } fn read_write_authority_sets(n: u32) -> Weight { - (8_030_000 as Weight) - .saturating_add((232_000 as Weight).saturating_mul(n as Weight)) + (8_582_000 as Weight) + .saturating_add((234_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -92,29 +92,29 @@ impl WeightInfo for RialtoWeight { impl WeightInfo for () { fn submit_finality_proof(v: u32, p: u32) -> Weight { (0 as Weight) - .saturating_add((756_462_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((791_236_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((837_084_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((874_929_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn submit_finality_proof_on_single_fork(v: u32) -> Weight { - (280_121_000 as Weight) - .saturating_add((14_098_000 as Weight).saturating_mul(v as Weight)) + (276_463_000 as Weight) + .saturating_add((14_149_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn submit_finality_proof_on_many_forks(p: u32) -> Weight { - (10_370_940_000 as Weight) - .saturating_add((96_902_000 as Weight).saturating_mul(p as Weight)) + (10_676_019_000 as Weight) + .saturating_add((97_598_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn 
find_scheduled_change(n: u32) -> Weight { - (479_000 as Weight).saturating_add((11_000 as Weight).saturating_mul(n as Weight)) + (618_000 as Weight).saturating_add((8_000 as Weight).saturating_mul(n as Weight)) } fn read_write_authority_sets(n: u32) -> Weight { - (8_030_000 as Weight) - .saturating_add((232_000 as Weight).saturating_mul(n as Weight)) + (8_582_000 as Weight) + .saturating_add((234_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/modules/messages/src/benchmarking.rs b/modules/messages/src/benchmarking.rs index d1ecf775000..c601dadb8c6 100644 --- a/modules/messages/src/benchmarking.rs +++ b/modules/messages/src/benchmarking.rs @@ -151,7 +151,7 @@ benchmarks_instance! { // * outbound lane already has state, so it needs to be read and decoded; // * relayers fund account does not exist (in practice it needs to exist in production environment); // * maximal number of messages is being pruned during the call; - // * message size is 1KB. + // * message size is 1 KB. // // With a single KB of message size, the weight of the call is increased (roughly) by // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. diff --git a/modules/messages/src/instant_payments.rs b/modules/messages/src/instant_payments.rs index 524a3765d6a..80bc32f294a 100644 --- a/modules/messages/src/instant_payments.rs +++ b/modules/messages/src/instant_payments.rs @@ -31,14 +31,14 @@ use sp_std::fmt::Debug; /// Instant message payments made in given currency. /// -/// The balance is initally reserved in a special `relayers-fund` account, and transferred +/// The balance is initially reserved in a special `relayers-fund` account, and transferred /// to the relayer when message delivery is confirmed. /// -/// Additionaly, confirmation transaction submitter (`confirmation_relayer`) is reimbursed +/// Additionally, the confirmation transaction submitter (`confirmation_relayer`) is reimbursed /// with the confirmation rewards (part of message fee, reserved to pay for delivery confirmation). /// /// NOTE The `relayers-fund` account must always exist i.e. be over Existential Deposit (ED; the -/// pallet enforces that) to make sure that even if the message cost is below ED it is still payed +/// pallet enforces that) to make sure that even if the message cost is below ED it is still paid /// to the relayer account. /// NOTE It's within relayer's interest to keep their balance above ED as well, to make sure they /// can receive the payment. diff --git a/modules/messages/src/lib.rs b/modules/messages/src/lib.rs index 9e2563498fe..f30a9730f58 100644 --- a/modules/messages/src/lib.rs +++ b/modules/messages/src/lib.rs @@ -80,7 +80,7 @@ mod mock; pub trait Config: frame_system::Config { // General types - /// They overarching event type. + /// The overarching event type. type Event: From> + Into<::Event>; /// Benchmarks results from runtime we're plugged into. type WeightInfo: WeightInfoExt; @@ -608,22 +608,22 @@ impl, I: Instance> Pallet { OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(|message_data| message_data.payload) } - /// Get nonce of latest generated message at given outbound lane. + /// Get nonce of the latest generated message at given outbound lane. pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce { OutboundLanes::::get(&lane).latest_generated_nonce } - /// Get nonce of latest confirmed message at given outbound lane.
+ /// Get nonce of the latest confirmed message at given outbound lane. pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce { OutboundLanes::::get(&lane).latest_received_nonce } - /// Get nonce of latest received message at given inbound lane. + /// Get nonce of the latest received message at given inbound lane. pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce { InboundLanes::::get(&lane).last_delivered_nonce() } - /// Get nonce of latest confirmed message at given inbound lane. + /// Get nonce of the latest confirmed message at given inbound lane. pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce { InboundLanes::::get(&lane).last_confirmed_nonce } diff --git a/modules/messages/src/weights.rs b/modules/messages/src/weights.rs index f86a21e3ed9..0eecd0d8462 100644 --- a/modules/messages/src/weights.rs +++ b/modules/messages/src/weights.rs @@ -17,7 +17,7 @@ //! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-04-21, STEPS: [50, ], REPEAT: 20 +//! DATE: 2021-04-14, STEPS: [50, ], REPEAT: 20 //! LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled //! CHAIN: Some("dev"), DB CACHE: 128 @@ -73,105 +73,105 @@ pub trait WeightInfo { pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { fn send_minimal_message_worst_case() -> Weight { - (149_643_000 as Weight) + (149_497_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn send_1_kb_message_worst_case() -> Weight { - (153_329_000 as Weight) + (154_339_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn send_16_kb_message_worst_case() -> Weight { - (200_113_000 as Weight) + (200_066_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn increase_message_fee() -> Weight { - (6_407_252_000 as Weight) + (6_432_637_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof() -> Weight { - (141_256_000 as Weight) + (141_671_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_two_messages_proof() -> Weight { - (247_723_000 as Weight) + (247_393_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (159_731_000 as Weight) + (159_312_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_1_kb() -> Weight { - (168_546_000 as Weight) + (167_935_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_16_kb() -> Weight { - (450_087_000 as Weight) + (449_846_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_single_message() -> Weight { - (164_519_000 as Weight) + (127_322_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as 
Weight)) } fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (173_300_000 as Weight) + (134_120_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (246_205_000 as Weight) + (191_193_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn send_messages_of_various_lengths(i: u32) -> Weight { - (149_551_000 as Weight) + (115_699_000 as Weight) .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn receive_multiple_messages_proof(i: u32) -> Weight { (0 as Weight) - .saturating_add((114_817_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((113_551_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (437_797_000 as Weight) - .saturating_add((10_000 as Weight).saturating_mul(i as Weight)) + (458_731_000 as Weight) + .saturating_add((9_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (137_633_000 as Weight) + (82_314_000 as Weight) .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { - (0 as Weight) - .saturating_add((118_482_000 as Weight).saturating_mul(i as Weight)) + (16_766_000 as Weight) + .saturating_add((115_533_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (116_036_000 as Weight) - .saturating_add((7_118_000 as Weight).saturating_mul(i as Weight)) + (122_146_000 as Weight) + .saturating_add((6_789_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (172_780_000 as Weight) - .saturating_add((63_718_000 as Weight).saturating_mul(i as Weight)) + (155_671_000 as Weight) + .saturating_add((63_020_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -182,105 +182,105 @@ impl WeightInfo for RialtoWeight { // For backwards compatibility and tests impl WeightInfo for () { fn send_minimal_message_worst_case() -> Weight { - (149_643_000 as Weight) + (149_497_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn send_1_kb_message_worst_case() -> Weight { - (153_329_000 as Weight) + (154_339_000 as Weight) 
.saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn send_16_kb_message_worst_case() -> Weight { - (200_113_000 as Weight) + (200_066_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn increase_message_fee() -> Weight { - (6_407_252_000 as Weight) + (6_432_637_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof() -> Weight { - (141_256_000 as Weight) + (141_671_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_two_messages_proof() -> Weight { - (247_723_000 as Weight) + (247_393_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (159_731_000 as Weight) + (159_312_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_1_kb() -> Weight { - (168_546_000 as Weight) + (167_935_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_16_kb() -> Weight { - (450_087_000 as Weight) + (449_846_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_single_message() -> Weight { - (164_519_000 as Weight) + (127_322_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (173_300_000 as Weight) + (134_120_000 as Weight) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (246_205_000 as Weight) + (191_193_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn send_messages_of_various_lengths(i: u32) -> Weight { - (149_551_000 as Weight) + (115_699_000 as Weight) .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn receive_multiple_messages_proof(i: u32) -> Weight { (0 as Weight) - .saturating_add((114_817_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((113_551_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (437_797_000 as Weight) - .saturating_add((10_000 as Weight).saturating_mul(i as Weight)) + (458_731_000 as Weight) + .saturating_add((9_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (137_633_000 as Weight) + (82_314_000 as Weight) .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { - (0 as Weight) - .saturating_add((118_482_000 as Weight).saturating_mul(i as Weight)) + (16_766_000 as Weight) + .saturating_add((115_533_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (116_036_000 as Weight) - .saturating_add((7_118_000 as Weight).saturating_mul(i as Weight)) + (122_146_000 as Weight) + .saturating_add((6_789_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (172_780_000 as Weight) - .saturating_add((63_718_000 as Weight).saturating_mul(i as Weight)) + (155_671_000 as Weight) + .saturating_add((63_020_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) diff --git a/modules/messages/src/weights_ext.rs b/modules/messages/src/weights_ext.rs index cb754a10231..0c3abadebd6 100644 --- a/modules/messages/src/weights_ext.rs +++ b/modules/messages/src/weights_ext.rs @@ -26,7 +26,7 @@ use frame_support::weights::Weight; pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128; /// We assume that size of signed extensions on all our chains and size of all 'small' arguments of calls -/// we're checking here would fit 1KB. +/// we're checking here would fit 1 KB. const SIGNED_EXTENSIONS_SIZE: u32 = 1024; /// Ensure that weights from `WeightInfoExt` implementation are looking correct. @@ -242,7 +242,7 @@ pub trait WeightInfoExt: WeightInfo { weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) } - /// Returns weight that needs to be accounted when receiving given number of messages with message + /// Returns weight that needs to be accounted when receiving a given number of messages with message /// delivery transaction (`receive_messages_proof`). fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); @@ -269,7 +269,7 @@ pub trait WeightInfoExt: WeightInfo { weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) } - /// Returns weight that needs to be accounted when receiving confirmations for given number of + /// Returns weight that needs to be accounted when receiving confirmations for a given number of /// messages with delivery confirmation transaction (`receive_messages_delivery_proof`). 
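/// (As with the other overhead helpers here, the body below extracts the marginal cost from two benchmark points: `weight(two messages) - weight(one message)`, multiplied by the number of messages.)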
fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight { let weight_of_two_messages = Self::receive_delivery_proof_for_two_messages_by_single_relayer(); @@ -279,7 +279,7 @@ pub trait WeightInfoExt: WeightInfo { .saturating_mul(messages as Weight) } - /// Returns weight that needs to be accounted when receiving confirmations for given number of + /// Returns weight that needs to be accounted when receiving confirmations for a given number of /// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`). fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight { let weight_of_two_messages_by_two_relayers = Self::receive_delivery_proof_for_two_messages_by_two_relayers(); @@ -290,7 +290,7 @@ pub trait WeightInfoExt: WeightInfo { .saturating_mul(relayers as Weight) } - /// Returns weight that needs to be accounted when storage proof of given size is recieved (either in + /// Returns weight that needs to be accounted when storage proof of given size is received (either in /// `receive_messages_proof` or `receive_messages_delivery_proof`). /// /// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. proof diff --git a/modules/substrate/src/fork_tests.rs b/modules/substrate/src/fork_tests.rs new file mode 100644 index 00000000000..6dc7846dadb --- /dev/null +++ b/modules/substrate/src/fork_tests.rs @@ -0,0 +1,515 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests for checking that behavior of importing headers and finality proofs works correctly. +//! +//! The tests are built around the idea that we will be importing headers on different forks and we +//! should be able to check that we're correctly importing headers, scheduling changes, and +//! finalizing headers across different forks. +//! +//! Each test is depicted using beautiful ASCII art. The symbols used in the tests are the +//! following: +//! +//! - S|N: Schedules change in N blocks +//! - E: Enacts change +//! - F: Finalized +//! - FN: Finality proof imported for header N +//! +//! Each diagram also comes with an import order. This is important since we expect things to fail +//! when headers or proofs are imported in a certain order. +//! +//! Tests can be read as follows: +//! +//! ## Example Import 1 +//! +//! (Type::Header(2, 1, None, None), Ok(())) +//! +//! Import header 2 on fork 1. This does not create a fork, or schedule an authority set change. We +//! expect this header import to be successful. +//! +//! ## Example Import 2 +//! +//! (Type::Header(4, 2, Some((3, 1)), Some(0)), Ok(())) +//! +//! Import header 4 on fork 2. This header starts a new fork from header 3 on fork 1. It also +//! schedules a change with a delay of 0 blocks. It should be successfully imported. +//! +//! 
## Example Import 3 +//! +//! (Type::Finality(2, 1), Err(FinalizationError::OldHeader.into())) +//! +//! Import a finality proof for header 2 on fork 1. This finality proof should fail to be imported +//! because the header is an old header. + +use crate::mock::*; +use crate::storage::ImportedHeader; +use crate::verifier::*; +use crate::{BestFinalized, BestHeight, BridgeStorage, NextScheduledChange, PalletStorage}; +use bp_header_chain::AuthoritySet; +use bp_test_utils::{alice, authority_list, bob, make_justification_for_header}; +use codec::Encode; +use frame_support::{IterableStorageMap, StorageValue}; +use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; +use sp_runtime::{Digest, DigestItem}; +use std::collections::BTreeMap; + +type ForkId = u64; +type Delay = u64; + +// Indicates when to start a new fork. The first item in the tuple +// will be the parent header of the header starting this fork. +type ForksAt = Option<(TestNumber, ForkId)>; +type ScheduledChangeAt = Option; + +#[derive(Debug)] +enum Type { + Header(TestNumber, ForkId, ForksAt, ScheduledChangeAt), + Finality(TestNumber, ForkId), +} + +// Order: 1, 2, 2', 3, 3'' +// +// / [3''] +// / [2'] +// [1] <- [2] <- [3] +#[test] +fn fork_can_import_headers_on_different_forks() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, None), Ok(())), + (Type::Header(2, 2, Some((1, 1)), None), Ok(())), + (Type::Header(3, 1, None, None), Ok(())), + (Type::Header(3, 3, Some((2, 2)), None), Ok(())), + ]; + + create_chain(&mut storage, &mut chain); + + let best_headers = storage.best_headers(); + assert_eq!(best_headers.len(), 2); + assert_eq!(>::get(), 3); + }) +} + +// Order: 1, 2, 2', F2, F2' +// +// [1] <- [2: F] +// \ [2'] +// +// Not allowed to finalize 2' +#[test] +fn fork_does_not_allow_competing_finality_proofs() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, None), Ok(())), + (Type::Header(2, 2, Some((1, 1)), None), Ok(())), + (Type::Finality(2, 1), Ok(())), + (Type::Finality(2, 2), Err(FinalizationError::OldHeader.into())), + ]; + + create_chain(&mut storage, &mut chain); + }) +} + +// Order: 1, 2, 3, F2, 3 +// +// [1] <- [2: S|0] <- [3] +// +// Not allowed to import 3 until we get F2 +// +// Note: GRANDPA would technically allow 3 to be imported as long as it didn't try and enact an +// authority set change. However, since we expect finality proofs to be imported quickly we've +// decided to simplify our import process and disallow header imports until we get a finality proof. +#[test] +fn fork_waits_for_finality_proof_before_importing_header_past_one_which_enacts_a_change() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(0)), Ok(())), + ( + Type::Header(3, 1, None, None), + Err(ImportError::AwaitingFinalityProof.into()), + ), + (Type::Finality(2, 1), Ok(())), + (Type::Header(3, 1, None, None), Ok(())), + ]; + + create_chain(&mut storage, &mut chain); + }) +} + +// Order: 1, 2, F2, 3 +// +// [1] <- [2: S|1] <- [3: S|0] +// +// GRANDPA can have multiple authority set changes pending on the same fork. 
However, we've decided +// to introduce a limit of _one_ pending authority set change per fork in order to simplify pallet +// logic and to prevent DoS attacks if GRANDPA finality were to temporarily stall for a long time +// (we'd have to perform a lot of expensive ancestry checks to catch back up). +#[test] +fn fork_does_not_allow_multiple_scheduled_changes_on_the_same_fork() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(1)), Ok(())), + ( + Type::Header(3, 1, None, Some(0)), + Err(ImportError::PendingAuthoritySetChange.into()), + ), + (Type::Finality(2, 1), Ok(())), + (Type::Header(3, 1, None, Some(0)), Ok(())), + ]; + + create_chain(&mut storage, &mut chain); + }) +} + +// Order: 1, 2, 2' +// +// / [2': S|0] +// [1] <- [2: S|0] +// +// Both 2 and 2' should be marked as needing justifications since they enact changes. +#[test] +fn fork_correctly_tracks_which_headers_require_finality_proofs() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(0)), Ok(())), + (Type::Header(2, 2, Some((1, 1)), Some(0)), Ok(())), + ]; + + create_chain(&mut storage, &mut chain); + + let header_ids = storage.missing_justifications(); + assert_eq!(header_ids.len(), 2); + assert!(header_ids[0].hash != header_ids[1].hash); + assert_eq!(header_ids[0].number, 2); + assert_eq!(header_ids[1].number, 2); + }) +} + +// Order: 1, 2, 2', 3', F2, 3, 4' +// +// / [2': S|1] <- [3'] <- [4'] +// [1] <- [2: S|0] <- [3] +// +// +// Not allowed to import 3 or 4' +// Can only import 3 after we get the finality proof for 2 +#[test] +fn fork_does_not_allow_importing_past_header_that_enacts_changes_on_forks() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(0)), Ok(())), + (Type::Header(2, 2, Some((1, 1)), Some(1)), Ok(())), + ( + Type::Header(3, 1, None, None), + Err(ImportError::AwaitingFinalityProof.into()), + ), + (Type::Header(3, 2, None, None), Ok(())), + (Type::Finality(2, 1), Ok(())), + (Type::Header(3, 1, None, None), Ok(())), + ( + Type::Header(4, 2, None, None), + Err(ImportError::AwaitingFinalityProof.into()), + ), + ]; + + create_chain(&mut storage, &mut chain); + + // Since we can't query the map directly to check if we applied the right authority set + // change (we don't know the header hash of 2) we need to get a little clever. 
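+ // Instead, iterate the whole `NextScheduledChange` map: it should hold exactly one + // entry - the change scheduled on the fork we expect (with height 3).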
+ let mut next_change = >::iter(); + let (_, scheduled_change_on_fork) = next_change.next().unwrap(); + assert_eq!(scheduled_change_on_fork.height, 3); + + // Sanity check to make sure we enacted the change on the canonical chain + assert_eq!(next_change.next(), None); + }) +} + +// Order: 1, 2, 3, 2', 3' +// +// / [2'] <- [3'] +// [1] <- [2: S|0] <- [3] +// +// Not allowed to import 3 +// Fine to import 2' and 3' +#[test] +fn fork_allows_importing_on_different_fork_while_waiting_for_finality_proof() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(0)), Ok(())), + ( + Type::Header(3, 1, None, None), + Err(ImportError::AwaitingFinalityProof.into()), + ), + (Type::Header(2, 2, Some((1, 1)), None), Ok(())), + (Type::Header(3, 2, None, None), Ok(())), + ]; + + create_chain(&mut storage, &mut chain); + }) +} + +// Order: 1, 2, 2', F2, 3, 3' +// +// / [2'] <- [3'] +// [1] <- [2: F] <- [3] +// +// In our current implementation we're allowed to keep building on fork 2 to our hearts' +// content. However, we'll never be able to finalize anything on that fork. We'd have to check for +// ancestry with `best_finalized` on every import which will get expensive. +// +// I think this is fine as long as we run pruning every so often to clean up these dead forks. +#[test] +fn fork_allows_importing_on_different_fork_past_finalized_header() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(0)), Ok(())), + (Type::Header(2, 2, Some((1, 1)), None), Ok(())), + (Type::Finality(2, 1), Ok(())), + (Type::Header(3, 1, None, None), Ok(())), + (Type::Header(3, 2, None, None), Ok(())), + ]; + + create_chain(&mut storage, &mut chain); + }) +} + +// Order: 1, 2, 3, 4, 3', 4' +// +// / [3': E] <- [4'] +// [1] <- [2: S|1] <- [3: E] <- [4] +// +// Not allowed to import {4|4'} +#[test] +fn fork_can_track_scheduled_changes_across_forks() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut chain = vec![ + (Type::Header(1, 1, None, None), Ok(())), + (Type::Header(2, 1, None, Some(1)), Ok(())), + (Type::Header(3, 1, None, None), Ok(())), + ( + Type::Header(4, 1, None, None), + Err(ImportError::AwaitingFinalityProof.into()), + ), + (Type::Header(3, 2, Some((2, 1)), None), Ok(())), + ( + Type::Header(4, 2, None, None), + Err(ImportError::AwaitingFinalityProof.into()), + ), + ]; + + create_chain(&mut storage, &mut chain); + }) +} + +#[derive(Debug, PartialEq)] +enum TestError { + Import(ImportError), + Finality(FinalizationError), +} + +impl From for TestError { + fn from(e: ImportError) -> Self { + TestError::Import(e) + } +} + +impl From for TestError { + fn from(e: FinalizationError) -> Self { + TestError::Finality(e) + } +} + +// Builds a fork-aware representation of a blockchain given a list of headers. +// +// Takes a list of headers and finality proof operations which will be applied in order. The +// expected outcome for each operation is also required. +// +// The first header in the list will be used as the genesis header and will be manually imported +// into storage. +fn create_chain(storage: &mut S, chain: &mut Vec<(Type, Result<(), TestError>)>) where + S: BridgeStorage
+ Clone, +{ + let mut map = BTreeMap::new(); + let mut verifier = Verifier { + storage: storage.clone(), + }; + initialize_genesis(storage, &mut map, chain.remove(0).0); + + for h in chain { + match h { + (Type::Header(num, fork_id, does_fork, schedules_change), expected_result) => { + // If we've never seen this fork before + if !map.contains_key(&fork_id) { + // Let's get the info about where to start the fork + if let Some((parent_num, forked_from_id)) = does_fork { + let fork = &*map.get(&forked_from_id).unwrap(); + let parent = fork + .iter() + .find(|h| h.number == *parent_num) + .expect("Trying to fork on a parent which doesn't exist"); + + let mut header = test_header(*num); + header.parent_hash = parent.hash(); + header.state_root = [*fork_id as u8; 32].into(); + + if let Some(delay) = schedules_change { + header.digest = change_log(*delay); + } + + // Try and import into storage + let res = verifier + .import_header(header.hash(), header.clone()) + .map_err(TestError::Import); + assert_eq!( + res, *expected_result, + "Expected {:?} while importing header ({}, {}), got {:?}", + *expected_result, *num, *fork_id, res, + ); + + // Let's mark the header down in a new fork + if res.is_ok() { + map.insert(*fork_id, vec![header]); + } + } + } else { + // We've seen this fork before so let's append our new header to it + let parent_hash = { + let fork = &*map.get(&fork_id).unwrap(); + fork.last().unwrap().hash() + }; + + let mut header = test_header(*num); + header.parent_hash = parent_hash; + + // Doing this to make sure headers at the same height but on + // different forks have different hashes + header.state_root = [*fork_id as u8; 32].into(); + + if let Some(delay) = schedules_change { + header.digest = change_log(*delay); + } + + let res = verifier + .import_header(header.hash(), header.clone()) + .map_err(TestError::Import); + assert_eq!( + res, *expected_result, + "Expected {:?} while importing header ({}, {}), got {:?}", + *expected_result, *num, *fork_id, res, + ); + + if res.is_ok() { + map.get_mut(&fork_id).unwrap().push(header); + } + } + } + (Type::Finality(num, fork_id), expected_result) => { + let header = map[fork_id] + .iter() + .find(|h| h.number == *num) + .expect("Trying to finalize block that doesn't exist"); + + // This is technically equivocating (accepting the same justification on the same + // `grandpa_round`). + // + // See for more: https://github.com/paritytech/parity-bridges-common/issues/430 + let grandpa_round = 1; + let set_id = 1; + let authorities = authority_list(); + let justification = make_justification_for_header(header, grandpa_round, set_id, &authorities).encode(); + + let res = verifier + .import_finality_proof(header.hash(), justification.into()) + .map_err(TestError::Finality); + assert_eq!( + res, *expected_result, + "Expected {:?} while importing finality proof for header ({}, {}), got {:?}", + *expected_result, *num, *fork_id, res, + ); + } + } + } + + for (key, value) in map.iter() { + println!("{}: {:#?}", key, value); + } +} + +fn initialize_genesis(storage: &mut S, map: &mut BTreeMap>, genesis: Type) +where + S: BridgeStorage
,
+{
+    if let Type::Header(num, fork_id, None, None) = genesis {
+        let genesis = test_header(num);
+        map.insert(fork_id, vec![genesis.clone()]);
+
+        let genesis = ImportedHeader {
+            header: genesis,
+            requires_justification: false,
+            is_finalized: true,
+            signal_hash: None,
+        };
+
+        <BestFinalized<TestRuntime>>::put(genesis.hash());
+        storage.write_header(&genesis);
+    } else {
+        panic!("Unexpected genesis block format {:#?}", genesis)
+    }
+
+    let set_id = 1;
+    let authorities = authority_list();
+    let authority_set = AuthoritySet::new(authorities, set_id);
+    storage.update_current_authority_set(authority_set);
+}
+
+pub(crate) fn change_log(delay: u64) -> Digest<TestHash> {
+    let consensus_log = ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
+        next_authorities: vec![(alice(), 1), (bob(), 1)],
+        delay,
+    });
+
+    Digest::<TestHash> {
+        logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())],
+    }
+}
diff --git a/modules/substrate/src/lib.rs b/modules/substrate/src/lib.rs
new file mode 100644
index 00000000000..b97f1d4aa88
--- /dev/null
+++ b/modules/substrate/src/lib.rs
@@ -0,0 +1,1040 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
+
+//! Substrate Bridge Pallet
+//!
+//! This pallet is an on-chain light client for chains which have a notion of finality.
+//!
+//! It has a simple interface for achieving this. First, it can import headers to the runtime
+//! storage. During this it will check the validity of the headers and ensure they don't conflict
+//! with any existing headers (e.g. they're on a different finalized chain). Second, it can
+//! finalize an already imported header (and its ancestors) given a valid GRANDPA justification.
+//!
+//! With these two functions the pallet is able to form a "source of truth" for what headers have
+//! been finalized on a given Substrate chain. This can be a useful source of info for other
+//! higher-level applications.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+// Runtime-generated enums
+#![allow(clippy::large_enum_variant)]
+
+use crate::storage::ImportedHeader;
+use bp_header_chain::AuthoritySet;
+use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf};
+use frame_support::{
+    decl_error, decl_module, decl_storage, dispatch::DispatchResult, ensure, traits::Get, weights::DispatchClass,
+};
+use frame_system::{ensure_signed, RawOrigin};
+use sp_runtime::traits::Header as HeaderT;
+use sp_runtime::{traits::BadOrigin, RuntimeDebug};
+use sp_std::{marker::PhantomData, prelude::*};
+use sp_trie::StorageProof;
+
+// Re-export since the node uses these when configuring genesis
+pub use storage::{InitializationData, ScheduledChange};
+
+mod storage;
+mod verifier;
+
+#[cfg(test)]
+mod mock;
+
+#[cfg(test)]
+mod fork_tests;
+
+/// Block number of the bridged chain.
+pub(crate) type BridgedBlockNumber<T> = BlockNumberOf<<T as Config>::BridgedChain>;
+/// Block hash of the bridged chain.
+pub(crate) type BridgedBlockHash<T> = HashOf<<T as Config>::BridgedChain>;
+/// Hasher of the bridged chain.
+pub(crate) type BridgedBlockHasher<T> = HasherOf<<T as Config>::BridgedChain>;
+/// Header of the bridged chain.
+pub(crate) type BridgedHeader<T> = HeaderOf<<T as Config>::BridgedChain>;
+
+/// A convenience type identifying headers.
+#[derive(RuntimeDebug, PartialEq)]
+pub struct HeaderId<H: HeaderT> {
+    /// The block number of the header.
+    pub number: H::Number,
+    /// The hash of the header.
+    pub hash: H::Hash,
+}
+
+pub trait Config: frame_system::Config {
+    /// Chain that we are bridging here.
+    type BridgedChain: Chain;
+}
+
+decl_storage! {
+    trait Store for Module<T: Config> as SubstrateBridge {
+        /// Hash of the header used to bootstrap the pallet.
+        InitialHash: BridgedBlockHash<T>;
+        /// The number of the highest block(s) we know of.
+        BestHeight: BridgedBlockNumber<T>;
+        /// Hash of the header at the highest known height.
+        ///
+        /// If there are multiple headers at the same "best" height
+        /// this will contain all of their hashes.
+        BestHeaders: Vec<BridgedBlockHash<T>>;
+        /// Hash of the best finalized header.
+        BestFinalized: BridgedBlockHash<T>;
+        /// The set of header IDs (number, hash) which enact an authority set change and therefore
+        /// require a GRANDPA justification.
+        RequiresJustification: map hasher(identity) BridgedBlockHash<T> => BridgedBlockNumber<T>;
+        /// Headers which have been imported into the pallet.
+        ImportedHeaders: map hasher(identity) BridgedBlockHash<T> => Option<ImportedHeader<BridgedHeader<T>>>;
+        /// The current GRANDPA Authority set.
+        CurrentAuthoritySet: AuthoritySet;
+        /// The next scheduled authority set change for a given fork.
+        ///
+        /// The fork is indicated by the header which _signals_ the change (key in the mapping).
+        /// Note that this is different from a header which _enacts_ a change.
+        // GRANDPA doesn't require there to always be a pending change. In fact, most of the time
+        // there will be no pending change available.
+        NextScheduledChange: map hasher(identity) BridgedBlockHash<T> => Option<ScheduledChange<BridgedBlockNumber<T>>>;
+        /// Optional pallet owner.
+        ///
+        /// Pallet owner has a right to halt all pallet operations and then resume them. If it is
+        /// `None`, then there are no direct ways to halt/resume pallet operations, but other
+        /// runtime methods may still be used to do that (e.g. a democracy referendum to update the
+        /// halt flag directly, or a call to `halt_operations`).
+        ModuleOwner get(fn module_owner): Option<T::AccountId>;
+        /// If true, all pallet transactions fail immediately.
+        IsHalted get(fn is_halted): bool;
+    }
+    add_extra_genesis {
+        config(owner): Option<T::AccountId>;
+        config(init_data): Option<InitializationData<BridgedHeader<T>>>;
+        build(|config| {
+            if let Some(ref owner) = config.owner {
+                <ModuleOwner<T>>::put(owner);
+            }
+
+            if let Some(init_data) = config.init_data.clone() {
+                initialize_bridge::<T>(init_data);
+            } else {
+                // Since the bridge hasn't been initialized we shouldn't allow anyone to perform
+                // transactions.
+                IsHalted::put(true);
+            }
+        })
+    }
+}
+
+decl_error! {
+    pub enum Error for Module<T: Config> {
+        /// This header has failed basic verification.
+        InvalidHeader,
+        /// This header has not been finalized.
+        UnfinalizedHeader,
+        /// The header is unknown.
+        UnknownHeader,
+        /// The storage proof doesn't contain the storage root, so it is invalid for the given header.
+        StorageRootMismatch,
+        /// Error when trying to fetch a storage value from the proof.
+        StorageValueUnavailable,
+        /// All pallet operations are halted.
+        Halted,
+        /// The pallet has already been initialized.
+        AlreadyInitialized,
+        /// The given header is not a descendant of a particular header.
+        NotDescendant,
+        /// The header being imported is on a fork which is incompatible with the current chain.
+        ///
+        /// This can happen if we try and import a finalized header at a lower height than our
+        /// current `best_finalized` header.
+        ConflictingFork,
+    }
+}
+
+decl_module! {
+    pub struct Module<T: Config> for enum Call where origin: T::Origin {
+        type Error = Error<T>;
+
+        /// Import a signed Substrate header into the runtime.
+        ///
+        /// This will perform some basic checks to make sure it is fine to
+        /// import into the runtime. However, it does not perform any checks
+        /// related to finality.
+        // TODO: Update weights [#78]
+        #[weight = 0]
+        pub fn import_signed_header(
+            origin,
+            header: BridgedHeader<T>,
+        ) -> DispatchResult {
+            ensure_operational::<T>()?;
+            let _ = ensure_signed(origin)?;
+            let hash = header.hash();
+            log::trace!("Going to import header {:?}: {:?}", hash, header);
+
+            let mut verifier = verifier::Verifier {
+                storage: PalletStorage::<T>::new(),
+            };
+
+            let _ = verifier
+                .import_header(hash, header)
+                .map_err(|e| {
+                    log::error!("Failed to import header {:?}: {:?}", hash, e);
+                    <Error<T>>::InvalidHeader
+                })?;
+
+            log::trace!("Successfully imported header: {:?}", hash);
+
+            Ok(())
+        }
+
+        /// Import a finality proof for a particular header.
+        ///
+        /// This will take care of finalizing any already imported headers
+        /// which get finalized when importing this particular proof, as well
+        /// as updating the current and next validator sets.
+        // TODO: Update weights [#78]
+        #[weight = 0]
+        pub fn finalize_header(
+            origin,
+            hash: BridgedBlockHash<T>,
+            finality_proof: Vec<u8>,
+        ) -> DispatchResult {
+            ensure_operational::<T>()?;
+            let _ = ensure_signed(origin)?;
+            log::trace!("Going to finalize header: {:?}", hash);
+
+            let mut verifier = verifier::Verifier {
+                storage: PalletStorage::<T>::new(),
+            };
+
+            let _ = verifier
+                .import_finality_proof(hash, finality_proof.into())
+                .map_err(|e| {
+                    log::error!("Failed to finalize header {:?}: {:?}", hash, e);
+                    <Error<T>>::UnfinalizedHeader
+                })?;
+
+            log::trace!("Successfully finalized header: {:?}", hash);
+
+            Ok(())
+        }
+
+        /// Bootstrap the bridge pallet with an initial header and authority set from which to sync.
+        ///
+        /// The initial configuration provided does not need to be the genesis header of the bridged
+        /// chain, it can be any arbitrary header. You can also provide the next scheduled set change
+        /// if it is already known.
+        ///
+        /// This function is only allowed to be called from a trusted origin and writes to storage
+        /// with practically no checks in terms of the validity of the data. It is important that
+        /// you ensure that valid data is being passed in.
+        // TODO: Update weights [#78]
+        #[weight = 0]
+        pub fn initialize(
+            origin,
+            init_data: InitializationData<BridgedHeader<T>>,
+        ) {
+            ensure_owner_or_root::<T>(origin)?;
+            let init_allowed = !<BestFinalized<T>>::exists();
+            ensure!(init_allowed, <Error<T>>::AlreadyInitialized);
+            initialize_bridge::<T>(init_data.clone());
+
+            log::info!(
+                "Pallet has been initialized with the following parameters: {:?}", init_data
+            );
+        }
+
+        /// Change `ModuleOwner`.
+        ///
+        /// May only be called either by root, or by `ModuleOwner`.
+        #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)]
+        pub fn set_owner(origin, new_owner: Option<T::AccountId>) {
+            ensure_owner_or_root::<T>(origin)?;
+            match new_owner {
+                Some(new_owner) => {
+                    ModuleOwner::<T>::put(&new_owner);
+                    log::info!("Setting pallet Owner to: {:?}", new_owner);
+                },
+                None => {
+                    ModuleOwner::<T>::kill();
+                    log::info!("Removed Owner of pallet.");
+                },
+            }
+        }
+
+        /// Halt all pallet operations. Operations may be resumed using the `resume_operations` call.
+        ///
+        /// May only be called either by root, or by `ModuleOwner`.
+        #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)]
+        pub fn halt_operations(origin) {
+            ensure_owner_or_root::<T>(origin)?;
+            IsHalted::put(true);
+            log::warn!("Stopping pallet operations.");
+        }
+
+        /// Resume all pallet operations. May be called even if the pallet is halted.
+        ///
+        /// May only be called either by root, or by `ModuleOwner`.
+        #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)]
+        pub fn resume_operations(origin) {
+            ensure_owner_or_root::<T>(origin)?;
+            IsHalted::put(false);
+            log::info!("Resuming pallet operations.");
+        }
+    }
+}
+
+impl<T: Config> Module<T> {
+    /// Get the highest header(s) that the pallet knows of.
+    pub fn best_headers() -> Vec<(BridgedBlockNumber<T>, BridgedBlockHash<T>)> {
+        PalletStorage::<T>::new()
+            .best_headers()
+            .iter()
+            .map(|id| (id.number, id.hash))
+            .collect()
+    }
+
+    /// Get the best finalized header the pallet knows of.
+    ///
+    /// Returns a dummy header if there is no best header. This can only happen
+    /// if the pallet has not been initialized yet.
+    ///
+    /// Since this header has been finalized correctly, a user of the bridge
+    /// pallet should be confident that any transactions that were
+    /// included in this or any previous header will not be reverted.
+    pub fn best_finalized() -> BridgedHeader<T> {
+        PalletStorage::<T>::new().best_finalized_header().header
+    }
+
+    /// Check if a particular header is known to the bridge pallet.
+    pub fn is_known_header(hash: BridgedBlockHash<T>) -> bool {
+        PalletStorage::<T>::new().header_exists(hash)
+    }
+
+    /// Check if a particular header is finalized.
+    ///
+    /// Will return false if the header is not known to the pallet.
+    // One thing worth noting here is that this approach won't work well
+    // once we track forks since there could be an older header on a
+    // different fork which isn't an ancestor of our best finalized header.
+    pub fn is_finalized_header(hash: BridgedBlockHash<T>) -> bool {
+        let storage = PalletStorage::<T>::new();
+        if let Some(header) = storage.header_by_hash(hash) {
+            header.is_finalized
+        } else {
+            false
+        }
+    }
+
+    /// Returns a list of headers which require finality proofs.
+    ///
+    /// These headers require proofs because they enact authority set changes.
+    pub fn require_justifications() -> Vec<(BridgedBlockNumber<T>, BridgedBlockHash<T>)> {
+        PalletStorage::<T>::new()
+            .missing_justifications()
+            .iter()
+            .map(|id| (id.number, id.hash))
+            .collect()
+    }
+
+    /// Verify that the passed storage proof is valid, given it is crafted using
+    /// a known finalized header. If the proof is valid, then the `parse` callback
+    /// is called and the function returns its result.
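+    ///
+    /// # Example
+    ///
+    /// A rough usage sketch; the storage key and the `read_value` call on
+    /// `bp_runtime::StorageProofChecker` are illustrative assumptions, not part of this pallet:
+    ///
+    /// ```ignore
+    /// let value = Module::<T>::parse_finalized_storage_proof(
+    ///     finalized_header_hash,
+    ///     storage_proof,
+    ///     |checker| checker.read_value(b"some_raw_storage_key"),
+    /// )?;
+    /// ```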
+    pub fn parse_finalized_storage_proof<R>(
+        finalized_header_hash: BridgedBlockHash<T>,
+        storage_proof: StorageProof,
+        parse: impl FnOnce(bp_runtime::StorageProofChecker<BridgedBlockHasher<T>>) -> R,
+    ) -> Result<R, sp_runtime::DispatchError> {
+        let storage = PalletStorage::<T>::new();
+        let header = storage
+            .header_by_hash(finalized_header_hash)
+            .ok_or(Error::<T>::UnknownHeader)?;
+        if !header.is_finalized {
+            return Err(Error::<T>::UnfinalizedHeader.into());
+        }
+
+        let storage_proof_checker = bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof)
+            .map_err(|_| Error::<T>::StorageRootMismatch)?;
+        Ok(parse(storage_proof_checker))
+    }
+}
+
+impl<T: Config> bp_header_chain::HeaderChain<BridgedHeader<T>, sp_runtime::DispatchError> for Module<T> {
+    fn best_finalized() -> BridgedHeader<T> {
+        PalletStorage::<T>::new().best_finalized_header().header
+    }
+
+    fn authority_set() -> AuthoritySet {
+        PalletStorage::<T>::new().current_authority_set()
+    }
+
+    fn append_header(header: BridgedHeader<T>) -> Result<(), sp_runtime::DispatchError> {
+        // We do a quick check here to ensure that our header chain is making progress and isn't
+        // "travelling back in time" (which would be indicative of something bad, e.g. a hard fork).
+        let best_finalized = PalletStorage::<T>::new().best_finalized_header().header;
+        ensure!(best_finalized.number() < header.number(), <Error<T>>::ConflictingFork);
+        import_header_unchecked::<_, T>(&mut PalletStorage::<T>::new(), header);
+
+        Ok(())
+    }
+}
+
+/// Import a finalized header without verifying that it is indeed finalized.
+///
+/// This function assumes that the given header has already been proven to be valid and
+/// finalized. Using this assumption it will write it to storage with minimal checks. That
+/// means it's of great importance that this function is *not* called with any headers whose
+/// finality has not been checked, otherwise you risk bricking your bridge.
+///
+/// One thing this function does handle for you is GRANDPA authority set handoffs. However, since
+/// it does not do verification on the incoming header it will assume that the authority set change
+/// signals in the digest are well formed.
+fn import_header_unchecked<S, T>(storage: &mut S, header: BridgedHeader<T>)
+where
+    S: BridgeStorage<Header = BridgedHeader<T>
>,
+    T: Config,
+{
+    // Since we want to use the existing storage infrastructure we need to indicate the fork
+    // that we're on. We will assume that since we are using the unchecked import there are no
+    // forks, and can indicate that by using the first imported header's "fork".
+    let dummy_fork_hash = <InitialHash<T>>::get();
+
+    // If we have a pending change in storage let's check if the current header enacts it.
+    let enact_change = if let Some(pending_change) = storage.scheduled_set_change(dummy_fork_hash) {
+        pending_change.height == *header.number()
+    } else {
+        // We don't have a scheduled change in storage at the moment. Let's check if the current
+        // header signals an authority set change.
+        if let Some(change) = bp_header_chain::find_grandpa_authorities_scheduled_change(&header) {
+            let next_set = AuthoritySet {
+                authorities: change.next_authorities,
+                set_id: storage.current_authority_set().set_id + 1,
+            };
+
+            let height = *header.number() + change.delay;
+            let scheduled_change = ScheduledChange {
+                authority_set: next_set,
+                height,
+            };
+
+            storage.schedule_next_set_change(dummy_fork_hash, scheduled_change);
+
+            // If the delay is 0 this header will enact the change it signaled
+            height == *header.number()
+        } else {
+            false
+        }
+    };
+
+    if enact_change {
+        const ENACT_SET_PROOF: &str = "We only set `enact_change` as `true` if we are sure that there is a scheduled
+            authority set change in storage. Therefore, it must exist.";
+
+        // If we are unable to enact an authority set it means our storage entry for scheduled
+        // changes is missing. Best to crash since this is likely a bug.
+        let _ = storage.enact_authority_set(dummy_fork_hash).expect(ENACT_SET_PROOF);
+    }
+
+    storage.update_best_finalized(header.hash());
+
+    storage.write_header(&ImportedHeader {
+        header,
+        requires_justification: false,
+        is_finalized: true,
+        signal_hash: None,
+    });
+}
+
+/// Ensure that the origin is either root, or `ModuleOwner`.
+fn ensure_owner_or_root<T: Config>(origin: T::Origin) -> Result<(), BadOrigin> {
+    match origin.into() {
+        Ok(RawOrigin::Root) => Ok(()),
+        Ok(RawOrigin::Signed(ref signer)) if Some(signer) == <Module<T>>::module_owner().as_ref() => Ok(()),
+        _ => Err(BadOrigin),
+    }
+}
+
+/// Ensure that the pallet is in operational mode (not halted).
+fn ensure_operational<T: Config>() -> Result<(), Error<T>> {
+    if IsHalted::get() {
+        Err(<Error<T>>::Halted)
+    } else {
+        Ok(())
+    }
+}
+
+/// (Re)initialize the bridge with the given header, for use in external benchmarks.
+#[cfg(feature = "runtime-benchmarks")]
+pub fn initialize_for_benchmarks<T: Config>(header: HeaderOf<T::BridgedChain>) {
+    initialize_bridge::<T>(InitializationData {
+        header,
+        authority_list: Vec::new(), // we don't verify any proofs in external benchmarks
+        set_id: 0,
+        scheduled_change: None,
+        is_halted: false,
+    });
+}
+
+/// Since this writes to storage with no real checks, it should only be used by functions that
+/// were called from a trusted origin.
+fn initialize_bridge<T: Config>(init_params: InitializationData<BridgedHeader<T>>) {
+    let InitializationData {
+        header,
+        authority_list,
+        set_id,
+        scheduled_change,
+        is_halted,
+    } = init_params;
+
+    let initial_hash = header.hash();
+
+    let mut signal_hash = None;
+    if let Some(ref change) = scheduled_change {
+        assert!(
+            change.height > *header.number(),
+            "Changes must be scheduled past initial header."
+        );
+
+        signal_hash = Some(initial_hash);
+        <NextScheduledChange<T>>::insert(initial_hash, change);
+    };
+
+    <InitialHash<T>>::put(initial_hash);
+    <BestHeight<T>>::put(header.number());
+    <BestHeaders<T>>::put(vec![initial_hash]);
+    <BestFinalized<T>>::put(initial_hash);
+
+    let authority_set = AuthoritySet::new(authority_list, set_id);
+    CurrentAuthoritySet::put(authority_set);
+
+    <ImportedHeaders<T>>::insert(
+        initial_hash,
+        ImportedHeader {
+            header,
+            requires_justification: false,
+            is_finalized: true,
+            signal_hash,
+        },
+    );
+
+    IsHalted::put(is_halted);
+}
+
+/// Expected interface for interacting with bridge pallet storage.
+// TODO: This should be split into its own less-Substrate-dependent crate
+pub trait BridgeStorage {
+    /// The header type being used by the pallet.
+    type Header: HeaderT;
+
+    /// Write a header to storage.
+    fn write_header(&mut self, header: &ImportedHeader<Self::Header>);
+
+    /// Get the header(s) at the highest known height.
+    fn best_headers(&self) -> Vec<HeaderId<Self::Header>>;
+
+    /// Get the best finalized header the pallet knows of.
+    ///
+    /// Returns a dummy header if there is no best header. This can only happen if the pallet
+    /// has not been initialized yet.
+    fn best_finalized_header(&self) -> ImportedHeader<Self::Header>;
+
+    /// Update the best finalized header the pallet knows of.
+    fn update_best_finalized(&self, hash: <Self::Header as HeaderT>::Hash);
+
+    /// Check if a particular header is known to the pallet.
+    fn header_exists(&self, hash: <Self::Header as HeaderT>::Hash) -> bool;
+
+    /// Returns a list of headers which require justifications.
+    ///
+    /// A header will require a justification if it enacts a new authority set.
+    fn missing_justifications(&self) -> Vec<HeaderId<Self::Header>>;
+
+    /// Get a specific header by its hash.
+    ///
+    /// Returns `None` if it is not known to the pallet.
+    fn header_by_hash(&self, hash: <Self::Header as HeaderT>::Hash) -> Option<ImportedHeader<Self::Header>>;
+
+    /// Get the current GRANDPA authority set.
+    fn current_authority_set(&self) -> AuthoritySet;
+
+    /// Update the current GRANDPA authority set.
+    ///
+    /// Should only be updated when a scheduled change has been triggered.
+    fn update_current_authority_set(&self, new_set: AuthoritySet);
+
+    /// Replace the current authority set with the next scheduled set.
+    ///
+    /// Returns an error if there is no scheduled authority set to enact.
+    #[allow(clippy::result_unit_err)]
+    fn enact_authority_set(&mut self, signal_hash: <Self::Header as HeaderT>::Hash) -> Result<(), ()>;
+
+    /// Get the next scheduled GRANDPA authority set change.
+    fn scheduled_set_change(
+        &self,
+        signal_hash: <Self::Header as HeaderT>::Hash,
+    ) -> Option<ScheduledChange<<Self::Header as HeaderT>::Number>>;
+
+    /// Schedule a GRANDPA authority set change in the future.
+    ///
+    /// Takes the hash of the header which scheduled this particular change.
+    fn schedule_next_set_change(
+        &mut self,
+        signal_hash: <Self::Header as HeaderT>::Hash,
+        next_change: ScheduledChange<<Self::Header as HeaderT>::Number>,
+    );
+}
+
+/// Used to interact with the pallet storage in a more abstract way.
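+///
+/// A minimal usage sketch, mirroring what the tests in this module do (here `T` is any
+/// runtime implementing `Config`):
+///
+/// ```ignore
+/// let mut storage = PalletStorage::<T>::new();
+/// storage.write_header(&imported_header);
+/// let best_finalized = storage.best_finalized_header();
+/// ```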
+#[derive(Default, Clone)]
+pub struct PalletStorage<T>(PhantomData<T>);
+
+impl<T> PalletStorage<T> {
+    fn new() -> Self {
+        Self(PhantomData::<T>::default())
+    }
+}
+
+impl<T: Config> BridgeStorage for PalletStorage<T> {
+    type Header = BridgedHeader<T>;
+
+    fn write_header(&mut self, header: &ImportedHeader<BridgedHeader<T>>) {
+        use core::cmp::Ordering;
+
+        let hash = header.hash();
+        let current_height = header.number();
+        let best_height = <BestHeight<T>>::get();
+
+        match current_height.cmp(&best_height) {
+            Ordering::Equal => {
+                // Want to avoid duplicates in the case where we're writing a finalized header to
+                // storage which also happens to be at the best height
+                let not_duplicate = !<ImportedHeaders<T>>::contains_key(hash);
+                if not_duplicate {
+                    <BestHeaders<T>>::append(hash);
+                }
+            }
+            Ordering::Greater => {
+                <BestHeaders<T>>::kill();
+                <BestHeaders<T>>::append(hash);
+                <BestHeight<T>>::put(current_height);
+            }
+            Ordering::Less => {
+                // This is fine. We can still have a valid header, but it might just be on a
+                // different fork and at a lower height than the "best" overall header.
+            }
+        }
+
+        if header.requires_justification {
+            <RequiresJustification<T>>::insert(hash, current_height);
+        } else {
+            // If the key doesn't exist this is a no-op, so it's fine to call it often
+            <RequiresJustification<T>>::remove(hash);
+        }
+
+        <ImportedHeaders<T>>::insert(hash, header);
+    }
+
+    fn best_headers(&self) -> Vec<HeaderId<BridgedHeader<T>>> {
+        let number = <BestHeight<T>>::get();
+        <BestHeaders<T>>::get()
+            .iter()
+            .map(|hash| HeaderId { number, hash: *hash })
+            .collect()
+    }
+
+    fn best_finalized_header(&self) -> ImportedHeader<BridgedHeader<T>> {
+        // We will only construct a dummy header if the pallet is not initialized and someone tries
+        // to use the public module interface (not dispatchables) to get the best finalized header.
+        // This is an edge case since this can only really happen when bootstrapping the bridge.
+        let hash = <BestFinalized<T>>::get();
+        self.header_by_hash(hash).unwrap_or_else(|| ImportedHeader {
+            header: <BridgedHeader<T>>::new(
+                Default::default(),
+                Default::default(),
+                Default::default(),
+                Default::default(),
+                Default::default(),
+            ),
+            requires_justification: false,
+            is_finalized: false,
+            signal_hash: None,
+        })
+    }
+
+    fn update_best_finalized(&self, hash: BridgedBlockHash<T>) {
+        <BestFinalized<T>>::put(hash);
+    }
+
+    fn header_exists(&self, hash: BridgedBlockHash<T>) -> bool {
+        <ImportedHeaders<T>>::contains_key(hash)
+    }
+
+    fn header_by_hash(&self, hash: BridgedBlockHash<T>) -> Option<ImportedHeader<BridgedHeader<T>>> {
+        <ImportedHeaders<T>>::get(hash)
+    }
+
+    fn missing_justifications(&self) -> Vec<HeaderId<BridgedHeader<T>>> {
+        <RequiresJustification<T>>::iter()
+            .map(|(hash, number)| HeaderId { number, hash })
+            .collect()
+    }
+
+    fn current_authority_set(&self) -> AuthoritySet {
+        CurrentAuthoritySet::get()
+    }
+
+    fn update_current_authority_set(&self, new_set: AuthoritySet) {
+        CurrentAuthoritySet::put(new_set)
+    }
+
+    fn enact_authority_set(&mut self, signal_hash: BridgedBlockHash<T>) -> Result<(), ()> {
+        let new_set = <NextScheduledChange<T>>::take(signal_hash).ok_or(())?.authority_set;
+        self.update_current_authority_set(new_set);
+
+        Ok(())
+    }
+
+    fn scheduled_set_change(&self, signal_hash: BridgedBlockHash<T>) -> Option<ScheduledChange<BridgedBlockNumber<T>>> {
+        <NextScheduledChange<T>>::get(signal_hash)
+    }
+
+    fn schedule_next_set_change(
+        &mut self,
+        signal_hash: BridgedBlockHash<T>,
+        next_change: ScheduledChange<BridgedBlockNumber<T>>,
+    ) {
+        <NextScheduledChange<T>>::insert(signal_hash, next_change)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::mock::{run_test, test_header, unfinalized_header, Origin, TestHeader, TestRuntime};
+    use bp_header_chain::HeaderChain;
+    use bp_test_utils::{alice, authority_list, bob};
+    use frame_support::{assert_err, assert_noop, assert_ok};
+    use sp_runtime::DispatchError;
+
+    fn init_with_origin(origin: Origin) -> Result<InitializationData<TestHeader>, DispatchError> {
+        let init_data = InitializationData {
+            header: test_header(1),
+            authority_list:
authority_list(), + set_id: 1, + scheduled_change: None, + is_halted: false, + }; + + Module::::initialize(origin, init_data.clone()).map(|_| init_data) + } + + #[test] + fn init_root_or_owner_origin_can_initialize_pallet() { + run_test(|| { + assert_noop!(init_with_origin(Origin::signed(1)), DispatchError::BadOrigin); + assert_ok!(init_with_origin(Origin::root())); + + // Reset storage so we can initialize the pallet again + BestFinalized::::kill(); + ModuleOwner::::put(2); + assert_ok!(init_with_origin(Origin::signed(2))); + }) + } + + #[test] + fn init_storage_entries_are_correctly_initialized() { + run_test(|| { + assert!(Module::::best_headers().is_empty()); + assert_eq!(Module::::best_finalized(), test_header(0)); + + let init_data = init_with_origin(Origin::root()).unwrap(); + + let storage = PalletStorage::::new(); + assert!(storage.header_exists(init_data.header.hash())); + assert_eq!( + storage.best_headers()[0], + crate::HeaderId { + number: *init_data.header.number(), + hash: init_data.header.hash() + } + ); + assert_eq!(storage.best_finalized_header().hash(), init_data.header.hash()); + assert_eq!(storage.current_authority_set().authorities, init_data.authority_list); + assert_eq!(IsHalted::get(), false); + }) + } + + #[test] + fn init_can_only_initialize_pallet_once() { + run_test(|| { + assert_ok!(init_with_origin(Origin::root())); + assert_noop!( + init_with_origin(Origin::root()), + >::AlreadyInitialized + ); + }) + } + + #[test] + fn pallet_owner_may_change_owner() { + run_test(|| { + ModuleOwner::::put(2); + + assert_ok!(Module::::set_owner(Origin::root(), Some(1))); + assert_noop!( + Module::::halt_operations(Origin::signed(2)), + DispatchError::BadOrigin, + ); + assert_ok!(Module::::halt_operations(Origin::root())); + + assert_ok!(Module::::set_owner(Origin::signed(1), None)); + assert_noop!( + Module::::resume_operations(Origin::signed(1)), + DispatchError::BadOrigin, + ); + assert_noop!( + Module::::resume_operations(Origin::signed(2)), + DispatchError::BadOrigin, + ); + assert_ok!(Module::::resume_operations(Origin::root())); + }); + } + + #[test] + fn pallet_may_be_halted_by_root() { + run_test(|| { + assert_ok!(Module::::halt_operations(Origin::root())); + assert_ok!(Module::::resume_operations(Origin::root())); + }); + } + + #[test] + fn pallet_may_be_halted_by_owner() { + run_test(|| { + ModuleOwner::::put(2); + + assert_ok!(Module::::halt_operations(Origin::signed(2))); + assert_ok!(Module::::resume_operations(Origin::signed(2))); + + assert_noop!( + Module::::halt_operations(Origin::signed(1)), + DispatchError::BadOrigin, + ); + assert_noop!( + Module::::resume_operations(Origin::signed(1)), + DispatchError::BadOrigin, + ); + + assert_ok!(Module::::halt_operations(Origin::signed(2))); + assert_noop!( + Module::::resume_operations(Origin::signed(1)), + DispatchError::BadOrigin, + ); + }); + } + + #[test] + fn pallet_rejects_transactions_if_halted() { + run_test(|| { + IsHalted::put(true); + + assert_noop!( + Module::::import_signed_header(Origin::signed(1), test_header(1)), + Error::::Halted, + ); + + assert_noop!( + Module::::finalize_header(Origin::signed(1), test_header(1).hash(), vec![]), + Error::::Halted, + ); + }) + } + + #[test] + fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { + run_test(|| { + assert_noop!( + Module::::parse_finalized_storage_proof( + Default::default(), + StorageProof::new(vec![]), + |_| (), + ), + Error::::UnknownHeader, + ); + }); + } + + #[test] + fn 
parse_finalized_storage_proof_rejects_proof_on_unfinalized_header() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let header = unfinalized_header(1); + storage.write_header(&header); + + assert_noop!( + Module::::parse_finalized_storage_proof( + header.header.hash(), + StorageProof::new(vec![]), + |_| (), + ), + Error::::UnfinalizedHeader, + ); + }); + } + + #[test] + fn parse_finalized_storage_accepts_valid_proof() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof(); + let mut header = unfinalized_header(1); + header.is_finalized = true; + header.header.set_state_root(state_root); + storage.write_header(&header); + + assert_ok!( + Module::::parse_finalized_storage_proof(header.header.hash(), storage_proof, |_| (),), + (), + ); + }); + } + + #[test] + fn importing_unchecked_headers_works() { + run_test(|| { + init_with_origin(Origin::root()).unwrap(); + let storage = PalletStorage::::new(); + + let header = test_header(2); + assert_ok!(Module::::append_header(header.clone())); + + assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized); + assert_eq!(storage.best_finalized_header().header, header); + assert_eq!(storage.best_headers()[0].hash, header.hash()); + }) + } + + #[test] + fn importing_unchecked_header_ensures_that_chain_is_extended() { + run_test(|| { + init_with_origin(Origin::root()).unwrap(); + + let header = test_header(3); + assert_ok!(Module::::append_header(header)); + + let header = test_header(2); + assert_err!( + Module::::append_header(header), + Error::::ConflictingFork, + ); + + let header = test_header(4); + assert_ok!(Module::::append_header(header)); + }) + } + + #[test] + fn importing_unchecked_headers_enacts_new_authority_set() { + run_test(|| { + init_with_origin(Origin::root()).unwrap(); + let storage = PalletStorage::::new(); + + let next_set_id = 2; + let next_authorities = vec![(alice(), 1), (bob(), 1)]; + + // Need to update the header digest to indicate that our header signals an authority set + // change. The change will be enacted when we import our header. + let mut header = test_header(2); + header.digest = fork_tests::change_log(0); + + // Let's import our test header + assert_ok!(Module::::append_header(header.clone())); + + // Make sure that our header is the best finalized + assert_eq!(storage.best_finalized_header().header, header); + assert_eq!(storage.best_headers()[0].hash, header.hash()); + + // Make sure that the authority set actually changed upon importing our header + assert_eq!( + storage.current_authority_set(), + AuthoritySet::new(next_authorities, next_set_id), + ); + }) + } + + #[test] + fn importing_unchecked_headers_enacts_new_authority_set_from_old_header() { + run_test(|| { + init_with_origin(Origin::root()).unwrap(); + let storage = PalletStorage::::new(); + + let next_set_id = 2; + let next_authorities = vec![(alice(), 1), (bob(), 1)]; + + // Need to update the header digest to indicate that our header signals an authority set + // change. However, the change doesn't happen until the next block. 
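+            // (As an illustration: `change_log(1)` signals a change at block 2 with a delay of
+            // one block, so the new authority set only becomes active once block 3 is imported.)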
+ let mut schedules_change = test_header(2); + schedules_change.digest = fork_tests::change_log(1); + let header = test_header(3); + + // Let's import our test headers + assert_ok!(Module::::append_header(schedules_change)); + assert_ok!(Module::::append_header(header.clone())); + + // Make sure that our header is the best finalized + assert_eq!(storage.best_finalized_header().header, header); + assert_eq!(storage.best_headers()[0].hash, header.hash()); + + // Make sure that the authority set actually changed upon importing our header + assert_eq!( + storage.current_authority_set(), + AuthoritySet::new(next_authorities, next_set_id), + ); + }) + } + + #[test] + fn importing_unchecked_header_can_enact_set_change_scheduled_at_genesis() { + run_test(|| { + let storage = PalletStorage::::new(); + + let next_authorities = vec![(alice(), 1)]; + let next_set_id = 2; + let next_authority_set = AuthoritySet::new(next_authorities.clone(), next_set_id); + + let first_scheduled_change = ScheduledChange { + authority_set: next_authority_set, + height: 2, + }; + + let init_data = InitializationData { + header: test_header(1), + authority_list: authority_list(), + set_id: 1, + scheduled_change: Some(first_scheduled_change), + is_halted: false, + }; + + assert_ok!(Module::::initialize(Origin::root(), init_data)); + + // We are expecting an authority set change at height 2, so this header should enact + // that upon being imported. + assert_ok!(Module::::append_header(test_header(2))); + + // Make sure that the authority set actually changed upon importing our header + assert_eq!( + storage.current_authority_set(), + AuthoritySet::new(next_authorities, next_set_id), + ); + }) + } +} diff --git a/modules/substrate/src/verifier.rs b/modules/substrate/src/verifier.rs new file mode 100644 index 00000000000..a3207f07aff --- /dev/null +++ b/modules/substrate/src/verifier.rs @@ -0,0 +1,856 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The verifier's role is to check the validity of headers being imported, and also determine if +//! they can be finalized. +//! +//! When importing headers it performs checks to ensure that no invariants are broken (like +//! importing the same header twice). When it imports finality proofs it will ensure that the proof +//! has been signed off by the correct GRANDPA authorities, and also enact any authority set changes +//! if required. + +use crate::storage::{ImportedHeader, ScheduledChange}; +use crate::BridgeStorage; + +use bp_header_chain::{find_grandpa_authorities_scheduled_change, justification::verify_justification, AuthoritySet}; +use finality_grandpa::voter_set::VoterSet; +use sp_runtime::traits::{CheckedAdd, Header as HeaderT, One}; +use sp_runtime::RuntimeDebug; +use sp_std::{prelude::Vec, vec}; + +/// The finality proof used by the pallet. 
+///
+/// For a Substrate based chain using GRANDPA this will
+/// be an encoded GRANDPA Justification.
+#[derive(RuntimeDebug)]
+pub struct FinalityProof(Vec<u8>);
+
+impl From<&[u8]> for FinalityProof {
+    fn from(proof: &[u8]) -> Self {
+        Self(proof.to_vec())
+    }
+}
+
+impl From<Vec<u8>> for FinalityProof {
+    fn from(proof: Vec<u8>) -> Self {
+        Self(proof)
+    }
+}
+
+/// Errors which can happen while importing a header.
+#[derive(RuntimeDebug, PartialEq)]
+pub enum ImportError {
+    /// This header is at the same height or older than our latest finalized block, thus not useful.
+    OldHeader,
+    /// This header has already been imported by the pallet.
+    HeaderAlreadyExists,
+    /// We're missing a parent for this header.
+    MissingParent,
+    /// The number of the header does not follow its parent's number.
+    InvalidChildNumber,
+    /// The height of the next authority set change overflowed.
+    ScheduledHeightOverflow,
+    /// Received an authority set which was invalid in some way, such as
+    /// the authority weights being empty or overflowing the `AuthorityWeight`
+    /// type.
+    InvalidAuthoritySet,
+    /// This header is not allowed to be imported since an ancestor requires a finality proof.
+    ///
+    /// This can happen if an ancestor is supposed to enact an authority set change.
+    AwaitingFinalityProof,
+    /// This header schedules an authority set change even though we're still waiting
+    /// for an old authority set change to be enacted on this fork.
+    PendingAuthoritySetChange,
+}
+
+/// Errors which can happen while verifying a header's finality.
+#[derive(RuntimeDebug, PartialEq)]
+pub enum FinalizationError {
+    /// This header has never been imported by the pallet.
+    UnknownHeader,
+    /// Trying to prematurely import a justification.
+    PrematureJustification,
+    /// We failed to verify this header's ancestry.
+    AncestryCheckFailed,
+    /// This header is at the same height or older than our latest finalized block, thus not useful.
+    OldHeader,
+    /// The given justification was not able to finalize the given header.
+    ///
+    /// There are several reasons why this might happen, such as the justification being
+    /// signed by the wrong authority set, being given alongside an unexpected header,
+    /// or failing ancestry checks.
+    InvalidJustification,
+}
+
+/// Used to verify imported headers and their finality status.
+#[derive(RuntimeDebug)]
+pub struct Verifier<S> {
+    pub storage: S,
+}
+
+impl<S, H> Verifier<S>
+where
+    S: BridgeStorage<Header = H>
, + H: HeaderT, + H::Number: finality_grandpa::BlockNumberOps, +{ + /// Import a header to the pallet. + /// + /// Will perform some basic checks to make sure that this header doesn't break any assumptions + /// such as being on a different finalized fork. + pub fn import_header(&mut self, hash: H::Hash, header: H) -> Result<(), ImportError> { + let best_finalized = self.storage.best_finalized_header(); + + if header.number() <= best_finalized.number() { + return Err(ImportError::OldHeader); + } + + if self.storage.header_exists(hash) { + return Err(ImportError::HeaderAlreadyExists); + } + + let parent_header = self + .storage + .header_by_hash(*header.parent_hash()) + .ok_or(ImportError::MissingParent)?; + + let parent_number = *parent_header.number(); + if parent_number + One::one() != *header.number() { + return Err(ImportError::InvalidChildNumber); + } + + // A header requires a justification if it enacts an authority set change. We don't + // need to act on it right away (we'll update the set once the header gets finalized), but + // we need to make a note of it. + // + // Note: This assumes that we can only have one authority set change pending per fork at a + // time. While this is not strictly true of GRANDPA (it can have multiple pending changes, + // even across forks), this assumption simplifies our tracking of authority set changes. + let mut signal_hash = parent_header.signal_hash; + let scheduled_change = find_grandpa_authorities_scheduled_change(&header); + + // Check if our fork is expecting an authority set change + let requires_justification = if let Some(hash) = signal_hash { + const PROOF: &str = "If the header has a signal hash it means there's an accompanying set + change in storage, therefore this must always be valid."; + let pending_change = self.storage.scheduled_set_change(hash).expect(PROOF); + + if scheduled_change.is_some() { + return Err(ImportError::PendingAuthoritySetChange); + } + + if *header.number() > pending_change.height { + return Err(ImportError::AwaitingFinalityProof); + } + + pending_change.height == *header.number() + } else { + // Since we don't currently have a pending authority set change let's check if the header + // contains a log indicating when the next change should be. + if let Some(change) = scheduled_change { + let mut total_weight = 0u64; + + for (_id, weight) in &change.next_authorities { + total_weight = total_weight + .checked_add(*weight) + .ok_or(ImportError::InvalidAuthoritySet)?; + } + + // If none of the authorities have a weight associated with them the + // set is essentially empty. We don't want that. + if total_weight == 0 { + return Err(ImportError::InvalidAuthoritySet); + } + + let next_set = AuthoritySet { + authorities: change.next_authorities, + set_id: self.storage.current_authority_set().set_id + 1, + }; + + let height = (*header.number()) + .checked_add(&change.delay) + .ok_or(ImportError::ScheduledHeightOverflow)?; + + let scheduled_change = ScheduledChange { + authority_set: next_set, + height, + }; + + // Note: It's important that the signal hash is updated if a header schedules a + // change or else we end up with inconsistencies in other places. 
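+                // (For instance, `import_finality_proof` below `expect`s a scheduled change to
+                // exist in storage for any header marked `requires_justification`, so a missing
+                // signal hash would turn finalization into a panic.)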
+                signal_hash = Some(hash);
+                self.storage.schedule_next_set_change(hash, scheduled_change);
+
+                // If the delay is 0 this header will enact the change it signaled
+                height == *header.number()
+            } else {
+                false
+            }
+        };
+
+        self.storage.write_header(&ImportedHeader {
+            header,
+            requires_justification,
+            is_finalized: false,
+            signal_hash,
+        });
+
+        Ok(())
+    }
+
+    /// Verify that a previously imported header can be finalized with the given GRANDPA finality
+    /// proof. If the header enacts an authority set change the change will be applied once the
+    /// header has been finalized.
+    pub fn import_finality_proof(&mut self, hash: H::Hash, proof: FinalityProof) -> Result<(), FinalizationError> {
+        // Make sure that we've previously imported this header
+        let header = self
+            .storage
+            .header_by_hash(hash)
+            .ok_or(FinalizationError::UnknownHeader)?;
+
+        // We don't want to finalize an ancestor of an already finalized
+        // header; this would be inconsistent
+        let last_finalized = self.storage.best_finalized_header();
+        if header.number() <= last_finalized.number() {
+            return Err(FinalizationError::OldHeader);
+        }
+
+        let current_authority_set = self.storage.current_authority_set();
+        let voter_set = VoterSet::new(current_authority_set.authorities).expect(
+            "We verified the correctness of the authority list during header import,
+            before writing them to storage. This must always be valid.",
+        );
+        verify_justification::<H>(
+            (hash, *header.number()),
+            current_authority_set.set_id,
+            &voter_set,
+            &proof.0,
+        )
+        .map_err(|_| FinalizationError::InvalidJustification)?;
+        log::trace!("Received valid justification for {:?}", header);
+
+        log::trace!(
+            "Checking ancestry for headers between {:?} and {:?}",
+            last_finalized,
+            header
+        );
+        let mut finalized_headers =
+            if let Some(ancestors) = headers_between(&self.storage, last_finalized, header.clone()) {
+                // Since we only try and finalize headers with a height strictly greater
+                // than `best_finalized`, if `headers_between` returns `Some` we must have
+                // at least one element. If we don't, something's gone wrong, so best
+                // to die before we write to storage.
+                assert!(
+                    !ancestors.is_empty(),
+                    "Empty ancestry list returned from `headers_between()`",
+                );
+
+                // Check if any of our ancestors `requires_justification`, i.e. whether they
+                // schedule authority set changes. If they're still waiting to be finalized we
+                // must reject this justification. We don't include our current header in this
+                // check.
+                //
+                // We do this because it is important to import justifications _in order_,
+                // otherwise we risk finalizing headers on competing chains.
+                let requires_justification = ancestors.iter().skip(1).find(|h| h.requires_justification);
+                if requires_justification.is_some() {
+                    return Err(FinalizationError::PrematureJustification);
+                }
+
+                ancestors
+            } else {
+                return Err(FinalizationError::AncestryCheckFailed);
+            };
+
+        // If the current header was marked as `requires_justification` it means that it enacts a
+        // new authority set change. When we finalize the header we need to update the current
+        // authority set.
+        if header.requires_justification {
+            const SIGNAL_HASH_PROOF: &str = "When we import a header we only mark it as
+                `requires_justification` if we have checked that it contains a signal hash.
Therefore + this must always be valid."; + + const ENACT_SET_PROOF: &str = + "Headers must only be marked as `requires_justification` if there's a scheduled change in storage."; + + // If we are unable to enact an authority set it means our storage entry for scheduled + // changes is missing. Best to crash since this is likely a bug. + let _ = self + .storage + .enact_authority_set(header.signal_hash.expect(SIGNAL_HASH_PROOF)) + .expect(ENACT_SET_PROOF); + } + + for header in finalized_headers.iter_mut() { + header.is_finalized = true; + header.requires_justification = false; + header.signal_hash = None; + self.storage.write_header(header); + } + + self.storage.update_best_finalized(hash); + + Ok(()) + } +} + +/// Returns the lineage of headers between [ child, ancestor ) +fn headers_between( + storage: &S, + ancestor: ImportedHeader, + child: ImportedHeader, +) -> Option>> +where + S: BridgeStorage
,
+    H: HeaderT,
+{
+    let mut ancestors = vec![];
+    let mut current_header = child;
+
+    while ancestor.hash() != current_header.hash() {
+        // We've gotten to the same height and we're not related
+        if ancestor.number() >= current_header.number() {
+            return None;
+        }
+
+        let parent = storage.header_by_hash(*current_header.parent_hash());
+        ancestors.push(current_header);
+        current_header = match parent {
+            Some(h) => h,
+            None => return None,
+        }
+    }
+
+    Some(ancestors)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::mock::*;
+    use crate::{BestFinalized, BestHeight, HeaderId, ImportedHeaders, PalletStorage};
+    use bp_test_utils::{alice, authority_list, bob, make_justification_for_header};
+    use codec::Encode;
+    use frame_support::{assert_err, assert_ok};
+    use frame_support::{StorageMap, StorageValue};
+    use sp_finality_grandpa::{AuthorityId, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
+    use sp_runtime::{Digest, DigestItem};
+
+    fn schedule_next_change(
+        authorities: Vec<AuthorityId>,
+        set_id: SetId,
+        height: TestNumber,
+    ) -> ScheduledChange<TestNumber> {
+        let authorities = authorities.into_iter().map(|id| (id, 1u64)).collect();
+        let authority_set = AuthoritySet::new(authorities, set_id);
+        ScheduledChange { authority_set, height }
+    }
+
+    // Useful for quickly writing a chain of headers to storage.
+    // Input is expected in the form: vec![(num, requires_justification, is_finalized)]
+    fn write_headers<S: BridgeStorage<Header = TestHeader>>(
+        storage: &mut S,
+        headers: Vec<(u64, bool, bool)>,
+    ) -> Vec<ImportedHeader<TestHeader>> {
+        let mut imported_headers = vec![];
+        let genesis = ImportedHeader {
+            header: test_header(0),
+            requires_justification: false,
+            is_finalized: true,
+            signal_hash: None,
+        };
+
+        <BestFinalized<TestRuntime>>::put(genesis.hash());
+        storage.write_header(&genesis);
+        imported_headers.push(genesis);
+
+        for (num, requires_justification, is_finalized) in headers {
+            let header = ImportedHeader {
+                header: test_header(num),
+                requires_justification,
+                is_finalized,
+                signal_hash: None,
+            };
+
+            storage.write_header(&header);
+            imported_headers.push(header);
+        }
+
+        imported_headers
+    }
+
+    // Given a list of block numbers, generates a chain of headers which don't require
+    // justification and are not considered to be finalized.
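+    // For example, `write_default_headers(&mut storage, vec![1, 2, 3])` writes the test genesis
+    // header plus headers 1, 2 and 3, none of which require a justification or count as final.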
+    fn write_default_headers<S: BridgeStorage<Header = TestHeader>>(
+        storage: &mut S,
+        headers: Vec<u64>,
+    ) -> Vec<ImportedHeader<TestHeader>> {
+        let headers = headers.iter().map(|num| (*num, false, false)).collect();
+        write_headers(storage, headers)
+    }
+
+    #[test]
+    fn fails_to_import_old_header() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+            let parent = unfinalized_header(5);
+            storage.write_header(&parent);
+            storage.update_best_finalized(parent.hash());
+
+            let header = test_header(1);
+            let mut verifier = Verifier { storage };
+            assert_err!(verifier.import_header(header.hash(), header), ImportError::OldHeader);
+        })
+    }
+
+    #[test]
+    fn fails_to_import_header_without_parent() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+            let parent = unfinalized_header(1);
+            storage.write_header(&parent);
+            storage.update_best_finalized(parent.hash());
+
+            // By default the parent is `0x00`
+            let header = TestHeader::new_from_number(2);
+
+            let mut verifier = Verifier { storage };
+            assert_err!(
+                verifier.import_header(header.hash(), header),
+                ImportError::MissingParent
+            );
+        })
+    }
+
+    #[test]
+    fn fails_to_import_header_twice() {
+        run_test(|| {
+            let storage = PalletStorage::<TestRuntime>::new();
+            let header = test_header(1);
+            <BestFinalized<TestRuntime>>::put(header.hash());
+
+            let imported_header = ImportedHeader {
+                header: header.clone(),
+                requires_justification: false,
+                is_finalized: false,
+                signal_hash: None,
+            };
+            <ImportedHeaders<TestRuntime>>::insert(header.hash(), &imported_header);
+
+            let mut verifier = Verifier { storage };
+            assert_err!(verifier.import_header(header.hash(), header), ImportError::OldHeader);
+        })
+    }
+
+    #[test]
+    fn successfully_imports_valid_but_unfinalized_header() {
+        run_test(|| {
+            let storage = PalletStorage::<TestRuntime>::new();
+            let parent = test_header(1);
+            let parent_hash = parent.hash();
+            <BestFinalized<TestRuntime>>::put(parent.hash());
+
+            let imported_header = ImportedHeader {
+                header: parent,
+                requires_justification: false,
+                is_finalized: true,
+                signal_hash: None,
+            };
+            <ImportedHeaders<TestRuntime>>::insert(parent_hash, &imported_header);
+
+            let header = test_header(2);
+            let mut verifier = Verifier {
+                storage: storage.clone(),
+            };
+            assert_ok!(verifier.import_header(header.hash(), header.clone()));
+
+            let stored_header = storage
+                .header_by_hash(header.hash())
+                .expect("Should have been imported successfully");
+            assert_eq!(stored_header.is_finalized, false);
+            assert_eq!(stored_header.hash(), storage.best_headers()[0].hash);
+        })
+    }
+
+    #[test]
+    fn successfully_imports_two_different_headers_at_same_height() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+
+            // We want to write the genesis header to storage
+            let _ = write_headers(&mut storage, vec![]);
+
+            // Both of these headers have the genesis header as their parent
+            let header_on_fork1 = test_header(1);
+            let mut header_on_fork2 = test_header(1);
+
+            // We need to change _something_ to make it a different header
+            header_on_fork2.state_root = [1; 32].into();
+
+            let mut verifier = Verifier {
+                storage: storage.clone(),
+            };
+
+            // It should be fine to import both
+            assert_ok!(verifier.import_header(header_on_fork1.hash(), header_on_fork1.clone()));
+            assert_ok!(verifier.import_header(header_on_fork2.hash(), header_on_fork2.clone()));
+
+            // We should have two headers marked as being the best since they're
+            // both at the same height
+            let best_headers = storage.best_headers();
+            assert_eq!(best_headers.len(), 2);
+            assert_eq!(
+                best_headers[0],
+                HeaderId {
+                    number: *header_on_fork1.number(),
+                    hash: header_on_fork1.hash()
+                }
+            );
+            assert_eq!(
+                best_headers[1],
+                HeaderId {
+                    number: *header_on_fork2.number(),
+                    hash: header_on_fork2.hash()
+                }
+            );
+            assert_eq!(<BestHeight<TestRuntime>>::get(), 1);
+        })
+    }
+
+    #[test]
+    fn correctly_updates_the_best_header_given_a_better_header() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+
+            // We want to write the genesis header to storage
+            let _ = write_headers(&mut storage, vec![]);
+
+            // Write two headers at the same height to storage.
+            let best_header = test_header(1);
+            let mut also_best_header = test_header(1);
+
+            // We need to change _something_ to make it a different header
+            also_best_header.state_root = [1; 32].into();
+
+            let mut verifier = Verifier {
+                storage: storage.clone(),
+            };
+
+            // It should be fine to import both
+            assert_ok!(verifier.import_header(best_header.hash(), best_header.clone()));
+            assert_ok!(verifier.import_header(also_best_header.hash(), also_best_header));
+
+            // The headers we manually imported should have been marked as the best
+            // upon writing to storage. Let's confirm that.
+            assert_eq!(storage.best_headers().len(), 2);
+            assert_eq!(<BestHeight<TestRuntime>>::get(), 1);
+
+            // Now let's build something at a better height.
+            let mut better_header = test_header(2);
+            better_header.parent_hash = best_header.hash();
+
+            assert_ok!(verifier.import_header(better_header.hash(), better_header.clone()));
+
+            // Since `better_header` is the only one at height = 2 we should only have
+            // a single "best header" now.
+            let best_headers = storage.best_headers();
+            assert_eq!(best_headers.len(), 1);
+            assert_eq!(
+                best_headers[0],
+                HeaderId {
+                    number: *better_header.number(),
+                    hash: better_header.hash()
+                }
+            );
+            assert_eq!(<BestHeight<TestRuntime>>::get(), 2);
+        })
+    }
+
+    #[test]
+    fn doesnt_write_best_header_twice_upon_finalization() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+            let _imported_headers = write_default_headers(&mut storage, vec![1]);
+
+            let set_id = 1;
+            let authorities = authority_list();
+            let initial_authority_set = AuthoritySet::new(authorities.clone(), set_id);
+            storage.update_current_authority_set(initial_authority_set);
+
+            // Let's import our header
+            let header = test_header(2);
+            let mut verifier = Verifier {
+                storage: storage.clone(),
+            };
+            assert_ok!(verifier.import_header(header.hash(), header.clone()));
+
+            // Our header should be the only best header we have
+            assert_eq!(storage.best_headers()[0].hash, header.hash());
+            assert_eq!(storage.best_headers().len(), 1);
+
+            // Now let's finalize our best header
+            let grandpa_round = 1;
+            let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode();
+            assert_ok!(verifier.import_finality_proof(header.hash(), justification.into()));
+
+            // Our best header should only appear once in the list of best headers
+            assert_eq!(storage.best_headers()[0].hash, header.hash());
+            assert_eq!(storage.best_headers().len(), 1);
+        })
+    }
+
+    #[test]
+    fn related_headers_are_ancestors() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+            let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]);
+
+            for header in imported_headers.iter() {
+                assert!(storage.header_exists(header.hash()));
+            }
+
+            let ancestor = imported_headers.remove(0);
+            let child = imported_headers.pop().unwrap();
+            let ancestors = headers_between(&storage, ancestor, child);
+
+            assert!(ancestors.is_some());
+            assert_eq!(ancestors.unwrap().len(), 3);
+        })
+    }
+
+    #[test]
+    fn unrelated_headers_are_not_ancestors() {
+        run_test(|| {
+            let mut storage = PalletStorage::<TestRuntime>::new();
+
+            let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]);
+ for header in imported_headers.iter() { + assert!(storage.header_exists(header.hash())); + } + + // Need to give it a different parent_hash or else it'll be + // related to our test genesis header + let mut bad_ancestor = test_header(0); + bad_ancestor.parent_hash = [1u8; 32].into(); + let bad_ancestor = ImportedHeader { + header: bad_ancestor, + requires_justification: false, + is_finalized: false, + signal_hash: None, + }; + + let child = imported_headers.pop().unwrap(); + let ancestors = headers_between(&storage, bad_ancestor, child); + assert!(ancestors.is_none()); + }) + } + + #[test] + fn ancestor_newer_than_child_is_not_related() { + run_test(|| { + let mut storage = PalletStorage::::new(); + + let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); + for header in imported_headers.iter() { + assert!(storage.header_exists(header.hash())); + } + + // What if we have an "ancestor" that's newer than child? + let new_ancestor = test_header(5); + let new_ancestor = ImportedHeader { + header: new_ancestor, + requires_justification: false, + is_finalized: false, + signal_hash: None, + }; + + let child = imported_headers.pop().unwrap(); + let ancestors = headers_between(&storage, new_ancestor, child); + assert!(ancestors.is_none()); + }) + } + + #[test] + fn doesnt_import_header_which_schedules_change_with_invalid_authority_set() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let _imported_headers = write_default_headers(&mut storage, vec![1]); + let mut header = test_header(2); + + // This is an *invalid* authority set because the combined weight of the + // authorities is greater than `u64::MAX` + let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { + next_authorities: vec![(alice(), u64::MAX), (bob(), u64::MAX)], + delay: 0, + }); + + header.digest = Digest:: { + logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], + }; + + let mut verifier = Verifier { storage }; + + assert_eq!( + verifier.import_header(header.hash(), header).unwrap_err(), + ImportError::InvalidAuthoritySet + ); + }) + } + + #[test] + fn finalizes_header_which_doesnt_enact_or_schedule_a_new_authority_set() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let _imported_headers = write_default_headers(&mut storage, vec![1]); + + // Nothing special about this header, yet GRANDPA may have created a justification + // for it since it does that periodically + let header = test_header(2); + + let set_id = 1; + let authorities = authority_list(); + let authority_set = AuthoritySet::new(authorities.clone(), set_id); + storage.update_current_authority_set(authority_set); + + // We'll need this justification to finalize the header + let grandpa_round = 1; + let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); + + let mut verifier = Verifier { + storage: storage.clone(), + }; + + assert_ok!(verifier.import_header(header.hash(), header.clone())); + assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); + assert_eq!(storage.best_finalized_header().header, header); + }) + } + + #[test] + fn correctly_verifies_and_finalizes_chain_of_headers() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let imported_headers = write_default_headers(&mut storage, vec![1, 2]); + let header = test_header(3); + + let set_id = 1; + let authorities = authority_list(); + let authority_set = AuthoritySet { + authorities: authorities.clone(), + 
set_id, + }; + storage.update_current_authority_set(authority_set); + + let grandpa_round = 1; + let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); + + let mut verifier = Verifier { + storage: storage.clone(), + }; + assert!(verifier.import_header(header.hash(), header.clone()).is_ok()); + assert!(verifier + .import_finality_proof(header.hash(), justification.into()) + .is_ok()); + + // Make sure we marked our headers as finalized + assert!(storage.header_by_hash(imported_headers[1].hash()).unwrap().is_finalized); + assert!(storage.header_by_hash(imported_headers[2].hash()).unwrap().is_finalized); + assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized); + + // Make sure the header at the highest height is the best finalized + assert_eq!(storage.best_finalized_header().header, header); + }); + } + + #[test] + fn updates_authority_set_upon_finalizing_header_which_enacts_change() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let genesis_hash = write_headers(&mut storage, vec![])[0].hash(); + + // We want this header to indicate that there's an upcoming set change on this fork + let parent = ImportedHeader { + header: test_header(1), + requires_justification: false, + is_finalized: false, + signal_hash: Some(genesis_hash), + }; + storage.write_header(&parent); + + let set_id = 1; + let authorities = authority_list(); + let initial_authority_set = AuthoritySet::new(authorities.clone(), set_id); + storage.update_current_authority_set(initial_authority_set); + + // This header enacts an authority set change upon finalization + let header = test_header(2); + + let grandpa_round = 1; + let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); + + // Schedule a change at the height of our header + let set_id = 2; + let height = *header.number(); + let authorities = vec![alice()]; + let change = schedule_next_change(authorities, set_id, height); + storage.schedule_next_set_change(genesis_hash, change.clone()); + + let mut verifier = Verifier { + storage: storage.clone(), + }; + + assert_ok!(verifier.import_header(header.hash(), header.clone())); + assert_eq!(storage.missing_justifications().len(), 1); + assert_eq!(storage.missing_justifications()[0].hash, header.hash()); + + assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); + assert_eq!(storage.best_finalized_header().header, header); + + // Make sure that we have updated the set now that we've finalized our header + assert_eq!(storage.current_authority_set(), change.authority_set); + assert!(storage.missing_justifications().is_empty()); + }) + } + + #[test] + fn importing_finality_proof_for_already_finalized_header_doesnt_work() { + run_test(|| { + let mut storage = PalletStorage::::new(); + let genesis = test_header(0); + + let genesis = ImportedHeader { + header: genesis, + requires_justification: false, + is_finalized: true, + signal_hash: None, + }; + + // Make sure that genesis is the best finalized header + >::put(genesis.hash()); + storage.write_header(&genesis); + + let mut verifier = Verifier { storage }; + + // Now we want to try and import it again to see what happens + assert_eq!( + verifier + .import_finality_proof(genesis.hash(), vec![4, 2].into()) + .unwrap_err(), + FinalizationError::OldHeader + ); + }); + } +} diff --git a/primitives/chain-kusama/src/lib.rs b/primitives/chain-kusama/src/lib.rs index 7163d15ef13..b221aff049d 100644 ---
a/primitives/chain-kusama/src/lib.rs +++ b/primitives/chain-kusama/src/lib.rs @@ -79,7 +79,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Kusama from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -109,7 +109,7 @@ sp_api::decl_runtime_apis! { pub trait FromKusamaInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/primitives/chain-millau/src/lib.rs b/primitives/chain-millau/src/lib.rs index 22f09cb5b09..5d3b1fd182e 100644 --- a/primitives/chain-millau/src/lib.rs +++ b/primitives/chain-millau/src/lib.rs @@ -123,7 +123,7 @@ pub type BlockNumber = u64; /// Hash type used in Millau. pub type Hash = ::Out; -/// The type of an object that can produce hashes on Millau. +/// Type of object that can produce hashes on Millau. pub type Hasher = BlakeTwoAndKeccak256; /// The header type used by Millau. @@ -305,7 +305,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Millau from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -335,7 +335,7 @@ sp_api::decl_runtime_apis! { pub trait FromMillauInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/primitives/chain-polkadot/src/lib.rs b/primitives/chain-polkadot/src/lib.rs index 8e0d30cdb60..8398b3d5273 100644 --- a/primitives/chain-polkadot/src/lib.rs +++ b/primitives/chain-polkadot/src/lib.rs @@ -79,7 +79,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Polkadot from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -109,7 +109,7 @@ sp_api::decl_runtime_apis! 
{ pub trait FromPolkadotInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/primitives/chain-rialto/src/lib.rs b/primitives/chain-rialto/src/lib.rs index c10f31bae33..f1e2209f4db 100644 --- a/primitives/chain-rialto/src/lib.rs +++ b/primitives/chain-rialto/src/lib.rs @@ -114,7 +114,7 @@ pub type BlockNumber = u32; /// Hash type used in Rialto. pub type Hash = ::Out; -/// The type of an object that can produce hashes on Rialto. +/// Type of object that can produce hashes on Rialto. pub type Hasher = BlakeTwo256; /// The header type used by Rialto. @@ -266,7 +266,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Rialto from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -296,7 +296,7 @@ sp_api::decl_runtime_apis! { pub trait FromRialtoInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/primitives/chain-rococo/src/lib.rs b/primitives/chain-rococo/src/lib.rs index b79fdf6cfcf..21b40c595eb 100644 --- a/primitives/chain-rococo/src/lib.rs +++ b/primitives/chain-rococo/src/lib.rs @@ -134,7 +134,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Rococo from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -164,7 +164,7 @@ sp_api::decl_runtime_apis! { pub trait FromRococoInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. 
fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/primitives/chain-westend/src/lib.rs b/primitives/chain-westend/src/lib.rs index db97364ef41..08ca9c28c8c 100644 --- a/primitives/chain-westend/src/lib.rs +++ b/primitives/chain-westend/src/lib.rs @@ -141,7 +141,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Westend from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -171,7 +171,7 @@ sp_api::decl_runtime_apis! { pub trait FromWestendInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/primitives/currency-exchange/src/lib.rs b/primitives/currency-exchange/src/lib.rs index 88695dbb5ef..c85a9d4ff7d 100644 --- a/primitives/currency-exchange/src/lib.rs +++ b/primitives/currency-exchange/src/lib.rs @@ -36,7 +36,7 @@ pub enum Error { InvalidRecipient, /// Cannot map from peer recipient to this blockchain recipient. FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockhain currency. + /// Failed to convert from peer blockchain currency to this blockchain currency. FailedToConvertCurrency, /// Deposit has failed. DepositFailed, diff --git a/primitives/ethereum-poa/src/lib.rs b/primitives/ethereum-poa/src/lib.rs index 57c539f2e27..f4b07eeae72 100644 --- a/primitives/ethereum-poa/src/lib.rs +++ b/primitives/ethereum-poa/src/lib.rs @@ -323,7 +323,7 @@ impl UnsignedTransaction { stream.out().to_vec() } - /// Encode to given rlp stream. + /// Encode to given RLP stream. pub fn rlp_to(&self, chain_id: Option, stream: &mut RlpStream) { stream.append(&self.nonce); stream.append(&self.gas_price); @@ -405,7 +405,7 @@ impl SealedEmptyStep { keccak_256(&message.out()).into() } - /// Returns rlp for the vector of empty steps (we only do encoding in tests). + /// Returns RLP for the vector of empty steps (we only do encoding in tests). pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes { let mut s = RlpStream::new(); s.begin_list(empty_steps.len()); diff --git a/primitives/header-chain/src/justification.rs b/primitives/header-chain/src/justification.rs index 139b4303243..a8eeed35692 100644 --- a/primitives/header-chain/src/justification.rs +++ b/primitives/header-chain/src/justification.rs @@ -36,7 +36,7 @@ pub enum Error { InvalidJustificationTarget, /// Invalid commit in justification. InvalidJustificationCommit, - /// Justification has invalid authority singature. + /// Justification has invalid authority signature. InvalidAuthoritySignature, /// The justification has precommit for the header that has no route from the target header. 
InvalidPrecommitAncestryProof, diff --git a/primitives/messages/src/lib.rs b/primitives/messages/src/lib.rs index c3ffce8baa5..059af440704 100644 --- a/primitives/messages/src/lib.rs +++ b/primitives/messages/src/lib.rs @@ -83,7 +83,7 @@ pub struct InboundLaneData { /// Identifiers of relayers and messages that they have delivered to this lane (ordered by message nonce). /// /// This serves as a helper storage item, to allow the source chain to easily pay rewards - /// to the relayers who succesfuly delivered messages to the target chain (inbound lane). + /// to the relayers who successfully delivered messages to the target chain (inbound lane). /// /// It is guaranteed to have at most N entries, where N is configured at the module level. /// If there are N entries in this vec, then: @@ -119,7 +119,7 @@ impl Default for InboundLaneData { } impl InboundLaneData { - /// Returns approximate size of the struct, given number of entries in the `relayers` set and + /// Returns approximate size of the struct, given the number of entries in the `relayers` set and /// size of each entry. /// /// Returns `None` if size overflows `u32` limits. @@ -154,12 +154,12 @@ pub struct UnrewardedRelayersState { /// Outbound lane data. #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] pub struct OutboundLaneData { - /// Nonce of oldest message that we haven't yet pruned. May point to not-yet-generated message if + /// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated message if /// all sent messages are already pruned. pub oldest_unpruned_nonce: MessageNonce, - /// Nonce of latest message, received by bridged chain. + /// Nonce of the latest message, received by bridged chain. pub latest_received_nonce: MessageNonce, - /// Nonce of latest message, generated by us. + /// Nonce of the latest message, generated by us. pub latest_generated_nonce: MessageNonce, } diff --git a/primitives/messages/src/source_chain.rs b/primitives/messages/src/source_chain.rs index 1d313634bcb..f687dc77bc8 100644 --- a/primitives/messages/src/source_chain.rs +++ b/primitives/messages/src/source_chain.rs @@ -58,12 +58,12 @@ pub trait TargetHeaderChain { /// payload would (at least) be accepted into target chain transaction pool AND /// eventually will be successfully 'mined'. The most obvious incorrect implementation /// example would be implementation for BTC chain that accepts payloads larger than - /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer + /// 1 MB. BTC nodes aren't accepting transactions that are larger than 1 MB, so relayer /// will be unable to craft valid transaction => this (and all subsequent) messages will /// never be delivered. fn verify_message(payload: &Payload) -> Result<(), Self::Error>; - /// Verify messages delivery proof and return lane && nonce of the latest recevied message. + /// Verify messages delivery proof and return lane && nonce of the latest received message. fn verify_messages_delivery_proof( proof: Self::MessagesDeliveryProof, ) -> Result<(LaneId, InboundLaneData), Self::Error>; @@ -102,7 +102,7 @@ pub trait LaneMessageVerifier { /// by relayer. /// /// So to be sure that any non-altruist relayer would agree to deliver message, submitter -/// should set `delivery_and_dispatch_fee` to at least (equialent of): sum of fees from (2) +/// should set `delivery_and_dispatch_fee` to at least (equivalent of): sum of fees from (2) /// to (4) above, plus some interest for the relayer.
pub trait MessageDeliveryAndDispatchPayment { /// Error type. diff --git a/primitives/messages/src/target_chain.rs b/primitives/messages/src/target_chain.rs index 676e919bc61..278322317f0 100644 --- a/primitives/messages/src/target_chain.rs +++ b/primitives/messages/src/target_chain.rs @@ -75,7 +75,7 @@ pub trait SourceHeaderChain { /// messages will be rejected. /// /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly + /// outside this function. This function only verifies that the proof declares exactly /// `messages_count` messages. fn verify_messages_proof( proof: Self::MessagesProof, diff --git a/primitives/polkadot-core/src/lib.rs b/primitives/polkadot-core/src/lib.rs index c9858c0820d..74252ea7f74 100644 --- a/primitives/polkadot-core/src/lib.rs +++ b/primitives/polkadot-core/src/lib.rs @@ -71,7 +71,7 @@ pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// All Polkadot-like chains allow 2 seconds of compute with a 6 second average block time. +/// All Polkadot-like chains allow 2 seconds of compute with a 6-second average block time. /// /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; @@ -83,7 +83,7 @@ pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); parameter_types! { - /// All Polkadot-like chains have maximal block size set to 5MB. + /// All Polkadot-like chains have maximal block size set to 5 MB. /// /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( @@ -165,7 +165,7 @@ pub type Index = u32; /// Hashing type. pub type Hashing = BlakeTwo256; -/// The type of an object that can produce hashes on Polkadot-like chains. +/// The type of object that can produce hashes on Polkadot-like chains. pub type Hasher = BlakeTwo256; /// The header type used by Polkadot-like chains. diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e7f990d2830..dbb3dd9d889 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -70,9 +70,9 @@ pub type InstanceId = [u8; 4]; /// Type of accounts on the source chain. pub enum SourceAccount { - /// An account that belongs to Root (priviledged origin). + /// An account that belongs to Root (privileged origin). Root, - /// A non-priviledged account. + /// A non-privileged account. /// /// The embedded account ID may or may not have a private key depending on the "owner" of the /// account (private key, pallet, proxy, etc.). @@ -82,7 +82,7 @@ pub enum SourceAccount { /// Derive an account ID from a foreign account ID. /// /// This function returns an encoded Blake2 hash. It is the responsibility of the caller to ensure -/// this can be succesfully decoded into an AccountId. +/// this can be successfully decoded into an AccountId. /// /// The `bridge_id` is used to provide extra entropy when producing account IDs. This helps prevent /// AccountId collisions between different bridges on a single target chain. 
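For illustration, here is a minimal, self-contained sketch of the account derivation described in the hunk above, assuming SCALE `Encode` and `blake2_256` from `sp_io`; the `b"root"` and `b"account"` prefixes are hypothetical placeholders rather than the pallet's actual derivation constants:

use codec::Encode;
use sp_io::hashing::blake2_256;

/// Bridge instance id, mirroring `InstanceId` from `primitives/runtime`.
type InstanceId = [u8; 4];

/// Mirrors the `SourceAccount` enum shown in the hunk above.
enum SourceAccount<AccountId> {
	Root,
	Account(AccountId),
}

/// Mix `bridge_id` into the Blake2 hash so that two bridges never derive the
/// same target-chain account from the same source-chain account.
fn derive_account_id<AccountId: Encode>(bridge_id: InstanceId, id: SourceAccount<AccountId>) -> [u8; 32] {
	match id {
		SourceAccount::Root => (b"root", bridge_id).using_encoded(blake2_256),
		SourceAccount::Account(id) => (b"account", bridge_id, id).using_encoded(blake2_256),
	}
}

As the doc comment above notes, the caller is still responsible for decoding the resulting 32 bytes into the target chain's AccountId type.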
diff --git a/primitives/test-utils/src/lib.rs b/primitives/test-utils/src/lib.rs index 0fcc263763c..ce84f212469 100644 --- a/primitives/test-utils/src/lib.rs +++ b/primitives/test-utils/src/lib.rs @@ -48,7 +48,7 @@ pub struct JustificationGeneratorParams { pub authorities: Vec<(Account, AuthorityWeight)>, /// The total number of vote ancestries in our justification. /// - /// These may be distributed among many different forks. + /// These may be distributed among many forks. pub votes: u32, /// The number of forks. /// diff --git a/relays/bin-ethereum/src/ethereum_client.rs b/relays/bin-ethereum/src/ethereum_client.rs index 71a3f38859b..007bef49fea 100644 --- a/relays/bin-ethereum/src/ethereum_client.rs +++ b/relays/bin-ethereum/src/ethereum_client.rs @@ -41,7 +41,7 @@ type RpcResult = std::result::Result; /// interactions involving, for example, an Ethereum contract. #[async_trait] pub trait EthereumHighLevelRpc { - /// Returns best Substrate block that PoA chain knows of. + /// Returns the best Substrate block that PoA chain knows of. async fn best_substrate_block(&self, contract_address: Address) -> RpcResult; /// Returns true if Substrate header is known to Ethereum node. diff --git a/relays/bin-ethereum/src/rialto_client.rs b/relays/bin-ethereum/src/rialto_client.rs index d9c0f265cbb..51fbd57f934 100644 --- a/relays/bin-ethereum/src/rialto_client.rs +++ b/relays/bin-ethereum/src/rialto_client.rs @@ -41,13 +41,13 @@ type RpcResult = std::result::Result; /// interactions involving, for example, an Ethereum bridge module. #[async_trait] pub trait SubstrateHighLevelRpc { - /// Returns best Ethereum block that Substrate runtime knows of. + /// Returns the best Ethereum block that Substrate runtime knows of. async fn best_ethereum_block(&self) -> RpcResult; - /// Returns best finalized Ethereum block that Substrate runtime knows of. + /// Returns the best finalized Ethereum block that Substrate runtime knows of. async fn best_ethereum_finalized_block(&self) -> RpcResult; - /// Returns whether or not transactions receipts are required for Ethereum header submission. + /// Returns whether or not transaction receipts are required for Ethereum header submission. async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult; - /// Returns whether or not the given Ethereum header is known to the Substrate runtime. + /// Returns whether or not the given Ethereum header is known to the Substrate runtime. async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult; } diff --git a/relays/bin-substrate/src/messages_lane.rs b/relays/bin-substrate/src/messages_lane.rs index 616e2253a6b..bc353340696 100644 --- a/relays/bin-substrate/src/messages_lane.rs +++ b/relays/bin-substrate/src/messages_lane.rs @@ -50,16 +50,16 @@ pub struct MessagesRelayParams { pub trait SubstrateMessageLane: MessageLane { /// Name of the runtime method that returns dispatch weight of outbound messages at the source chain. const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str; - /// Name of the runtime method that returns latest generated nonce at the source chain. + /// Name of the runtime method that returns the latest generated nonce at the source chain. const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest received (confirmed) nonce at the the source chain. + /// Name of the runtime method that returns the latest received (confirmed) nonce at the source chain.
const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest received nonce at the target chain. + /// Name of the runtime method that returns the latest received nonce at the target chain. const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest confirmed (reward-paid) nonce at the target chain. + /// Name of the runtime method that returns the latest confirmed (reward-paid) nonce at the target chain. const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str; - /// Numebr of the runtime method that returns state of "unrewarded relayers" set at the target chain. + /// Name of the runtime method that returns state of "unrewarded relayers" set at the target chain. const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str; /// Name of the runtime method that returns id of best finalized source header at target chain. @@ -203,7 +203,7 @@ mod tests { // reserved for messages dispatch allows dispatch of non-trivial messages. // // Any significant change in this values should attract additional attention. - (1013, 216_583_333_334), + (1020, 216_583_333_334), ); } } diff --git a/relays/client-ethereum/src/sign.rs b/relays/client-ethereum/src/sign.rs index 6f479ab7d5c..da1dbc4842e 100644 --- a/relays/client-ethereum/src/sign.rs +++ b/relays/client-ethereum/src/sign.rs @@ -47,7 +47,7 @@ impl Default for SigningParams { } } -/// Sign and submit tranaction using given Ethereum client. +/// Sign and submit transaction using given Ethereum client. pub async fn sign_and_submit_transaction( client: &Client, params: &SigningParams, diff --git a/relays/client-substrate/src/client.rs b/relays/client-substrate/src/client.rs index 892a63d6d5b..7d6b288a0ee 100644 --- a/relays/client-substrate/src/client.rs +++ b/relays/client-substrate/src/client.rs @@ -201,7 +201,7 @@ impl Client { /// Get the nonce of the given Substrate account. /// - /// Note: It's the caller's responsibility to make sure `account` is a valid ss58 address. + /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. pub async fn next_account_index(&self, account: C::AccountId) -> Result { Ok(Substrate::::system_account_next_index(&*self.client, account).await?) } diff --git a/relays/client-substrate/src/guard.rs b/relays/client-substrate/src/guard.rs index 68fef1c4c9c..8381d0832eb 100644 --- a/relays/client-substrate/src/guard.rs +++ b/relays/client-substrate/src/guard.rs @@ -80,7 +80,7 @@ pub fn abort_on_spec_version_change(mut env: impl Environm }); } -/// Abort if, during a 24 hours, free balance of given account is decreased at least by given value. +/// Abort if, during 24 hours, free balance of given account is decreased by at least the given value. /// Other components may increase (or decrease) balance of account and it WILL affect logic of the guard. pub fn abort_when_account_balance_decreased( mut env: impl Environment, diff --git a/relays/exchange/src/exchange.rs b/relays/exchange/src/exchange.rs index 4a2f07fa7f9..05bfa3b8cbd 100644 --- a/relays/exchange/src/exchange.rs +++ b/relays/exchange/src/exchange.rs @@ -113,7 +113,7 @@ pub trait TargetClient: RelayClient { async fn is_header_known(&self, id: &HeaderId
) -> Result; /// Returns `Ok(true)` if header is finalized by the target node. async fn is_header_finalized(&self, id: &HeaderId
) -> Result; - /// Returns best finalized header id. + /// Returns the best finalized header id. async fn best_finalized_header_id(&self) -> Result, Self::Error>; /// Returns `Ok(true)` if transaction proof is need to be relayed. async fn filter_transaction_proof(&self, proof: &P::TransactionProof) -> Result; diff --git a/relays/generic/utils/src/relay_loop.rs b/relays/generic/utils/src/relay_loop.rs new file mode 100644 index 00000000000..a5252fdc042 --- /dev/null +++ b/relays/generic/utils/src/relay_loop.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::{FailedClient, MaybeConnectionError}; + +use async_trait::async_trait; +use std::{fmt::Debug, future::Future, time::Duration}; + +/// Default pause between reconnect attempts. +pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); + +/// Basic blockchain client from relay perspective. +#[async_trait] +pub trait Client: Clone + Send + Sync { + /// Type of error this client returns. + type Error: Debug + MaybeConnectionError; + + /// Try to reconnect to source node. + async fn reconnect(&mut self) -> Result<(), Self::Error>; +} + +/// Run relay loop. +/// +/// This function represents an outer loop, which in turn calls provided `loop_run` function to do +/// actual job. When `loop_run` returns, this outer loop reconnects to failed client (source, +/// target or both) and calls `loop_run` again. +pub async fn run( + reconnect_delay: Duration, + mut source_client: SC, + mut target_client: TC, + loop_run: R, +) where + R: Fn(SC, TC) -> F, + F: Future>, +{ + loop { + let result = loop_run(source_client.clone(), target_client.clone()).await; + + match result { + Ok(()) => break, + Err(failed_client) => loop { + async_std::task::sleep(reconnect_delay).await; + if failed_client == FailedClient::Both || failed_client == FailedClient::Source { + match source_client.reconnect().await { + Ok(()) => (), + Err(error) => { + log::warn!( + target: "bridge", + "Failed to reconnect to source client. Going to retry in {}s: {:?}", + reconnect_delay.as_secs(), + error, + ); + continue; + } + } + } + if failed_client == FailedClient::Both || failed_client == FailedClient::Target { + match target_client.reconnect().await { + Ok(()) => (), + Err(error) => { + log::warn!( + target: "bridge", + "Failed to reconnect to target client.
Going to retry in {}s: {:?}", + reconnect_delay.as_secs(), + error, + ); + continue; + } + } + } + + break; + }, + } + + log::debug!(target: "bridge", "Restarting relay loop"); + } +} diff --git a/relays/headers/src/headers.rs b/relays/headers/src/headers.rs index be3e2cb6e6d..b5830c23eb6 100644 --- a/relays/headers/src/headers.rs +++ b/relays/headers/src/headers.rs @@ -65,7 +65,7 @@ pub struct QueuedHeaders { /// Headers that are (we believe) currently submitted to target node by our, /// not-yet mined transactions. submitted: HeadersQueue
, - /// Synced headers childrens. We need it to support case when header is synced, but some of + /// Synced headers children. We need it to support the case when a header is synced, but some of /// its parents are incomplete. synced_children: SyncedChildren
, /// Pointers to all headers that we ever seen and we believe we can touch in the future. @@ -191,7 +191,7 @@ impl QueuedHeaders
{ .unwrap_or(HeaderStatus::Unknown) } - /// Get oldest header from given queue. + /// Get the oldest header from given queue. pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader
> { match status { HeaderStatus::Unknown | HeaderStatus::Synced => None, @@ -205,7 +205,7 @@ impl QueuedHeaders
{ } } - /// Get oldest headers from given queue until functor will return false. + /// Get the oldest headers from given queue until the functor returns false. pub fn headers( &self, status: HeaderStatus, @@ -282,7 +282,7 @@ impl QueuedHeaders
{ ); } - /// Receive best header from the target node. + /// Receive the best header from the target node. pub fn target_best_header_response(&mut self, id: &HeaderIdOf
) { self.header_synced(id) } @@ -453,7 +453,7 @@ impl QueuedHeaders
{ } } - /// When incomplete headers ids are receved from target node. + /// When incomplete header ids are received from target node. pub fn incomplete_headers_response(&mut self, ids: HashSet>) { // all new incomplete headers are marked Synced and all their descendants // are moved from Ready/Submitted to Incomplete queue diff --git a/relays/headers/src/sync.rs b/relays/headers/src/sync.rs index e992b1f8e58..39ab67ef5c0 100644 --- a/relays/headers/src/sync.rs +++ b/relays/headers/src/sync.rs @@ -35,7 +35,7 @@ pub struct HeadersSyncParams { /// Maximal total headers size in single submit request. pub max_headers_size_in_single_submit: usize, /// We only may store and accept (from Ethereum node) headers that have - /// number >= than best_substrate_header.number - prune_depth. + /// number >= `best_substrate_header.number - prune_depth`. pub prune_depth: u32, /// Target transactions mode. pub target_tx_mode: TargetTransactionMode, @@ -58,9 +58,9 @@ pub enum TargetTransactionMode { pub struct HeadersSync { /// Synchronization parameters. params: HeadersSyncParams, - /// Best header number known to source node. + /// The best header number known to source node. source_best_number: Option, - /// Best header known to target node. + /// The best header known to target node. target_best_header: Option>, /// Headers queue. headers: QueuedHeaders
, @@ -80,12 +80,12 @@ impl HeadersSync
{ } } - /// Return best header number known to source node. + /// Return the best header number known to source node. pub fn source_best_number(&self) -> Option { self.source_best_number } - /// Best header known to target node. + /// The best header known to target node. pub fn target_best_header(&self) -> Option> { self.target_best_header } @@ -150,7 +150,7 @@ impl HeadersSync
{ Some(best_downloaded_number + One::one()) } - /// Selech orphan header to downoload. + /// Select orphan header to download. pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader
> { let orphan_header = self.headers.header(HeaderStatus::Orphan)?; diff --git a/relays/headers/src/sync_loop.rs b/relays/headers/src/sync_loop.rs index b2049320565..320c18cd574 100644 --- a/relays/headers/src/sync_loop.rs +++ b/relays/headers/src/sync_loop.rs @@ -80,7 +80,7 @@ pub trait SourceClient: RelayClient { /// Target client trait. #[async_trait] pub trait TargetClient: RelayClient { - /// Returns ID of best header known to the target node. + /// Returns ID of the best header known to the target node. async fn best_header_id(&self) -> Result, Self::Error>; /// Returns true if header is known to the target node. diff --git a/relays/messages/src/lib.rs b/relays/messages/src/lib.rs index cdd94bca954..861091ab205 100644 --- a/relays/messages/src/lib.rs +++ b/relays/messages/src/lib.rs @@ -18,7 +18,7 @@ //! data. Message lane allows sending arbitrary messages between bridged chains. This //! module provides entrypoint that starts reading messages from given message lane //! of source chain and submits proof-of-message-at-source-chain transactions to the -//! target chain. Additionaly, proofs-of-messages-delivery are sent back from the +//! target chain. Additionally, proofs-of-messages-delivery are sent back from the //! target chain to the source chain. // required for futures::select! diff --git a/relays/messages/src/message_lane_loop.rs b/relays/messages/src/message_lane_loop.rs index af04bf984e1..3780732b30a 100644 --- a/relays/messages/src/message_lane_loop.rs +++ b/relays/messages/src/message_lane_loop.rs @@ -104,7 +104,7 @@ pub trait SourceClient: RelayClient { /// Returns state of the client. async fn state(&self) -> Result, Self::Error>; - /// Get nonce of instance of latest generated message. + /// Get nonce of instance of the latest generated message. async fn latest_generated_nonce( &self, id: SourceHeaderIdOf
, @@ -150,13 +150,13 @@ pub trait TargetClient: RelayClient { /// Returns state of the client. async fn state(&self) -> Result, Self::Error>; - /// Get nonce of latest received message. + /// Get nonce of the latest received message. async fn latest_received_nonce( &self, id: TargetHeaderIdOf
, ) -> Result<(TargetHeaderIdOf
, MessageNonce), Self::Error>; - /// Get nonce of latest confirmed message. + /// Get nonce of the latest confirmed message. async fn latest_confirmed_received_nonce( &self, id: TargetHeaderIdOf
, @@ -188,7 +188,7 @@ pub trait TargetClient: RelayClient { /// State of the client. #[derive(Clone, Debug, Default, PartialEq)] pub struct ClientState { - /// Best header id of this chain. + /// The best header id of this chain. pub best_self: SelfHeaderId, /// Best finalized header id of this chain. pub best_finalized_self: SelfHeaderId, diff --git a/relays/messages/src/message_race_delivery.rs b/relays/messages/src/message_race_delivery.rs index b50b0ffe31b..55525f01dc3 100644 --- a/relays/messages/src/message_race_delivery.rs +++ b/relays/messages/src/message_race_delivery.rs @@ -11,7 +11,7 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -//! Message delivery race delivers proof-of-messages from lane.source to lane.target. +//! Message delivery race delivers proof-of-messages from `lane.source` to `lane.target`. use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; use crate::message_lane_loop::{ @@ -213,7 +213,7 @@ where /// Additional nonces data from the target client used by message delivery race. #[derive(Debug, Clone)] struct DeliveryRaceTargetNoncesData { - /// Latest nonce that we know: (1) has been delivered to us (2) has been confirmed + /// The latest nonce that we know: (1) has been delivered to us (2) has been confirmed /// back to the source node (by confirmations race) and (3) relayer has received /// reward for (and this has been confirmed by the message delivery race). confirmed_nonce: MessageNonce, @@ -233,7 +233,7 @@ struct MessageDeliveryStrategy { max_messages_weight_in_single_batch: Weight, /// Maximal messages size in the single delivery transaction. max_messages_size_in_single_batch: usize, - /// Latest confirmed nonces at the source client + the header id where we have first met this nonce. + /// The latest confirmed nonces at the source client + the header id where we have first met this nonce. latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf
, MessageNonce)>, /// Target nonces from the source client. target_nonces: Option>, diff --git a/relays/messages/src/message_race_loop.rs b/relays/messages/src/message_race_loop.rs index 41f5ede1033..5a0a5dd8bd3 100644 --- a/relays/messages/src/message_race_loop.rs +++ b/relays/messages/src/message_race_loop.rs @@ -76,7 +76,7 @@ pub struct SourceClientNonces { /// New nonces range known to the client. `New` here means all nonces generated after /// `prev_latest_nonce` passed to the `SourceClient::nonces` method. pub new_nonces: NoncesRange, - /// Latest nonce that is confirmed to the bridged client. This nonce only makes + /// The latest nonce that is confirmed to the bridged client. This nonce only makes /// sense in some races. In other races it is `None`. pub confirmed_nonce: Option, } @@ -84,7 +84,7 @@ pub struct SourceClientNonces { /// Nonces on the race target client. #[derive(Debug, Clone)] pub struct TargetClientNonces { - /// Latest nonce that is known to the target client. + /// The latest nonce that is known to the target client. pub latest_nonce: MessageNonce, /// Additional data from target node that may be used by the race. pub nonces_data: TargetNoncesData, @@ -93,7 +93,7 @@ pub struct TargetClientNonces { /// One of message lane clients, which is source client for the race. #[async_trait] pub trait SourceClient { - /// Type of error this clients returns. + /// Type of error this client returns. type Error: std::fmt::Debug + MaybeConnectionError; /// Type of nonces range returned by the source client. type NoncesRange: NoncesRange; @@ -118,7 +118,7 @@ pub trait SourceClient { /// One of message lane clients, which is target client for the race. #[async_trait] pub trait TargetClient { - /// Type of error this clients returns. + /// Type of error this client returns. type Error: std::fmt::Debug + MaybeConnectionError; /// Type of the additional data from the target client, used by the race. type TargetNoncesData: std::fmt::Debug; @@ -155,12 +155,12 @@ pub trait RaceStrategy: Debug { fn is_empty(&self) -> bool; /// Return id of source header that is required to be on target to continue synchronization. fn required_source_header_at_target(&self, current_best: &SourceHeaderId) -> Option; - /// Return best nonce at source node. + /// Return the best nonce at source node. /// /// `Some` is returned only if we are sure that the value is greater or equal /// than the result of `best_at_target`. fn best_at_source(&self) -> Option; - /// Return best nonce at target node. + /// Return the best nonce at target node. /// /// May return `None` if value is yet unknown. fn best_at_target(&self) -> Option; @@ -196,7 +196,7 @@ pub struct RaceState { /// Best finalized source header id at the best block on the target /// client (at the `best_finalized_source_header_id_at_best_target`). pub best_finalized_source_header_id_at_best_target: Option, - /// Best header id at the target client. + /// The best header id at the target client. pub best_target_header_id: Option, /// Best finalized header id at the target client. pub best_finalized_target_header_id: Option, diff --git a/relays/messages/src/message_race_receiving.rs b/relays/messages/src/message_race_receiving.rs index 4381b63591f..5fd80258880 100644 --- a/relays/messages/src/message_race_receiving.rs +++ b/relays/messages/src/message_race_receiving.rs @@ -11,7 +11,7 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -//!
Message receiving race delivers proof-of-messages-delivery from lane.target to lane.source. +//! Message receiving race delivers proof-of-messages-delivery from `lane.target` to `lane.source`. use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; use crate::message_lane_loop::{ diff --git a/relays/messages/src/message_race_strategy.rs b/relays/messages/src/message_race_strategy.rs index 7088f8d74b5..9da27ce44ca 100644 --- a/relays/messages/src/message_race_strategy.rs +++ b/relays/messages/src/message_race_strategy.rs @@ -35,7 +35,7 @@ pub struct BasicStrategy< > { /// All queued nonces. source_queue: VecDeque<(HeaderId, SourceNoncesRange)>, - /// Best nonce known to target node (at its best block). `None` if it has not been received yet. + /// The best nonce known to target node (at its best block). `None` if it has not been received yet. best_target_nonce: Option, /// Unused generic types dump. _phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>, diff --git a/relays/messages/src/metrics.rs b/relays/messages/src/metrics.rs index 51a4118be85..ee6a3d085ca 100644 --- a/relays/messages/src/metrics.rs +++ b/relays/messages/src/metrics.rs @@ -80,28 +80,28 @@ impl MessageLaneLoopMetrics { .set(target_client_state.best_finalized_peer_at_best_self.0.into()); } - /// Update latest generated nonce at source. + /// Update the latest generated nonce at source. pub fn update_source_latest_generated_nonce(&self, source_latest_generated_nonce: MessageNonce) { self.lane_state_nonces .with_label_values(&["source_latest_generated"]) .set(source_latest_generated_nonce); } - /// Update latest confirmed nonce at source. + /// Update the latest confirmed nonce at source. pub fn update_source_latest_confirmed_nonce(&self, source_latest_confirmed_nonce: MessageNonce) { self.lane_state_nonces .with_label_values(&["source_latest_confirmed"]) .set(source_latest_confirmed_nonce); } - /// Update latest received nonce at target. + /// Update the latest received nonce at target. pub fn update_target_latest_received_nonce(&self, target_latest_generated_nonce: MessageNonce) { self.lane_state_nonces .with_label_values(&["target_latest_received"]) .set(target_latest_generated_nonce); } - /// Update latest confirmed nonce at target. + /// Update the latest confirmed nonce at target. pub fn update_target_latest_confirmed_nonce(&self, target_latest_confirmed_nonce: MessageNonce) { self.lane_state_nonces .with_label_values(&["target_latest_confirmed"]) diff --git a/relays/substrate/src/cli.rs b/relays/substrate/src/cli.rs new file mode 100644 index 00000000000..7fb95d74675 --- /dev/null +++ b/relays/substrate/src/cli.rs @@ -0,0 +1,416 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Deal with CLI args of substrate-to-substrate relay.
+ +use bp_message_lane::LaneId; +use codec::{Decode, Encode}; +use sp_runtime::app_crypto::Ss58Codec; +use structopt::{clap::arg_enum, StructOpt}; + +use crate::rialto_millau::cli as rialto_millau; + +/// Parse relay CLI args. +pub fn parse_args() -> Command { + Command::from_args() +} + +/// Substrate-to-Substrate bridge utilities. +#[derive(StructOpt)] +#[structopt(about = "Substrate-to-Substrate relay")] +pub enum Command { + /// Start headers relay between two chains. + /// + /// The on-chain bridge component should have been already initialized with + /// `init-bridge` sub-command. + RelayHeaders(RelayHeaders), + /// Start messages relay between two chains. + /// + /// Ties up to `MessageLane` pallets on both chains and starts relaying messages. + /// Requires the header relay to be already running. + RelayMessages(RelayMessages), + /// Initialize on-chain bridge pallet with current header data. + /// + /// Sends initialization transaction to bootstrap the bridge with current finalized block data. + InitBridge(InitBridge), + /// Send custom message over the bridge. + /// + /// Allows interacting with the bridge by sending messages over the `MessageLane` component. + /// The message is sent to the source chain, delivered to the target chain and dispatched + /// there. + SendMessage(SendMessage), + /// Generate SCALE-encoded `Call` for chosen network. + /// + /// The call can be used either as message payload or can be wrapped into a transaction + /// and executed on the chain directly. + EncodeCall(EncodeCall), + /// Generate SCALE-encoded `MessagePayload` object that can be sent over selected bridge. + /// + /// The `MessagePayload` can then be fed to `MessageLane::send_message` function and sent over + /// the bridge. + EncodeMessagePayload(EncodeMessagePayload), + /// Estimate Delivery and Dispatch Fee required for message submission to message lane. + EstimateFee(EstimateFee), + /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. + DeriveAccount(DeriveAccount), +} + +impl Command { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::InitBridge(arg) => arg.run().await?, + Self::RelayHeaders(arg) => arg.run().await?, + Self::RelayMessages(arg) => arg.run().await?, + Self::SendMessage(arg) => arg.run().await?, + Self::EncodeCall(arg) => arg.run().await?, + Self::EncodeMessagePayload(arg) => arg.run().await?, + Self::EstimateFee(arg) => arg.run().await?, + Self::DeriveAccount(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// Start headers relayer process. +#[derive(StructOpt)] +pub enum RelayHeaders { + #[structopt(flatten)] + RialtoMillau(rialto_millau::RelayHeaders), +} + +impl RelayHeaders { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// Start message relayer process. +#[derive(StructOpt)] +pub enum RelayMessages { + #[structopt(flatten)] + RialtoMillau(rialto_millau::RelayMessages), +} + +impl RelayMessages { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// Initialize bridge pallet. +#[derive(StructOpt)] +pub enum InitBridge { + #[structopt(flatten)] + RialtoMillau(rialto_millau::InitBridge), +} + +impl InitBridge { + /// Run the command.
+ pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// Send bridge message. +#[derive(StructOpt)] +pub enum SendMessage { + #[structopt(flatten)] + RialtoMillau(rialto_millau::SendMessage), +} + +impl SendMessage { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// A call to encode. +#[derive(StructOpt)] +pub enum EncodeCall { + #[structopt(flatten)] + RialtoMillau(rialto_millau::EncodeCall), +} + +impl EncodeCall { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// A `MessagePayload` to encode. +#[derive(StructOpt)] +pub enum EncodeMessagePayload { + #[structopt(flatten)] + RialtoMillau(rialto_millau::EncodeMessagePayload), +} + +impl EncodeMessagePayload { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// Estimate Delivery & Dispatch Fee command. +#[derive(StructOpt)] +pub enum EstimateFee { + #[structopt(flatten)] + RialtoMillau(rialto_millau::EstimateFee), +} + +impl EstimateFee { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. +/// +/// The (derived) target chain `AccountId` is going to be used as dispatch origin of the call +/// that has been sent over the bridge. +/// This account can also be used to receive target-chain funds (or other form of ownership), +/// since messages sent over the bridge will be able to spend these. +#[derive(StructOpt)] +pub enum DeriveAccount { + #[structopt(flatten)] + RialtoMillau(rialto_millau::DeriveAccount), +} + +impl DeriveAccount { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RialtoMillau(arg) => arg.run().await?, + } + Ok(()) + } +} + +arg_enum! { + #[derive(Debug)] + /// The origin to use when dispatching the message on the target chain. + /// + /// - `Target` uses account existing on the target chain (requires target private key). + /// - `Origin` uses account derived from the source-chain account. + pub enum Origins { + Target, + Source, + } +} + +/// Generic account id with custom parser. +#[derive(Debug)] +pub struct AccountId { + account: sp_runtime::AccountId32, + version: sp_core::crypto::Ss58AddressFormat, +} + +impl std::str::FromStr for AccountId { + type Err = String; + + fn from_str(s: &str) -> Result { + let (account, version) = sp_runtime::AccountId32::from_ss58check_with_version(s) + .map_err(|err| format!("Unable to decode SS58 address: {:?}", err))?; + Ok(Self { account, version }) + } +} + +impl AccountId { + /// Perform runtime checks of SS58 version and get Rialto's AccountId. + pub fn into_rialto(self) -> bp_rialto::AccountId { + self.check_and_get("Rialto", rialto_runtime::SS58Prefix::get()) + } + + /// Perform runtime checks of SS58 version and get Millau's AccountId. + pub fn into_millau(self) -> bp_millau::AccountId { + self.check_and_get("Millau", millau_runtime::SS58Prefix::get()) + } + + /// Check SS58Prefix and return the account id. 
+ fn check_and_get(self, net: &str, expected_prefix: u8) -> sp_runtime::AccountId32 { + let version: u16 = self.version.into(); + println!("Version: {} vs {}", version, expected_prefix); + if version != expected_prefix as u16 { + log::warn!( + target: "bridge", + "Following address: {} does not seem to match {}'s format, got: {}", + self.account, + net, + self.version, + ) + } + self.account + } +} + +/// Lane id. +#[derive(Debug)] +pub struct HexLaneId(pub LaneId); + +impl From for LaneId { + fn from(lane_id: HexLaneId) -> LaneId { + lane_id.0 + } +} + +impl std::str::FromStr for HexLaneId { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + let mut lane_id = LaneId::default(); + hex::decode_to_slice(s, &mut lane_id)?; + Ok(HexLaneId(lane_id)) + } +} + +/// Nicer formatting for raw bytes vectors. +#[derive(Encode, Decode)] +pub struct HexBytes(pub Vec); + +impl std::str::FromStr for HexBytes { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + Ok(Self(hex::decode(s)?)) + } +} + +impl std::fmt::Debug for HexBytes { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "0x{}", hex::encode(&self.0)) + } +} + +impl HexBytes { + /// Encode given object and wrap into nicely formatted bytes. + pub fn encode(t: &T) -> Self { + Self(t.encode()) + } +} + +/// Prometheus metrics params. +#[derive(StructOpt)] +pub struct PrometheusParams { + /// Do not expose a Prometheus metric endpoint. + #[structopt(long)] + pub no_prometheus: bool, + /// Expose Prometheus endpoint at given interface. + #[structopt(long, default_value = "127.0.0.1")] + pub prometheus_host: String, + /// Expose Prometheus endpoint at given port. + #[structopt(long, default_value = "9616")] + pub prometheus_port: u16, +} + +impl From for Option { + fn from(cli_params: PrometheusParams) -> Option { + if !cli_params.no_prometheus { + Some(relay_utils::metrics::MetricsParams { + host: cli_params.prometheus_host, + port: cli_params.prometheus_port, + }) + } else { + None + } + } +} + +/// Either explicit or maximal allowed value. +#[derive(Debug)] +pub enum ExplicitOrMaximal { + /// User has explicitly specified argument value. + Explicit(V), + /// Maximal allowed value for this argument. + Maximal, +} + +impl std::str::FromStr for ExplicitOrMaximal +where + V::Err: std::fmt::Debug, +{ + type Err = String; + + fn from_str(s: &str) -> Result { + if s.to_lowercase() == "max" { + return Ok(ExplicitOrMaximal::Maximal); + } + + V::from_str(s) + .map(ExplicitOrMaximal::Explicit) + .map_err(|e| format!("Failed to parse '{:?}'. Expected 'max' or explicit value", e)) + } +} + +/// Create chain-specific set of configuration objects: connection parameters, +/// signing parameters and bridge initialization parameters. +#[macro_export] +macro_rules! declare_chain_options { + ($chain:ident, $chain_prefix:ident) => { + paste::item! 
{ + #[doc = $chain " connection params."] + #[derive(StructOpt)] + pub struct [<$chain ConnectionParams>] { + #[doc = "Connect to " $chain " node at given host."] + #[structopt(long, default_value = "127.0.0.1")] + pub [<$chain_prefix _host>]: String, + #[doc = "Connect to " $chain " node websocket server at given port."] + #[structopt(long)] + pub [<$chain_prefix _port>]: u16, + #[doc = "Use secure websocket connection."] + #[structopt(long)] + pub [<$chain_prefix _secure>]: bool, + } + + #[doc = $chain " signing params."] + #[derive(StructOpt)] + pub struct [<$chain SigningParams>] { + #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _signer>]: String, + #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _signer_password>]: Option, + } + } + }; +}
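For reference, a hypothetical invocation such as `declare_chain_options!(Millau, millau);` would expand (via `paste::item!`) to roughly the following StructOpt structs, with every struct and field name derived from the macro arguments:

declare_chain_options!(Millau, millau);

// Expands to approximately:
//
// pub struct MillauConnectionParams {
//     pub millau_host: String,    // --millau-host (defaults to 127.0.0.1)
//     pub millau_port: u16,       // --millau-port
//     pub millau_secure: bool,    // --millau-secure
// }
//
// pub struct MillauSigningParams {
//     pub millau_signer: String,                  // --millau-signer (a SURI)
//     pub millau_signer_password: Option<String>, // --millau-signer-password
// }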