diff --git a/.config/lingua.dic b/.config/lingua.dic deleted file mode 100644 index 24dd02c3c980..000000000000 --- a/.config/lingua.dic +++ /dev/null @@ -1,111 +0,0 @@ -150 -adversary/SM -annualised/MS -Apache-2.0/M -api/SM -API/SM -APIs -assignee/SM -async -BFT/M -bitfield/MS -blake2/MS -blockchain/MS -borked -BTC -BTC/S -CLI/MS -codec/SM -config/MS -crypto/MS -customizable/B -debian/M -decodable/MS -dispatchable/SM -DMP/SM -DOTs -DOT/SM -ed25519 -enum/MS -ERC-20 -ETH -ethereum/MS -externality/MS -extrinsic/MS -extrinsics -fedora/M -GiB/S -GPL/M -GPLv3/M -Handler/MS -HMP/SM -https -include/BG -inherent/MS -initialize/RG -instantiate/B -intrinsic/MS -intrinsics -io -isolate/BG -jaeger/MS -js -keccak256/M -KSM/S -kusama/S -KYC/M -merkle/MS -misbehavior/SM -misbehaviors -MIT/M -MQC/SM -multivalidator/SM -NFT/SM -oneshot/MS -others' -parablock/MS -parachain/MS -parameterize/D -picosecond/SM -polkadot/MS -pov-block/MS -PoV/MS -promethius -promethius' -provisioner/MS -PVF/S -redhat/M -repo/MS -RPC/MS -runtime/MS -rustc/MS -sr25519 -struct/MS -subsystem/MS -subsystems' -taskmanager/MS -TCP -teleport/RG -teleportation/SM -teleporter/SM -teleporters -testnet/MS -trie/MS -trustless/Y -tuple/SM -ubuntu/M -UDP -UI -unfinalize/BD -union/MSG -unservable/B -validator/SM -VMP/SM -VRF/SM -w3f/MS -wasm/M -WND/S -XCM/S -XCMP/M -instantiation/SM -NFA diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 079621cbdc54..3a7d38532991 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -164,6 +164,18 @@ check-runtime-benchmarks: - ./scripts/gitlab/check_runtime_benchmarks.sh - sccache -s +spellcheck: + stage: test + <<: *docker-env + <<: *rules-pr-only + script: + - cargo spellcheck --version + # compare with the commit parent to the PR, given it's from a default branch + - git fetch origin +${CI_DEFAULT_BRANCH}:${CI_DEFAULT_BRANCH} + - time cargo spellcheck check -vvv --cfg=scripts/gitlab/spellcheck.toml --checkers hunspell --code 1 + -r $(git diff --name-only ${CI_COMMIT_SHA} $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH})) + allow_failure: true + build-adder-collator: stage: test <<: *collect-artifacts @@ -383,9 +395,9 @@ trigger-simnet: variables: TRGR_PROJECT: ${CI_PROJECT_NAME} TRGR_REF: ${CI_COMMIT_REF_NAME} - # simnet project ID + # Simnet project ID DWNSTRM_ID: 332 script: - # API trigger for a simnet job, argument value is set in the project variables + # API trigger for a Simnet job, argument value is set in the project variables - ./scripts/gitlab/trigger_pipeline.sh --simnet-version=${SIMNET_REF} allow_failure: true diff --git a/README.md b/README.md index 5281d40803a5..d5251eb319d1 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Polkadot -Implementation of a https://polkadot.network node in Rust based on the Substrate framework. +Implementation of a node in Rust based on the Substrate framework. > **NOTE:** In 2018, we split our implementation of "Polkadot" from its development framework > "Substrate". See the [Substrate][substrate-repo] repo for git history prior to 2018. @@ -19,7 +19,7 @@ either run the latest binary from our [releases](https://github.com/paritytech/polkadot/releases) page, or install Polkadot from one of our package repositories. -Installation from the debian or rpm repositories will create a `systemd` +Installation from the Debian or rpm repositories will create a `systemd` service that can be used to run a Polkadot node. 
This is disabled by default, and can be started by running `systemctl start polkadot` on demand (use `systemctl enable polkadot` to make it auto-start after reboot). By default, it @@ -207,7 +207,7 @@ You can run a simple single-node development "network" on your machine by runnin polkadot --dev ``` -You can muck around by heading to https://polkadot.js.org/apps and choose "Local Node" from the +You can muck around by heading to and choose "Local Node" from the Settings menu. ### Local Two-node Testnet @@ -246,7 +246,3 @@ Ensure you replace `ALICE_BOOTNODE_ID_HERE` with the node ID from the output of ## License Polkadot is [GPL 3.0 licensed](LICENSE). - -## Important Notice - -https://polkadot.network/testnetdisclaimer diff --git a/bridges/.config/lingua.dic b/bridges/.config/lingua.dic index da87e36948c7..8db035c2c28a 100644 --- a/bridges/.config/lingua.dic +++ b/bridges/.config/lingua.dic @@ -32,7 +32,7 @@ choosen config/MS crypto/MS customizable/B -debian/M +Debian/M decodable/MS DOT/S doesn diff --git a/bridges/.maintain/rialto-weight-template.hbs b/bridges/.maintain/rialto-weight-template.hbs index 4bf856948ae3..4868e6c84bb2 100644 --- a/bridges/.maintain/rialto-weight-template.hbs +++ b/bridges/.maintain/rialto-weight-template.hbs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for {{pallet}} +//! Autogenerated weights for {{cmd.pallet}} //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}} diff --git a/bridges/bin/millau/runtime/src/lib.rs b/bridges/bin/millau/runtime/src/lib.rs index 75039a64f878..aa71b866f95d 100644 --- a/bridges/bin/millau/runtime/src/lib.rs +++ b/bridges/bin/millau/runtime/src/lib.rs @@ -199,7 +199,7 @@ impl frame_system::Config for Runtime { type BlockLength = bp_millau::BlockLength; /// The weight of database operations that the runtime can invoke. type DbWeight = DbWeight; - /// The designated SS58 prefix of this chain. + /// The designated `SS58` prefix of this chain. type SS58Prefix = SS58Prefix; /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); @@ -239,7 +239,7 @@ parameter_types! { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. + /// A timestamp: milliseconds since the Unix epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; @@ -421,9 +421,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, diff --git a/bridges/bin/rialto/runtime/src/exchange.rs b/bridges/bin/rialto/runtime/src/exchange.rs index 38df04c8ccd6..3b9c88112e4b 100644 --- a/bridges/bin/rialto/runtime/src/exchange.rs +++ b/bridges/bin/rialto/runtime/src/exchange.rs @@ -55,8 +55,8 @@ pub struct EthereumTransactionInclusionProof { /// /// The assumption is that this pair will never appear more than once in /// transactions included into finalized blocks. 
This is obviously true -/// for any existing eth-like chain (that keep current tx format), because -/// otherwise transaction can be replayed over and over. +/// for any existing eth-like chain (that keep current transaction format), +/// because otherwise transaction can be replayed over and over. #[derive(Encode, Decode, PartialEq, RuntimeDebug)] pub struct EthereumTransactionTag { /// Account that has locked funds. diff --git a/bridges/bin/rialto/runtime/src/kovan.rs b/bridges/bin/rialto/runtime/src/kovan.rs index f1b0bb99a600..528c6205846f 100644 --- a/bridges/bin/rialto/runtime/src/kovan.rs +++ b/bridges/bin/rialto/runtime/src/kovan.rs @@ -34,8 +34,8 @@ frame_support::parameter_types! { kovan_validators_configuration(); } -/// Max number of finalized headers to keep. It is equivalent of ~24 hours of -/// finalized blocks on current Kovan chain. +/// Max number of finalized headers to keep. It is equivalent of approximately +/// 24 hours of finalized blocks on current Kovan chain. const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000; /// Aura engine configuration for Kovan chain. diff --git a/bridges/bin/rialto/runtime/src/lib.rs b/bridges/bin/rialto/runtime/src/lib.rs index d2b920d2023c..1dd5a10b4530 100644 --- a/bridges/bin/rialto/runtime/src/lib.rs +++ b/bridges/bin/rialto/runtime/src/lib.rs @@ -206,7 +206,7 @@ impl frame_system::Config for Runtime { type BlockLength = bp_rialto::BlockLength; /// The weight of database operations that the runtime can invoke. type DbWeight = DbWeight; - /// The designated SS58 prefix of this chain. + /// The designated `SS58` prefix of this chain. type SS58Prefix = SS58Prefix; /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); @@ -346,7 +346,7 @@ parameter_types! { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. + /// A timestamp: milliseconds since the Unix epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; @@ -531,9 +531,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1060,7 +1060,7 @@ impl_runtime_apis! { /// Millau account ownership digest from Rialto. /// /// The byte vector returned by this function should be signed with a Millau account private key. -/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key +/// This way, the owner of `rialto_account_id` on Rialto proves that the Millau account private key /// is also under his control. 
pub fn rialto_to_millau_account_ownership_digest( millau_call: &Call, diff --git a/bridges/bin/rialto/runtime/src/rialto_poa.rs b/bridges/bin/rialto/runtime/src/rialto_poa.rs index fecc733569bd..77bd288e8648 100644 --- a/bridges/bin/rialto/runtime/src/rialto_poa.rs +++ b/bridges/bin/rialto/runtime/src/rialto_poa.rs @@ -110,7 +110,7 @@ impl TPruningStrategy for PruningStrategy { } } -/// ChainTime provider +/// `ChainTime` provider #[derive(Default)] pub struct ChainTime; diff --git a/bridges/modules/currency-exchange/src/benchmarking.rs b/bridges/modules/currency-exchange/src/benchmarking.rs index 574ae93f6ee0..74da4c1b7ec4 100644 --- a/bridges/modules/currency-exchange/src/benchmarking.rs +++ b/bridges/modules/currency-exchange/src/benchmarking.rs @@ -40,10 +40,10 @@ pub struct ProofParams { /// When true, recipient must exists before import. pub recipient_exists: bool, /// When 0, transaction should have minimal possible size. When this value has non-zero value n, - /// transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. + /// transaction size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`. pub transaction_size_factor: u32, /// When 0, proof should have minimal possible size. When this value has non-zero value n, - /// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. + /// proof size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`. pub proof_size_factor: u32, } diff --git a/bridges/modules/dispatch/src/lib.rs b/bridges/modules/dispatch/src/lib.rs index 2b1cd1beb7a9..a16cd214469e 100644 --- a/bridges/modules/dispatch/src/lib.rs +++ b/bridges/modules/dispatch/src/lib.rs @@ -19,7 +19,7 @@ //! The messages are interpreted directly as runtime `Call`. We attempt to decode //! them and then dispatch as usual. To prevent compatibility issues, the Calls have //! to include a `spec_version`. This will be checked before dispatch. In the case of -//! a succesful dispatch an event is emitted. +//! a successful dispatch an event is emitted. #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] @@ -52,7 +52,7 @@ pub trait Config: frame_system::Config { /// The overarching event type. type Event: From> + Into<::Event>; /// Id of the message. Whenever message is passed to the dispatch module, it emits - /// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if + /// event with this id + dispatch result. Could be e.g. (`LaneId`, `MessageNonce`) if /// it comes from the messages module. type MessageId: Parameter; /// Type of account ID on source chain. @@ -77,13 +77,13 @@ pub trait Config: frame_system::Config { /// The type that is used to wrap the `Self::Call` when it is moved over bridge. /// /// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure - /// that all other stuff (like `spec_version`) is ok. If we would try to decode + /// that all other stuff (like `spec_version`) is OK. If we would try to decode /// `Call` which has been encoded using previous `spec_version`, then we might end /// up with decoding error, instead of `MessageVersionSpecMismatch`. type EncodedCall: Decode + Encode + Into>::Call, ()>>; - /// A type which can be turned into an AccountId from a 256-bit hash. + /// A type which can be turned into an `AccountId` from a 256-bit hash. /// - /// Used when deriving target chain AccountIds from source chain AccountIds. + /// Used when deriving target chain `AccountId`s from source chain `AccountId`s. 
type AccountIdConverter: sp_runtime::traits::Convert; } diff --git a/bridges/primitives/header-chain/src/justification.rs b/bridges/primitives/header-chain/src/justification.rs index 625d7762597a..cc47070b8ca2 100644 --- a/bridges/primitives/header-chain/src/justification.rs +++ b/bridges/primitives/header-chain/src/justification.rs @@ -16,7 +16,7 @@ //! Pallet for checking GRANDPA Finality Proofs. //! -//! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin +//! Adapted copy of `substrate/client/finality-grandpa/src/justification.rs`. If origin //! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. use codec::{Decode, Encode}; @@ -57,7 +57,7 @@ pub enum Error { InvalidJustificationTarget, /// The authority has provided an invalid signature. InvalidAuthoritySignature, - /// The justification contains precommit for header that is not a descendant of the commit header. + /// The justification contains pre-commit for header that is not a descendant of the commit header. PrecommitIsNotCommitDescendant, /// The cumulative weight of all votes in the justification is not enough to justify commit /// header finalization. @@ -119,7 +119,7 @@ where if signed.precommit.target_number < justification.commit.target_number { return Err(Error::PrecommitIsNotCommitDescendant); } - // all precommits must be for target block descendents + // all precommits must be for target block descendants chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?; // since we know now that the precommit target is the descendant of the justification target, // we may increase 'weight' of the justification target @@ -154,7 +154,7 @@ where } // check that the cumulative weight of validators voted for the justification target (or one - // of its descendents) is larger than required threshold. + // of its descendants) is larger than required threshold. let threshold = authorities_set.threshold().0.into(); if cumulative_weight >= threshold { Ok(()) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 722230ac9a50..a69a46f1648e 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -65,7 +65,7 @@ pub enum Subcommand { #[cfg(not(feature = "try-runtime"))] TryRuntime, - /// Key management cli utilities + /// Key management CLI utilities Key(sc_cli::KeySubcommand), } diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs index c552929d15f3..e4efb825e012 100644 --- a/core-primitives/src/lib.rs +++ b/core-primitives/src/lib.rs @@ -81,11 +81,11 @@ impl sp_std::fmt::Debug for CandidateHash { pub type Nonce = u32; /// The balance of an account. -/// 128-bits (or 38 significant decimal figures) will allow for 10m currency (10^7) at a resolution -/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (10^11 unit -/// denomination), or 10^18 total atomic units, to grow at 50%/year for 51 years (10^9 multiplier) -/// for an eventual total of 10^27 units (27 significant decimal figures). -/// We round denomination to 10^12 (12 sdf), and leave the other redundancy at the upper end so +/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution +/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit +/// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier) +/// for an eventual total of `10^27` units (27 significant decimal figures). 
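The balance headroom described in the `core-primitives` comment in this hunk is easy to sanity-check. A minimal standalone sketch (not part of the diff; the `10^27` total and the 32-bit factor are taken straight from the comment) confirms that a 32-bit value multiplied by a balance of that size cannot overflow `u128`:

```rust
/// Sanity check of the headroom claim: 10^27 * (2^32 - 1) is about 4.3 * 10^36,
/// comfortably below u128::MAX (about 3.4 * 10^38).
fn main() {
    let eventual_total: u128 = 10u128.pow(27); // 27 significant decimal figures
    let max_32_bit_factor: u128 = u32::MAX as u128;
    assert!(eventual_total.checked_mul(max_32_bit_factor).is_some());
    println!("10^27 * u32::MAX = {}", eventual_total * max_32_bit_factor);
}
```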
+/// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so /// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow. pub type Balance = u128; @@ -99,7 +99,7 @@ pub type BlockId = generic::BlockId; /// Opaque, encoded, unchecked extrinsic. pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; -/// The information that goes alongside a transfer_into_parachain operation. Entirely opaque, it +/// The information that goes alongside a `transfer_into_parachain` operation. Entirely opaque, it /// will generally be used for identifying the reason for the transfer. Typically it will hold the /// destination account to which the transfer should be credited. If still more information is /// needed, then this should be a hash with the pre-image presented via an off-chain mechanism on @@ -144,7 +144,7 @@ pub struct OutboundHrmpMessage { pub data: sp_std::vec::Vec, } -/// V1 primitives. +/// `V1` primitives. pub mod v1 { pub use super::*; } diff --git a/doc/testing.md b/doc/testing.md index 985cda9b0aff..8230ea352c0f 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -20,7 +20,7 @@ One particular subsystem (subsystem under test) interacts with a mocked overseer that is made to assert incoming and outgoing messages of the subsystem under test. This is largely present today, but has some fragmentation in the evolved -integration test implementation. A proc-macro/macro_rules would allow +integration test implementation. A `proc-macro`/`macro_rules` would allow for more consistent implementation and structure. #### Behavior tests (3) @@ -29,27 +29,25 @@ Launching small scale networks, with multiple adversarial nodes without any furt This should include tests around the thresholds in order to evaluate the error handling once certain assumed invariants fail. -For this purpose based on `AllSubsystems` and proc-macro `AllSubsystemsGen`. +For this purpose based on `AllSubsystems` and `proc-macro` `AllSubsystemsGen`. This assumes a simplistic test runtime. #### Testing at scale (4) Launching many nodes with configurable network speed and node features in a cluster of nodes. -At this scale the [`simnet`][simnet] comes into play which launches a full cluster of nodes. +At this scale the [Simnet][simnet] comes into play which launches a full cluster of nodes. The scale is handled by spawning a kubernetes cluster and the meta description -is covered by [`gurke`][gurke]. -Asserts are made using grafana rules, based on the existing prometheus metrics. This can +is covered by [Gurke][Gurke]. +Asserts are made using Grafana rules, based on the existing prometheus metrics. This can be extended by adding an additional service translating `jaeger` spans into addition prometheus avoiding additional polkadot source changes. - _Behavior tests_ and _testing at scale_ have naturally soft boundary. The most significant difference is the presence of a real network and the number of nodes, since a single host often not capable to run multiple nodes at once. - --- ## Coverage @@ -93,15 +91,15 @@ miniserve -r ./coverage grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info ``` -The test coverage in `lcov` can the be published to . +The test coverage in `lcov` can the be published to . ```sh bash <(curl -s https://codecov.io/bash) -f lcov.info ``` -or just printed as part of the PR using a github action i.e. [jest-lcov-reporter](https://github.com/marketplace/actions/jest-lcov-reporter). 
+or just printed as part of the PR using a github action i.e. [`jest-lcov-reporter`](https://github.com/marketplace/actions/jest-lcov-reporter). -For full examples on how to use [grcov /w polkadot specifics see the github repo](https://github.com/mozilla/grcov#coverallscodecov-output). +For full examples on how to use [`grcov` /w polkadot specifics see the github repo](https://github.com/mozilla/grcov#coverallscodecov-output). ## Fuzzing @@ -146,13 +144,12 @@ Requirements: * spawn nodes with preconfigured behaviors * allow multiple types of configuration to be specified -* allow extensability via external crates +* allow extendability via external crates * ... --- - -## Implementation of different behavior strain nodes. +## Implementation of different behavior strain nodes ### Goals @@ -166,21 +163,21 @@ well as shorting the block time and epoch times down to a few `100ms` and a few #### MVP -A simple small scale builder pattern would suffice for stage one impl of allowing to +A simple small scale builder pattern would suffice for stage one implementation of allowing to replace individual subsystems. An alternative would be to harness the existing `AllSubsystems` type and replace the subsystems as needed. -#### Full proc-macro impl +#### Full `proc-macro` implementation `Overseer` is a common pattern. -It could be extracted as proc macro and generative proc-macro. +It could be extracted as `proc` macro and generative `proc-macro`. This would replace the `AllSubsystems` type as well as implicitly create the `AllMessages` enum as `AllSubsystemsGen` does today. The implementation is yet to be completed, see the [implementation PR](https://github.com/paritytech/polkadot/pull/2962) for details. -##### Declare an overseer impl +##### Declare an overseer implementation ```rust struct BehaveMaleficient; @@ -237,8 +234,8 @@ fn main() -> eyre::Result<()> { #### Simnet -Spawn a kubernetes cluster based on a meta description using [gurke] with the -[simnet] scripts. +Spawn a kubernetes cluster based on a meta description using [Gurke] with the +[Simnet] scripts. Coordinated attacks of multiple nodes or subsystems must be made possible via a side-channel, that is out of scope for this document. @@ -246,11 +243,11 @@ a side-channel, that is out of scope for this document. The individual node configurations are done as targets with a particular builder configuration. -#### Behavior tests w/o simnet +#### Behavior tests w/o Simnet Commonly this will require multiple nodes, and most machines are limited to running two or three nodes concurrently. -Hence, this is not the common case and is just an impl _idea_. +Hence, this is not the common case and is just an implementation _idea_. ```rust behavior_testcase!{ @@ -263,5 +260,5 @@ behavior_testcase!{ } ``` -[gurke]: https://github.com/paritytech/gurke +[Gurke]: https://github.com/paritytech/gurke [simnet]: https://github.com/paritytech/simnet_scripts diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs index 2cae7160443d..69f30162ba90 100644 --- a/erasure-coding/src/lib.rs +++ b/erasure-coding/src/lib.rs @@ -20,7 +20,7 @@ //! The way we accomplish this is by erasure coding the data into n pieces //! and constructing a merkle root of the data. //! -//! Each of n validators stores their piece of data. We assume n=3f+k, 0 < k ≤ 3. +//! Each of n validators stores their piece of data. We assume `n = 3f + k`, `0 < k ≤ 3`. //! f is the maximum number of faulty validators in the system. //! 
The data is coded so any f+1 chunks can be used to reconstruct the full data. @@ -58,7 +58,7 @@ pub enum Error { /// Chunks not of uniform length or the chunks are empty. #[error("Chunks are not unform, mismatch in length or are zero sized")] NonUniformChunks, - /// An uneven byte-length of a shard is not valid for GF(2^16) encoding. + /// An uneven byte-length of a shard is not valid for `GF(2^16)` encoding. #[error("Uneven length is not valid for field GF(2^16)")] UnevenLength, /// Chunk index out of bounds. diff --git a/file_header.txt b/file_header.txt index f3a8b8eb3041..6dbc98f18e44 100644 --- a/file_header.txt +++ b/file_header.txt @@ -1,4 +1,4 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// Copyright 2017-2021 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 72a1beaeafe8..16f991eb54d2 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -443,7 +443,7 @@ struct MetricsInner { new_activations_per_availability_core: prometheus::Histogram, } -/// CollationGenerationSubsystem metrics. +/// `CollationGenerationSubsystem` metrics. #[derive(Default, Clone)] pub struct Metrics(Option); diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index 0843d574fc41..3f9d2154bf1c 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -297,8 +297,8 @@ fn filled_tranche_iterator<'a>( pre.chain(approval_entries_filled).chain(post) } -/// Computes the number of no_show validators in a set of assignments given the relevant approvals -/// and tick parameters. This method also returns the next tick at which a no_show will occur +/// Computes the number of `no_show` validators in a set of assignments given the relevant approvals +/// and tick parameters. This method also returns the next tick at which a `no_show` will occur /// amongst the set of validators that have not submitted an approval. /// /// If the returned `next_no_show` is not None, there are two possible cases for the value of diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index 428692b02595..7cf0c59ee5ac 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -38,7 +38,7 @@ const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; #[cfg(test)] pub mod tests; -/// DbBackend is a concrete implementation of the higher-level Backend trait +/// `DbBackend` is a concrete implementation of the higher-level Backend trait pub struct DbBackend { inner: Arc, config: Config, diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 4c9cbee814b2..b6f06c0fdb67 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -152,7 +152,7 @@ pub(crate) struct Config { n_cores: u32, /// The zeroth delay tranche width. zeroth_delay_tranche_width: u32, - /// The number of samples we do of relay_vrf_modulo. + /// The number of samples we do of `relay_vrf_modulo`. relay_vrf_modulo_samples: u32, /// The number of delay tranches in total. 
n_delay_tranches: u32, diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 77ce36a597fa..16ce617b7a3e 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -121,7 +121,7 @@ enum Mode { /// The approval voting subsystem. pub struct ApprovalVotingSubsystem { - /// LocalKeystore is needed for assignment keys, but not necessarily approval keys. + /// `LocalKeystore` is needed for assignment keys, but not necessarily approval keys. /// /// We do a lot of VRF signing and need the keys to have low latency. keystore: Arc, @@ -145,7 +145,7 @@ struct MetricsInner { time_recover_and_approve: prometheus::Histogram, } -/// Aproval Voting metrics. +/// Approval Voting metrics. #[derive(Default, Clone)] pub struct Metrics(Option); diff --git a/node/core/approval-voting/src/time.rs b/node/core/approval-voting/src/time.rs index 4ca85fa44dae..d12132fab9a9 100644 --- a/node/core/approval-voting/src/time.rs +++ b/node/core/approval-voting/src/time.rs @@ -24,7 +24,7 @@ use std::pin::Pin; const TICK_DURATION_MILLIS: u64 = 500; -/// A base unit of time, starting from the unix epoch, split into half-second intervals. +/// A base unit of time, starting from the Unix epoch, split into half-second intervals. pub(crate) type Tick = u64; /// A clock which allows querying of the current tick as well as diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs index 38c05eb15130..baaa13abbf4e 100644 --- a/node/core/av-store/src/tests.rs +++ b/node/core/av-store/src/tests.rs @@ -285,7 +285,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() { } ); - // runtime api call fails + // runtime API call fails assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 682c45536a27..9ce95459a837 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -104,7 +104,7 @@ pub enum Error { /// PoV data to validate. enum PoVData { - /// Allready available (from candidate selection). + /// Already available (from candidate selection). Ready(Arc), /// Needs to be fetched from validator (we are checking a signed statement). FetchFromValidator { @@ -856,7 +856,7 @@ impl CandidateBackingJob { /// This also does bounds-checking on the validator index and will return an error if the /// validator index is out of bounds for the current validator set. It's expected that /// this should never happen due to the interface of the candidate backing subsystem - - /// the networking component repsonsible for feeding statements to the backing subsystem + /// the networking component responsible for feeding statements to the backing subsystem /// is meant to check the signature and provenance of all statements before submission. async fn dispatch_new_statement_to_dispute_coordinator( &self, diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 8da711da649a..1edc89886a3b 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -312,5 +312,5 @@ impl JobTrait for BitfieldSigningJob { } } -/// BitfieldSigningSubsystem manages a number of bitfield signing jobs. +/// `BitfieldSigningSubsystem` manages a number of bitfield signing jobs. 
pub type BitfieldSigningSubsystem = JobSubsystem; diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index e163fe07667f..42f2e4794b32 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -45,7 +45,7 @@ pub(super) trait Backend { fn load_leaves(&self) -> Result; /// Load the stagnant list at the given timestamp. fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; - /// Load all stagnant lists up to and including the given UNIX timestamp + /// Load all stagnant lists up to and including the given Unix timestamp /// in ascending order. fn load_stagnant_at_up_to(&self, up_to: Timestamp) -> Result)>, Error>; diff --git a/node/core/chain-selection/src/db_backend/v1.rs b/node/core/chain-selection/src/db_backend/v1.rs index 6aea4af8c136..71a4f718f9e6 100644 --- a/node/core/chain-selection/src/db_backend/v1.rs +++ b/node/core/chain-selection/src/db_backend/v1.rs @@ -26,7 +26,7 @@ //! ``` //! //! The big-endian encoding is used for creating iterators over the key-value DB which are -//! accessible by prefix, to find the earlist block number stored as well as the all stagnant +//! accessible by prefix, to find the earliest block number stored as well as the all stagnant //! blocks. //! //! The `Vec`s stored are always non-empty. Empty `Vec`s are not stored on disk so there is no diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index a52119c76ef5..9862f60d7de8 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -534,7 +534,7 @@ async fn handle_active_leaf( ); // If we don't know the weight, we can't import the block. - // And none of its descendents either. + // And none of its descendants either. break; } Some(w) => w, diff --git a/node/core/dispute-coordinator/src/backend.rs b/node/core/dispute-coordinator/src/backend.rs index 35c36f986c91..dfd53213f70c 100644 --- a/node/core/dispute-coordinator/src/backend.rs +++ b/node/core/dispute-coordinator/src/backend.rs @@ -57,7 +57,7 @@ pub trait Backend { where I: IntoIterator; } -/// An in-memory overllay for the backend. +/// An in-memory overlay for the backend. /// /// This maintains read-only access to the underlying backend, but can be converted into a set of /// write operations which will, when written to the underlying backend, give the same view as the @@ -121,7 +121,7 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { self.inner.load_candidate_votes(session, candidate_hash) } - /// Prepare a write to the 'earliest session' field of the DB. + /// Prepare a write to the "earliest session" field of the DB. /// /// Later calls to this function will override earlier ones. pub fn write_earliest_session(&mut self, session: SessionIndex) { diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index d3f859e7d641..2d8b488fed69 100644 --- a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! V1 database for the dispute coordinator. +//! `V1` database for the dispute coordinator. 
use polkadot_primitives::v1::{ CandidateReceipt, ValidDisputeStatementKind, InvalidDisputeStatementKind, ValidatorIndex, diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 2cc155a51e99..f2b956f3fe6d 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -212,7 +212,7 @@ pub enum DisputeStatus { /// since the given timestamp. #[codec(index = 1)] ConcludedFor(Timestamp), - /// The dispute has been concluded agains the candidate + /// The dispute has been concluded against the candidate /// since the given timestamp. /// /// This takes precedence over `ConcludedFor` in the case that diff --git a/node/core/dispute-participation/src/tests.rs b/node/core/dispute-participation/src/tests.rs index 734f997338e8..43aecd3f7847 100644 --- a/node/core/dispute-participation/src/tests.rs +++ b/node/core/dispute-participation/src/tests.rs @@ -144,7 +144,7 @@ async fn fetch_validation_code(virtual_overseer: &mut VirtualOverseer) { )) => { tx.send(Ok(Some(validation_code))).unwrap(); }, - "overseer did not receive runtime api request for validation code", + "overseer did not receive runtime API request for validation code", ); } @@ -243,7 +243,7 @@ fn cannot_participate_if_cannot_recover_validation_code() { )) => { tx.send(Ok(None)).unwrap(); }, - "overseer did not receive runtime api request for validation code", + "overseer did not receive runtime API request for validation code", ); virtual_overseer diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs index 71377cdf3dc2..f0ba95515054 100644 --- a/node/core/pvf/src/error.rs +++ b/node/core/pvf/src/error.rs @@ -40,13 +40,13 @@ pub enum InvalidCandidate { /// /// (b) The candidate triggered a code path that has lead to the process death. For example, /// the PVF found a way to consume unbounded amount of resources and then it either exceeded - /// an rlimit (if set) or, again, invited OOM killer. Another possibility is a bug in + /// an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a bug in /// wasmtime allowed the PVF to gain control over the execution worker. /// /// We attribute such an event to an invalid candidate in either case. /// /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. If the glitch is somewhat more persistant the validator will reject all candidate + /// validator. If the glitch is somewhat more persistent the validator will reject all candidate /// thrown at it and hopefully the operator notices it by decreased reward performance of the /// validator. On the other hand, if the worker died because of (b) we would have better chances /// to stop the attack. diff --git a/node/core/pvf/src/execute/worker.rs b/node/core/pvf/src/execute/worker.rs index 3f9466e7cc49..12d073c08cf2 100644 --- a/node/core/pvf/src/execute/worker.rs +++ b/node/core/pvf/src/execute/worker.rs @@ -185,7 +185,7 @@ impl Response { } } -/// The entrypoint that the spawned execute worker should start with. The socket_path specifies +/// The entrypoint that the spawned execute worker should start with. The `socket_path` specifies /// the path to the socket used to communicate with the host. 
pub fn worker_entrypoint(socket_path: &str) { worker_event_loop("execute", socket_path, |mut stream| async move { diff --git a/node/core/pvf/src/executor_intf.rs b/node/core/pvf/src/executor_intf.rs index f1d951174422..0e19b3d59b7d 100644 --- a/node/core/pvf/src/executor_intf.rs +++ b/node/core/pvf/src/executor_intf.rs @@ -54,7 +54,7 @@ const CONFIG: Config = Config { }, }; -/// Runs the prevaldation on the given code. Returns a [`RuntimeBlob`] if it succeeds. +/// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds. pub fn prevalidate(code: &[u8]) -> Result { let blob = RuntimeBlob::new(code)?; // It's assumed this function will take care of any prevalidation logic diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index 1777af3b4ed4..ca571a49daac 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -49,7 +49,7 @@ pub struct ValidationHost { } impl ValidationHost { - /// Execute PVF with the given code, params and priority. The result of execution will be sent + /// Execute PVF with the given code, parameters and priority. The result of execution will be sent /// to the provided result sender. /// /// This is async to accommodate the fact a possibility of back-pressure. In the vast majority of @@ -106,7 +106,7 @@ pub struct Config { pub cache_path: PathBuf, /// The path to the program that can be used to spawn the prepare workers. pub prepare_worker_program_path: PathBuf, - /// The time alloted for a prepare worker to spawn and report to the host. + /// The time allotted for a prepare worker to spawn and report to the host. pub prepare_worker_spawn_timeout: Duration, /// The maximum number of workers that can be spawned in the prepare pool for tasks with the /// priority below critical. @@ -115,7 +115,7 @@ pub struct Config { pub prepare_workers_hard_max_num: usize, /// The path to the program that can be used to spawn the execute workers. pub execute_worker_program_path: PathBuf, - /// The time alloted for an execute worker to spawn and report to the host. + /// The time allotted for an execute worker to spawn and report to the host. pub execute_worker_spawn_timeout: Duration, /// The maximum number of execute workers that can run at the same time. pub execute_workers_max_num: usize, @@ -147,7 +147,7 @@ impl Config { /// must be polled in order for validation host to function. /// /// The future should not return normally but if it does then that indicates an unrecoverable error. -/// In that case all pending requests will be cancelled, dropping the result senders and new ones +/// In that case all pending requests will be canceled, dropping the result senders and new ones /// will be rejected. pub fn start(config: Config) -> (ValidationHost, impl Future) { let (to_host_tx, to_host_rx) = mpsc::channel(10); @@ -220,7 +220,7 @@ struct PendingExecutionRequest { } /// A mapping from an artifact ID which is in preparation state to the list of pending execution -/// requests that should be executed once the artifact's prepration is finished. +/// requests that should be executed once the artifact's preparation is finished. #[derive(Default)] struct AwaitingPrepare(HashMap>); @@ -628,7 +628,7 @@ mod tests { } } - /// Creates a new pvf which artifact id can be uniquely identified by the given number. + /// Creates a new PVF which artifact id can be uniquely identified by the given number. 
fn artifact_id(descriminator: u32) -> ArtifactId { Pvf::from_discriminator(descriminator).as_artifact_id() } diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index b65d5797cea8..04719944cbca 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -23,14 +23,14 @@ //! //! Then using the handle the client can send two types of requests: //! -//! (a) PVF execution. This accepts the PVF [params][`polkadot_parachain::primitives::ValidationParams`] +//! (a) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`] //! and the PVF [code][`Pvf`], prepares (verifies and compiles) the code, and then executes PVF -//! with the params. +//! with the `params`. //! //! (b) Heads up. This request allows to signal that the given PVF may be needed soon and that it //! should be prepared for execution. //! -//! The preparation results are cached for some time after they either used or was signalled in heads up. +//! The preparation results are cached for some time after they either used or was signaled in heads up. //! All requests that depends on preparation of the same PVF are bundled together and will be executed //! as soon as the artifact is prepared. //! @@ -70,7 +70,7 @@ //! //! The execute workers will be fed by the requests from the execution queue, which is basically a //! combination of a path to the compiled artifact and the -//! [params][`polkadot_parachain::primitives::ValidationParams`]. +//! [`params`][`polkadot_parachain::primitives::ValidationParams`]. //! //! Each fixed interval of time a pruning task will run. This task will remove all artifacts that //! weren't used or received a heads up signal for a while. diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs index ca7b6f65d52d..2c92e023f7a7 100644 --- a/node/core/pvf/src/prepare/pool.rs +++ b/node/core/pvf/src/prepare/pool.rs @@ -80,7 +80,7 @@ pub enum FromPool { Spawned(Worker), /// The given worker either succeeded or failed the given job. Under any circumstances the - /// artifact file has been written. The bool says whether the worker ripped. + /// artifact file has been written. The `bool` says whether the worker ripped. Concluded(Worker, bool), /// The given worker ceased to exist. diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index b81a47ee9918..53ccc0d6cb4a 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -530,7 +530,7 @@ mod tests { use std::task::Poll; use super::*; - /// Creates a new pvf which artifact id can be uniquely identified by the given number. + /// Creates a new PVF which artifact id can be uniquely identified by the given number. fn pvf(descriminator: u32) -> Pvf { Pvf::from_discriminator(descriminator) } diff --git a/node/core/pvf/src/prepare/worker.rs b/node/core/pvf/src/prepare/worker.rs index a7854e83e6bb..2de955eb5a79 100644 --- a/node/core/pvf/src/prepare/worker.rs +++ b/node/core/pvf/src/prepare/worker.rs @@ -273,7 +273,7 @@ fn renice(pid: u32, niceness: i32) { } } -/// The entrypoint that the spawned prepare worker should start with. The socket_path specifies +/// The entrypoint that the spawned prepare worker should start with. The `socket_path` specifies /// the path to the socket used to communicate with the host. 
pub fn worker_entrypoint(socket_path: &str) { worker_event_loop("prepare", socket_path, |mut stream| async move { diff --git a/node/core/pvf/src/pvf.rs b/node/core/pvf/src/pvf.rs index 00c0777a5489..901cc1c70d6e 100644 --- a/node/core/pvf/src/pvf.rs +++ b/node/core/pvf/src/pvf.rs @@ -42,7 +42,7 @@ impl Pvf { Self { code, code_hash } } - /// Creates a new pvf which artifact id can be uniquely identified by the given number. + /// Creates a new PVF which artifact id can be uniquely identified by the given number. #[cfg(test)] pub(crate) fn from_discriminator(num: u32) -> Self { let descriminator_buf = num.to_le_bytes().to_vec(); diff --git a/node/core/pvf/src/worker_common.rs b/node/core/pvf/src/worker_common.rs index 46d0b730b929..02f7bce4ea63 100644 --- a/node/core/pvf/src/worker_common.rs +++ b/node/core/pvf/src/worker_common.rs @@ -177,7 +177,7 @@ pub enum SpawnErr { Accept, /// An error happened during spawning the process. ProcessSpawn, - /// The deadline alloted for the worker spawning and connecting to the socket has elapsed. + /// The deadline allotted for the worker spawning and connecting to the socket has elapsed. AcceptTimeout, } @@ -187,7 +187,7 @@ pub enum SpawnErr { /// has been terminated. Since the worker is running in another process it is obviously not necessarily /// to poll this future to make the worker run, it's only for termination detection. /// -/// This future relies on the fact that a child process's stdout fd is closed upon it's termination. +/// This future relies on the fact that a child process's stdout `fd` is closed upon it's termination. #[pin_project] pub struct WorkerHandle { child: async_process::Child, diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 5cba9bbb7611..54eb95b4cdb6 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -51,10 +51,10 @@ mod tests; const LOG_TARGET: &str = "parachain::runtime-api"; -/// The number of maximum runtime api requests can be executed in parallel. Further requests will be buffered. +/// The number of maximum runtime API requests can be executed in parallel. Further requests will be buffered. const MAX_PARALLEL_REQUESTS: usize = 4; -/// The name of the blocking task that executes a runtime api request. +/// The name of the blocking task that executes a runtime API request. const API_REQUEST_TASK_NAME: &str = "polkadot-runtime-api-request"; /// The `RuntimeApiSubsystem`. See module docs for more details. @@ -67,7 +67,7 @@ pub struct RuntimeApiSubsystem { Pin + Send>>, oneshot::Receiver>, )>, - /// All the active runtime api requests that are currently being executed. + /// All the active runtime API requests that are currently being executed. active_requests: FuturesUnordered>>, /// Requests results cache requests_cache: RequestResultCache, @@ -210,7 +210,7 @@ impl RuntimeApiSubsystem where } } - /// Spawn a runtime api request. + /// Spawn a runtime API request. /// /// If there are already [`MAX_PARALLEL_REQUESTS`] requests being executed, the request will be buffered. fn spawn_request(&mut self, relay_parent: Hash, request: Request) { @@ -239,7 +239,7 @@ impl RuntimeApiSubsystem where if self.waiting_requests.len() > MAX_PARALLEL_REQUESTS * 10 { tracing::warn!( target: LOG_TARGET, - "{} runtime api requests waiting to be executed.", + "{} runtime API requests waiting to be executed.", self.waiting_requests.len(), ) } @@ -249,7 +249,7 @@ impl RuntimeApiSubsystem where } } - /// Poll the active runtime api requests. 
+ /// Poll the active runtime API requests. async fn poll_requests(&mut self) { // If there are no active requests, this future should be pending forever. if self.active_requests.len() == 0 { diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs index 4b88966f3f96..dc8c329316ee 100644 --- a/node/jaeger/src/lib.rs +++ b/node/jaeger/src/lib.rs @@ -84,13 +84,13 @@ impl Jaeger { Jaeger::Prep(cfg) } - /// Spawn the background task in order to send the tracing information out via udp + /// Spawn the background task in order to send the tracing information out via UDP #[cfg(target_os = "unknown")] pub fn launch(self, _spawner: S) -> result::Result<(), JaegerError> { Ok(()) } - /// Spawn the background task in order to send the tracing information out via udp + /// Spawn the background task in order to send the tracing information out via UDP #[cfg(not(target_os = "unknown"))] pub fn launch(self, spawner: S) -> result::Result<(), JaegerError> { let cfg = match self { diff --git a/node/jaeger/src/spans.rs b/node/jaeger/src/spans.rs index 4f3114d39b59..36e63cadcc7b 100644 --- a/node/jaeger/src/spans.rs +++ b/node/jaeger/src/spans.rs @@ -326,7 +326,7 @@ impl Span { /// Add an additional int tag to the span without consuming. /// - /// Should be used sparingly, introduction of new types is prefered. + /// Should be used sparingly, introduction of new types is preferred. #[inline(always)] pub fn with_int_tag(mut self, tag: &'static str, i: i64) -> Self { self.add_int_tag(tag, i); @@ -354,11 +354,11 @@ impl Span { } } - /// Add a pov hash meta tag with lazy hash eval, without consuming the span. + /// Add a PoV hash meta tag with lazy hash evaluation, without consuming the span. #[inline(always)] pub fn add_pov(&mut self, pov: &PoV) { if self.is_enabled() { - // avoid computing the pov hash if jaeger is not enabled + // avoid computing the PoV hash if jaeger is not enabled self.add_string_fmt_debug_tag("pov", pov.hash()); } } diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index 084a02b9591a..bb8a6601d07d 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -8,7 +8,7 @@ path = "src/variant-a.rs" [package] name = "polkadot-test-malus" -description = "Misbehaving nodes for local testnets, system and simnet tests." +description = "Misbehaving nodes for local testnets, system and Simnet tests." license = "GPL-3.0-only" version = "0.9.8" authors = ["Parity Technologies "] diff --git a/node/malus/src/lib.rs b/node/malus/src/lib.rs index 516d4840b3c4..8939f917d0ff 100644 --- a/node/malus/src/lib.rs +++ b/node/malus/src/lib.rs @@ -27,7 +27,7 @@ use std::pin::Pin; /// Filter incoming and outgoing messages. pub trait MsgFilter: Send + Sync + Clone + 'static { - /// The message type the original subsystm handles incoming. + /// The message type the original subsystem handles incoming. type Message: Send + 'static; /// Filter messages that are to be received by diff --git a/node/malus/src/variant-a.rs b/node/malus/src/variant-a.rs index 6b89f64071ec..f53e9d36f84a 100644 --- a/node/malus/src/variant-a.rs +++ b/node/malus/src/variant-a.rs @@ -18,7 +18,7 @@ //! //! An example on how to use the `OverseerGen` pattern to //! instantiate a modified subsystem implementation -//! for usage with simnet/gurke. +//! for usage with `simnet`/Gurke. 
#![allow(missing_docs)] diff --git a/node/metrics/src/lib.rs b/node/metrics/src/lib.rs index 7a149a553b7c..6ee207779257 100644 --- a/node/metrics/src/lib.rs +++ b/node/metrics/src/lib.rs @@ -49,7 +49,7 @@ pub mod metrics { /// Try to register metrics in the Prometheus registry. fn try_register(registry: &prometheus::Registry) -> Result; - /// Convenience method to register metrics in the optional Promethius registry. + /// Convenience method to register metrics in the optional Prometheus registry. /// /// If no registry is provided, returns `Default::default()`. Otherwise, returns the same /// thing that `try_register` does. diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index a0d55c3371d3..00f862e35fce 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -274,11 +274,11 @@ fn try_import_the_same_assignment() { }); } -/// https://github.com/paritytech/polkadot/pull/2160#discussion_r547594835 +/// /// /// 1. Send a view update that removes block B from their view. -/// 2. Send a message from B that they incur COST_UNEXPECTED_MESSAGE for, -/// but then they receive BENEFIT_VALID_MESSAGE. +/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, +/// but then they receive `BENEFIT_VALID_MESSAGE`. /// 3. Send all other messages related to B. #[test] fn spam_attack_results_in_negative_reputation_change() { @@ -360,7 +360,7 @@ fn spam_attack_results_in_negative_reputation_change() { /// Upon receiving them, they both will try to send the message each other. /// This test makes sure they will not punish each other for such duplicate messages. /// -/// See https://github.com/paritytech/polkadot/issues/2499. +/// See . #[test] fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let parent_hash = Hash::repeat_byte(0xFF); diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index c936d443fc6b..0b2aeb6d41f2 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -72,7 +72,7 @@ enum FetchedState { /// /// Once the contained `Sender` is dropped, any still running task will be canceled. Started(oneshot::Sender<()>), - /// All relevant live_in have been removed, before we were able to get our chunk. + /// All relevant `live_in` have been removed, before we were able to get our chunk. Canceled, } @@ -118,7 +118,7 @@ struct RunningTask { /// Sender for communicating with other subsystems and reporting results. sender: mpsc::Sender, - /// Prometheues metrics for reporting results. + /// Prometheus metrics for reporting results. metrics: Metrics, /// Span tracking the fetching of this chunk. diff --git a/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/node/network/availability-distribution/src/requester/fetch_task/tests.rs index 240cf8c5e9a6..1a770fb641c8 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/tests.rs @@ -199,7 +199,7 @@ fn task_stores_valid_chunk_if_there_is_one() { struct TestRun { /// Response to deliver for a given validator index. - /// None means, answer with NetworkError. + /// None means, answer with `NetworkError`. 
chunk_responses: HashMap, /// Set of chunks that should be considered valid: valid_chunks: HashSet>, @@ -238,7 +238,7 @@ impl TestRun { }); } - /// Returns true, if after processing of the given message it would be ok for the stream to + /// Returns true, if after processing of the given message it would be OK for the stream to /// end. async fn handle_message(&self, msg: AllMessages) -> bool { match msg { diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index 68ebe90ca0b1..57bc94686423 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -153,8 +153,8 @@ impl Requester { /// /// Starting requests where necessary. /// - /// Note: The passed in `leaf` is not the same as CandidateDescriptor::relay_parent in the - /// given cores. The latter is the relay_parent this candidate considers its parent, while the + /// Note: The passed in `leaf` is not the same as `CandidateDescriptor::relay_parent` in the + /// given cores. The latter is the `relay_parent` this candidate considers its parent, while the /// passed in leaf might be some later block where the candidate is still pending availability. async fn add_cores( &mut self, diff --git a/node/network/availability-distribution/src/requester/session_cache.rs b/node/network/availability-distribution/src/requester/session_cache.rs index 60503cec8531..8c62c0cd3254 100644 --- a/node/network/availability-distribution/src/requester/session_cache.rs +++ b/node/network/availability-distribution/src/requester/session_cache.rs @@ -35,7 +35,7 @@ use crate::{ /// It should be ensured that a cached session stays live in the cache as long as we might need it. pub struct SessionCache { - /// Look up cached sessions by SessionIndex. + /// Look up cached sessions by `SessionIndex`. /// /// Note: Performance of fetching is really secondary here, but we need to ensure we are going /// to get any existing cache entry, before fetching new information, as we should not mess up diff --git a/node/network/availability-distribution/src/responder.rs b/node/network/availability-distribution/src/responder.rs index e45574b5a6ed..0805b9d332d6 100644 --- a/node/network/availability-distribution/src/responder.rs +++ b/node/network/availability-distribution/src/responder.rs @@ -85,7 +85,7 @@ where /// Answer an incoming PoV fetch request by querying the av store. /// -/// Returns: Ok(true) if chunk was found and served. +/// Returns: `Ok(true)` if chunk was found and served. pub async fn answer_pov_request( ctx: &mut Context, req: IncomingRequest, @@ -113,7 +113,7 @@ where /// Answer an incoming chunk request by querying the av store. /// -/// Returns: Ok(true) if chunk was found and served. +/// Returns: `Ok(true)` if chunk was found and served. pub async fn answer_chunk_request( ctx: &mut Context, req: IncomingRequest, diff --git a/node/network/availability-distribution/src/tests/state.rs b/node/network/availability-distribution/src/tests/state.rs index 3d8ea8f40a23..cceb6b95cff1 100644 --- a/node/network/availability-distribution/src/tests/state.rs +++ b/node/network/availability-distribution/src/tests/state.rs @@ -57,7 +57,7 @@ pub struct TestHarness { pub pool: TaskExecutor, } -/// TestState for mocking execution of this subsystem. +/// `TestState` for mocking execution of this subsystem. 
/// /// The `Default` instance provides data, which makes the system succeed by providing a couple of /// valid occupied cores. You can tune the data before calling `TestState::run`. E.g. modify some diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 6f963e97c2aa..031343cd76e4 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -53,7 +53,7 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_subsystem_util::metrics::{self, prometheus}; -/// Peer set infos for network initialization. +/// Peer set info for network initialization. /// /// To be added to [`NetworkConfiguration::extra_sets`]. pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority}; diff --git a/node/network/bridge/src/multiplexer.rs b/node/network/bridge/src/multiplexer.rs index 0c750bd048b7..ca8102bdec0b 100644 --- a/node/network/bridge/src/multiplexer.rs +++ b/node/network/bridge/src/multiplexer.rs @@ -39,7 +39,7 @@ use polkadot_overseer::AllMessages; /// /// The resulting stream will end once any of its input ends. /// -/// TODO: Get rid of this: https://github.com/paritytech/polkadot/issues/2842 +// TODO: Get rid of this: <https://github.com/paritytech/polkadot/issues/2842> pub struct RequestMultiplexer { receivers: Vec<(Protocol, mpsc::Receiver)>, statement_fetching: Option>, diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 3ffdb4baa114..4c0139f022c9 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -232,7 +232,7 @@ impl Network for Arc> { } } -/// We assume one peer_id per authority_id. +/// We assume one `peer_id` per `authority_id`. pub async fn get_peer_id_by_authority_id( authority_discovery: &mut AD, authority: AuthorityDiscoveryId, diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index c17fe42c0f30..46f575c0f38e 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -56,7 +56,7 @@ impl Service { /// /// This method will also disconnect from previously connected validators not in the `validator_ids` set. /// it takes `network_service` and `authority_discovery_service` by value - /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. + /// and returns them as a workaround for the `Future: Send` requirement imposed by the `async fn` implementation. pub async fn on_request( &mut self, validator_ids: Vec, diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 6928ed74832d..51cf87ea9139 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -58,7 +58,7 @@ const COST_APPARENT_FLOOD: Rep = Rep::CostMinor("Message received when previous /// /// This is to protect from a single slow validator preventing collations from happening. /// -/// With a collation size of 5Meg and bandwidth of 500Mbit/s (requirement for Kusama validators), +/// With a collation size of 5MB and bandwidth of 500Mbit/s (requirement for Kusama validators), /// the transfer should be possible within 0.1 seconds. 400 milliseconds should therefore be /// plenty and should be low enough for later validators to still be able to finish on time.
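The 400 ms figure above is simple arithmetic. As a sanity check, here is a hedged back-of-the-envelope sketch; the constant names are illustrative and not taken from the subsystem:

```rust
// Back-of-the-envelope check of the timeout reasoning above (names are
// illustrative, not the subsystem's actual constants). 500 Mbit/s is
// 62.5 MB/s, so a ~5 MB collation transfers in roughly 80-90 ms,
// comfortably inside a 400 ms budget.
const COLLATION_SIZE_BYTES: u64 = 5 * 1024 * 1024; // ~5 MB
const BANDWIDTH_BYTES_PER_SECOND: u64 = 500_000_000 / 8; // 500 Mbit/s
const EXPECTED_TRANSFER_MS: u64 =
    COLLATION_SIZE_BYTES * 1_000 / BANDWIDTH_BYTES_PER_SECOND;

fn main() {
    assert!(EXPECTED_TRANSFER_MS < 400);
    println!("expected transfer time: ~{} ms", EXPECTED_TRANSFER_MS);
}
```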
/// diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests.rs index 40ab2610b127..3b610a9ca873 100644 --- a/node/network/collator-protocol/src/collator_side/tests.rs +++ b/node/network/collator-protocol/src/collator_side/tests.rs @@ -863,7 +863,7 @@ fn collators_reject_declare_messages() { /// /// After the first response is done, the passed in lambda will be called with the receiver for the /// next response and a sender for giving feedback on the response of the first transmission. After -/// the lamda has passed it is assumed that the second response is sent, which is checked by this +/// the lambda has passed it is assumed that the second response is sent, which is checked by this /// function. /// /// The lambda can trigger occasions on which the second response should be sent, like timeouts, diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 67c767195b1b..3795c0488b61 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -71,7 +71,7 @@ const BENEFIT_NOTIFY_GOOD: Rep = Rep::BenefitMinor("A collator was noted good by /// /// This is to protect from a single slow collator preventing collations from happening. /// -/// With a collation size of 5Meg and bandwidth of 500Mbit/s (requirement for Kusama validators), +/// With a collation size of 5MB and bandwidth of 500Mbit/s (requirement for Kusama validators), /// the transfer should be possible within 0.1 seconds. 400 milliseconds should therefore be /// plenty, even with multiple heads and should be low enough for later collators to still be able /// to finish on time. @@ -718,7 +718,7 @@ where } /// A peer's view has changed. A number of things should be done: -/// - Ongoing collation requests have to be cancelled. +/// - Ongoing collation requests have to be canceled. /// - Advertisements by this peer that are no longer relevant have to be removed. async fn handle_peer_view_change( state: &mut State, @@ -738,7 +738,7 @@ async fn handle_peer_view_change( /// This function will /// - Check for duplicate requests. /// - Check if the requested collation is in our view. -/// - Update PerRequest records with the `result` field if necessary. +/// - Update `PerRequest` records with the `result` field if necessary. /// And as such invocations of this function may rely on that. async fn request_collation( ctx: &mut Context, diff --git a/node/network/dispute-distribution/src/error.rs b/node/network/dispute-distribution/src/error.rs index f56d250648bc..5c0690139d2d 100644 --- a/node/network/dispute-distribution/src/error.rs +++ b/node/network/dispute-distribution/src/error.rs @@ -62,15 +62,15 @@ pub enum Fatal { #[error("Spawning subsystem task failed")] SpawnTask(#[source] SubsystemError), - /// DisputeSender mpsc receiver exhausted. + /// `DisputeSender` mpsc receiver exhausted. #[error("Erasure chunk requester stream exhausted")] SenderExhausted, - /// Errors coming from runtime::Runtime. + /// Errors coming from `runtime::Runtime`. #[error("Error while accessing runtime information")] Runtime(#[from] runtime::Fatal), - /// Errors coming from DisputeSender + /// Errors coming from `DisputeSender` #[error("Error while accessing runtime information")] Sender(#[from] sender::Fatal), } @@ -78,7 +78,7 @@ pub enum Fatal { /// Non-fatal errors of this subsystem. 
#[derive(Debug, Error)] pub enum NonFatal { - /// Errors coming from DisputeSender + /// Errors coming from `DisputeSender` #[error("Error while accessing runtime information")] Sender(#[from] sender::NonFatal), } diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index 83ca4ba3077d..a01afae91bfb 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ b/node/network/dispute-distribution/src/receiver/mod.rs @@ -103,7 +103,7 @@ enum MuxedMessage { /// /// - We need to make sure responses are actually sent (therefore we need to await futures /// promptly). - /// - We need to update banned_peers accordingly to the result. + /// - We need to update `banned_peers` according to the result. ConfirmedImport(NonFatalResult<(PeerId, ImportStatementsResult)>), /// A new request has arrived and should be handled. diff --git a/node/network/dispute-distribution/src/tests/mock.rs b/node/network/dispute-distribution/src/tests/mock.rs index ca057ce49e2a..b8d1986cbd1a 100644 --- a/node/network/dispute-distribution/src/tests/mock.rs +++ b/node/network/dispute-distribution/src/tests/mock.rs @@ -56,7 +56,7 @@ pub const ALICE_INDEX: ValidatorIndex = ValidatorIndex(1); lazy_static! { -/// Mocked AuthorityDiscovery service. +/// Mocked `AuthorityDiscovery` service. pub static ref MOCK_AUTHORITY_DISCOVERY: MockAuthorityDiscovery = MockAuthorityDiscovery::new(); // Creating an innocent looking `SessionInfo` is really expensive in a debug build. Around // 700ms on my machine, We therefore cache those keys here: @@ -80,7 +80,7 @@ pub static ref MOCK_SESSION_INFO: SessionInfo = ..Default::default() }; -/// SessionInfo for the second session. (No more validators, but two more authorities. +/// `SessionInfo` for the second session. (No more validators, but two more authorities.) pub static ref MOCK_NEXT_SESSION_INFO: SessionInfo = SessionInfo { discovery_keys: diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index fc56e75febd7..d18334705c26 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -199,12 +199,12 @@ where failed_rx } -/// We partition the list of all sorted `authorities` into sqrt(len) groups of sqrt(len) size +/// We partition the list of all sorted `authorities` into `sqrt(len)` groups of `sqrt(len)` size /// and form a matrix where each validator is connected to all validators in its row and column. -/// This is similar to [web3] research proposed topology, except for the groups are not parachain +/// This is similar to the topology proposed by `[web3]` research, except that the groups are not parachain /// groups (because not all validators are parachain validators and the group size is small), /// but formed randomly via BABE randomness from two epochs ago. -/// This limits the amount of gossip peers to 2 * sqrt(len) and ensures the diameter of 2. +/// This limits the number of gossip peers to 2 * `sqrt(len)` and ensures a diameter of 2. /// /// [web3]: https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology async fn update_gossip_topology( diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 8ce017f8bd5c..4282489babe1 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -284,7 +284,7 @@ impl View { /// Check if two views have the same heads.
/// - /// Equivalent to the `PartialEq` fn, + /// Equivalent to the `PartialEq` function, /// but ignores the `finalized_number` field. pub fn check_heads_eq(&self, other: &Self) -> bool { self.heads == other.heads @@ -325,7 +325,7 @@ pub mod v1 { /// Seconded statement with large payload (e.g. containing a runtime upgrade). /// /// We only gossip the hash in that case, actual payloads can be fetched from sending node - /// via req/response. + /// via request/response. #[codec(index = 1)] LargeStatement(StatementMetadata), } diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index c264c5d2ee0c..c2de3e526862 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -16,18 +16,18 @@ //! Overview over request/responses as used in `Polkadot`. //! -//! enum Protocol .... List of all supported protocols. +//! `enum Protocol` .... List of all supported protocols. //! -//! enum Requests .... List of all supported requests, each entry matches one in protocols, but +//! `enum Requests` .... List of all supported requests, each entry matches one in protocols, but //! has the actual request as payload. //! -//! struct IncomingRequest .... wrapper for incoming requests, containing a sender for sending +//! `struct IncomingRequest` .... wrapper for incoming requests, containing a sender for sending //! responses. //! -//! struct OutgoingRequest .... wrapper for outgoing requests, containing a sender used by the +//! `struct OutgoingRequest` .... wrapper for outgoing requests, containing a sender used by the //! networking code for delivering responses/delivery errors. //! -//! trait `IsRequest` .... A trait describing a particular request. It is used for gathering meta +//! `trait IsRequest` .... A trait describing a particular request. It is used for gathering meta //! data, like what is the corresponding response type. //! //! Versioned (v1 module): The actual requests and responses as sent over the network. @@ -72,7 +72,7 @@ pub enum Protocol { /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately -/// 50Meg bytes per second: +/// 50MB per second: const MIN_BANDWIDTH_BYTES: u64 = 50 * 1024 * 1024; /// Default request timeout in seconds. diff --git a/node/network/protocol/src/request_response/request.rs b/node/network/protocol/src/request_response/request.rs index 1d26ddf1f429..736b93389c38 100644 --- a/node/network/protocol/src/request_response/request.rs +++ b/node/network/protocol/src/request_response/request.rs @@ -79,7 +79,7 @@ impl Requests { /// /// Note: `Requests` is just an enum collecting all supported requests supported by network /// bridge, it is never sent over the wire. This function just encodes the individual requests - /// contained in the enum. + /// contained in the `enum`. pub fn encode_request(self) -> (Protocol, OutgoingRequest>) { match self { Self::ChunkFetching(r) => r.encode_request(), @@ -219,7 +219,7 @@ impl From for RequestError { /// `IncomingRequest`s are produced by `RequestMultiplexer` on behalf of the network bridge. #[derive(Debug)] pub struct IncomingRequest { - /// PeerId of sending peer. + /// `PeerId` of sending peer. pub peer: PeerId, /// The sent request. pub payload: Req, @@ -227,7 +227,7 @@ pub struct IncomingRequest { pub pending_response: OutgoingResponseSender, } -/// Sender for sendinb back responses on an `IncomingRequest`. 
+/// Sender for sending back responses on an `IncomingRequest`. #[derive(Debug)] pub struct OutgoingResponseSender{ pending_response: oneshot::Sender, @@ -241,9 +241,9 @@ where { /// Send the response back. /// - /// On success we return Ok(()), on error we return the not sent `Response`. + /// On success we return `Ok(())`, on error we return the unsent `Response`. /// - /// netconfig::OutgoingResponse exposes a way of modifying the peer's reputation. If needed we + /// `netconfig::OutgoingResponse` exposes a way of modifying the peer's reputation. If needed we /// can change this function to expose this feature as well. pub fn send_response(self, resp: Req::Response) -> Result<(), Req::Response> { self.pending_response @@ -375,7 +375,7 @@ where } } -/// Future for actually receiving a typed response for an OutgoingRequest. +/// Future for actually receiving a typed response for an `OutgoingRequest`. async fn receive_response( rec: oneshot::Receiver, network::RequestFailure>>, ) -> OutgoingResult diff --git a/node/network/protocol/src/request_response/v1.rs b/node/network/protocol/src/request_response/v1.rs index 15307d3362b7..6dcc3552e683 100644 --- a/node/network/protocol/src/request_response/v1.rs +++ b/node/network/protocol/src/request_response/v1.rs @@ -172,7 +172,7 @@ impl IsRequest for AvailableDataFetchingRequest { pub struct StatementFetchingRequest { /// Data needed to locate and identify the needed statement. pub relay_parent: Hash, - /// Hash of candidate that was used create the CommitedCandidateRecept. + /// Hash of the candidate that was used to create the `CommittedCandidateReceipt`. pub candidate_hash: CandidateHash, } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 81e87d8ba35c..8d0cecdd63b4 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -17,7 +17,7 @@ //! The Statement Distribution Subsystem. //! //! This is responsible for distributing signed statements about candidate -//! validity amongst validators. +//! validity among validators. #![deny(unused_crate_dependencies)] #![warn(missing_docs)] @@ -208,7 +208,7 @@ struct PeerRelayParentKnowledge { /// How many large statements this peer already sent us. /// /// Flood protection for large statements is rather hard and as soon as we get - /// https://github.com/paritytech/polkadot/issues/2979 implemented also no longer necessary. + /// `https://github.com/paritytech/polkadot/issues/2979` implemented also no longer necessary. /// Reason: We keep messages around until we fetched the payload, but if a node makes up /// statements and never provides the data, we will keep it around for the slot duration. Not /// even signature checking would help, as the sender, if a validator, can just sign arbitrary @@ -290,7 +290,7 @@ impl PeerRelayParentKnowledge { /// Provide the maximum message count that we can receive per candidate. In practice we should /// not receive more statements for any one candidate than there are members in the group assigned /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * n_validators is recommended. + /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. /// /// This returns an error if the peer should not have sent us this message according to protocol /// rules for flood protection.
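The `2 * n_validators` flood-protection rule documented above can be illustrated with a small, self-contained sketch; the struct and method names here are hypothetical, not the subsystem's actual API:

```rust
// Hedged sketch of the per-candidate flood-protection limit described above:
// accept at most `2 * n_validators` statements per candidate from a peer.
use std::collections::HashMap;

#[derive(Default)]
struct PerCandidateCounts {
    // Candidate hash -> number of statements already accepted from this peer.
    received: HashMap<[u8; 32], usize>,
}

impl PerCandidateCounts {
    /// Returns `Err(())` if accepting this message would exceed the limit.
    fn note_received(&mut self, candidate: [u8; 32], n_validators: usize) -> Result<(), ()> {
        let max_message_count = 2 * n_validators;
        let count = self.received.entry(candidate).or_default();
        if *count >= max_message_count {
            return Err(());
        }
        *count += 1;
        Ok(())
    }
}
```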
@@ -459,7 +459,7 @@ impl PeerData { /// Provide the maximum message count that we can receive per candidate. In practice we should /// not receive more statements for any one candidate than there are members in the group assigned /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * n_validators is recommended. + /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. /// /// This returns an error if the peer should not have sent us this message according to protocol /// rules for flood protection. diff --git a/node/network/statement-distribution/src/requester.rs b/node/network/statement-distribution/src/requester.rs index f2430ed10d75..47201e3ce729 100644 --- a/node/network/statement-distribution/src/requester.rs +++ b/node/network/statement-distribution/src/requester.rs @@ -45,7 +45,7 @@ pub enum RequesterMessage { candidate_hash: CandidateHash, tx: oneshot::Sender> }, - /// Fetching finished, ask for verification. If verification failes, task will continue asking + /// Fetching finished, ask for verification. If verification fails, task will continue asking /// peers for data. Finished { /// Relay parent this candidate is in the context of. diff --git a/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs b/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs index b3406b62f7eb..652337faeaea 100644 --- a/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs +++ b/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs @@ -42,7 +42,7 @@ pub(crate) fn impl_misc(info: &OverseerInfo) -> proc_macro2::TokenStream { signals_received: SignalsReceived, } - /// impl for wrapping message type... + /// implementation for wrapping message type... #[#support_crate ::async_trait] impl SubsystemSender< #wrapper_message > for #subsystem_sender_name { async fn send_message(&mut self, msg: #wrapper_message) { diff --git a/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs b/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs index 3c578fdf4a51..accb006f36bc 100644 --- a/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs +++ b/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs @@ -98,7 +98,7 @@ pub(crate) fn impl_overseer_struct(info: &OverseerInfo) -> proc_macro2::TokenStr } impl #generics #overseer_name #generics #where_clause { - /// Send the given signal, a terminatin signal, to all subsystems + /// Send the given signal, a termination signal, to all subsystems /// and wait for all subsystems to go down. /// /// The definition of a termination signal is up to the user and diff --git a/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs b/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs index 2448cfb143b2..280b0cb6a9b8 100644 --- a/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs +++ b/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs @@ -86,14 +86,14 @@ pub(crate) struct SubSysField { /// Type to be consumed by the subsystem. pub(crate) consumes: Path, /// If `no_dispatch` is present, if the message is incoming via - /// an extern `Event`, it will not be dispatched to all subsystems. + /// an `extern` `Event`, it will not be dispatched to all subsystems. pub(crate) no_dispatch: bool, /// If the subsystem implementation is blocking execution and hence /// has to be spawned on a separate thread or thread pool. pub(crate) blocking: bool, /// The subsystem is a work in progress. 
/// Avoids dispatching `Wrapper` type messages, but generates the variants. - /// Does not require the subsystem to be instanciated with the builder pattern. + /// Does not require the subsystem to be instantiated with the builder pattern. pub(crate) wip: bool, } @@ -133,7 +133,7 @@ pub(crate) struct SubSystemTags { pub(crate) attrs: Vec, #[allow(dead_code)] pub(crate) no_dispatch: bool, - /// The subsystem is WIP, only generate the `Wrapper` variant, but do not forward messages + /// The subsystem is in progress, only generate the `Wrapper` variant, but do not forward messages /// and also not include the subsystem in the list of subsystems. pub(crate) wip: bool, pub(crate) blocking: bool, diff --git a/node/overseer/overseer-gen/src/lib.rs b/node/overseer/overseer-gen/src/lib.rs index a112820ca92b..6471c1803403 100644 --- a/node/overseer/overseer-gen/src/lib.rs +++ b/node/overseer/overseer-gen/src/lib.rs @@ -225,7 +225,7 @@ pub trait AnnotateErrorOrigin: 'static + Send + Sync + std::error::Error { /// An asynchronous subsystem task.. /// -/// In essence it's just a newtype wrapping a `BoxFuture`. +/// In essence it's just a new type wrapping a `BoxFuture`. pub struct SpawnedSubsystem where E: std::error::Error @@ -366,12 +366,12 @@ impl From for FromOverseer { #[async_trait::async_trait] pub trait SubsystemContext: Send + 'static { /// The message type of this context. Subsystems launched with this context will expect - /// to receive messages of this type. Commonly uses the wrapping enum commonly called + /// to receive messages of this type. Commonly uses the wrapping `enum` commonly called /// `AllMessages`. type Message: std::fmt::Debug + Send + 'static; /// And the same for signals. type Signal: std::fmt::Debug + Send + 'static; - /// The overarching all messages enum. + /// The overarching all messages `enum`. /// In some cases can be identical to `Self::Message`. type AllMessages: From + Send + 'static; /// The sender type as provided by `sender()` and underlying. diff --git a/node/overseer/src/metrics.rs b/node/overseer/src/metrics.rs index 3563f23fbd4b..8cf7bb93ecc1 100644 --- a/node/overseer/src/metrics.rs +++ b/node/overseer/src/metrics.rs @@ -34,7 +34,7 @@ struct MetricsInner { } -/// A sharable metrics type for usage with the overseer. +/// A shareable metrics type for usage with the overseer. #[derive(Default, Clone)] pub struct Metrics(Option); diff --git a/node/overseer/src/subsystems.rs b/node/overseer/src/subsystems.rs index b75e7e50e6b8..3b3894b8369e 100644 --- a/node/overseer/src/subsystems.rs +++ b/node/overseer/src/subsystems.rs @@ -17,7 +17,7 @@ //! Legacy way of defining subsystems. //! //! In the future, everything should be set up using the generated -//! overeseer builder pattern instead. +//! overseer builder pattern instead. use polkadot_node_subsystem_types::errors::SubsystemError; use polkadot_overseer_gen::{ @@ -170,7 +170,7 @@ impl } } - /// Reference every indidviudal subsystem. + /// Reference every individual subsystem. 
pub fn as_ref(&self) -> AllSubsystems<&'_ CV, &'_ CB, &'_ SD, &'_ AD, &'_ AR, &'_ BS, &'_ BD, &'_ P, &'_ RA, &'_ AS, &'_ NB, &'_ CA, &'_ CG, &'_ CP, &'_ ApD, &'_ ApV, &'_ GS> { AllSubsystems { candidate_validation: &self.candidate_validation, diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 743c37f32759..c57b3844e42e 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -86,7 +86,7 @@ pub struct AssignmentCert { pub vrf: (VRFOutput, VRFProof), } -/// An assignment crt which refers to the candidate under which the assignment is +/// An assignment criterion which refers to the candidate under which the assignment is /// relevant by block hash. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub struct IndirectAssignmentCert { diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index d703eb456ce8..f52047999255 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -27,7 +27,7 @@ use polkadot_primitives::v1::{CandidateReceipt, DisputeStatement, SessionIndex, use super::{InvalidDisputeVote, SignedDisputeStatement, ValidDisputeVote}; -/// A dispute initiating/participtating message that is guaranteed to have been built from signed +/// A dispute initiating/participating message that is guaranteed to have been built from signed /// statements. /// /// And most likely has been constructed correctly. This is used with @@ -102,7 +102,7 @@ impl DisputeMessage { /// - the invalid statement is indeed an invalid one /// - the valid statement is indeed a valid one /// - The passed `CandidateReceipt` has the correct hash (as signed in the statements). - /// - the given validator indeces match with the given `ValidatorId`s in the statements, + /// - the given validator indices match with the given `ValidatorId`s in the statements, /// given a `SessionInfo`. /// /// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the @@ -210,7 +210,7 @@ impl DisputeMessage { } impl UncheckedDisputeMessage { - /// Try to recover the two signed dispute votes from an UncheckedDisputeMessage. + /// Try to recover the two signed dispute votes from an `UncheckedDisputeMessage`. 
pub fn try_into_signed_votes(self, session_info: &SessionInfo) -> Result<(CandidateReceipt, (SignedDisputeStatement, ValidatorIndex), (SignedDisputeStatement, ValidatorIndex)), ()> { diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 57e07b2bf0f7..52e2e2280f9a 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -1166,7 +1166,7 @@ fn testnet_accounts() -> Vec { ] } -/// Helper function to create polkadot GenesisConfig for testing +/// Helper function to create polkadot `GenesisConfig` for testing pub fn polkadot_testnet_genesis( wasm_binary: &[u8], initial_authorities: Vec<( @@ -1264,7 +1264,7 @@ pub fn polkadot_testnet_genesis( } } -/// Helper function to create kusama GenesisConfig for testing +/// Helper function to create kusama `GenesisConfig` for testing #[cfg(feature = "kusama-native")] pub fn kusama_testnet_genesis( wasm_binary: &[u8], @@ -1368,7 +1368,7 @@ pub fn kusama_testnet_genesis( } } -/// Helper function to create westend GenesisConfig for testing +/// Helper function to create westend `GenesisConfig` for testing #[cfg(feature = "westend-native")] pub fn westend_testnet_genesis( wasm_binary: &[u8], @@ -1456,7 +1456,7 @@ pub fn westend_testnet_genesis( } } -/// Helper function to create rococo GenesisConfig for testing +/// Helper function to create rococo `GenesisConfig` for testing #[cfg(feature = "rococo-native")] pub fn rococo_testnet_genesis( wasm_binary: &[u8], diff --git a/node/service/src/grandpa_support.rs b/node/service/src/grandpa_support.rs index 7736ce3a18cf..742e6bffced2 100644 --- a/node/service/src/grandpa_support.rs +++ b/node/service/src/grandpa_support.rs @@ -227,7 +227,7 @@ where target_hash = *target_header.parent_hash(); target_header = backend .header(BlockId::Hash(target_hash))? - .expect("Header known to exist due to the existence of one of its descendents; qed"); + .expect("Header known to exist due to the existence of one of its descendants; qed"); } } @@ -281,7 +281,7 @@ where } /// GRANDPA hard forks due to borked migration of session keys after a runtime -/// upgrade (at #1491596), the signalled authority set changes were invalid +/// upgrade (at #1491596), the signaled authority set changes were invalid /// (blank keys) and were impossible to finalize. The authorities for these /// intermediary pending changes are replaced with a static list comprised of /// w3f validators and randomly selected validators from the latest session (at diff --git a/node/service/src/parachains_db/mod.rs b/node/service/src/parachains_db/mod.rs index c9c86fad964b..4af16c42b34c 100644 --- a/node/service/src/parachains_db/mod.rs +++ b/node/service/src/parachains_db/mod.rs @@ -11,7 +11,7 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -//! A RocksDB instance for storing parachain data; availability data, and approvals. +//! A `RocksDB` instance for storing parachain data; availability data, and approvals. #[cfg(feature = "full-node")] use { diff --git a/node/service/src/relay_chain_selection.rs b/node/service/src/relay_chain_selection.rs index eecbc4b5eda1..5d03712a5a71 100644 --- a/node/service/src/relay_chain_selection.rs +++ b/node/service/src/relay_chain_selection.rs @@ -245,7 +245,7 @@ impl SelectChain for SelectRelayChain self.block_header(best_leaf) } - /// Get the best descendent of `target_hash` that we should attempt to + /// Get the best descendant of `target_hash` that we should attempt to /// finalize next, if any. 
It is valid to return the `target_hash` if /// no better block exists. /// diff --git a/node/subsystem-types/src/lib.rs b/node/subsystem-types/src/lib.rs index f3d350898c06..cba7fa43ae02 100644 --- a/node/subsystem-types/src/lib.rs +++ b/node/subsystem-types/src/lib.rs @@ -52,7 +52,7 @@ pub enum LeafStatus { } impl LeafStatus { - /// Returns a bool indicating fresh status. + /// Returns a `bool` indicating fresh status. pub fn is_fresh(&self) -> bool { match *self { LeafStatus::Fresh => true, @@ -60,7 +60,7 @@ impl LeafStatus { } } - /// Returns a bool indicating stale status. + /// Returns a `bool` indicating stale status. pub fn is_stale(&self) -> bool { match *self { LeafStatus::Fresh => false, @@ -97,12 +97,12 @@ pub struct ActiveLeavesUpdate { } impl ActiveLeavesUpdate { - /// Create a ActiveLeavesUpdate with a single activated hash + /// Create a `ActiveLeavesUpdate` with a single activated hash pub fn start_work(activated: ActivatedLeaf) -> Self { Self { activated: [activated][..].into(), ..Default::default() } } - /// Create a ActiveLeavesUpdate with a single deactivated hash + /// Create a `ActiveLeavesUpdate` with a single deactivated hash pub fn stop_work(hash: Hash) -> Self { Self { deactivated: [hash][..].into(), ..Default::default() } } @@ -114,7 +114,7 @@ impl ActiveLeavesUpdate { } impl PartialEq for ActiveLeavesUpdate { - /// Equality for `ActiveLeavesUpdate` doesnt imply bitwise equality. + /// Equality for `ActiveLeavesUpdate` doesn't imply bitwise equality. /// /// Instead, it means equality when `activated` and `deactivated` are considered as sets. fn eq(&self, other: &Self) -> bool { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index b2f44e986de4..5a3d6333a010 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -217,7 +217,7 @@ pub enum DisputeCoordinatorMessage { /// `InvalidImport`) /// - or were known already (in that case the result will still be `ValidImport`) /// - or we recorded them because (`ValidImport`) - /// - we casted our own vote already on that dispute + /// - we cast our own vote already on that dispute /// - or we have approval votes on that candidate /// - or other explicit votes on that candidate already recorded /// - or recovered availability for the candidate @@ -490,7 +490,7 @@ pub enum AvailabilityStoreMessage { } impl AvailabilityStoreMessage { - /// In fact, none of the AvailabilityStore messages assume a particular relay parent. + /// In fact, none of the `AvailabilityStore` messages assume a particular relay parent. pub fn relay_parent(&self) -> Option { match self { _ => None, @@ -697,8 +697,8 @@ pub enum ProvisionerMessage { /// This message allows external subsystems to request the set of bitfields and backed candidates /// associated with a particular potential block hash. /// - /// This is expected to be used by a proposer, to inject that information into the InherentData - /// where it can be assembled into the ParaInherent. + /// This is expected to be used by a proposer, to inject that information into the `InherentData` + /// where it can be assembled into the `ParaInherent`. 
RequestInherentData(Hash, oneshot::Sender), /// This data should become part of a relay chain block ProvisionableData(Hash, ProvisionableData), diff --git a/node/subsystem-types/src/messages/network_bridge_event.rs b/node/subsystem-types/src/messages/network_bridge_event.rs index 84f2d69479ae..173d8ac1632d 100644 --- a/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/node/subsystem-types/src/messages/network_bridge_event.rs @@ -59,8 +59,8 @@ impl NetworkBridgeEvent { /// for example into a `BitfieldDistributionMessage` in case of the `BitfieldDistribution` /// constructor. /// - /// Therefore a NetworkBridgeEvent will become for example a - /// NetworkBridgeEvent, with the more specific message type + /// Therefore a `NetworkBridgeEvent` will become for example a + /// `NetworkBridgeEvent`, with the more specific message type /// `BitfieldDistributionMessage`. /// /// This acts as a call to `clone`, except in the case where the event is a message event, diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 096957167b45..1fb1a4d124f2 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -126,7 +126,7 @@ pub enum Error { /// The type system wants this even though it doesn't make sense #[error(transparent)] Infallible(#[from] std::convert::Infallible), - /// Attempted to convert from an AllMessages to a FromJob, and failed. + /// Attempted to convert from an `AllMessages` to a `FromJob`, and failed. #[error("AllMessage not relevant to Job")] SenderConversion(String), /// The local node is not a validator. @@ -276,7 +276,7 @@ pub fn choose_random_subset bool>(is_priority: F, mut v: Vec< v } -/// Returns a bool with a probability of `a / b` of being true. +/// Returns a `bool` with a probability of `a / b` of being true. pub fn gen_ratio(a: usize, b: usize) -> bool { use rand::Rng as _; let mut rng = rand::thread_rng(); @@ -372,7 +372,7 @@ impl Drop for AbortOnDrop { } } -/// A JobHandle manages a particular job for a subsystem. +/// A `JobHandle` manages a particular job for a subsystem. struct JobHandle { _abort_handle: AbortOnDrop, to_job: mpsc::Sender, diff --git a/node/subsystem-util/src/runtime/mod.rs b/node/subsystem-util/src/runtime/mod.rs index c691a0010165..1dd8465afb50 100644 --- a/node/subsystem-util/src/runtime/mod.rs +++ b/node/subsystem-util/src/runtime/mod.rs @@ -62,14 +62,14 @@ pub struct RuntimeInfo { /// overseer seems sensible. session_index_cache: LruCache, - /// Look up cached sessions by SessionIndex. + /// Look up cached sessions by `SessionIndex`. session_info_cache: LruCache, /// Key store for determining whether we are a validator and what `ValidatorIndex` we have. keystore: Option, } -/// SessionInfo with additional useful data for validator nodes. +/// `SessionInfo` with additional useful data for validator nodes. pub struct ExtendedSessionInfo { /// Actual session info as fetched from the runtime. pub session_info: SessionInfo, @@ -303,7 +303,7 @@ where ) } -/// Get group rotation info based on the given relay_parent. +/// Get group rotation info based on the given `relay_parent`. 
pub async fn get_group_rotation_info(ctx: &mut Context, relay_parent: Hash) -> Result where diff --git a/node/test/client/src/block_builder.rs b/node/test/client/src/block_builder.rs index 00ca965dff97..a30ff4b8be0a 100644 --- a/node/test/client/src/block_builder.rs +++ b/node/test/client/src/block_builder.rs @@ -24,7 +24,7 @@ use sc_block_builder::{BlockBuilderProvider, BlockBuilder}; use sp_state_machine::BasicExternalities; use parity_scale_codec::{Encode, Decode}; -/// An extension for the test client to init a Polkadot specific block builder. +/// An extension for the test client to initialize a Polkadot specific block builder. pub trait InitPolkadotBlockBuilder { /// Init a Polkadot specific block builder that works for the test runtime. /// diff --git a/node/test/client/src/lib.rs b/node/test/client/src/lib.rs index 52697c8bfa29..0374395cccf8 100644 --- a/node/test/client/src/lib.rs +++ b/node/test/client/src/lib.rs @@ -38,7 +38,7 @@ pub type Executor = client::LocalCallExecutor; -/// LongestChain type for the test runtime/client. +/// `LongestChain` type for the test runtime/client. pub type LongestChain = sc_consensus::LongestChain; /// Parameters of test-client builder with test-runtime. diff --git a/node/test/polkadot-simnet/common/src/lib.rs b/node/test/polkadot-simnet/common/src/lib.rs index fec6e151074f..2ac986ae7fd7 100644 --- a/node/test/polkadot-simnet/common/src/lib.rs +++ b/node/test/polkadot-simnet/common/src/lib.rs @@ -47,7 +47,7 @@ sc_executor::native_executor_instance!( (benchmarking::benchmarking::HostFunctions, SignatureVerificationOverride), ); -/// ChainInfo implementation. +/// `ChainInfo` implementation. pub struct PolkadotChainInfo; impl ChainInfo for PolkadotChainInfo { diff --git a/node/test/polkadot-simnet/node/src/main.rs b/node/test/polkadot-simnet/node/src/main.rs index 807c37d038e6..3a52182cde45 100644 --- a/node/test/polkadot-simnet/node/src/main.rs +++ b/node/test/polkadot-simnet/node/src/main.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Binary used for simnet nodes, supports all runtimes, although only polkadot is implemented currently. -//! This binary accepts all the cli args the polkadot binary does, Only difference is it uses +//! Binary used for Simnet nodes, supports all runtimes, although only polkadot is implemented currently. +//! This binary accepts all the CLI args the polkadot binary does, Only difference is it uses //! manual-seal™ and babe for block authorship, it has a no-op verifier, so all blocks received over the network //! are imported and executed straight away. Block authorship/Finalization maybe done by calling the //! `engine_createBlock` & `engine_FinalizeBlock` rpc methods respectively. diff --git a/node/test/polkadot-simnet/test/src/main.rs b/node/test/polkadot-simnet/test/src/main.rs index 79730d546446..3b57c40a6079 100644 --- a/node/test/polkadot-simnet/test/src/main.rs +++ b/node/test/polkadot-simnet/test/src/main.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Attempts to upgrade the polkadot runtime, in a simnet environment +//! 
Attempts to upgrade the polkadot runtime, in a Simnet environment use std::{error::Error, str::FromStr}; use polkadot_simnet::{run, dispatch_with_root}; diff --git a/node/test/service/src/chain_spec.rs b/node/test/service/src/chain_spec.rs index c65b70f107cf..5a54bebf1bc0 100644 --- a/node/test/service/src/chain_spec.rs +++ b/node/test/service/src/chain_spec.rs @@ -92,7 +92,7 @@ fn testnet_accounts() -> Vec { ] } -/// Helper function to create polkadot GenesisConfig for testing +/// Helper function to create polkadot `GenesisConfig` for testing fn polkadot_testnet_genesis( initial_authorities: Vec<( AccountId, diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 0e7d5d9db9b5..6e17e37edebe 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -285,7 +285,7 @@ pub fn run_collator_node( /// A Polkadot test node instance used for testing. pub struct PolkadotTestNode { - /// TaskManager's instance. + /// `TaskManager`'s instance. pub task_manager: TaskManager, /// Client's instance. pub client: Arc, @@ -293,7 +293,7 @@ pub struct PolkadotTestNode { pub overseer_handler: Handle, /// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes. pub addr: MultiaddrWithPeerId, - /// RPCHandlers to make RPC queries. + /// `RPCHandlers` to make RPC queries. pub rpc_handlers: RpcHandlers, } diff --git a/primitives/src/v0.rs b/primitives/src/v0.rs index 917bda2aa72a..76274c40f254 100644 --- a/primitives/src/v0.rs +++ b/primitives/src/v0.rs @@ -427,7 +427,7 @@ pub struct AbridgedCandidateReceipt { pub collator: CollatorId, /// Signature on blake2-256 of the block data by collator. pub signature: CollatorSignature, - /// The hash of the pov-block. + /// The hash of the `pov-block`. pub pov_block_hash: H, /// Commitments made as a result of validation. pub commitments: CandidateCommitments, @@ -561,9 +561,9 @@ pub struct CandidateDescriptor { /// The collator's relay-chain account ID pub collator: CollatorId, /// Signature on blake2-256 of components of this receipt: - /// The para ID, the relay parent, and the pov_hash. + /// The para ID, the relay parent, and the `pov_hash`. pub signature: CollatorSignature, - /// The hash of the pov-block. + /// The hash of the `pov-block`. pub pov_hash: H, } @@ -582,12 +582,12 @@ pub struct CollationInfo { pub signature: CollatorSignature, /// The head-data pub head_data: HeadData, - /// blake2-256 Hash of the pov-block + /// blake2-256 Hash of the `pov-block` pub pov_block_hash: Hash, } impl CollationInfo { - /// Check integrity vs. a pov-block. + /// Check integrity vs. a `pov-block`. pub fn check_signature(&self) -> Result<(), ()> { check_collator_signature( &self.relay_parent, diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index 83192f4f6ab3..0d9f429d469a 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! V1 Primitives. +//! `V1` Primitives. use sp_std::prelude::*; use sp_std::collections::btree_map::BTreeMap; @@ -114,7 +114,7 @@ pub mod well_known_keys { }) } - /// The hrmp channel for the given identifier. + /// The HRMP channel for the given identifier. /// /// The storage entry should be accessed as an `AbridgedHrmpChannel` encoded value. 
pub fn hrmp_channels(channel: HrmpChannelId) -> Vec { @@ -293,12 +293,12 @@ pub struct CandidateDescriptor { /// relay-chain state which may vary based on bitfields included before the candidate. /// Thus it cannot be derived entirely from the relay-parent. pub persisted_validation_data_hash: Hash, - /// The blake2-256 hash of the pov. + /// The blake2-256 hash of the PoV. pub pov_hash: Hash, /// The root of a block's erasure encoding Merkle tree. pub erasure_root: Hash, /// Signature on blake2-256 of components of this receipt: - /// The parachain index, the relay parent, the validation data hash, and the pov_hash. + /// The parachain index, the relay parent, the validation data hash, and the `pov_hash`. pub signature: CollatorSignature, /// Hash of the para header that is being generated by this candidate. pub para_head: Hash, @@ -374,7 +374,7 @@ impl CommittedCandidateReceipt { } impl CommittedCandidateReceipt { - /// Transforms this into a plain CandidateReceipt. + /// Transforms this into a plain `CandidateReceipt`. pub fn to_plain(&self) -> CandidateReceipt { CandidateReceipt { descriptor: self.descriptor.clone(), @@ -642,7 +642,7 @@ impl GroupRotationInfo { /// Returns the index of the group needed to validate the core at the given index, assuming /// the given number of cores. /// - /// `core_index` should be less than `cores`, which is capped at u32::max(). + /// `core_index` should be less than `cores`, which is capped at `u32::max()`. pub fn group_for_core(&self, core_index: CoreIndex, cores: usize) -> GroupIndex { if self.group_rotation_frequency == 0 { return GroupIndex(core_index.0) } if cores == 0 { return GroupIndex(0) } @@ -660,7 +660,7 @@ impl GroupRotationInfo { /// Returns the index of the group assigned to the given core. This does no checking or /// whether the group index is in-bounds. /// - /// `core_index` should be less than `cores`, which is capped at u32::max(). + /// `core_index` should be less than `cores`, which is capped at `u32::max()`. pub fn core_for_group(&self, group_index: GroupIndex, cores: usize) -> CoreIndex { if self.group_rotation_frequency == 0 { return CoreIndex(group_index.0) } if cores == 0 { return CoreIndex(0) } @@ -844,7 +844,7 @@ pub struct SessionInfo { pub n_cores: u32, /// The zeroth delay tranche width. pub zeroth_delay_tranche_width: u32, - /// The number of samples we do of relay_vrf_modulo. + /// The number of samples we do of `relay_vrf_modulo`. pub relay_vrf_modulo_samples: u32, /// The number of delay tranches in total. pub n_delay_tranches: u32, @@ -886,7 +886,7 @@ sp_api::decl_runtime_apis! { /// Cores are either free or occupied. Free cores can have paras assigned to them. fn availability_cores() -> Vec>; - /// Yields the persisted validation data for the given ParaId along with an assumption that + /// Yields the persisted validation data for the given `ParaId` along with an assumption that /// should be used if the para currently occupies a core. /// /// Returns `None` if either the para is not registered or the assumption is `Freed` @@ -1029,7 +1029,7 @@ pub enum ConsensusLog { #[codec(index = 3)] ForceApprove(BlockNumber), /// A signal to revert the block number in the same chain as the - /// header this digest is part of and all of its descendents. + /// header this digest is part of and all of its descendants. /// /// It is a no-op for a block to contain a revert digest targeting /// its own number or a higher number. 
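The `group_for_core` rotation documented earlier in this file shifts validator groups across cores once every `group_rotation_frequency` blocks, wrapping around the number of cores. A simplified sketch of that arithmetic, using plain integers instead of the runtime's `CoreIndex`/`GroupIndex` wrappers:

```rust
// Simplified sketch of the group rotation described above; not the runtime's
// exact implementation, but the same modular arithmetic.
struct GroupRotationInfo {
    session_start_block: u32,
    group_rotation_frequency: u32,
    now: u32,
}

impl GroupRotationInfo {
    fn group_for_core(&self, core_index: u32, cores: u32) -> u32 {
        if self.group_rotation_frequency == 0 {
            return core_index;
        }
        if cores == 0 {
            return 0;
        }
        let blocks_since_start = self.now.saturating_sub(self.session_start_block);
        let rotations = blocks_since_start / self.group_rotation_frequency;
        // Groups shift by one core per rotation, wrapping around the core count.
        ((core_index as u64 + rotations as u64) % cores as u64) as u32
    }
}

fn main() {
    let info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 10, now: 25 };
    // Two full rotations have happened, so core 0 is now validated by group 2.
    assert_eq!(info.group_for_core(0, 5), 2);
}
```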
diff --git a/primitives/src/v1/signed.rs b/primitives/src/v1/signed.rs index 3aa6d964342c..28d84022aacd 100644 --- a/primitives/src/v1/signed.rs +++ b/primitives/src/v1/signed.rs @@ -255,11 +255,11 @@ impl From> for UncheckedSigne } } -/// This helper trait ensures that we can encode Statement as CompactStatement, +/// This helper trait ensures that we can encode `Statement` as `CompactStatement`, /// and anything as itself. /// /// This resembles `parity_scale_codec::EncodeLike`, but it's distinct: -/// EncodeLike is a marker trait which asserts at the typesystem level that +/// `EncodeLike` is a marker trait which asserts at the typesystem level that /// one type's encoding is a valid encoding for another type. It doesn't /// perform any type conversion when encoding. /// diff --git a/roadmap/implementers-guide/src/node/README.md b/roadmap/implementers-guide/src/node/README.md index f20c970aff6c..edd72d2335b5 100644 --- a/roadmap/implementers-guide/src/node/README.md +++ b/roadmap/implementers-guide/src/node/README.md @@ -26,6 +26,6 @@ The Node-side code comes with a set of assumptions that we build upon. These ass We assume the following constraints regarding provided basic functionality: * The underlying **consensus** algorithm, whether it is BABE or SASSAFRAS is implemented. * There is a **chain synchronization** protocol which will search for and download the longest available chains at all times. - * The **state** of all blocks at the head of the chain is available. There may be **state pruning** such that state of the last `k` blocks behind the last finalized block are available, as well as the state of all their descendents. This assumption implies that the state of all active leaves and their last `k` ancestors are all available. The underlying implementation is expected to support `k` of a few hundred blocks, but we reduce this to a very conservative `k=5` for our purposes. + * The **state** of all blocks at the head of the chain is available. There may be **state pruning** such that state of the last `k` blocks behind the last finalized block are available, as well as the state of all their descendants. This assumption implies that the state of all active leaves and their last `k` ancestors are all available. The underlying implementation is expected to support `k` of a few hundred blocks, but we reduce this to a very conservative `k=5` for our purposes. * There is an underlying **networking** framework which provides **peer discovery** services which will provide us with peers and will not create "loopback" connections to our own node. The number of peers we will have is assumed to be bounded at 1000. * There is a **transaction pool** and a **transaction propagation** mechanism which maintains a set of current transactions and distributes to connected peers. Current transactions are those which are not outdated relative to some "best" fork of the chain, which is part of the active heads, and have not been included in the best fork. diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index 0ba6e0db23ef..2366d7281d9e 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -4,9 +4,9 @@ Reading the [section on the approval protocol](../../protocol-approval.md) will Approval votes are split into two parts: Assignments and Approvals. 
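As a reading aid for the assignment/approval split described above, here is a toy model (not project code) of the states a checker moves through for a single candidate:

```rust
// Toy model (not project code) of the two-phase flow described above.
#[allow(dead_code)]
enum OurVoteState {
    /// No intent to check this candidate has been announced yet.
    NotTriggered,
    /// Assignment broadcast; recovery and validation are in flight. If no
    /// approval follows in time, other validators count this as a no-show.
    AssignmentTriggered,
    /// Validation succeeded and the approval vote has been issued.
    Approved,
}
```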
Validators first broadcast their assignment to indicate intent to check a candidate. Upon successfully checking, they broadcast an approval vote. If a validator doesn't broadcast their approval vote shortly after issuing an assignment, this is an indication that they are being prevented from recovering or validating the block data and that more validators should self-select to check the candidate. This is known as a "no-show". -The core of this subsystem is a Tick-based timer loop, where Ticks are 500ms. We also reason about time in terms of DelayTranches, which measure the number of ticks elapsed since a block was produced. We track metadata for all un-finalized but included candidates. We compute our local assignments to check each candidate, as well as which DelayTranche those assignments may be minimally triggered at. As the same candidate may appear in more than one block, we must produce our potential assignments for each (Block, Candidate) pair. The timing loop is based on waiting for assignments to become no-shows or waiting to broadcast and begin our own assignment to check. +The core of this subsystem is a Tick-based timer loop, where Ticks are 500ms. We also reason about time in terms of `DelayTranche`s, which measure the number of ticks elapsed since a block was produced. We track metadata for all un-finalized but included candidates. We compute our local assignments to check each candidate, as well as which `DelayTranche` those assignments may be minimally triggered at. As the same candidate may appear in more than one block, we must produce our potential assignments for each (Block, Candidate) pair. The timing loop is based on waiting for assignments to become no-shows or waiting to broadcast and begin our own assignment to check. -Another main component of this subsystem is the logic for determining when a (Block, Candidate) pair has been approved and when to broadcast and trigger our own assignment. Once a (Block, Candidate) pair has been approved, we mark a corresponding bit in the BlockEntry that indicates the candidate has been approved under the block. When we trigger our own assignment, we broadcast it via Approval Distribution, begin fetching the data from Availability Recovery, and then pass it through to the Candidate Validation. Once these steps are successful, we issue our approval vote. If any of these steps fail, we don't issue any vote and will "no-show" from the perspective of other validators. In the future we will initiate disputes as well. +Another main component of this subsystem is the logic for determining when a (Block, Candidate) pair has been approved and when to broadcast and trigger our own assignment. Once a (Block, Candidate) pair has been approved, we mark a corresponding bit in the `BlockEntry` that indicates the candidate has been approved under the block. When we trigger our own assignment, we broadcast it via Approval Distribution, begin fetching the data from Availability Recovery, and then pass it through to the Candidate Validation. Once these steps are successful, we issue our approval vote. If any of these steps fail, we don't issue any vote and will "no-show" from the perspective of other validators. In the future we will initiate disputes as well. Where this all fits into Polkadot is via block finality. Our goal is to not finalize any block containing a candidate that is not approved. We provide a hook for a custom GRANDPA voting rule - GRANDPA makes requests of the form (target, minimum) consisting of a target block (i.e. 
longest chain) that it would like to finalize, and a minimum block which, due to the rules of GRANDPA, must be voted on. The minimum is typically the last finalized block, but may be beyond it, in the case of having a last-round-estimate beyond the last finalized. Thus, our goal is to inform GRANDPA of some block between target and minimum which we believe can be finalized safely. We do this by iterating backwards from the target to the minimum and finding the longest continuous chain from minimum where all candidates included by those blocks have been approved. @@ -164,23 +164,23 @@ Main loop: #### `OverseerSignal::BlockFinalized` -On receiving an `OverseerSignal::BlockFinalized(h)`, we fetch the block number `b` of that block from the ChainApi subsystem. We update our `StoredBlockRange` to begin at `b+1`. Additionally, we remove all block entries and candidates referenced by them up to and including `b`. Lastly, we prune out all descendents of `h` transitively: when we remove a `BlockEntry` with number `b` that is not equal to `h`, we recursively delete all the `BlockEntry`s referenced as children. We remove the `block_assignments` entry for the block hash and if `block_assignments` is now empty, remove the `CandidateEntry`. We also update each of the `BlockNumber -> Vec` keys in the database to reflect the blocks at that height, clearing if empty. +On receiving an `OverseerSignal::BlockFinalized(h)`, we fetch the block number `b` of that block from the `ChainApi` subsystem. We update our `StoredBlockRange` to begin at `b+1`. Additionally, we remove all block entries and candidates referenced by them up to and including `b`. Lastly, we prune out all descendants of `h` transitively: when we remove a `BlockEntry` with number `b` that is not equal to `h`, we recursively delete all the `BlockEntry`s referenced as children. We remove the `block_assignments` entry for the block hash and if `block_assignments` is now empty, remove the `CandidateEntry`. We also update each of the `BlockNumber -> Vec` keys in the database to reflect the blocks at that height, clearing if empty. #### `OverseerSignal::ActiveLeavesUpdate` On receiving an `OverseerSignal::ActiveLeavesUpdate(update)`: - * We determine the set of new blocks that were not in our previous view. This is done by querying the ancestry of all new items in the view and contrasting against the stored `BlockNumber`s. Typically, there will be only one new block. We fetch the headers and information on these blocks from the ChainApi subsystem. Stale leaves in the update can be ignored. + * We determine the set of new blocks that were not in our previous view. This is done by querying the ancestry of all new items in the view and contrasting against the stored `BlockNumber`s. Typically, there will be only one new block. We fetch the headers and information on these blocks from the `ChainApi` subsystem. Stale leaves in the update can be ignored. * We update the `StoredBlockRange` and the `BlockNumber` maps. - * We use the RuntimeApiSubsystem to determine information about these blocks. It is generally safe to assume that runtime state is available for recent, unfinalized blocks. In the case that it isn't, it means that we are catching up to the head of the chain and needn't worry about assignments to those blocks anyway, as the security assumption of the protocol tolerates nodes being temporarily offline or out-of-date. + * We use the `RuntimeApiSubsystem` to determine information about these blocks. 
It is generally safe to assume that runtime state is available for recent, unfinalized blocks. In the case that it isn't, it means that we are catching up to the head of the chain and needn't worry about assignments to those blocks anyway, as the security assumption of the protocol tolerates nodes being temporarily offline or out-of-date. * We fetch the set of candidates included by each block by dispatching a `RuntimeApiRequest::CandidateEvents` and checking the `CandidateIncluded` events. * We fetch the session of the block by dispatching a `session_index_for_child` request with the parent-hash of the block. * If the `session index - APPROVAL_SESSIONS > state.earliest_session`, then bump `state.earliest_sessions` to that amount and prune earlier sessions. * If the session isn't in our `state.session_info`, load the session info for it and for all sessions since the earliest-session, including the earliest-session, if that is missing. And it can be, just after pruning, if we've done a big jump forward, as is the case when we've just finished chain synchronization. * If any of the runtime API calls fail, we just warn and skip the block. - * We use the RuntimeApiSubsystem to determine the set of candidates included in these blocks and use BABE logic to determine the slot number and VRF of the blocks. + * We use the `RuntimeApiSubsystem` to determine the set of candidates included in these blocks and use BABE logic to determine the slot number and VRF of the blocks. * We also note how late we appear to have received the block. We create a `BlockEntry` for each block and a `CandidateEntry` for each candidate obtained from `CandidateIncluded` events after making a `RuntimeApiRequest::CandidateEvents` request. - * For each candidate, if the amount of needed approvals is more than the validators remaining after the backing group of the candidate is subtracted, then the candidate is insta-approved as approval would be impossible otherwise. If all candidates in the block are insta-approved, or there are no candidates in the block, then the block is insta-approved. If the block is insta-approved, a [`ChainSelectionMessage::Approvedl][CSM] should be sent for the block. + * For each candidate, if the amount of needed approvals is more than the validators remaining after the backing group of the candidate is subtracted, then the candidate is insta-approved as approval would be impossible otherwise. If all candidates in the block are insta-approved, or there are no candidates in the block, then the block is insta-approved. If the block is insta-approved, a [`ChainSelectionMessage::Approved`][CSM] should be sent for the block. * Ensure that the `CandidateEntry` contains a `block_assignments` entry for the block, with the correct backing group set. * If a validator in this session, compute and assign `our_assignment` for the `block_assignments` * Only if not a member of the backing group. @@ -262,25 +262,27 @@ On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`: * [Schedule a new wakeup](#schedule-wakeup) of the candidate. #### Schedule Wakeup + * Requires `(approval_entry, candidate_entry)` which effectively denotes a `(Block Hash, Candidate Hash)` pair - the candidate, along with the block it appears in. * Also requires `RequiredTranches` * If the `approval_entry` is approved, this doesn't need to be woken up again. * If `RequiredTranches::All` - no wakeup. We assume other incoming votes will trigger wakeup and potentially re-schedule. 
* If `RequiredTranches::Pending { considered, next_no_show, uncovered, maximum_broadcast, clock_drift }` - schedule at the lesser of the next no-show tick, or the tick, offset positively by `clock_drift` of the next non-empty tranche we are aware of after `considered`, including any tranche containing our own unbroadcast assignment. This can lead to no wakeup in the case that we have already broadcast our assignment and there are no pending no-shows; that is, we have approval votes for every assignment we've received that is not already a no-show. In this case, we will be re-triggered by other validators broadcasting their assignments. - * If `RequiredTranches::Exact { next_no_show, .. } - set a wakeup for the next no-show tick. + * If `RequiredTranches::Exact { next_no_show, .. }` - set a wakeup for the next no-show tick. #### Launch Approval Work - * Requires `(SessionIndex, SessionInfo, CandidateReceipt, ValidatorIndex, backing_group, block_hash, candidate_index)` - * Extract the public key of the `ValidatorIndex` from the `SessionInfo` for the session. - * Issue an `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session_index, Some(backing_group), response_sender)` - * Load the historical validation code of the parachain by dispatching a `RuntimeApiRequest::ValidationCodeByHash(`descriptor.validation_code_hash`)` against the state of `block_hash`. - * Spawn a background task with a clone of `background_tx` - * Wait for the available data - * Issue a `CandidateValidationMessage::ValidateFromExhaustive` message - * Wait for the result of validation - * Check that the result of validation, if valid, matches the commitments in the receipt. - * If valid, issue a message on `background_tx` detailing the request. - * If any of the data, the candidate, or the commitments are invalid, issue on `background_tx` a [`DisputeCoordinatorMessage::IssueLocalStatement`](../../types/overseer-protocol.md#dispute-coordinator-message) with `valid = false` to initiate a dispute. + +* Requires `(SessionIndex, SessionInfo, CandidateReceipt, ValidatorIndex, backing_group, block_hash, candidate_index)` +* Extract the public key of the `ValidatorIndex` from the `SessionInfo` for the session. +* Issue an `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session_index, Some(backing_group), response_sender)` +* Load the historical validation code of the parachain by dispatching a `RuntimeApiRequest::ValidationCodeByHash(descriptor.validation_code_hash)` against the state of `block_hash`. +* Spawn a background task with a clone of `background_tx` + * Wait for the available data + * Issue a `CandidateValidationMessage::ValidateFromExhaustive` message + * Wait for the result of validation + * Check that the result of validation, if valid, matches the commitments in the receipt. + * If valid, issue a message on `background_tx` detailing the request. + * If any of the data, the candidate, or the commitments are invalid, issue on `background_tx` a [`DisputeCoordinatorMessage::IssueLocalStatement`](../../types/overseer-protocol.md#dispute-coordinator-message) with `valid = false` to initiate a dispute. #### Issue Approval Vote * Fetch the block entry and candidate entry. Ignore if `None` - we've probably just lost a race with finality. 
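The wakeup selection described under "Schedule Wakeup" above boils down to a small, pure decision over ticks. The sketch below is illustrative only and is not the node's actual implementation: `Tick`, the trimmed-down `RequiredTranches`, and the precomputed `next_tranche_tick` (standing in for "the next non-empty tranche we are aware of after `considered`") are all hypothetical simplifications.

```rust
// Illustrative stand-ins only: the real node-side `Tick` and `RequiredTranches`
// types carry more information than this trimmed-down version.
type Tick = u64;

enum RequiredTranches {
    // Every required assignment is covered; incoming votes will re-trigger us.
    All,
    // An exact set of tranches suffices; only its next no-show tick matters here.
    Exact { next_no_show: Option<Tick> },
    // Still pending: we may need to wait for a no-show or for the next
    // non-empty tranche we know about (its tick is stored here un-drifted).
    Pending { next_no_show: Option<Tick>, next_tranche_tick: Option<Tick>, clock_drift: Tick },
}

/// Pick the next wakeup tick for a (block, candidate) pair, or `None` if no
/// wakeup needs to be scheduled.
fn next_wakeup(approved: bool, required: RequiredTranches) -> Option<Tick> {
    if approved {
        return None; // Fully approved: nothing left to wake up for.
    }
    match required {
        RequiredTranches::All => None,
        RequiredTranches::Exact { next_no_show } => next_no_show,
        RequiredTranches::Pending { next_no_show, next_tranche_tick, clock_drift } => {
            // The tranche tick is offset positively by the clock drift; take
            // the earlier of the two candidate ticks, if any exist.
            let drifted = next_tranche_tick.map(|t| t + clock_drift);
            match (next_no_show, drifted) {
                (Some(a), Some(b)) => Some(a.min(b)),
                (a, b) => a.or(b),
            }
        }
    }
}
```

Keeping the rule as a pure function like this makes the two no-wakeup cases (already approved, or `RequiredTranches::All`) straightforward to unit-test in isolation.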
diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index b2a5fe1021c1..2b4f936f1b6e 100644 --- a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -81,13 +81,14 @@ dispute participation subsystem. ### On `OverseerSignal::ActiveLeavesUpdate` For each leaf in the leaves update: - * Fetch the session index for the child of the block with a [`RuntimeApiMessage::SessionIndexForChild`][RuntimeApiMessage]. - * If the session index is higher than `state.highest_session`: - * update `state.highest_session` - * remove everything with session index less than `state.highest_session - DISPUTE_WINDOW` from the `"recent-disputes"` in the DB. - * Use `iter_with_prefix` to remove everything from `"earliest-session"` up to `state.highest_session - DISPUTE_WINDOW` from the DB under `"candidate-votes"`. - * Update `"earliest-session"` to be equal to `state.highest_session - DISPUTE_WINDOW`. - * For each new block, explicitly or implicitly, under the new leaf, scan for a dispute digest which indicates a rollback. If a rollback is detected, use the ChainApi subsystem to blacklist the chain. + +* Fetch the session index for the child of the block with a [`RuntimeApiMessage::SessionIndexForChild`][RuntimeApiMessage]. +* If the session index is higher than `state.highest_session`: + * update `state.highest_session` + * remove everything with session index less than `state.highest_session - DISPUTE_WINDOW` from the `"recent-disputes"` in the DB. + * Use `iter_with_prefix` to remove everything from `"earliest-session"` up to `state.highest_session - DISPUTE_WINDOW` from the DB under `"candidate-votes"`. + * Update `"earliest-session"` to be equal to `state.highest_session - DISPUTE_WINDOW`. +* For each new block, explicitly or implicitly, under the new leaf, scan for a dispute digest which indicates a rollback. If a rollback is detected, use the `ChainApi` subsystem to blacklist the chain. ### On `OverseerSignal::Conclude` @@ -144,7 +145,7 @@ Do nothing. ### On `DisputeCoordinatorMessage::QueryCandidateVotes` * Load `"candidate-votes"` for every `(SessionIndex, CandidateHash)` in the query and return data within each `CandidateVote`. - If a particular `candidate-vote` is missing, that particular request is ommitted from the response. + If a particular `candidate-vote` is missing, that particular request is omitted from the response. 
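To make the `QueryCandidateVotes` behaviour above concrete, here is a minimal sketch. It assumes a hypothetical in-memory map in place of the real `"candidate-votes"` column and a stubbed `CandidateVotes` type: entries found in the store are returned, and missing `(session, candidate)` pairs are simply dropped from the response.

```rust
use std::collections::BTreeMap;

// Hypothetical stand-ins; only the lookup-and-omit shape matters here.
type SessionIndex = u32;
type CandidateHash = [u8; 32];

#[derive(Clone)]
struct CandidateVotes; // Valid/invalid vote sets elided for brevity.

/// Answer a `QueryCandidateVotes`-style request: look up each
/// `(session, candidate)` pair and omit the ones with no stored entry.
fn query_candidate_votes(
    store: &BTreeMap<(SessionIndex, CandidateHash), CandidateVotes>,
    query: Vec<(SessionIndex, CandidateHash)>,
) -> Vec<(SessionIndex, CandidateHash, CandidateVotes)> {
    query
        .into_iter()
        .filter_map(|(session, candidate)| {
            store
                .get(&(session, candidate))
                .cloned()
                .map(|votes| (session, candidate, votes))
        })
        .collect()
}
```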
### On `DisputeCoordinatorMessage::IssueLocalStatement` diff --git a/roadmap/implementers-guide/src/node/utility/availability-store.md b/roadmap/implementers-guide/src/node/utility/availability-store.md index aec10b1967e8..0ab5c680cda6 100644 --- a/roadmap/implementers-guide/src/node/utility/availability-store.md +++ b/roadmap/implementers-guide/src/node/utility/availability-store.md @@ -21,33 +21,34 @@ There may be multiple competing blocks all ending the availability phase for a p ```dot process digraph { - label = "Block data FSM\n\n\n"; - labelloc = "t"; - rankdir="LR"; - - st [label = "Stored"; shape = circle] - inc [label = "Included"; shape = circle] - fin [label = "Finalized"; shape = circle] - prn [label = "Pruned"; shape = circle] - - st -> inc [label = "Block\nincluded"] - st -> prn [label = "Stored block\ntimed out"] - inc -> fin [label = "Block\nfinalized"] - inc -> st [label = "Competing blocks\nfinalized"] - fin -> prn [label = "Block keep time\n(1 day + 1 hour) elapsed"] + label = "Block data FSM\n\n\n"; + labelloc = "t"; + rankdir="LR"; + + st [label = "Stored"; shape = circle] + inc [label = "Included"; shape = circle] + fin [label = "Finalized"; shape = circle] + prn [label = "Pruned"; shape = circle] + + st -> inc [label = "Block\nincluded"] + st -> prn [label = "Stored block\ntimed out"] + inc -> fin [label = "Block\nfinalized"] + inc -> st [label = "Competing blocks\nfinalized"] + fin -> prn [label = "Block keep time\n(1 day + 1 hour) elapsed"] } ``` ## Database Schema We use an underlying Key-Value database where we assume we have the following operations available: - * `write(key, value)` - * `read(key) -> Option` - * `iter_with_prefix(prefix) -> Iterator<(key, value)>` - gives all keys and values in lexicographical order where the key starts with `prefix`. + +- `write(key, value)` +- `read(key) -> Option` +- `iter_with_prefix(prefix) -> Iterator<(key, value)>` - gives all keys and values in lexicographical order where the key starts with `prefix`. We use this database to encode the following schema: -``` +```rust ("available", CandidateHash) -> Option ("chunk", CandidateHash, u32) -> Option ("meta", CandidateHash) -> Option @@ -56,7 +57,7 @@ We use this database to encode the following schema: ("prune_by_time", Timestamp, CandidateHash) -> Option<()> ``` -Timestamps are the wall-clock seconds since unix epoch. Timestamps and block numbers are both encoded as big-endian so lexicographic order is ascending. +Timestamps are the wall-clock seconds since Unix epoch. Timestamps and block numbers are both encoded as big-endian so lexicographic order is ascending. The meta information that we track per-candidate is defined as the `CandidateMeta` struct @@ -88,84 +89,87 @@ Additionally, there is exactly one `prune_by_time` entry which holds the candida Input: [`AvailabilityStoreMessage`][ASM] Output: -- [`RuntimeApiMessage`][RAM] +- [`RuntimeApiMessage`][RAM] ## Functionality For each head in the `activated` list: - - Load all ancestors of the head back to the finalized block so we don't miss anything if import notifications are missed. If a `StoreChunk` message is received for a candidate which has no entry, then we will prematurely lose the data. - - Note any new candidates backed in the head. Update the `CandidateMeta` for each. If the `CandidateMeta` does not exist, create it as `Unavailable` with the current timestamp. Register a `"prune_by_time"` entry based on the current timestamp + 1 hour. - - Note any new candidate included in the head. 
Update the `CandidateMeta` for each, performing a transition from `Unavailable` to `Unfinalized` if necessary. That includes removing the `"prune_by_time"` entry. Add the head hash and number to the state, if unfinalized. Add an `"unfinalized"` entry for the block and candidate. - - The `CandidateEvent` runtime API can be used for this purpose. + +- Load all ancestors of the head back to the finalized block so we don't miss anything if import notifications are missed. If a `StoreChunk` message is received for a candidate which has no entry, then we will prematurely lose the data. +- Note any new candidates backed in the head. Update the `CandidateMeta` for each. If the `CandidateMeta` does not exist, create it as `Unavailable` with the current timestamp. Register a `"prune_by_time"` entry based on the current timestamp + 1 hour. +- Note any new candidate included in the head. Update the `CandidateMeta` for each, performing a transition from `Unavailable` to `Unfinalized` if necessary. That includes removing the `"prune_by_time"` entry. Add the head hash and number to the state, if unfinalized. Add an `"unfinalized"` entry for the block and candidate. +- The `CandidateEvent` runtime API can be used for this purpose. On `OverseerSignal::BlockFinalized(finalized)` events: - - for each key in `iter_with_prefix("unfinalized")` - - Stop if the key is beyond `("unfinalized, finalized)` - - For each block number f that we encounter, load the finalized hash for that block. - - The state of each `CandidateMeta` we encounter here must be `Unfinalized`, since we loaded the candidate from an `"unfinalized"` key. - - For each candidate that we encounter under `f` and the finalized block hash, - - Update the `CandidateMeta` to have `State::Finalized`. Remove all `"unfinalized"` entries from the old `Unfinalized` state. - - Register a `"prune_by_time"` entry for the candidate based on the current time + 1 day + 1 hour. - - For each candidate that we encounter under `f` which is not under the finalized block hash, - - Remove all entries under `f` in the `Unfinalized` state. - - If the `CandidateMeta` has state `Unfinalized` with an empty list of blocks, downgrade to `Unavailable` and re-schedule pruning under the timestamp + 1 hour. We do not prune here as the candidate still may be included in a descendent of the finalized chain. - - Remove all `"unfinalized"` keys under `f`. - - Update last_finalized = finalized. + +- for each key in `iter_with_prefix("unfinalized")` + - Stop if the key is beyond `("unfinalized, finalized)` + - For each block number f that we encounter, load the finalized hash for that block. + - The state of each `CandidateMeta` we encounter here must be `Unfinalized`, since we loaded the candidate from an `"unfinalized"` key. + - For each candidate that we encounter under `f` and the finalized block hash, + - Update the `CandidateMeta` to have `State::Finalized`. Remove all `"unfinalized"` entries from the old `Unfinalized` state. + - Register a `"prune_by_time"` entry for the candidate based on the current time + 1 day + 1 hour. + - For each candidate that we encounter under `f` which is not under the finalized block hash, + - Remove all entries under `f` in the `Unfinalized` state. + - If the `CandidateMeta` has state `Unfinalized` with an empty list of blocks, downgrade to `Unavailable` and re-schedule pruning under the timestamp + 1 hour. We do not prune here as the candidate still may be included in a descendant of the finalized chain. 
+ - Remove all `"unfinalized"` keys under `f`. +- Update `last_finalized` = finalized. This is roughly `O(n * m)` where n is the number of blocks finalized since the last update, and `m` is the number of parachains. On `QueryAvailableData` message: - - Query `("available", candidate_hash)` +- Query `("available", candidate_hash)` This is `O(n)` in the size of the data, which may be large. On `QueryDataAvailability` message: - - Query whether `("meta", candidate_hash)` exists and `data_available == true`. +- Query whether `("meta", candidate_hash)` exists and `data_available == true`. This is `O(n)` in the size of the metadata which is small. On `QueryChunk` message: - - Query `("chunk", candidate_hash, index)` +- Query `("chunk", candidate_hash, index)` This is `O(n)` in the size of the data, which may be large. On `QueryAllChunks` message: - - Query `("meta", candidate_hash)`. If `None`, send an empty response and return. - - For all `1` bits in the `chunks_stored`, query `("chunk", candidate_hash, index)`. Ignore but warn on errors, and return a vector of all loaded chunks. -On `QueryChunkAvailability message: +- Query `("meta", candidate_hash)`. If `None`, send an empty response and return. +- For all `1` bits in the `chunks_stored`, query `("chunk", candidate_hash, index)`. Ignore but warn on errors, and return a vector of all loaded chunks. + +On `QueryChunkAvailability` message: - - Query whether `("meta", candidate_hash)` exists and the bit at `index` is set. +- Query whether `("meta", candidate_hash)` exists and the bit at `index` is set. This is `O(n)` in the size of the metadata which is small. On `StoreChunk` message: - - If there is a `CandidateMeta` under the candidate hash, set the bit of the erasure-chunk in the `chunks_stored` bitfield to `1`. If it was not `1` already, write the chunk under `("chunk", candidate_hash, chunk_index)`. +- If there is a `CandidateMeta` under the candidate hash, set the bit of the erasure-chunk in the `chunks_stored` bitfield to `1`. If it was not `1` already, write the chunk under `("chunk", candidate_hash, chunk_index)`. This is `O(n)` in the size of the chunk. On `StoreAvailableData` message: - - If there is no `CandidateMeta` under the candidate hash, create it with `State::Unavailable(now)`. Load the `CandidateMeta` otherwise. - - Store `data` under `("available", candidate_hash)` and set `data_available` to true. - - Store each chunk under `("chunk", candidate_hash, index)` and set every bit in `chunks_stored` to `1`. +- If there is no `CandidateMeta` under the candidate hash, create it with `State::Unavailable(now)`. Load the `CandidateMeta` otherwise. +- Store `data` under `("available", candidate_hash)` and set `data_available` to true. +- Store each chunk under `("chunk", candidate_hash, index)` and set every bit in `chunks_stored` to `1`. This is `O(n)` in the size of the data as the aggregate size of the chunks is proportional to the data. Every 5 minutes, run a pruning routine: - - for each key in `iter_with_prefix("prune_by_time")`: - - If the key is beyond ("prune_by_time", now), return. - - Remove the key. - - Extract `candidate_hash` from the key. - - Load and remove the `("meta", candidate_hash)` - - For each erasure chunk bit set, remove `("chunk", candidate_hash, bit_index)`. - - If `data_available`, remove `("available", candidate_hash) +- for each key in `iter_with_prefix("prune_by_time")`: + - If the key is beyond `("prune_by_time", now)`, return. + - Remove the key. + - Extract `candidate_hash` from the key. 
+ - Load and remove the `("meta", candidate_hash)` + - For each erasure chunk bit set, remove `("chunk", candidate_hash, bit_index)`. + - If `data_available`, remove `("available", candidate_hash)` This is O(n * m) in the amount of candidates and average size of the data stored. This is probably the most expensive operation but does not need to be run very often. @@ -193,7 +197,7 @@ Basically we need to test the correctness of data flow through state FSMs descri - Wait until the data should have been pruned. - The data is no longer available. -- Forkfulness of the relay chain is taken into account +- Fork-awareness of the relay chain is taken into account - Block `B1` is added to the store. - Block `B2` is added to the store. - Notify the subsystem that both `B1` and `B2` were included in different leafs of relay chain. diff --git a/roadmap/implementers-guide/src/protocol-overview.md b/roadmap/implementers-guide/src/protocol-overview.md index 8f6c389ab4af..77b3a7448c44 100644 --- a/roadmap/implementers-guide/src/protocol-overview.md +++ b/roadmap/implementers-guide/src/protocol-overview.md @@ -34,7 +34,7 @@ Note that the candidate can fail to be included in any of the following ways: This process can be divided further down. Steps 2 & 3 relate to the work of the collator in collating and distributing the candidate to validators via the Collation Distribution Subsystem. Steps 3 & 4 relate to the work of the validators in the Candidate Backing Subsystem and the block author (itself a validator) to include the block into the relay chain. Steps 6, 7, and 8 correspond to the logic of the relay-chain state-machine (otherwise known as the Runtime) used to fully incorporate the block into the chain. Step 7 requires further work on the validators' parts to participate in the Availability Distribution Subsystem and include that information into the relay chain for step 8 to be fully realized. -This brings us to the second part of the process. Once a parablock is considered available and part of the parachain, it is still "pending approval". At this stage in the pipeline, the parablock has been backed by a majority of validators in the group assigned to that parachain, and its data has been guaranteed available by the set of validators as a whole. Once it's considered available, the host will even begin to accept children of that block. At this point, we can consider the parablock as having been tentatively included in the parachain, although more confirmations are desired. However, the validators in the parachain-group (known as the "Parachain Validators" for that parachain) are sampled from a validator set which contains some proportion of byzantine, or arbitrarily malicious members. This implies that the Parachain Validators for some parachain may be majority-dishonest, which means that (secondary) approval checks must be done on the block before it can be considered approved. This is necessary only because the Parachain Validators for a given parachain are sampled from an overall validator set which is assumed to be up to <1/3 dishonest - meaning that there is a chance to randomly sample Parachain Validators for a parachain that are majority or fully dishonest and can back a candidate wrongly. The Approval Process allows us to detect such misbehavior after-the-fact without allocating more Parachain Validators and reducing the throughput of the system. A parablock's failure to pass the approval process will invalidate the block as well as all of its descendents. 
However, only the validators who backed the block in question will be slashed, not the validators who backed the descendents. +This brings us to the second part of the process. Once a parablock is considered available and part of the parachain, it is still "pending approval". At this stage in the pipeline, the parablock has been backed by a majority of validators in the group assigned to that parachain, and its data has been guaranteed available by the set of validators as a whole. Once it's considered available, the host will even begin to accept children of that block. At this point, we can consider the parablock as having been tentatively included in the parachain, although more confirmations are desired. However, the validators in the parachain-group (known as the "Parachain Validators" for that parachain) are sampled from a validator set which contains some proportion of byzantine, or arbitrarily malicious members. This implies that the Parachain Validators for some parachain may be majority-dishonest, which means that (secondary) approval checks must be done on the block before it can be considered approved. This is necessary only because the Parachain Validators for a given parachain are sampled from an overall validator set which is assumed to be up to <1/3 dishonest - meaning that there is a chance to randomly sample Parachain Validators for a parachain that are majority or fully dishonest and can back a candidate wrongly. The Approval Process allows us to detect such misbehavior after-the-fact without allocating more Parachain Validators and reducing the throughput of the system. A parablock's failure to pass the approval process will invalidate the block as well as all of its descendants. However, only the validators who backed the block in question will be slashed, not the validators who backed the descendants. The Approval Process, at a glance, looks like this: @@ -170,7 +170,7 @@ digraph { } ``` -In this example, group 1 has received block C while the others have not due to network asynchrony. Now, a validator from group 2 may be able to build another block on top of B, called C'. Assume that afterwards, some validators become aware of both C and C', while others remain only aware of one. +In this example, group 1 has received block C while the others have not due to network asynchrony. Now, a validator from group 2 may be able to build another block on top of B, called `C'`. Assume that afterwards, some validators become aware of both C and `C'`, while others remain only aware of one. ```dot process digraph { diff --git a/roadmap/implementers-guide/src/runtime-api/README.md b/roadmap/implementers-guide/src/runtime-api/README.md index a40290a2d065..740ffd38ccee 100644 --- a/roadmap/implementers-guide/src/runtime-api/README.md +++ b/roadmap/implementers-guide/src/runtime-api/README.md @@ -4,7 +4,7 @@ Runtime APIs are the means by which the node-side code extracts information from Every block in the relay-chain contains a *state root* which is the root hash of a state trie encapsulating all storage of runtime modules after execution of the block. This is a cryptographic commitment to a unique state. We use the terminology of accessing the *state at* a block to refer accessing the state referred to by the state root of that block. -Although Runtime APIs are often used for simple storage access, they are actually empowered to do arbitrary computation. 
The implementation of the Runtime APIs lives within the Runtime as Wasm code and exposes extern functions that can be invoked with arguments and have a return value. Runtime APIs have access to a variety of host functions, which are contextual functions provided by the Wasm execution context, that allow it to carry out many different types of behaviors. +Although Runtime APIs are often used for simple storage access, they are actually empowered to do arbitrary computation. The implementation of the Runtime APIs lives within the Runtime as Wasm code and exposes `extern` functions that can be invoked with arguments and have a return value. Runtime APIs have access to a variety of host functions, which are contextual functions provided by the Wasm execution context, that allow it to carry out many different types of behaviors. Abilities provided by host functions includes: diff --git a/roadmap/implementers-guide/src/runtime/README.md b/roadmap/implementers-guide/src/runtime/README.md index c3cddbda7a95..178346e184f5 100644 --- a/roadmap/implementers-guide/src/runtime/README.md +++ b/roadmap/implementers-guide/src/runtime/README.md @@ -21,9 +21,9 @@ We will split the logic of the runtime up into these modules: * Scheduler: manages parachain and parathread scheduling as well as validator assignments. * Inclusion: handles the inclusion and availability of scheduled parachains and parathreads. * Validity: handles secondary checks and dispute resolution for included, available parablocks. -* Hrmp: handles horizontal messages between paras. -* Ump: Handles upward messages from a para to the relay chain. -* Dmp: Handles downward messages from the relay chain to the para. +* HRMP: handles horizontal messages between paras. +* UMP: Handles upward messages from a para to the relay chain. +* DMP: Handles downward messages from the relay chain to the para. The [Initializer module](initializer.md) is special - it's responsible for handling the initialization logic of the other modules to ensure that the correct initialization order and related invariants are maintained. The other modules won't specify a on-initialize logic, but will instead expose a special semi-private routine that the initialization module will call. The other modules are relatively straightforward and perform the roles described above. @@ -31,7 +31,7 @@ The Parachain Host operates under a changing set of validators. Time is split up The relay chain is intended to use BABE or SASSAFRAS, which both have the property that a session changing at a block is determined not by the number of the block but instead by the time the block is authored. In some sense, sessions change in-between blocks, not at blocks. This has the side effect that the session of a child block cannot be determined solely by the parent block's identifier. Being able to unilaterally determine the validator-set at a specific block based on its parent hash would make a lot of Node-side logic much simpler. -In order to regain the property that the validator set of a block is predictable by its parent block, we delay session changes' application to Parachains by 1 block. This means that if there is a session change at block X, that session change will be stored and applied during initialization of direct descendents of X. This principal side effect of this change is that the Parachains runtime can disagree with session or consensus modules about which session it currently is. Misbehavior reporting routines in particular will be affected by this, although not severely. 
The parachains runtime might believe it is the last block of the session while the system is really in the first block of the next session. In such cases, a historical validator-set membership proof will need to accompany any misbehavior report, although they typically do not need to during current-session misbehavior reports. +In order to regain the property that the validator set of a block is predictable by its parent block, we delay session changes' application to Parachains by 1 block. This means that if there is a session change at block X, that session change will be stored and applied during initialization of direct descendants of X. The principal side effect of this change is that the Parachains runtime can disagree with session or consensus modules about which session it currently is. Misbehavior reporting routines in particular will be affected by this, although not severely. The parachains runtime might believe it is the last block of the session while the system is really in the first block of the next session. In such cases, a historical validator-set membership proof will need to accompany any misbehavior report, although they typically do not need to during current-session misbehavior reports. So the other role of the initializer module is to forward session change notifications to modules in the initialization order. Session change is also the point at which the [Configuration Module](configuration.md) updates the configuration. Most of the other modules will handle changes in the configuration during their session change operation, so the initializer should provide both the old and new configuration to all the other modules alongside the session change notification. This means that a session change notification should consist of the following data: diff --git a/roadmap/implementers-guide/src/types/candidate.md b/roadmap/implementers-guide/src/types/candidate.md index 5dccfb6c40b9..b9d5900b7f17 100644 --- a/roadmap/implementers-guide/src/types/candidate.md +++ b/roadmap/implementers-guide/src/types/candidate.md @@ -78,12 +78,12 @@ struct CandidateDescriptor { /// derived from relay-chain state that influence the validity of the block which /// must also be kept available for secondary checkers. persisted_validation_data_hash: Hash, - /// The blake2-256 hash of the pov-block. + /// The blake2-256 hash of the `pov-block`. pov_hash: Hash, /// The root of a block's erasure encoding Merkle tree. erasure_root: Hash, /// Signature on blake2-256 of components of this receipt: - /// The parachain index, the relay parent, the validation data hash, and the pov_hash. + /// The parachain index, the relay parent, the validation data hash, and the `pov_hash`. signature: CollatorSignature, /// Hash of the para header that is being generated by this candidate. para_head: Hash, @@ -92,7 +92,7 @@ struct CandidateDescriptor { } ``` -## PersistedValidationData +## `PersistedValidationData` The validation data provides information about how to create the inputs for validation of a candidate. This information is derived from the chain state and will vary from para to para, although some of the fields may be the same for every para. @@ -102,7 +102,7 @@ Furthermore, the validation data acts as a way to authorize the additional data Since the commitments of the validation function are checked by the relay-chain, secondary checkers can rely on the invariant that the relay-chain only includes para-blocks for which these checks have already been done.
As such, there is no need for the validation data used to inform validators and collators about the checks the relay-chain will perform to be persisted by the availability system. -The `PersistedValidationData` should be relatively lightweight primarly because it is constructed during inclusion for each candidate and therefore lies on the critical path of inclusion. +The `PersistedValidationData` should be relatively lightweight primarily because it is constructed during inclusion for each candidate and therefore lies on the critical path of inclusion. ```rust struct PersistedValidationData { @@ -124,7 +124,7 @@ struct PersistedValidationData { } ``` -## HeadData +## `HeadData` Head data is a type-safe abstraction around bytes (`Vec`) for the purposes of representing heads of parachains or parathreads. diff --git a/roadmap/implementers-guide/src/types/network.md b/roadmap/implementers-guide/src/types/network.md index 4a71f42e1fbe..edd039a127dd 100644 --- a/roadmap/implementers-guide/src/types/network.md +++ b/roadmap/implementers-guide/src/types/network.md @@ -81,7 +81,7 @@ enum PoVDistributionV1Message { /// specific relay-parent hash. Awaiting(Hash, Vec), /// Notification of an awaited PoV, in a given relay-parent context. - /// (relay_parent, pov_hash, pov) + /// (`relay_parent`, `pov_hash`, `pov`) SendPoV(Hash, Hash, PoV), } ``` diff --git a/runtime/common/slot_range_helper/src/lib.rs b/runtime/common/slot_range_helper/src/lib.rs index ec680c87d736..4ebf1e2e282c 100644 --- a/runtime/common/slot_range_helper/src/lib.rs +++ b/runtime/common/slot_range_helper/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A helper macro for generating SlotRange enum. +//! A helper macro for generating `SlotRange` enum. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/runtime/common/src/auctions.rs b/runtime/common/src/auctions.rs index 7c5fa1d75ca0..e577faa165fd 100644 --- a/runtime/common/src/auctions.rs +++ b/runtime/common/src/auctions.rs @@ -89,7 +89,7 @@ pub mod pallet { /// The length of each sample to take during the ending period. /// - /// EndingPeriod / SampleLength = Total # of Samples + /// `EndingPeriod` / `SampleLength` = Total # of Samples #[pallet::constant] type SampleLength: Get; @@ -114,24 +114,24 @@ pub mod pallet { pub enum Event { /// An auction started. Provides its index and the block number where it will begin to /// close and the first lease period of the quadruplet that is auctioned. - /// [auction_index, lease_period, ending] + /// `[auction_index, lease_period, ending]` AuctionStarted(AuctionIndex, LeasePeriodOf, T::BlockNumber), - /// An auction ended. All funds become unreserved. [auction_index] + /// An auction ended. All funds become unreserved. `[auction_index]` AuctionClosed(AuctionIndex), /// Funds were reserved for a winning bid. First balance is the extra amount reserved. - /// Second is the total. [bidder, extra_reserved, total_amount] + /// Second is the total. `[bidder, extra_reserved, total_amount]` Reserved(T::AccountId, BalanceOf, BalanceOf), - /// Funds were unreserved since bidder is no longer active. [bidder, amount] + /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` Unreserved(T::AccountId, BalanceOf), /// Someone attempted to lease the same slot twice for a parachain. The amount is held in reserve /// but no parachain slot has been leased. 
- /// \[parachain_id, leaser, amount\] + /// `[parachain_id, leaser, amount]` ReserveConfiscated(ParaId, T::AccountId, BalanceOf), /// A new bid has been accepted as the current winner. - /// \[who, para_id, amount, first_slot, last_slot\] + /// `[who, para_id, amount, first_slot, last_slot]` BidAccepted(T::AccountId, ParaId, BalanceOf, LeasePeriodOf, LeasePeriodOf), /// The winning offset was chosen for an auction. This will map into the `Winning` storage map. - /// \[auction_index, block_number\] + /// `[auction_index, block_number]` WinningOffset(AuctionIndex, T::BlockNumber), } @@ -565,7 +565,7 @@ impl Pallet { /// Calculate the final winners from the winning slots. /// /// This is a simple dynamic programming algorithm designed by Al, the original code is at: - /// https://github.com/w3f/consensus/blob/master/NPoS/auctiondynamicthing.py + /// `https://github.com/w3f/consensus/blob/master/NPoS/auctiondynamicthing.py` fn calculate_winners( mut winning: WinningData ) -> WinnersData { diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index 18dbe4502d66..c06db1d0d9cc 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -157,7 +157,7 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] #[pallet::metadata(T::AccountId = "AccountId", BalanceOf = "Balance")] pub enum Event { - /// Someone claimed some DOTs. [who, ethereum_address, amount] + /// Someone claimed some DOTs. `[who, ethereum_address, amount]` Claimed(T::AccountId, EthereumAddress, BalanceOf), } @@ -167,7 +167,7 @@ pub mod pallet { InvalidEthereumSignature, /// Ethereum address has no claim. SignerHasNoClaim, - /// Account ID sending tx has no claim. + /// Account ID sending transaction has no claim. SenderHasNoClaim, /// There's not enough in the pot to pay out some unvested amount. Generally implies a logic /// error. diff --git a/runtime/common/src/crowdloan.rs b/runtime/common/src/crowdloan.rs index 356431fe04d5..8215b9ac293c 100644 --- a/runtime/common/src/crowdloan.rs +++ b/runtime/common/src/crowdloan.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! # Parachain Crowdloaning pallet +//! # Parachain `Crowdloaning` pallet //! //! The point of this pallet is to allow parachain projects to offer the ability to help fund a //! deposit for the parachain. When the crowdloan has ended, the funds are returned. @@ -136,11 +136,11 @@ pub struct FundInfo { /// If this is `Ending(n)`, this fund received a contribution during the current ending period, /// where `n` is how far into the ending period the contribution was made. last_contribution: LastContribution, - /// First lease period in range to bid on; it's actually a LeasePeriod, but that's the same type - /// as BlockNumber. + /// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type + /// as `BlockNumber`. first_period: LeasePeriod, - /// Last lease period in range to bid on; it's actually a LeasePeriod, but that's the same type - /// as BlockNumber. + /// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type + /// as `BlockNumber`. last_period: LeasePeriod, /// Index used for the child trie of this fund trie_index: TrieIndex, @@ -160,7 +160,7 @@ pub mod pallet { pub trait Config: frame_system::Config { type Event: From> + IsType<::Event>; - /// PalletId for the crowdloan pallet. 
An appropriate value could be ```PalletId(*b"py/cfund")``` + /// `PalletId` for the crowdloan pallet. An appropriate value could be `PalletId(*b"py/cfund")` #[pallet::constant] type PalletId: Get; @@ -168,7 +168,7 @@ pub mod pallet { type SubmissionDeposit: Get>; /// The minimum amount that may be contributed into a crowdloan. Should almost certainly be at - /// least ExistentialDeposit. + /// least `ExistentialDeposit`. #[pallet::constant] type MinContribution: Get>; @@ -176,7 +176,7 @@ pub mod pallet { #[pallet::constant] type RemoveKeysLimit: Get; - /// The parachain registrar type. We jus use this to ensure that only the manager of a para is able to + /// The parachain registrar type. We just use this to ensure that only the manager of a para is able to /// start a crowdloan for its slot. type Registrar: Registrar; @@ -223,26 +223,26 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] #[pallet::metadata(T::AccountId = "AccountId", BalanceOf = "Balance")] pub enum Event { - /// Create a new crowdloaning campaign. [fund_index] + /// Create a new crowdloaning campaign. `[fund_index]` Created(ParaId), - /// Contributed to a crowd sale. [who, fund_index, amount] + /// Contributed to a crowd sale. `[who, fund_index, amount]` Contributed(T::AccountId, ParaId, BalanceOf), - /// Withdrew full balance of a contributor. [who, fund_index, amount] + /// Withdrew full balance of a contributor. `[who, fund_index, amount]` Withdrew(T::AccountId, ParaId, BalanceOf), /// The loans in a fund have been partially dissolved, i.e. there are some left - /// over child keys that still need to be killed. [fund_index] + /// over child keys that still need to be killed. `[fund_index]` PartiallyRefunded(ParaId), - /// All loans in a fund have been refunded. [fund_index] + /// All loans in a fund have been refunded. `[fund_index]` AllRefunded(ParaId), - /// Fund is dissolved. [fund_index] + /// Fund is dissolved. `[fund_index]` Dissolved(ParaId), /// The result of trying to submit a new bid to the Slots pallet. HandleBidResult(ParaId, DispatchResult), - /// The configuration to a crowdloan has been edited. [fund_index] + /// The configuration to a crowdloan has been edited. `[fund_index]` Edited(ParaId), - /// A memo has been updated. [who, fund_index, memo] + /// A memo has been updated. `[who, fund_index, memo]` MemoUpdated(T::AccountId, ParaId, Vec), - /// A parachain has been moved to NewRaise + /// A parachain has been moved to `NewRaise` AddedToNewRaise(ParaId), } @@ -288,7 +288,7 @@ pub mod pallet { InvalidSignature, /// The provided memo is too large. MemoTooLarge, - /// The fund is already in NewRaise + /// The fund is already in `NewRaise` AlreadyInNewRaise, /// No contributions allowed during the VRF delay VrfDelayInProgress, @@ -637,7 +637,7 @@ pub mod pallet { Ok(()) } - /// Poke the fund into NewRaise + /// Poke the fund into `NewRaise` /// /// Origin must be Signed, and the fund has non-zero raise. #[pallet::weight(T::WeightInfo::poke())] diff --git a/runtime/common/src/impls.rs b/runtime/common/src/impls.rs index 0549518ff6a1..655b6b3401a0 100644 --- a/runtime/common/src/impls.rs +++ b/runtime/common/src/impls.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Auxillary struct/enums for polkadot runtime. +//! Auxiliary `struct`/`enum`s for polkadot runtime. 
use frame_support::traits::{OnUnbalanced, Imbalance, Currency}; use crate::NegativeImbalance; diff --git a/runtime/common/src/lib.rs b/runtime/common/src/lib.rs index 7d23da786839..725cf95b8b40 100644 --- a/runtime/common/src/lib.rs +++ b/runtime/common/src/lib.rs @@ -121,7 +121,7 @@ pub type SlowAdjustingFeeUpdate = TargetedFeeAdjustment< /// The type used for currency conversion. /// -/// This must only be used as long as the balance type is u128. +/// This must only be used as long as the balance type is `u128`. pub type CurrencyToVote = frame_support::traits::U128CurrencyToVote; static_assertions::assert_eq_size!(primitives::v1::Balance, u128); diff --git a/runtime/common/src/mmr.rs b/runtime/common/src/mmr.rs index 2cf2bf115cd0..6ba20bb04654 100644 --- a/runtime/common/src/mmr.rs +++ b/runtime/common/src/mmr.rs @@ -44,7 +44,7 @@ impl pallet_mmr::primitives::OnNewRoot for Dep } } -/// Convert BEEFY secp256k1 public keys into uncompressed form +/// Convert BEEFY `secp256k1` public keys into uncompressed form pub struct UncompressBeefyEcdsaKeys; impl Convert> for UncompressBeefyEcdsaKeys { fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { @@ -60,7 +60,7 @@ impl Convert> for UncompressBeefy } } -/// A leaf that gets added every block to the MMR constructed by [pallet_mmr]. +/// A leaf that gets added every block to the MMR constructed by `[pallet_mmr]`. #[derive(RuntimeDebug, PartialEq, Eq, Clone, Encode, Decode)] pub struct MmrLeaf { /// Current block parent number and hash. @@ -86,7 +86,7 @@ pub struct BeefyNextAuthoritySet { /// of signatures. We put set length here, so that these clients can verify the minimal /// number of required signatures. pub len: u32, - /// Merkle Root Hash build from BEEFY AuthorityIds. + /// Merkle Root Hash built from BEEFY `AuthorityIds`. /// /// This is used by Light Clients to confirm that the commitments are signed by the correct /// validator set. Light Clients using interactive protocol, might verify only subset of @@ -132,9 +132,9 @@ pub mod pallet { #[pallet::config] #[pallet::disable_frame_system_supertrait_check] pub trait Config: pallet_mmr::Config + pallet_beefy::Config { - /// Convert BEEFY AuthorityId to a form that would end up in the Merkle Tree. + /// Convert BEEFY `AuthorityId` to a form that would end up in the Merkle Tree. /// - /// For instance for ECDSA (secp256k1) we want to store uncompressed public keys (65 bytes) + /// For instance for ECDSA (`secp256k1`) we want to store uncompressed public keys (65 bytes) /// to simplify using them on Ethereum chain, but the rest of the Substrate codebase /// is storing them compressed (33 bytes) for efficiency reasons. type BeefyAuthorityToMerkleLeaf: Convert<::BeefyId, Vec>; @@ -186,7 +186,7 @@ impl Pallet where /// NOTE this does not include parathreads - only parachains are part of the merkle tree. /// /// NOTE This is an initial and inefficient implementation, which re-constructs - /// the merkle tree every block. Instead we should update the merkle root in [Self::on_initialize] + /// the merkle tree every block. Instead we should update the merkle root in `[Self::on_initialize]` /// call of this pallet and update the merkle tree efficiently (use on-chain storage to persist inner nodes). fn parachain_heads_merkle_root() -> MerkleRootOf { let para_heads = T::ParachainHeads::encoded_heads(); @@ -196,7 +196,7 @@ impl Pallet where /// Returns details of the next BEEFY authority set.
/// /// Details contain authority set id, authority set length and a merkle root, - /// constructed from uncompressed secp256k1 public keys of the next BEEFY authority set. + /// constructed from uncompressed `secp256k1` public keys of the next BEEFY authority set. /// /// This function will use a storage-cached entry in case the set didn't change, or compute and cache /// new one in case it did. diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 8de23ea3bb30..831c0715f265 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -202,7 +202,7 @@ pub mod pallet { /// /// This function must be called by a Root origin. /// - /// The deposit taken can be specified for this registration. Any ParaId + /// The deposit taken can be specified for this registration. Any `ParaId` /// can be registered, including sub-1000 IDs which are System Parachains. #[pallet::weight(T::WeightInfo::force_register())] pub fn force_register( @@ -344,7 +344,7 @@ impl Registrar for Pallet { // Register a Para ID under control of `manager`. // - // Note this is a backend registration api, so verification of ParaId + // Note this is a backend registration API, so verification of ParaId // is not done here to prevent. fn register( manager: T::AccountId, diff --git a/runtime/common/src/purchase.rs b/runtime/common/src/purchase.rs index 563bbe940c6f..fb50622b20f7 100644 --- a/runtime/common/src/purchase.rs +++ b/runtime/common/src/purchase.rs @@ -140,7 +140,7 @@ pub mod pallet { PaymentAccountSet(T::AccountId), /// A new statement was set. StatementUpdated, - /// A new statement was set. [block_number] + /// A new statement was set. `[block_number]` UnlockBlockUpdated(T::BlockNumber), } @@ -226,7 +226,7 @@ pub mod pallet { /// Update the validity status of an existing account. If set to completed, the account /// will no longer be able to continue through the crowdfund process. /// - /// We check tht the account exists at this stage, but has not completed the process. + /// We check that the account exists at this stage, but has not completed the process. /// /// Origin must match the `ValidityOrigin`. #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] @@ -248,7 +248,7 @@ pub mod pallet { /// Update the balance of a valid account. /// - /// We check tht the account is valid for a balance transfer at this point. + /// We check that the account is valid for a balance transfer at this point. /// /// Origin must match the `ValidityOrigin`. #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] diff --git a/runtime/common/src/slot_range.rs b/runtime/common/src/slot_range.rs index 3dcda158dd33..ea09d1f08384 100644 --- a/runtime/common/src/slot_range.rs +++ b/runtime/common/src/slot_range.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The SlotRange struct which succinctly handles the 36 values that +//! The `SlotRange` struct which succinctly handles the 36 values that //! represent all sub ranges between 0 and 7 inclusive. slot_range_helper::generate_slot_range!(Zero(0), One(1), Two(2), Three(3), Four(4), Five(5), Six(6), Seven(7)); diff --git a/runtime/common/src/slots.rs b/runtime/common/src/slots.rs index 599ec3609900..2fb0c47be272 100644 --- a/runtime/common/src/slots.rs +++ b/runtime/common/src/slots.rs @@ -97,12 +97,12 @@ decl_event!( ParaId = ParaId, Balance = BalanceOf, { - /// A new [lease_period] is beginning. 
+ /// A new `[lease_period]` is beginning. NewLeasePeriod(LeasePeriod), /// A para has won the right to a continuous set of lease periods as a parachain. /// First balance is any extra amount reserved on top of the para's existing deposit. /// Second balance is the total amount reserved. - /// \[parachain_id, leaser, period_begin, period_count, extra_reserved, total_amount\] + /// `[parachain_id, leaser, period_begin, period_count, extra_reserved, total_amount]` Leased(ParaId, AccountId, LeasePeriod, LeasePeriod, Balance, Balance), } ); @@ -135,7 +135,7 @@ decl_module! { } } - /// Just a hotwire into the `lease_out` call, in case Root wants to force some lease to happen + /// Just a connection to the `lease_out` call, in case Root wants to force some lease to happen /// independently of any other on-chain mechanism to use it. /// /// Can only be called by the Root origin. diff --git a/runtime/common/src/traits.rs b/runtime/common/src/traits.rs index 35a369bb292d..1c42902dcb4c 100644 --- a/runtime/common/src/traits.rs +++ b/runtime/common/src/traits.rs @@ -31,18 +31,18 @@ pub trait Registrar { /// Report the manager (permissioned owner) of a parachain, if there is one. fn manager_of(id: ParaId) -> Option; - /// All parachains. Ordered ascending by ParaId. Parathreads are not included. + /// All parachains. Ordered ascending by `ParaId`. Parathreads are not included. fn parachains() -> Vec; - /// Return if a ParaId is a Parachain. + /// Return if a `ParaId` is a Parachain. fn is_parachain(id: ParaId) -> bool { Self::parachains().binary_search(&id).is_ok() } - /// Return if a ParaId is a Parathread. + /// Return if a `ParaId` is a Parathread. fn is_parathread(id: ParaId) -> bool; - /// Return if a ParaId is registered in the system. + /// Return if a `ParaId` is registered in the system. fn is_registered(id: ParaId) -> bool { Self::is_parathread(id) || Self::is_parachain(id) } @@ -109,9 +109,9 @@ pub trait Leaser { /// Lease a new parachain slot for `para`. /// - /// `leaser` shall have a total of `amount` balance reserved by the implementor of this trait. + /// `leaser` shall have a total of `amount` balance reserved by the implementer of this trait. /// - /// Note: The implementor of the trait (the leasing system) is expected to do all reserve/unreserve calls. The + /// Note: The implementer of the trait (the leasing system) is expected to do all reserve/unreserve calls. The /// caller of this trait *SHOULD NOT* pre-reserve the deposit (though should ensure that it is reservable). /// /// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if the `para` diff --git a/runtime/common/src/xcm_sender.rs b/runtime/common/src/xcm_sender.rs index 0b817a237fb9..bf1572c45432 100644 --- a/runtime/common/src/xcm_sender.rs +++ b/runtime/common/src/xcm_sender.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Xcm sender for relay chain. +//! XCM sender for relay chain. use parity_scale_codec::Encode; use sp_std::marker::PhantomData; use xcm::opaque::{VersionedXcm, v0::{SendXcm, MultiLocation, Junction, Xcm, Result, Error}}; use runtime_parachains::{configuration, dmp}; -/// Xcm sender for relay chain. It only sends downward message. +/// XCM sender for relay chain. It only sends downward messages.
pub struct ChildParachainRouter(PhantomData); impl SendXcm for ChildParachainRouter { diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index 8d26bd06a524..cf1838bc91b9 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -246,10 +246,10 @@ try-runtime = [ "pallet-babe/try-runtime", "runtime-common/try-runtime", ] -# When enabled, the runtime api will not be build. +# When enabled, the runtime API will not be built. # # This is required by Cumulus to access certain types of the -# runtime without clashing with the runtime api exported functions +# runtime without clashing with the runtime API exported functions # in WASM. disable-runtime-api = [] diff --git a/runtime/kusama/src/constants.rs b/runtime/kusama/src/constants.rs index 29040efb481a..0ccbdee37eb6 100644 --- a/runtime/kusama/src/constants.rs +++ b/runtime/kusama/src/constants.rs @@ -62,7 +62,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] + /// - [0, `MAXIMUM_BLOCK_WEIGHT`] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 8b14506dfe36..21968806e88b 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -553,7 +553,7 @@ impl pallet_democracy::Config for Runtime { /// A unanimous council can have the next scheduled referendum be a straight default-carries /// (NTB) vote. type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; - /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote + /// Two thirds of the technical committee can have an `ExternalMajority/ExternalDefault` vote /// be tabled immediately and with a shorter voting/enactment period. type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; @@ -1510,9 +1510,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, diff --git a/runtime/kusama/src/weights/frame_system.rs b/runtime/kusama/src/weights/frame_system.rs index e5c7f504a094..ec892350aad0 100644 --- a/runtime/kusama/src/weights/frame_system.rs +++ b/runtime/kusama/src/weights/frame_system.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for frame_system +//! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for frame_system. +/// Weight functions for `frame_system`.
pub struct WeightInfo(PhantomData); impl frame_system::WeightInfo for WeightInfo { fn remark(_b: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_balances.rs b/runtime/kusama/src/weights/pallet_balances.rs index 747863a737a6..edc3b81e6819 100644 --- a/runtime/kusama/src/weights/pallet_balances.rs +++ b/runtime/kusama/src/weights/pallet_balances.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_balances +//! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_balances. +/// Weight functions for `pallet_balances`. pub struct WeightInfo(PhantomData); impl pallet_balances::WeightInfo for WeightInfo { fn transfer() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_bounties.rs b/runtime/kusama/src/weights/pallet_bounties.rs index 5ca7c519b871..ee5cf0212eac 100644 --- a/runtime/kusama/src/weights/pallet_bounties.rs +++ b/runtime/kusama/src/weights/pallet_bounties.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_bounties +//! Autogenerated weights for `pallet_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_bounties. +/// Weight functions for `pallet_bounties`. pub struct WeightInfo(PhantomData); impl pallet_bounties::WeightInfo for WeightInfo { fn propose_bounty(d: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_collective.rs b/runtime/kusama/src/weights/pallet_collective.rs index 8fc3e1c564ea..9a59fb34bb01 100644 --- a/runtime/kusama/src/weights/pallet_collective.rs +++ b/runtime/kusama/src/weights/pallet_collective.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_collective +//! Autogenerated weights for `pallet_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_collective. +/// Weight functions for `pallet_collective`. pub struct WeightInfo(PhantomData); impl pallet_collective::WeightInfo for WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_democracy.rs b/runtime/kusama/src/weights/pallet_democracy.rs index 36bff259cb60..b3263b2c3abf 100644 --- a/runtime/kusama/src/weights/pallet_democracy.rs +++ b/runtime/kusama/src/weights/pallet_democracy.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_democracy +//! Autogenerated weights for `pallet_democracy` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_democracy. +/// Weight functions for `pallet_democracy`. pub struct WeightInfo(PhantomData); impl pallet_democracy::WeightInfo for WeightInfo { fn propose() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_election_provider_multi_phase.rs b/runtime/kusama/src/weights/pallet_election_provider_multi_phase.rs index 6cf5682e96fe..b6de878cf7c4 100644 --- a/runtime/kusama/src/weights/pallet_election_provider_multi_phase.rs +++ b/runtime/kusama/src/weights/pallet_election_provider_multi_phase.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_election_provider_multi_phase +//! Autogenerated weights for `pallet_election_provider_multi_phase` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-18, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_election_provider_multi_phase. +/// Weight functions for `pallet_election_provider_multi_phase`. pub struct WeightInfo(PhantomData); impl pallet_election_provider_multi_phase::WeightInfo for WeightInfo { fn on_initialize_nothing() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_elections_phragmen.rs b/runtime/kusama/src/weights/pallet_elections_phragmen.rs index df20c7b17afd..7d732d080c04 100644 --- a/runtime/kusama/src/weights/pallet_elections_phragmen.rs +++ b/runtime/kusama/src/weights/pallet_elections_phragmen.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_elections_phragmen +//! Autogenerated weights for `pallet_elections_phragmen` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 //! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_elections_phragmen. +/// Weight functions for `pallet_elections_phragmen`. pub struct WeightInfo(PhantomData); impl pallet_elections_phragmen::WeightInfo for WeightInfo { fn vote_equal(v: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_gilt.rs b/runtime/kusama/src/weights/pallet_gilt.rs index 41bbd987e622..28813c9a5c9a 100644 --- a/runtime/kusama/src/weights/pallet_gilt.rs +++ b/runtime/kusama/src/weights/pallet_gilt.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_gilt +//! Autogenerated weights for `pallet_gilt` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_gilt. +/// Weight functions for `pallet_gilt`. 
pub struct WeightInfo(PhantomData); impl pallet_gilt::WeightInfo for WeightInfo { fn place_bid(l: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_identity.rs b/runtime/kusama/src/weights/pallet_identity.rs index bef743c77a4f..87f881c36d8b 100644 --- a/runtime/kusama/src/weights/pallet_identity.rs +++ b/runtime/kusama/src/weights/pallet_identity.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_identity +//! Autogenerated weights for `pallet_identity` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_identity. +/// Weight functions for `pallet_identity`. pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { fn add_registrar(r: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_im_online.rs b/runtime/kusama/src/weights/pallet_im_online.rs index 481009d3d756..7112c0b36ff8 100644 --- a/runtime/kusama/src/weights/pallet_im_online.rs +++ b/runtime/kusama/src/weights/pallet_im_online.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_im_online +//! Autogenerated weights for `pallet_im_online` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_im_online. +/// Weight functions for `pallet_im_online`. pub struct WeightInfo(PhantomData); impl pallet_im_online::WeightInfo for WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_indices.rs b/runtime/kusama/src/weights/pallet_indices.rs index 213620648940..fb6ef191cad4 100644 --- a/runtime/kusama/src/weights/pallet_indices.rs +++ b/runtime/kusama/src/weights/pallet_indices.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_indices +//! Autogenerated weights for `pallet_indices` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_indices. +/// Weight functions for `pallet_indices`. pub struct WeightInfo(PhantomData); impl pallet_indices::WeightInfo for WeightInfo { fn claim() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_membership.rs b/runtime/kusama/src/weights/pallet_membership.rs index 29cad36612f6..0f5083e6e87e 100644 --- a/runtime/kusama/src/weights/pallet_membership.rs +++ b/runtime/kusama/src/weights/pallet_membership.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_membership +//! Autogenerated weights for `pallet_membership` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_membership. +/// Weight functions for `pallet_membership`. pub struct WeightInfo(PhantomData); impl pallet_membership::WeightInfo for WeightInfo { fn add_member(m: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_multisig.rs b/runtime/kusama/src/weights/pallet_multisig.rs index c2ac465aaada..0779f8a189de 100644 --- a/runtime/kusama/src/weights/pallet_multisig.rs +++ b/runtime/kusama/src/weights/pallet_multisig.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_multisig +//! Autogenerated weights for `pallet_multisig` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_multisig. +/// Weight functions for `pallet_multisig`. pub struct WeightInfo(PhantomData); impl pallet_multisig::WeightInfo for WeightInfo { fn as_multi_threshold_1(_z: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_proxy.rs b/runtime/kusama/src/weights/pallet_proxy.rs index 8b0b2801beda..5f551494e1ab 100644 --- a/runtime/kusama/src/weights/pallet_proxy.rs +++ b/runtime/kusama/src/weights/pallet_proxy.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_proxy +//! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_proxy. +/// Weight functions for `pallet_proxy`. pub struct WeightInfo(PhantomData); impl pallet_proxy::WeightInfo for WeightInfo { fn proxy(p: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_scheduler.rs b/runtime/kusama/src/weights/pallet_scheduler.rs index a89f745c7a1e..fe7b45604b71 100644 --- a/runtime/kusama/src/weights/pallet_scheduler.rs +++ b/runtime/kusama/src/weights/pallet_scheduler.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_scheduler +//! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_scheduler. +/// Weight functions for `pallet_scheduler`. 
pub struct WeightInfo(PhantomData); impl pallet_scheduler::WeightInfo for WeightInfo { fn schedule(s: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_session.rs b/runtime/kusama/src/weights/pallet_session.rs index 4c9a9fe8e5b0..1ebe0adeac8a 100644 --- a/runtime/kusama/src/weights/pallet_session.rs +++ b/runtime/kusama/src/weights/pallet_session.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_session +//! Autogenerated weights for `pallet_session` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_session. +/// Weight functions for `pallet_session`. pub struct WeightInfo(PhantomData); impl pallet_session::WeightInfo for WeightInfo { fn set_keys() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_staking.rs b/runtime/kusama/src/weights/pallet_staking.rs index c517d84c6ed4..af87a6f6b041 100644 --- a/runtime/kusama/src/weights/pallet_staking.rs +++ b/runtime/kusama/src/weights/pallet_staking.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_staking +//! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_staking. +/// Weight functions for `pallet_staking`. pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { fn bond() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_timestamp.rs b/runtime/kusama/src/weights/pallet_timestamp.rs index cb9098577a61..62c648462aaf 100644 --- a/runtime/kusama/src/weights/pallet_timestamp.rs +++ b/runtime/kusama/src/weights/pallet_timestamp.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_timestamp +//! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_timestamp. +/// Weight functions for `pallet_timestamp`. pub struct WeightInfo(PhantomData); impl pallet_timestamp::WeightInfo for WeightInfo { fn set() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_tips.rs b/runtime/kusama/src/weights/pallet_tips.rs index bf358e3b80ef..13e0f4b9f4e4 100644 --- a/runtime/kusama/src/weights/pallet_tips.rs +++ b/runtime/kusama/src/weights/pallet_tips.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_tips +//! Autogenerated weights for `pallet_tips` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! 
DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_tips. +/// Weight functions for `pallet_tips`. pub struct WeightInfo(PhantomData); impl pallet_tips::WeightInfo for WeightInfo { fn report_awesome(r: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_treasury.rs b/runtime/kusama/src/weights/pallet_treasury.rs index 80e169edc7db..2f8423a055a1 100644 --- a/runtime/kusama/src/weights/pallet_treasury.rs +++ b/runtime/kusama/src/weights/pallet_treasury.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_treasury +//! Autogenerated weights for `pallet_treasury` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_treasury. +/// Weight functions for `pallet_treasury`. pub struct WeightInfo(PhantomData); impl pallet_treasury::WeightInfo for WeightInfo { fn propose_spend() -> Weight { diff --git a/runtime/kusama/src/weights/pallet_utility.rs b/runtime/kusama/src/weights/pallet_utility.rs index 66f9857c00ec..ce79e4f0a243 100644 --- a/runtime/kusama/src/weights/pallet_utility.rs +++ b/runtime/kusama/src/weights/pallet_utility.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_utility +//! Autogenerated weights for `pallet_utility` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_utility. +/// Weight functions for `pallet_utility`. pub struct WeightInfo(PhantomData); impl pallet_utility::WeightInfo for WeightInfo { fn batch(c: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/pallet_vesting.rs b/runtime/kusama/src/weights/pallet_vesting.rs index af19647c27e0..af97d0b84b32 100644 --- a/runtime/kusama/src/weights/pallet_vesting.rs +++ b/runtime/kusama/src/weights/pallet_vesting.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_vesting +//! Autogenerated weights for `pallet_vesting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_vesting. +/// Weight functions for `pallet_vesting`. 
pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { fn vest_locked(l: u32, ) -> Weight { diff --git a/runtime/kusama/src/weights/runtime_common_auctions.rs b/runtime/kusama/src/weights/runtime_common_auctions.rs index f3fd08ec3ae5..100719bcb10e 100644 --- a/runtime/kusama/src/weights/runtime_common_auctions.rs +++ b/runtime/kusama/src/weights/runtime_common_auctions.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::auctions +//! Autogenerated weights for `runtime_common::auctions` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::auctions. +/// Weight functions for `runtime_common::auctions`. pub struct WeightInfo(PhantomData); impl runtime_common::auctions::WeightInfo for WeightInfo { fn new_auction() -> Weight { diff --git a/runtime/kusama/src/weights/runtime_common_claims.rs b/runtime/kusama/src/weights/runtime_common_claims.rs index 94a4bd36f9e0..2e54af89de44 100644 --- a/runtime/kusama/src/weights/runtime_common_claims.rs +++ b/runtime/kusama/src/weights/runtime_common_claims.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::claims +//! Autogenerated weights for `runtime_common::claims` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::claims. +/// Weight functions for `runtime_common::claims`. pub struct WeightInfo(PhantomData); impl runtime_common::claims::WeightInfo for WeightInfo { fn claim() -> Weight { diff --git a/runtime/kusama/src/weights/runtime_common_crowdloan.rs b/runtime/kusama/src/weights/runtime_common_crowdloan.rs index d269b826db36..a04eda47ab91 100644 --- a/runtime/kusama/src/weights/runtime_common_crowdloan.rs +++ b/runtime/kusama/src/weights/runtime_common_crowdloan.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::crowdloan +//! Autogenerated weights for `runtime_common::crowdloan` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::crowdloan. +/// Weight functions for `runtime_common::crowdloan`. 
pub struct WeightInfo(PhantomData); impl runtime_common::crowdloan::WeightInfo for WeightInfo { fn create() -> Weight { diff --git a/runtime/kusama/src/weights/runtime_common_paras_registrar.rs b/runtime/kusama/src/weights/runtime_common_paras_registrar.rs index 666e1edffffe..f0edfc18916d 100644 --- a/runtime/kusama/src/weights/runtime_common_paras_registrar.rs +++ b/runtime/kusama/src/weights/runtime_common_paras_registrar.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::paras_registrar +//! Autogenerated weights for `runtime_common::paras_registrar` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::paras_registrar. +/// Weight functions for `runtime_common::paras_registrar`. pub struct WeightInfo(PhantomData); impl runtime_common::paras_registrar::WeightInfo for WeightInfo { fn reserve() -> Weight { diff --git a/runtime/kusama/src/weights/runtime_common_slots.rs b/runtime/kusama/src/weights/runtime_common_slots.rs index b1867c175c8c..591ddf724ac9 100644 --- a/runtime/kusama/src/weights/runtime_common_slots.rs +++ b/runtime/kusama/src/weights/runtime_common_slots.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::slots +//! Autogenerated weights for `runtime_common::slots` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::slots. +/// Weight functions for `runtime_common::slots`. pub struct WeightInfo(PhantomData); impl runtime_common::slots::WeightInfo for WeightInfo { fn force_lease() -> Weight { diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 5b500b9d0ded..04ff169a0a79 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -146,7 +146,7 @@ pub struct HostConfiguration { /// /// `None` means no maximum. pub max_validators_per_core: Option, - /// The maximum number of valdiators to use for parachain consensus, period. + /// The maximum number of validators to use for parachain consensus, period. /// /// `None` means no maximum. pub max_validators: Option, @@ -170,7 +170,7 @@ pub struct HostConfiguration { pub zeroth_delay_tranche_width: u32, /// The number of validators needed to approve a block. pub needed_approvals: u32, - /// The number of samples to do of the RelayVRFModulo approval assignment criterion. + /// The number of samples to do of the `RelayVRFModulo` approval assignment criterion. pub relay_vrf_modulo_samples: u32, } @@ -527,7 +527,7 @@ decl_module! { Ok(()) } - /// Set the number of samples to do of the RelayVRFModulo approval assignment criterion. + /// Set the number of samples to do of the `RelayVRFModulo` approval assignment criterion. 
#[weight = (1_000, DispatchClass::Operational)] pub fn set_relay_vrf_modulo_samples(origin, new: u32) -> DispatchResult { ensure_root(origin)?; diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index e1d7d3d57c08..a8fbcdbfe28b 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -242,7 +242,7 @@ decl_storage! { HrmpOpenChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option; HrmpOpenChannelRequestsList: Vec; - /// This mapping tracks how many open channel requests are inititated by a given sender para. + /// This mapping tracks how many open channel requests are initiated by a given sender para. /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` /// as the number of `HrmpOpenChannelRequestCount` for `X`. HrmpOpenChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; @@ -291,7 +291,7 @@ decl_storage! { /// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`. HrmpChannelContents: map hasher(twox_64_concat) HrmpChannelId => Vec>; /// Maintains a mapping that can be used to answer the question: - /// What paras sent a message at the given block number for a given reciever. + /// What paras sent a message at the given block number for a given receiver. /// Invariants: /// - The inner `Vec` is never empty. /// - The inner `Vec` cannot store two same `ParaId`. @@ -384,11 +384,11 @@ decl_error! { decl_event! { pub enum Event { /// Open HRMP channel requested. - /// \[sender, recipient, proposed_max_capacity, proposed_max_message_size\] + /// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]` OpenChannelRequested(ParaId, ParaId, u32, u32), - /// Open HRMP channel accepted. \[sender, recipient\] + /// Open HRMP channel accepted. `[sender, recipient]` OpenChannelAccepted(ParaId, ParaId), - /// HRMP channel closed. \[by_parachain, channel_id\] + /// HRMP channel closed. `[by_parachain, channel_id]` ChannelClosed(ParaId, HrmpChannelId), } } @@ -468,7 +468,7 @@ decl_module! { Ok(()) } - /// Force process hrmp open channel requests. + /// Force process HRMP open channel requests. /// /// If there are pending HRMP open channel requests, you can use this /// function process all of those requests immediately. @@ -480,7 +480,7 @@ decl_module! { Ok(()) } - /// Force process hrmp close channel requests. + /// Force process HRMP close channel requests. /// /// If there are pending HRMP close channel requests, you can use this /// function process all of those requests immediately. @@ -667,7 +667,7 @@ impl Module { /// /// This includes returning the deposits. /// - /// This function is indempotent, meaning that after the first application it should have no + /// This function is idempotent, meaning that after the first application it should have no /// effect (i.e. it won't return the deposits twice). fn close_hrmp_channel(channel_id: &HrmpChannelId) { if let Some(HrmpChannel { diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index eb58f315a0fa..e6e159b6ee22 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -92,7 +92,7 @@ impl CandidatePendingAvailability { self.hash } - /// Get the canddiate descriptor. + /// Get the candidate descriptor. pub(crate) fn candidate_descriptor(&self) -> &CandidateDescriptor { &self.descriptor } @@ -131,7 +131,7 @@ decl_storage! 
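The `hrmp.rs` hunk above documents `close_hrmp_channel` as idempotent: after the first application it has no effect and the deposits are not returned twice. A small sketch of that pattern, assuming heavily simplified stand-ins (a `HashMap` instead of pallet storage, integer deposits, a made-up `ChannelId`), purely to show why removing the channel on first use gives idempotency.

```rust
use std::collections::HashMap;

// Simplified stand-ins for the pallet's storage and types.
type ChannelId = (u32, u32); // (sender para, recipient para)
struct Channel { sender_deposit: u64, recipient_deposit: u64 }

struct Hrmp {
    channels: HashMap<ChannelId, Channel>,
    refunded: u64, // total deposits handed back, for illustration
}

impl Hrmp {
    /// Close a channel and return its deposits. Because the channel entry is
    /// removed on the first call, a second call finds nothing and does
    /// nothing: the operation is idempotent and deposits are refunded once.
    fn close_hrmp_channel(&mut self, id: &ChannelId) {
        if let Some(channel) = self.channels.remove(id) {
            self.refunded += channel.sender_deposit + channel.recipient_deposit;
        }
    }
}

fn main() {
    let mut hrmp = Hrmp { channels: HashMap::new(), refunded: 0 };
    hrmp.channels.insert((1, 2), Channel { sender_deposit: 5, recipient_deposit: 5 });
    hrmp.close_hrmp_channel(&(1, 2));
    hrmp.close_hrmp_channel(&(1, 2)); // second application has no effect
    assert_eq!(hrmp.refunded, 10);
}
```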
{ PendingAvailability: map hasher(twox_64_concat) ParaId => Option>; - /// The commitments of candidates pending availability, by ParaId. + /// The commitments of candidates pending availability, by `ParaId`. PendingAvailabilityCommitments: map hasher(twox_64_concat) ParaId => Option; } @@ -192,11 +192,11 @@ decl_error! { decl_event! { pub enum Event where ::Hash { - /// A candidate was backed. [candidate, head_data] + /// A candidate was backed. `[candidate, head_data]` CandidateBacked(CandidateReceipt, HeadData, CoreIndex, GroupIndex), - /// A candidate was included. [candidate, head_data] + /// A candidate was included. `[candidate, head_data]` CandidateIncluded(CandidateReceipt, HeadData, CoreIndex, GroupIndex), - /// A candidate timed out. [candidate, head_data] + /// A candidate timed out. `[candidate, head_data]` CandidateTimedOut(CandidateReceipt, HeadData, CoreIndex), } } @@ -232,7 +232,7 @@ impl Module { for _ in >::drain() { } } - /// Process a set of incoming bitfields. Return a vec of cores freed by candidates + /// Process a set of incoming bitfields. Return a `vec` of cores freed by candidates /// becoming available. pub(crate) fn process_bitfields( expected_bits: usize, @@ -781,7 +781,7 @@ impl Module { } } - /// Returns the CommittedCandidateReceipt pending availability for the para provided, if any. + /// Returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. pub(crate) fn candidate_pending_availability(para: ParaId) -> Option> { diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 5bbc56c21455..21e3c2612061 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -35,7 +35,7 @@ pub use pallet::*; pub struct SessionChangeNotification { /// The new validators in the session. pub validators: Vec, - /// The qeueud validators for the following session. + /// The queued validators for the following session. pub queued: Vec, /// The configuration before handling the session change pub prev_config: HostConfiguration, @@ -99,10 +99,10 @@ pub mod pallet { /// Whether the parachains modules have been initialized within this block. /// - /// Semantically a bool, but this guarantees it should never hit the trie, + /// Semantically a `bool`, but this guarantees it should never hit the trie, /// as this is cleared in `on_finalize` and Frame optimizes `None` values to be empty values. /// - /// As a bool, `set(false)` and `remove()` both lead to the next `get()` being false, but one of + /// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one of /// them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for /// the semantics of this variable. #[pallet::storage] diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index ce407963fc39..2ae809006fd4 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -287,15 +287,15 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Current code has been updated for a Para. \[para_id\] + /// Current code has been updated for a Para. `para_id` CurrentCodeUpdated(ParaId), - /// Current head has been updated for a Para. \[para_id\] + /// Current head has been updated for a Para. `para_id` CurrentHeadUpdated(ParaId), - /// A code upgrade has been scheduled for a Para. \[para_id\] + /// A code upgrade has been scheduled for a Para. 
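The `initializer.rs` comment above explains why the "has initialized" flag is kept as `Option<()>` rather than `bool`: clearing an `Option` removes the key from the trie, while writing `false` to a `bool` still performs a storage write. The following toy sketch, with a map standing in for the trie and made-up key and value encodings (this is not FRAME's storage API), just makes that difference concrete.

```rust
use std::collections::BTreeMap;

/// A toy key-value store standing in for the storage trie.
#[derive(Default)]
struct Trie(BTreeMap<&'static str, Vec<u8>>);

impl Trie {
    fn set(&mut self, key: &'static str, value: Vec<u8>) { self.0.insert(key, value); }
    fn remove(&mut self, key: &'static str) { self.0.remove(key); }
    fn entries(&self) -> usize { self.0.len() }
}

fn main() {
    // Modelling the flag as a `bool`: "clearing" it to false still leaves an
    // encoded entry in the trie, i.e. a storage write happened.
    let mut as_bool = Trie::default();
    as_bool.set("HasInitialized", vec![1]); // true
    as_bool.set("HasInitialized", vec![0]); // false, but still stored
    assert_eq!(as_bool.entries(), 1);

    // Modelling it as `Option<()>`: `None` is simply the absence of the key,
    // so clearing it in `on_finalize` leaves nothing behind.
    let mut as_option = Trie::default();
    as_option.set("HasInitialized", vec![]); // Some(())
    as_option.remove("HasInitialized");      // None
    assert_eq!(as_option.entries(), 0);
}
```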
`para_id` CodeUpgradeScheduled(ParaId), - /// A new head has been noted for a Para. \[para_id\] + /// A new head has been noted for a Para. `para_id` NewHeadNoted(ParaId), - /// A para has been queued to execute pending actions. \[para_id\] + /// A para has been queued to execute pending actions. `para_id` ActionQueued(ParaId, SessionIndex), } @@ -313,7 +313,7 @@ pub mod pallet { CannotDowngrade, } - /// All parachains. Ordered ascending by ParaId. Parathreads are not included. + /// All parachains. Ordered ascending by `ParaId`. Parathreads are not included. #[pallet::storage] #[pallet::getter(fn parachains)] pub(super) type Parachains = StorageValue<_, Vec, ValueQuery>; @@ -1443,7 +1443,7 @@ mod tests { run_to_block(expected_at + 1 + 4, None); - // the candidate is in the context of the first descendent of `expected_at`, and triggers + // the candidate is in the context of the first descendant of `expected_at`, and triggers // the upgrade. { Paras::note_new_head(para_id, Default::default(), expected_at + 4); diff --git a/runtime/parachains/src/paras_inherent.rs b/runtime/parachains/src/paras_inherent.rs index c96495d639e3..551a7a1f55b3 100644 --- a/runtime/parachains/src/paras_inherent.rs +++ b/runtime/parachains/src/paras_inherent.rs @@ -53,7 +53,7 @@ decl_storage! { trait Store for Module as ParaInherent { /// Whether the paras inherent was included within this block. /// - /// The `Option<()>` is effectively a bool, but it never hits storage in the `None` variant + /// The `Option<()>` is effectively a `bool`, but it never hits storage in the `None` variant /// due to the guarantees of FRAME's storage APIs. /// /// If this is `None` at the end of the block, we panic and render the block invalid. diff --git a/runtime/parachains/src/runtime_api_impl/v1.rs b/runtime/parachains/src/runtime_api_impl/v1.rs index d51e980c933a..883a0a7f309e 100644 --- a/runtime/parachains/src/runtime_api_impl/v1.rs +++ b/runtime/parachains/src/runtime_api_impl/v1.rs @@ -236,7 +236,7 @@ pub fn session_index_for_child() -> SessionIndex { /// Implementation for the `AuthorityDiscoveryApi::authorities()` function of the runtime API. /// It is a heavy call, but currently only used for authority discovery, so it is fine. -/// Gets next, current and some historical authority ids using session_info module. +/// Gets next, current and some historical authority ids using `session_info` module. pub fn relevant_authority_ids() -> Vec { let current_session_index = session_index_for_child::(); let earliest_stored_session = >::earliest_stored_session(); diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 44e03b701681..d7bf7dbe24a8 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -17,7 +17,7 @@ //! The scheduler module for parachains and parathreads. //! //! This module is responsible for two main tasks: -//! - Paritioning validators into groups and assigning groups to parachains and parathreads +//! - Partitioning validators into groups and assigning groups to parachains and parathreads //! - Scheduling parachains and parathreads //! //! It aims to achieve these tasks with these goals in mind: @@ -183,7 +183,7 @@ decl_storage! { ParathreadClaimIndex: Vec; /// The block number where the session start occurred. Used to track how many group rotations have occurred. 
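The `scheduler.rs` module comment above names its first task as partitioning validators into groups and assigning groups to parachains and parathreads. As a hedged sketch of what a partition like that looks like, here is a simple round-robin split of validator indices into groups; the real scheduler's group rotation and parathread handling are not reproduced, and the function name is invented.

```rust
/// Partition validator indices into `n_cores` groups of near-equal size,
/// assigning validators round-robin. Illustrative only, not the scheduler's
/// actual grouping logic.
fn partition_validators(n_validators: usize, n_cores: usize) -> Vec<Vec<usize>> {
    let n_cores = n_cores.max(1);
    let mut groups = vec![Vec::new(); n_cores];
    for v in 0..n_validators {
        groups[v % n_cores].push(v);
    }
    groups
}

fn main() {
    let groups = partition_validators(10, 3);
    assert_eq!(groups.len(), 3);
    // Group sizes differ by at most one.
    assert!(groups.iter().all(|g| g.len() == 3 || g.len() == 4));
    println!("{:?}", groups);
}
```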
/// - /// Note that in the context of parachains modules the session change is signalled during + /// Note that in the context of parachains modules the session change is signaled during /// the block and enacted at the end of the block (at the finalization stage, to be exact). /// Thus for all intents and purposes the effect of the session change is observed at the /// block following the session change, block number of which we save in this storage value. @@ -644,7 +644,7 @@ impl Module { /// occupied and the candidate occupying it became available. /// /// For parachains, this is always the ID of the parachain and no specified collator. - /// For parathreads, this is based on the next item in the ParathreadQueue assigned to that + /// For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that /// core, and is None if there isn't one. pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { let parachains = >::parachains(); @@ -667,7 +667,7 @@ impl Module { /// occupied and the candidate occupying it became available. /// /// For parachains, this is always the ID of the parachain and no specified collator. - /// For parathreads, this is based on the next item in the ParathreadQueue assigned to that + /// For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that /// core, or if there isn't one, the claim that is currently occupying the core, as long /// as the claim's retries would not exceed the limit. Otherwise None. pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs index 08c4da68e72f..5f2dff984175 100644 --- a/runtime/parachains/src/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -59,7 +59,7 @@ impl UmpSink for () { /// if the message content is unique. pub type MessageId = [u8; 32]; -/// A specific implementation of a UmpSink where messages are in the XCM format +/// A specific implementation of a `UmpSink` where messages are in the XCM format /// and will be forwarded to the XCM Executor. pub struct XcmSink(PhantomData<(XcmExecutor, Config)>); @@ -181,7 +181,7 @@ decl_storage! { /// First item in the tuple is the count of messages and second /// is the total length (in bytes) of the message payloads. /// - /// Note that this is an auxilary mapping: it's possible to tell the byte size and the number of + /// Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of /// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of /// loading the whole message queue if only the total size and count are required. /// @@ -430,7 +430,7 @@ impl Module { /// thus increasing the peak memory consumption of the wasm runtime. Under such conditions persisting /// queues might play better since it's unlikely that they are going to be requested once more. /// -/// On the other hand, the situation when deep queues exist and it takes more than one dipsatcher +/// On the other hand, the situation when deep queues exist and it takes more than one dispatcher /// cycle to traverse the queues is already sub-optimal and better be avoided. /// /// This struct is not supposed to be dropped but rather to be consumed by [`flush`]. 
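The `ump.rs` hunk above describes an auxiliary mapping kept next to `RelayDispatchQueues` that records each queue's message count and total payload bytes, so those numbers can be read without loading the whole queue. A minimal sketch of that bookkeeping pattern under assumed, simplified types (plain `HashMap`s and a `u32` `ParaId`; none of this is the pallet's real storage API).

```rust
use std::collections::{HashMap, VecDeque};

type ParaId = u32;

/// Upward message queues plus an auxiliary (count, total bytes) index that is
/// kept in sync on every push, mirroring the idea described above.
#[derive(Default)]
struct UpwardQueues {
    queues: HashMap<ParaId, VecDeque<Vec<u8>>>,
    sizes: HashMap<ParaId, (u32, u32)>, // (message count, total payload bytes)
}

impl UpwardQueues {
    fn push(&mut self, para: ParaId, msg: Vec<u8>) {
        let size = self.sizes.entry(para).or_insert((0, 0));
        size.0 += 1;
        size.1 += msg.len() as u32;
        self.queues.entry(para).or_default().push_back(msg);
    }

    /// Answer "how full is this queue?" without touching the queue itself.
    fn queue_size(&self, para: ParaId) -> (u32, u32) {
        self.sizes.get(&para).copied().unwrap_or((0, 0))
    }
}

fn main() {
    let mut ump = UpwardQueues::default();
    ump.push(100, vec![0u8; 32]);
    ump.push(100, vec![0u8; 16]);
    assert_eq!(ump.queue_size(100), (2, 48));
}
```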
diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index 4effd887e43e..5655a0ae5ee8 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -221,10 +221,10 @@ try-runtime = [ "pallet-utility/try-runtime", "runtime-common/try-runtime", ] -# When enabled, the runtime api will not be build. +# When enabled, the runtime API will not be build. # # This is required by Cumulus to access certain types of the -# runtime without clashing with the runtime api exported functions +# runtime without clashing with the runtime API exported functions # in WASM. disable-runtime-api = [] diff --git a/runtime/polkadot/src/constants.rs b/runtime/polkadot/src/constants.rs index ecfa74d78664..276046307727 100644 --- a/runtime/polkadot/src/constants.rs +++ b/runtime/polkadot/src/constants.rs @@ -61,7 +61,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] + /// - [0, `MAXIMUM_BLOCK_WEIGHT`] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index ec50f69eba2c..96c11958f5e1 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -521,7 +521,7 @@ impl pallet_democracy::Config for Runtime { pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>, frame_system::EnsureRoot, >; - /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote + /// Two thirds of the technical committee can have an `ExternalMajority/ExternalDefault` vote /// be tabled immediately and with a shorter voting/enactment period. type FastTrackOrigin = frame_system::EnsureOneOf, @@ -1074,9 +1074,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, diff --git a/runtime/polkadot/src/weights/frame_system.rs b/runtime/polkadot/src/weights/frame_system.rs index e6c489b993ba..8798291667d7 100644 --- a/runtime/polkadot/src/weights/frame_system.rs +++ b/runtime/polkadot/src/weights/frame_system.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for frame_system +//! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-18, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for frame_system. +/// Weight functions for `frame_system`. 
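The `disable-runtime-api` feature touched in the Cargo.toml hunk above exists so Cumulus can reuse the runtime's types without also compiling the exported runtime API functions. As a loose sketch of how a feature flag like that is typically wired up with `cfg` attributes (the feature name comes from the diff; the module and function names below are made up, and the real runtimes gate a generated API block rather than a hand-written one):

```rust
// Types are always compiled and can be reused by downstream crates.
pub struct BlockId(pub u64);

// The API-exporting side is compiled out when `disable-runtime-api` is
// enabled, so consumers of the types don't clash with the exported functions.
#[cfg(not(feature = "disable-runtime-api"))]
pub mod api {
    use super::BlockId;

    pub fn best_block() -> BlockId {
        BlockId(0)
    }
}

fn main() {
    #[cfg(not(feature = "disable-runtime-api"))]
    println!("runtime API compiled in: best block {}", api::best_block().0);

    #[cfg(feature = "disable-runtime-api")]
    println!("runtime API compiled out");
}
```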
pub struct WeightInfo(PhantomData); impl frame_system::WeightInfo for WeightInfo { fn remark(_b: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_balances.rs b/runtime/polkadot/src/weights/pallet_balances.rs index 19ebf60963e3..52555a0ddadd 100644 --- a/runtime/polkadot/src/weights/pallet_balances.rs +++ b/runtime/polkadot/src/weights/pallet_balances.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_balances +//! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_balances. +/// Weight functions for `pallet_balances`. pub struct WeightInfo(PhantomData); impl pallet_balances::WeightInfo for WeightInfo { fn transfer() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_bounties.rs b/runtime/polkadot/src/weights/pallet_bounties.rs index 05e2d438d6dd..dcb3b21d49f7 100644 --- a/runtime/polkadot/src/weights/pallet_bounties.rs +++ b/runtime/polkadot/src/weights/pallet_bounties.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_bounties +//! Autogenerated weights for `pallet_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_bounties. +/// Weight functions for `pallet_bounties`. pub struct WeightInfo(PhantomData); impl pallet_bounties::WeightInfo for WeightInfo { fn propose_bounty(d: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_collective.rs b/runtime/polkadot/src/weights/pallet_collective.rs index dca397766b7b..ec3c568d25e9 100644 --- a/runtime/polkadot/src/weights/pallet_collective.rs +++ b/runtime/polkadot/src/weights/pallet_collective.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_collective +//! Autogenerated weights for `pallet_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_collective. +/// Weight functions for `pallet_collective`. pub struct WeightInfo(PhantomData); impl pallet_collective::WeightInfo for WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_democracy.rs b/runtime/polkadot/src/weights/pallet_democracy.rs index b970e12d1711..1fa65a03fdf3 100644 --- a/runtime/polkadot/src/weights/pallet_democracy.rs +++ b/runtime/polkadot/src/weights/pallet_democracy.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_democracy +//! Autogenerated weights for `pallet_democracy` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_democracy. +/// Weight functions for `pallet_democracy`. pub struct WeightInfo(PhantomData); impl pallet_democracy::WeightInfo for WeightInfo { fn propose() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_election_provider_multi_phase.rs b/runtime/polkadot/src/weights/pallet_election_provider_multi_phase.rs index 7671336e4eb9..a8686e373cbf 100644 --- a/runtime/polkadot/src/weights/pallet_election_provider_multi_phase.rs +++ b/runtime/polkadot/src/weights/pallet_election_provider_multi_phase.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_election_provider_multi_phase +//! Autogenerated weights for `pallet_election_provider_multi_phase` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-18, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_election_provider_multi_phase. +/// Weight functions for `pallet_election_provider_multi_phase`. pub struct WeightInfo(PhantomData); impl pallet_election_provider_multi_phase::WeightInfo for WeightInfo { fn on_initialize_nothing() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_elections_phragmen.rs b/runtime/polkadot/src/weights/pallet_elections_phragmen.rs index 06155383261f..5f57059aee9f 100644 --- a/runtime/polkadot/src/weights/pallet_elections_phragmen.rs +++ b/runtime/polkadot/src/weights/pallet_elections_phragmen.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_elections_phragmen +//! Autogenerated weights for `pallet_elections_phragmen` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_elections_phragmen. +/// Weight functions for `pallet_elections_phragmen`. pub struct WeightInfo(PhantomData); impl pallet_elections_phragmen::WeightInfo for WeightInfo { fn vote_equal(v: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_identity.rs b/runtime/polkadot/src/weights/pallet_identity.rs index a037e0b6737a..9ad4d20cef02 100644 --- a/runtime/polkadot/src/weights/pallet_identity.rs +++ b/runtime/polkadot/src/weights/pallet_identity.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_identity +//! Autogenerated weights for `pallet_identity` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_identity. +/// Weight functions for `pallet_identity`. 
pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { fn add_registrar(r: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_im_online.rs b/runtime/polkadot/src/weights/pallet_im_online.rs index 620ea6c42643..d5c73b82a0f0 100644 --- a/runtime/polkadot/src/weights/pallet_im_online.rs +++ b/runtime/polkadot/src/weights/pallet_im_online.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_im_online +//! Autogenerated weights for `pallet_im_online` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_im_online. +/// Weight functions for `pallet_im_online`. pub struct WeightInfo(PhantomData); impl pallet_im_online::WeightInfo for WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_indices.rs b/runtime/polkadot/src/weights/pallet_indices.rs index 0b590d27adc4..cf976bfb7d6a 100644 --- a/runtime/polkadot/src/weights/pallet_indices.rs +++ b/runtime/polkadot/src/weights/pallet_indices.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_indices +//! Autogenerated weights for `pallet_indices` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_indices. +/// Weight functions for `pallet_indices`. pub struct WeightInfo(PhantomData); impl pallet_indices::WeightInfo for WeightInfo { fn claim() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_membership.rs b/runtime/polkadot/src/weights/pallet_membership.rs index fbc04feb62d4..1c46690f6ab6 100644 --- a/runtime/polkadot/src/weights/pallet_membership.rs +++ b/runtime/polkadot/src/weights/pallet_membership.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_membership +//! Autogenerated weights for `pallet_membership` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_membership. +/// Weight functions for `pallet_membership`. pub struct WeightInfo(PhantomData); impl pallet_membership::WeightInfo for WeightInfo { fn add_member(m: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_multisig.rs b/runtime/polkadot/src/weights/pallet_multisig.rs index 4e7eca209113..8db5643e1e56 100644 --- a/runtime/polkadot/src/weights/pallet_multisig.rs +++ b/runtime/polkadot/src/weights/pallet_multisig.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_multisig +//! Autogenerated weights for `pallet_multisig` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_multisig. +/// Weight functions for `pallet_multisig`. pub struct WeightInfo(PhantomData); impl pallet_multisig::WeightInfo for WeightInfo { fn as_multi_threshold_1(_z: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_proxy.rs b/runtime/polkadot/src/weights/pallet_proxy.rs index cd44de29fec9..a4ced828fd88 100644 --- a/runtime/polkadot/src/weights/pallet_proxy.rs +++ b/runtime/polkadot/src/weights/pallet_proxy.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_proxy +//! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_proxy. +/// Weight functions for `pallet_proxy`. pub struct WeightInfo(PhantomData); impl pallet_proxy::WeightInfo for WeightInfo { fn proxy(p: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_scheduler.rs b/runtime/polkadot/src/weights/pallet_scheduler.rs index d071d389024d..9484e8db00af 100644 --- a/runtime/polkadot/src/weights/pallet_scheduler.rs +++ b/runtime/polkadot/src/weights/pallet_scheduler.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_scheduler +//! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_scheduler. +/// Weight functions for `pallet_scheduler`. pub struct WeightInfo(PhantomData); impl pallet_scheduler::WeightInfo for WeightInfo { fn schedule(s: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_session.rs b/runtime/polkadot/src/weights/pallet_session.rs index b873df595f08..c7548a0afcaf 100644 --- a/runtime/polkadot/src/weights/pallet_session.rs +++ b/runtime/polkadot/src/weights/pallet_session.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_session +//! Autogenerated weights for `pallet_session` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_session. +/// Weight functions for `pallet_session`. 
pub struct WeightInfo(PhantomData); impl pallet_session::WeightInfo for WeightInfo { fn set_keys() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_staking.rs b/runtime/polkadot/src/weights/pallet_staking.rs index 101956ffe9ab..d56a945f09ad 100644 --- a/runtime/polkadot/src/weights/pallet_staking.rs +++ b/runtime/polkadot/src/weights/pallet_staking.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_staking +//! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_staking. +/// Weight functions for `pallet_staking`. pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { fn bond() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_timestamp.rs b/runtime/polkadot/src/weights/pallet_timestamp.rs index f3078196b93e..6c56445db35a 100644 --- a/runtime/polkadot/src/weights/pallet_timestamp.rs +++ b/runtime/polkadot/src/weights/pallet_timestamp.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_timestamp +//! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_timestamp. +/// Weight functions for `pallet_timestamp`. pub struct WeightInfo(PhantomData); impl pallet_timestamp::WeightInfo for WeightInfo { fn set() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_tips.rs b/runtime/polkadot/src/weights/pallet_tips.rs index c3315cc0f250..0dcd11b2bea7 100644 --- a/runtime/polkadot/src/weights/pallet_tips.rs +++ b/runtime/polkadot/src/weights/pallet_tips.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_tips +//! Autogenerated weights for `pallet_tips` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_tips. +/// Weight functions for `pallet_tips`. pub struct WeightInfo(PhantomData); impl pallet_tips::WeightInfo for WeightInfo { fn report_awesome(r: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_treasury.rs b/runtime/polkadot/src/weights/pallet_treasury.rs index 058cf2d845b9..ceb9ba9a444f 100644 --- a/runtime/polkadot/src/weights/pallet_treasury.rs +++ b/runtime/polkadot/src/weights/pallet_treasury.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_treasury +//! Autogenerated weights for `pallet_treasury` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! 
DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_treasury. +/// Weight functions for `pallet_treasury`. pub struct WeightInfo(PhantomData); impl pallet_treasury::WeightInfo for WeightInfo { fn propose_spend() -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_utility.rs b/runtime/polkadot/src/weights/pallet_utility.rs index e760a508f063..2257a086b758 100644 --- a/runtime/polkadot/src/weights/pallet_utility.rs +++ b/runtime/polkadot/src/weights/pallet_utility.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_utility +//! Autogenerated weights for `pallet_utility` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_utility. +/// Weight functions for `pallet_utility`. pub struct WeightInfo(PhantomData); impl pallet_utility::WeightInfo for WeightInfo { fn batch(c: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/pallet_vesting.rs b/runtime/polkadot/src/weights/pallet_vesting.rs index 03bbf926b8dd..93a6e6465024 100644 --- a/runtime/polkadot/src/weights/pallet_vesting.rs +++ b/runtime/polkadot/src/weights/pallet_vesting.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_vesting +//! Autogenerated weights for `pallet_vesting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_vesting. +/// Weight functions for `pallet_vesting`. pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { fn vest_locked(l: u32, ) -> Weight { diff --git a/runtime/polkadot/src/weights/runtime_common_claims.rs b/runtime/polkadot/src/weights/runtime_common_claims.rs index 42d2b2f6d8eb..a64d81e75d7f 100644 --- a/runtime/polkadot/src/weights/runtime_common_claims.rs +++ b/runtime/polkadot/src/weights/runtime_common_claims.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::claims +//! Autogenerated weights for `runtime_common::claims` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::claims. +/// Weight functions for `runtime_common::claims`. 
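All of the weight files touched in the hunks above share the same autogenerated shape. Purely for orientation, here is a hedged sketch of that pattern; the pallet name, the trait stand-in and every number below are hypothetical, and only the structure mirrors the generated files.

```rust
use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;

/// Stand-in for the trait the pallet itself would declare
/// (e.g. `pallet_example::WeightInfo`); hypothetical, for illustration only.
pub trait ExampleWeightInfo {
	fn do_something(p: u32) -> Weight;
}

/// Weight functions for the hypothetical `pallet_example`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> ExampleWeightInfo for WeightInfo<T> {
	/// A base cost, a linear per-item component `p`, plus one storage read
	/// and one storage write priced via the runtime's `DbWeight`.
	fn do_something(p: u32) -> Weight {
		(25_000_000 as Weight)
			.saturating_add((150_000 as Weight).saturating_mul(p as Weight))
			.saturating_add(T::DbWeight::get().reads(1 as Weight))
			.saturating_add(T::DbWeight::get().writes(1 as Weight))
	}
}
```

The benchmark CLI regenerates these bodies wholesale, which is why the hunks above only touch their doc comments.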
pub struct WeightInfo(PhantomData); impl runtime_common::claims::WeightInfo for WeightInfo { fn claim() -> Weight { diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index d59aac3a0834..0924fa8dab7f 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -144,10 +144,10 @@ std = [ "pallet-xcm/std", "log/std", ] -# When enabled, the runtime api will not be build. +# When enabled, the runtime API will not be build. # # This is required by Cumulus to access certain types of the -# runtime without clashing with the runtime api exported functions +# runtime without clashing with the runtime API exported functions # in WASM. disable-runtime-api = [] runtime-benchmarks = [ diff --git a/runtime/rococo/src/bridge_messages.rs b/runtime/rococo/src/bridge_messages.rs index 93e7314c737d..f61620666a56 100644 --- a/runtime/rococo/src/bridge_messages.rs +++ b/runtime/rococo/src/bridge_messages.rs @@ -238,11 +238,11 @@ impl Get for GetDeliveryConfirmationTransactionFee { } } -/// This module contains definitions that are used by the messages pallet instance, 'deployed' at Rococo. +/// This module contains definitions that are used by the messages pallet instance, "deployed" at Rococo. mod at_rococo { use super::*; - /// Message bridge that is 'deployed' at Rococo chain and connecting it to Wococo chain. + /// Message bridge that is "deployed" at Rococo chain and connecting it to Wococo chain. #[derive(RuntimeDebug, Clone, Copy)] pub struct AtRococoWithWococoMessageBridge; @@ -283,11 +283,11 @@ mod at_rococo { >; } -/// This module contains definitions that are used by the messages pallet instance, 'deployed' at Wococo. +/// This module contains definitions that are used by the messages pallet instance, "deployed" at Wococo. mod at_wococo { use super::*; - /// Message bridge that is 'deployed' at Wococo chain and connecting it to Rococo chain. + /// Message bridge that is "deployed" at Wococo chain and connecting it to Rococo chain. #[derive(RuntimeDebug, Clone, Copy)] pub struct AtWococoWithRococoMessageBridge; diff --git a/runtime/rococo/src/constants.rs b/runtime/rococo/src/constants.rs index 1264a746f7df..b4949544fe65 100644 --- a/runtime/rococo/src/constants.rs +++ b/runtime/rococo/src/constants.rs @@ -63,7 +63,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, frame_system::MaximumBlockWeight] + /// - [0, `frame_system::MaximumBlockWeight`] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index cf107395b650..a416d87fe89d 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -146,9 +146,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -867,7 +867,7 @@ impl pallet_bridge_grandpa::Config for Runtime { type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; } -// Instance that is 'deployed' at Wococo chain. Responsible for dispatching Rococo -> Wococo messages. 
+// Instance that is "deployed" at Wococo chain. Responsible for dispatching Rococo -> Wococo messages. pub type AtWococoFromRococoMessagesDispatch = pallet_bridge_dispatch::DefaultInstance; impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; @@ -881,7 +881,7 @@ impl pallet_bridge_dispatch::Config for Runt type AccountIdConverter = bp_rococo::AccountIdConverter; } -// Instance that is 'deployed' at Rococo chain. Responsible for dispatching Wococo -> Rococo messages. +// Instance that is "deployed" at Rococo chain. Responsible for dispatching Wococo -> Rococo messages. pub type AtRococoFromWococoMessagesDispatch = pallet_bridge_dispatch::Instance1; impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; @@ -904,7 +904,7 @@ parameter_types! { pub const RootAccountForPayments: Option = None; } -// Instance that is 'deployed' at Wococo chain. Responsible for sending Wococo -> Rococo messages +// Instance that is "deployed" at Wococo chain. Responsible for sending Wococo -> Rococo messages // and receiving Rococo -> Wococo messages. pub type AtWococoWithRococoMessagesInstance = pallet_bridge_messages::DefaultInstance; impl pallet_bridge_messages::Config for Runtime { @@ -938,7 +938,7 @@ impl pallet_bridge_messages::Config for Runt type MessageDispatch = crate::bridge_messages::FromRococoMessageDispatch; } -// Instance that is 'deployed' at Rococo chain. Responsible for sending Rococo -> Wococo messages +// Instance that is "deployed" at Rococo chain. Responsible for sending Rococo -> Wococo messages // and receiving Wococo -> Rococo messages. pub type AtRococoWithWococoMessagesInstance = pallet_bridge_messages::Instance1; impl pallet_bridge_messages::Config for Runtime { diff --git a/runtime/test-runtime/src/constants.rs b/runtime/test-runtime/src/constants.rs index 658a4bdca7a0..0cfdfacdec67 100644 --- a/runtime/test-runtime/src/constants.rs +++ b/runtime/test-runtime/src/constants.rs @@ -59,7 +59,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, frame_system::MaximumBlockWeight] + /// - [0, `frame_system::MaximumBlockWeight`] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index ed110376fd4d..70efad71b232 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -550,9 +550,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index 73f79f3aa278..1ecfbf43a078 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -236,9 +236,9 @@ try-runtime = [ "pallet-babe/try-runtime", "runtime-common/try-runtime", ] -# When enabled, the runtime api will not be build. +# When enabled, the runtime API will not be build. 
# # This is required by Cumulus to access certain types of the -# runtime without clashing with the runtime api exported functions +# runtime without clashing with the runtime API exported functions # in WASM. disable-runtime-api = [] diff --git a/runtime/westend/src/constants.rs b/runtime/westend/src/constants.rs index b5b6b354fcb7..ff74d8977bd1 100644 --- a/runtime/westend/src/constants.rs +++ b/runtime/westend/src/constants.rs @@ -61,7 +61,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] + /// - [0, `MAXIMUM_BLOCK_WEIGHT`] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index fa827f174327..e904f6e6d482 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1102,9 +1102,9 @@ pub type Header = generic::Header; pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. +/// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The `SignedExtension` to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, diff --git a/runtime/westend/src/weights/frame_system.rs b/runtime/westend/src/weights/frame_system.rs index 89b3533031b2..abaa20281347 100644 --- a/runtime/westend/src/weights/frame_system.rs +++ b/runtime/westend/src/weights/frame_system.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for frame_system +//! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-18, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for frame_system. +/// Weight functions for `frame_system`. pub struct WeightInfo(PhantomData); impl frame_system::WeightInfo for WeightInfo { fn remark(_b: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_balances.rs b/runtime/westend/src/weights/pallet_balances.rs index 86f2121b4732..4e9f870706b8 100644 --- a/runtime/westend/src/weights/pallet_balances.rs +++ b/runtime/westend/src/weights/pallet_balances.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_balances +//! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_balances. +/// Weight functions for `pallet_balances`.
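The `fee` module comments in the constants hunks above describe mapping the block-weight range onto the balance range. A hedged sketch of what such a `WeightToFeePolynomial` implementation can look like follows; the `CENTS` value, the one-tenth-of-a-cent target and the concrete types are illustrative assumptions, not the actual chain parameters.

```rust
use frame_support::weights::{
	constants::ExtrinsicBaseWeight, WeightToFeeCoefficient, WeightToFeeCoefficients,
	WeightToFeePolynomial,
};
use smallvec::smallvec;
use sp_runtime::Perbill;

type Balance = u128;
const CENTS: Balance = 1_000_000_000; // illustrative value only

pub struct WeightToFee;
impl WeightToFeePolynomial for WeightToFee {
	type Balance = Balance;
	fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
		// Map the extrinsic base weight to an assumed 1/10 of a cent,
		// i.e. a single degree-1 term: fee = coeff * weight.
		let p = CENTS;
		let q = 10 * Balance::from(ExtrinsicBaseWeight::get());
		smallvec![WeightToFeeCoefficient {
			degree: 1,
			negative: false,
			coeff_frac: Perbill::from_rational_approximation(p % q, q),
			coeff_integer: p / q,
		}]
	}
}
```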
pub struct WeightInfo(PhantomData); impl pallet_balances::WeightInfo for WeightInfo { fn transfer() -> Weight { diff --git a/runtime/westend/src/weights/pallet_election_provider_multi_phase.rs b/runtime/westend/src/weights/pallet_election_provider_multi_phase.rs index 31c54f974bed..faf166cf65b8 100644 --- a/runtime/westend/src/weights/pallet_election_provider_multi_phase.rs +++ b/runtime/westend/src/weights/pallet_election_provider_multi_phase.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_election_provider_multi_phase +//! Autogenerated weights for `pallet_election_provider_multi_phase` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-18, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_election_provider_multi_phase. +/// Weight functions for `pallet_election_provider_multi_phase`. pub struct WeightInfo(PhantomData); impl pallet_election_provider_multi_phase::WeightInfo for WeightInfo { fn on_initialize_nothing() -> Weight { diff --git a/runtime/westend/src/weights/pallet_identity.rs b/runtime/westend/src/weights/pallet_identity.rs index 1ca5dabd6ff2..dac367578ab8 100644 --- a/runtime/westend/src/weights/pallet_identity.rs +++ b/runtime/westend/src/weights/pallet_identity.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_identity +//! Autogenerated weights for `pallet_identity` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_identity. +/// Weight functions for `pallet_identity`. pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { fn add_registrar(r: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_im_online.rs b/runtime/westend/src/weights/pallet_im_online.rs index fc7cf81a9d40..72ebf13d83de 100644 --- a/runtime/westend/src/weights/pallet_im_online.rs +++ b/runtime/westend/src/weights/pallet_im_online.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_im_online +//! Autogenerated weights for `pallet_im_online` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_im_online. +/// Weight functions for `pallet_im_online`. 
pub struct WeightInfo(PhantomData); impl pallet_im_online::WeightInfo for WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_indices.rs b/runtime/westend/src/weights/pallet_indices.rs index 8bb176f7ce2d..ae663707e70b 100644 --- a/runtime/westend/src/weights/pallet_indices.rs +++ b/runtime/westend/src/weights/pallet_indices.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_indices +//! Autogenerated weights for `pallet_indices` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_indices. +/// Weight functions for `pallet_indices`. pub struct WeightInfo(PhantomData); impl pallet_indices::WeightInfo for WeightInfo { fn claim() -> Weight { diff --git a/runtime/westend/src/weights/pallet_multisig.rs b/runtime/westend/src/weights/pallet_multisig.rs index 8dc861b508a8..7ffaafee207b 100644 --- a/runtime/westend/src/weights/pallet_multisig.rs +++ b/runtime/westend/src/weights/pallet_multisig.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_multisig +//! Autogenerated weights for `pallet_multisig` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_multisig. +/// Weight functions for `pallet_multisig`. pub struct WeightInfo(PhantomData); impl pallet_multisig::WeightInfo for WeightInfo { fn as_multi_threshold_1(_z: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_proxy.rs b/runtime/westend/src/weights/pallet_proxy.rs index e554547918f8..6b47b4ee19c7 100644 --- a/runtime/westend/src/weights/pallet_proxy.rs +++ b/runtime/westend/src/weights/pallet_proxy.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_proxy +//! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_proxy. +/// Weight functions for `pallet_proxy`. pub struct WeightInfo(PhantomData); impl pallet_proxy::WeightInfo for WeightInfo { fn proxy(p: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_scheduler.rs b/runtime/westend/src/weights/pallet_scheduler.rs index d2c20742a91e..6443c0817713 100644 --- a/runtime/westend/src/weights/pallet_scheduler.rs +++ b/runtime/westend/src/weights/pallet_scheduler.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_scheduler +//! Autogenerated weights for `pallet_scheduler` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_scheduler. +/// Weight functions for `pallet_scheduler`. pub struct WeightInfo(PhantomData); impl pallet_scheduler::WeightInfo for WeightInfo { fn schedule(s: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_session.rs b/runtime/westend/src/weights/pallet_session.rs index 4c2118257018..e506eebf803c 100644 --- a/runtime/westend/src/weights/pallet_session.rs +++ b/runtime/westend/src/weights/pallet_session.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_session +//! Autogenerated weights for `pallet_session` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_session. +/// Weight functions for `pallet_session`. pub struct WeightInfo(PhantomData); impl pallet_session::WeightInfo for WeightInfo { fn set_keys() -> Weight { diff --git a/runtime/westend/src/weights/pallet_staking.rs b/runtime/westend/src/weights/pallet_staking.rs index 75c3078a1712..8b57d67a0efc 100644 --- a/runtime/westend/src/weights/pallet_staking.rs +++ b/runtime/westend/src/weights/pallet_staking.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_staking +//! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_staking. +/// Weight functions for `pallet_staking`. pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { fn bond() -> Weight { diff --git a/runtime/westend/src/weights/pallet_timestamp.rs b/runtime/westend/src/weights/pallet_timestamp.rs index ecf0ac9bc1b6..ca8250e8d80d 100644 --- a/runtime/westend/src/weights/pallet_timestamp.rs +++ b/runtime/westend/src/weights/pallet_timestamp.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_timestamp +//! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_timestamp. +/// Weight functions for `pallet_timestamp`. 
pub struct WeightInfo(PhantomData); impl pallet_timestamp::WeightInfo for WeightInfo { fn set() -> Weight { diff --git a/runtime/westend/src/weights/pallet_utility.rs b/runtime/westend/src/weights/pallet_utility.rs index 8ee0f466139c..760a18f709a8 100644 --- a/runtime/westend/src/weights/pallet_utility.rs +++ b/runtime/westend/src/weights/pallet_utility.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_utility +//! Autogenerated weights for `pallet_utility` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_utility. +/// Weight functions for `pallet_utility`. pub struct WeightInfo(PhantomData); impl pallet_utility::WeightInfo for WeightInfo { fn batch(c: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/pallet_vesting.rs b/runtime/westend/src/weights/pallet_vesting.rs index 4d6fb8a75bea..9bcaea7be251 100644 --- a/runtime/westend/src/weights/pallet_vesting.rs +++ b/runtime/westend/src/weights/pallet_vesting.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for pallet_vesting +//! Autogenerated weights for `pallet_vesting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for pallet_vesting. +/// Weight functions for `pallet_vesting`. pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { fn vest_locked(l: u32, ) -> Weight { diff --git a/runtime/westend/src/weights/runtime_common_auctions.rs b/runtime/westend/src/weights/runtime_common_auctions.rs index 808be370df6c..7dcf6e258595 100644 --- a/runtime/westend/src/weights/runtime_common_auctions.rs +++ b/runtime/westend/src/weights/runtime_common_auctions.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::auctions +//! Autogenerated weights for `runtime_common::auctions` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::auctions. +/// Weight functions for `runtime_common::auctions`. pub struct WeightInfo(PhantomData); impl runtime_common::auctions::WeightInfo for WeightInfo { fn new_auction() -> Weight { diff --git a/runtime/westend/src/weights/runtime_common_crowdloan.rs b/runtime/westend/src/weights/runtime_common_crowdloan.rs index 23a9f78b6374..217bfbb9e5bd 100644 --- a/runtime/westend/src/weights/runtime_common_crowdloan.rs +++ b/runtime/westend/src/weights/runtime_common_crowdloan.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::crowdloan +//! 
Autogenerated weights for `runtime_common::crowdloan` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::crowdloan. +/// Weight functions for `runtime_common::crowdloan`. pub struct WeightInfo(PhantomData); impl runtime_common::crowdloan::WeightInfo for WeightInfo { fn create() -> Weight { diff --git a/runtime/westend/src/weights/runtime_common_paras_registrar.rs b/runtime/westend/src/weights/runtime_common_paras_registrar.rs index aeb93ac07430..242fe9bf99db 100644 --- a/runtime/westend/src/weights/runtime_common_paras_registrar.rs +++ b/runtime/westend/src/weights/runtime_common_paras_registrar.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::paras_registrar +//! Autogenerated weights for `runtime_common::paras_registrar` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::paras_registrar. +/// Weight functions for `runtime_common::paras_registrar`. pub struct WeightInfo(PhantomData); impl runtime_common::paras_registrar::WeightInfo for WeightInfo { fn reserve() -> Weight { diff --git a/runtime/westend/src/weights/runtime_common_slots.rs b/runtime/westend/src/weights/runtime_common_slots.rs index 6c886e7f0070..b4982ed56050 100644 --- a/runtime/westend/src/weights/runtime_common_slots.rs +++ b/runtime/westend/src/weights/runtime_common_slots.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for runtime_common::slots +//! Autogenerated weights for `runtime_common::slots` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-07-02, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -40,7 +40,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for runtime_common::slots. +/// Weight functions for `runtime_common::slots`. 
pub struct WeightInfo(PhantomData); impl runtime_common::slots::WeightInfo for WeightInfo { fn force_lease() -> Weight { diff --git a/scripts/gitlab/lingua.dic b/scripts/gitlab/lingua.dic new file mode 100644 index 000000000000..a532700e3a5d --- /dev/null +++ b/scripts/gitlab/lingua.dic @@ -0,0 +1,264 @@ +150 +accessor/MS +activations +adversary/SM +annualised +Apache-2.0/M +API +APIs +arg/MS +assignee/SM +async +asynchrony +autogenerated +backable +backend/MS +benchmark/DSMG +BFT/M +bitfield/MS +bitwise +blake2/MS +blockchain/MS +borked +broadcast/UDSMG +BTC/S +canonicalize/D +CentOS +CLI/MS +codebase/SM +codec/SM +commit/D +computable +conclude/UD +config/MS +could've +crowdfund +crowdloan/MSG +crypto/MS +CSM +Cucumber/MS +customizable/B +DDoS +Debian/M +decodable/MS +deduplicated +deinitializing +dequeue/SD +deregister +deserialize/G +disincentivize/D +dispatchable/SM +DMP/SM +DMQ +DoS +DOT +DOTs +ECDSA +ed25519 +encodable +enqueue/D +enqueue/DMSG +entrypoint/MS +ERC-20 +ETH/S +ethereum/MS +externality/MS +extrinsic +extrinsics +fedora/M +FRAME/MS +FSMs +gameable +getter/MS +GiB/S +GPL/M +GPLv3/M +Grafana/MS +Gurke/MS +Handler/MS +HMP/SM +HRMP +https +iff +implementer/MS +includable +include/BG +increment/DSMG +inherent +inherents +initialize/CRG +initializer +instantiate/B +instantiation/SM +intrinsic +intrinsics +invariant/MS +invariants +inverter/MS +io +IP/S +isn +isolate/BG +iterable +jaeger/MS +js +keccak256/M +keypair/MS +keystore/MS +Kovan +KSM/S +Kubernetes/MS +kusama/S +KYC/M +lib +libp2p +lifecycle/MS +lookahead/MS +lookup/MS +LRU +mainnet/MS +malus +MB/M +Mbit +merkle/MS +metadata/M +middleware/MS +Millau +misbehavior/SM +misbehaviors +misvalidate/D +MIT/M +MMR +modularity +mpsc +MPSC +MQC/SM +msg +multisig/S +multivalidator/SM +mutex +natively +NFA +NFT/SM +nonces +NTB +offboard/DMSG +onboard/DMSG +oneshot/MS +onwards +OOM/S +others' +ourself +overseer/MS +ownerless +parablock/MS +parachain/MS +parameterization +parameterize/D +parathread/MS +passthrough +PDK +peerset/MS +permission/D +phragmen +picosecond/SM +PoA/MS +polkadot/MS +PoV/MS +PR +preconfigured +preopen +prepend/G +prevalidation +preverify/G +programmatically +prometheus/MS +provisioner/MS +proxy/DMSG +proxy/G +PRs +PVF/S +README/MS +redhat/M +register/CD +repo/MS +reservable +responder/SM +retriability +reverify +roundtrip/MS +rpc +RPC/MS +runtime/MS +rustc/MS +SAFT +scalability +SDF +sending/S +shareable +Simnet/MS +spawn/SR +spawner +sr25519 +startup/MS +stateful +str +struct/MS +subcommand/SM +substream +subsystem/MS +subsystems' +supermajority +systemwide +taskmanager/MS +TCP +teleport/D +teleport/RG +teleportation/SM +teleporter/SM +teleporters +testnet/MS +timestamp/MS +transitionary +trie/MS +trustless/Y +tuple/SM +typesystem +ubuntu/M +UDP +UI +unfinalize/B +unfinalized +union/MSG +unordered +unreceived +unreserve +unreserving +unservable/B +unvested +URI +utilize +v0 +v1 +v2 +validator/SM +ve +vec +verifier +verify/R +versa +version/DMSG +versioned +VMP/SM +VRF/SM +w3f/MS +wakeups +warming/S +wasm/M +wasmtime +Westend/M +wildcard/MS +WND/S +Wococo +XCM/S +XCMP/M diff --git a/.config/spellcheck.toml b/scripts/gitlab/spellcheck.toml similarity index 71% rename from .config/spellcheck.toml rename to scripts/gitlab/spellcheck.toml index 0b66d54e2413..57c635f09a8d 100644 --- a/.config/spellcheck.toml +++ b/scripts/gitlab/spellcheck.toml @@ -2,12 +2,14 @@ lang = "en_US" search_dirs = ["."] extra_dictionaries = ["lingua.dic"] +skip_os_lookups = true +use_builtin = true [hunspell.quirks] # `Type`'s # 5x # He 
tagged it as 'TheGreatestOfAllTimes' # Transforms' -transform_regex = ["^'([^\\s])'$", "^[0-9]+(?:\\.[0-9]*)?(x|%)$", "^(.*)'$", "^\\+$"] +transform_regex = ["^'([^\\s])'$", "^[0-9]+(?:\\.[0-9]*)?(x|%)$", "^(.*)'$", "^\\+$", "^[0-9]*+k|MB|Mb|ms|Mbit|nd|th|rd$", "^=|>|<|%$"] allow_concatenation = true allow_dashes = true diff --git a/scripts/kubernetes/templates/serviceaccount.yaml b/scripts/kubernetes/templates/serviceaccount.yaml index cee891b1fa1e..b603ad13ddb4 100644 --- a/scripts/kubernetes/templates/serviceaccount.yaml +++ b/scripts/kubernetes/templates/serviceaccount.yaml @@ -1,6 +1,6 @@ {{- if .Values.rbac.enable }} # service account for polkadot pods themselves -# no permissions for the api are required +# no permissions for the API are required apiVersion: v1 kind: ServiceAccount metadata: diff --git a/simnet_tests/README.md b/simnet_tests/README.md index 9c6a74c56823..2a32a1e28d5c 100644 --- a/simnet_tests/README.md +++ b/simnet_tests/README.md @@ -1,39 +1,38 @@ -# Simulation tests, or high level integration tests. - +# Simulation tests, or high level integration tests _The content of this directory is meant to be used by Parity's private CI/CD infrastructure with private tools. At the moment those tools are still early stage of development and we don't know if / when they will available for public use._ +## Contents of this directory -## Content of this dir. - -`configs` dir contains config files in toml format that describe how to +`configs` directory contains config files in toml format that describe how to configure the simulation network that you want to launch. -`tests` dir contains [cucumber](https://cucumber.io/) files. Those are +`tests` directory contains [Cucumber](https://cucumber.io/) files. Those are Behavior-Driven Development test files that describe tests in plain English. Under the hood there are assertions that specific metrics should have specific -values. +values. At the moment we have only one test for parachains: `/parachains.features` -This test uses a JS script that we added to simnet image and it's launched +This test uses a JS script that we added to Simnet image and it's launched by this step in the cucumber file: -` Then launch 'node' with parameters '--unhandled-rejections=strict /usr/local/bin/simnet_scripts test_parachain ./configs/adder.json ws://localhost:11222 100 10'` +`Then launch 'node' with parameters '--unhandled-rejections=strict /usr/local/bin/simnet_scripts test_parachain ./configs/adder.json ws://localhost:11222 100 10'` -`run_test.sh` is an entry point for running all tests in the folder. -Any setup required for tests (but cannot be done in configs) is performed -here. The main script's responsibility is to run [gurke](https://github.com/paritytech/gurke) +`run_test.sh` is an entry point for running all tests in the folder. +Any setup required for tests (but cannot be done in configs) is performed +here. The main script's responsibility is to run [Gurke](https://github.com/paritytech/gurke) with passed parameters. -In order to use this script locally, you need to install -[gurke](https://github.com/paritytech/gurke) -Once you have access to a kubernetes cluster (meaning you can do `kubectl get pods`) +In order to use this script locally, you need to install +[Gurke](https://github.com/paritytech/gurke) +Once you have access to a kubernetes cluster (meaning you can do `kubectl get pods`) you can run this script with no arguments, like `./run_test.sh` and tests should run. 
-Kubernetes cluster can be local, spawned with +Kubernetes cluster can be local, spawned with [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) -or an instance living in the -[cloud](https://github.com/paritytech/gurke/blob/main/docs/How-to-setup-access-to-gke-k8s-cluster.md) +or an instance living in the +[cloud](https://github.com/paritytech/gurke/blob/main/docs/How-to-setup-access-to-gke-k8s-cluster.md) + +### [Here is link to barcamp presentation of Simnet](https://www.crowdcast.io/e/ph49xu01) -### [Here is link to barcamp presenation of simnet](https://www.crowdcast.io/e/ph49xu01) -### [Here is link to the simnet repo, hosted on private gitlab](https://gitlab.parity.io/parity/simnet/-/tree/master) +### [Here is link to the Simnet repo, hosted on private gitlab](https://gitlab.parity.io/parity/simnet/-/tree/master) diff --git a/utils/staking-miner/src/main.rs b/utils/staking-miner/src/main.rs index 271e749f9fa2..67e927424a16 100644 --- a/utils/staking-miner/src/main.rs +++ b/utils/staking-miner/src/main.rs @@ -17,7 +17,7 @@ //! # Polkadot Staking Miner. //! //! Simple bot capable of monitoring a polkadot (and cousins) chain and submitting solutions to the -//! 'pallet-election-provider-multi-phase'. See `--help` for more details. +//! `pallet-election-provider-multi-phase`. See `--help` for more details. //! //! # Implementation Notes: //! @@ -239,7 +239,7 @@ enum Command { Monitor(MonitorConfig), /// Just compute a solution now, and don't submit it. DryRun(DryRunConfig), - /// Provide a solution that can be submitted to the chian as an emergency response. + /// Provide a solution that can be submitted to the chain as an emergency response. EmergencySolution, } @@ -269,7 +269,7 @@ struct DryRunConfig { #[derive(Debug, Clone, StructOpt)] struct SharedConfig { - /// The ws node to connect to. + /// The `ws` node to connect to. #[structopt(long, default_value = DEFAULT_URI)] uri: String, @@ -283,7 +283,7 @@ struct SharedConfig { #[derive(Debug, Clone, StructOpt)] struct Opt { - /// The ws node to connect to. + /// The `ws` node to connect to. #[structopt(flatten)] shared: SharedConfig, diff --git a/utils/staking-miner/src/prelude.rs b/utils/staking-miner/src/prelude.rs index 7989c2e3c66b..646cab444b18 100644 --- a/utils/staking-miner/src/prelude.rs +++ b/utils/staking-miner/src/prelude.rs @@ -44,5 +44,5 @@ pub use pallet_election_provider_multi_phase as EPM; /// The externalities type. pub type Ext = sp_io::TestExternalities; -/// The key pair type being used. We 'strongly' assume sr25519 for simplicity. +/// The key pair type being used. We "strongly" assume sr25519 for simplicity. pub type Pair = sp_core::sr25519::Pair; diff --git a/utils/staking-miner/src/signer.rs b/utils/staking-miner/src/signer.rs index 409d7befab8a..6d7ffe8753a2 100644 --- a/utils/staking-miner/src/signer.rs +++ b/utils/staking-miner/src/signer.rs @@ -32,7 +32,7 @@ pub(crate) struct Signer { pub(crate) account: AccountId, /// The full crypto key-pair. pub(crate) pair: Pair, - /// The raw uri read from file. + /// The raw URI read from file. pub(crate) uri: String, } @@ -51,7 +51,7 @@ pub(crate) async fn get_account_info( .await } -/// Read the signer account's uri from the given `path`. +/// Read the signer account's URI from the given `path`. 
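The doc comment just above describes reading the signer account's URI from a file; the crate's actual `read_signer_uri`, whose async and generic signature continues below, is the authoritative version. As a hedged, simplified sketch of the same idea using `sp_core` directly (the function name and the error handling are illustrative):

```rust
use sp_core::{sr25519, Pair as PairT};
use std::path::Path;

/// Simplified stand-in: read a secret URI (e.g. "//Alice" or a mnemonic phrase)
/// from `path` and derive the sr25519 key pair from it. Illustration only.
fn read_pair_from_file(path: &Path) -> Result<sr25519::Pair, String> {
	let uri = std::fs::read_to_string(path)
		.map_err(|e| format!("cannot read {:?}: {}", path, e))?;
	// `Pair::from_string` accepts secret URIs, optionally with a password override.
	sr25519::Pair::from_string(uri.trim(), None)
		.map_err(|e| format!("invalid secret URI: {:?}", e))
}
```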
pub(crate) async fn read_signer_uri< P: AsRef, T: frame_system::Config, diff --git a/xcm/src/lib.rs b/xcm/src/lib.rs index 9768be8dacaa..1addc44bd552 100644 --- a/xcm/src/lib.rs +++ b/xcm/src/lib.rs @@ -48,7 +48,7 @@ pub mod opaque { pub use crate::v0::opaque::{Xcm, Order}; } - /// The basic VersionedXcm type which just uses the `Vec` as an encoded call. + /// The basic `VersionedXcm` type which just uses the `Vec` as an encoded call. pub type VersionedXcm = super::VersionedXcm<()>; } diff --git a/xcm/src/v0/junction.rs b/xcm/src/v0/junction.rs index 0111ee2e129f..b89c665a3903 100644 --- a/xcm/src/v0/junction.rs +++ b/xcm/src/v0/junction.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! Support datastructures for `MultiLocation`, primarily the `Junction` datatype. +//! Support data structures for `MultiLocation`, primarily the `Junction` datatype. use alloc::vec::Vec; use parity_scale_codec::{self, Encode, Decode}; diff --git a/xcm/src/v0/mod.rs b/xcm/src/v0/mod.rs index 368353f2d82a..05aad9581b05 100644 --- a/xcm/src/v0/mod.rs +++ b/xcm/src/v0/mod.rs @@ -171,7 +171,7 @@ pub enum Xcm { /// - `assets`: The asset(s) to be withdrawn. /// - `dest`: The new owner for the assets. /// - `effects`: The orders that should be contained in the `ReserveAssetDeposit` which is sent onwards to - /// `dest. + /// `dest`. /// /// Safety: No concerns. /// diff --git a/xcm/src/v0/multi_asset.rs b/xcm/src/v0/multi_asset.rs index 20032e7169a4..dc682902df65 100644 --- a/xcm/src/v0/multi_asset.rs +++ b/xcm/src/v0/multi_asset.rs @@ -28,7 +28,7 @@ pub enum AssetInstance { /// Undefined - used if the NFA class has only one instance. Undefined, - /// A compact index. Technically this could be greater than u128, but this implementation supports only + /// A compact index. Technically this could be greater than `u128`, but this implementation supports only /// values up to `2**128 - 1`. Index { #[codec(compact)] id: u128 }, diff --git a/xcm/src/v0/multi_location.rs b/xcm/src/v0/multi_location.rs index 10a620cc6f4e..cd01fb320a77 100644 --- a/xcm/src/v0/multi_location.rs +++ b/xcm/src/v0/multi_location.rs @@ -66,7 +66,7 @@ pub enum MultiLocation { X8(Junction, Junction, Junction, Junction, Junction, Junction, Junction, Junction), } -/// Maximum number of junctions a multilocation can contain. +/// Maximum number of junctions a `MultiLocation` can contain. pub const MAX_MULTILOCATION_LENGTH: usize = 8; impl From for MultiLocation { diff --git a/xcm/src/v0/order.rs b/xcm/src/v0/order.rs index a1c8339ecb25..776ac3691c74 100644 --- a/xcm/src/v0/order.rs +++ b/xcm/src/v0/order.rs @@ -49,7 +49,7 @@ pub enum Order { /// - `assets`: The asset(s) to remove from holding. /// - `dest`: The new owner for the assets. /// - `effects`: The orders that should be contained in the `ReserveAssetDeposit` which is sent onwards to - /// `dest. + /// `dest`. /// /// Errors: #[codec(index = 2)] @@ -101,7 +101,7 @@ pub enum Order { #[codec(index = 6)] QueryHolding { #[codec(compact)] query_id: u64, dest: MultiLocation, assets: Vec }, - /// Pay for the execution of some Xcm with up to `weight` picoseconds of execution time, paying for this with + /// Pay for the execution of some XCM with up to `weight` picoseconds of execution time, paying for this with /// up to `fees` from the holding account. 
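The `#[codec(compact)] id: u128` field on `AssetInstance::Index` shown above relies on SCALE compact encoding, which keeps small indices cheap while still covering the full `u128` range. A small standalone illustration with `parity-scale-codec` (the byte values follow directly from the compact encoding rules):

```rust
use parity_scale_codec::{Compact, Encode};

fn main() {
	// Small values take a single byte: 42 encodes as 42 << 2 = 0xa8.
	assert_eq!(Compact(42u128).encode(), vec![0xa8]);
	// The largest representable index still fits: one length byte plus 16 value bytes.
	assert_eq!(Compact(u128::MAX).encode().len(), 17);
}
```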
/// /// Errors: diff --git a/xcm/src/v0/traits.rs b/xcm/src/v0/traits.rs index cbaaa3604b75..29c38902b134 100644 --- a/xcm/src/v0/traits.rs +++ b/xcm/src/v0/traits.rs @@ -171,7 +171,7 @@ impl ExecuteXcm for () { /// /// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each router might return /// `CannotReachDestination` to pass the execution to the next sender item. Note that each `CannotReachDestination` -/// might alter the destination and the xcm message for to the next router. +/// might alter the destination and the XCM message for to the next router. /// /// /// # Example diff --git a/xcm/xcm-builder/src/filter_asset_location.rs b/xcm/xcm-builder/src/filter_asset_location.rs index 31db271e1830..9ad4f40aa71c 100644 --- a/xcm/xcm-builder/src/filter_asset_location.rs +++ b/xcm/xcm-builder/src/filter_asset_location.rs @@ -21,7 +21,7 @@ use xcm::v0::{MultiAsset, MultiLocation}; use frame_support::traits::Get; use xcm_executor::traits::FilterAssetLocation; -/// Accepts an asset IFF it is a native asset. +/// Accepts an asset iff it is a native asset. pub struct NativeAsset; impl FilterAssetLocation for NativeAsset { fn filter_asset_location(asset: &MultiAsset, origin: &MultiLocation) -> bool { @@ -29,7 +29,7 @@ impl FilterAssetLocation for NativeAsset { } } -/// Accepts an asset if it is contained in the given `T`'s `Get` impl. +/// Accepts an asset if it is contained in the given `T`'s `Get` implementation. pub struct Case(PhantomData); impl> FilterAssetLocation for Case { fn filter_asset_location(asset: &MultiAsset, origin: &MultiLocation) -> bool { diff --git a/xcm/xcm-builder/src/fungibles_adapter.rs b/xcm/xcm-builder/src/fungibles_adapter.rs index b0a9946c611c..80cd74c0124d 100644 --- a/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/xcm/xcm-builder/src/fungibles_adapter.rs @@ -21,7 +21,7 @@ use xcm::v0::{Error as XcmError, Result, MultiAsset, MultiLocation, Junction}; use frame_support::traits::{Get, tokens::fungibles, Contains}; use xcm_executor::traits::{TransactAsset, Convert, MatchesFungibles, Error as MatchError}; -/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be TryFrom/TryInto) into +/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be `TryFrom/TryInto`) into /// a `GeneralIndex` junction, prefixed by some `MultiLocation` value. The `MultiLocation` value will typically be a /// `PalletInstance` junction. pub struct AsPrefixedGeneralIndex(PhantomData<(Prefix, AssetId, ConvertAssetId)>); diff --git a/xcm/xcm-builder/src/origin_conversion.rs b/xcm/xcm-builder/src/origin_conversion.rs index daa51f3ee8e8..79dbf957240c 100644 --- a/xcm/xcm-builder/src/origin_conversion.rs +++ b/xcm/xcm-builder/src/origin_conversion.rs @@ -172,7 +172,7 @@ impl< } } -/// EnsureOrigin barrier to convert from dispatch origin to XCM origin, if one exists. +/// `EnsureOrigin` barrier to convert from dispatch origin to XCM origin, if one exists. pub struct EnsureXcmOrigin(PhantomData<(Origin, Conversion)>); impl< Origin: OriginTrait + Clone, @@ -227,7 +227,7 @@ impl< } /// `Convert` implementation to convert from some an origin which implements `Backing` into a corresponding `Plurality` -/// MultiLocation. +/// `MultiLocation`. /// /// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an XCM from a /// `Plurality` origin. 
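To make the `AsPrefixedGeneralIndex` doc comment above concrete: the converter expresses a numeric asset ID as a `GeneralIndex` junction sitting behind a prefix such as a `PalletInstance` junction. The sketch below assumes the v0 `Junction` variant shapes `PalletInstance(u8)` and `GeneralIndex { id }`; check the actual enum for the exact field syntax before relying on it.

```rust
use xcm::v0::{Junction, MultiLocation};

/// Hypothetical location of "asset index 42 in pallet instance 5":
/// a `GeneralIndex` junction prefixed by a `PalletInstance` junction.
fn example_asset_location() -> MultiLocation {
	MultiLocation::X2(
		Junction::PalletInstance(5),
		Junction::GeneralIndex { id: 42 },
	)
}
```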
diff --git a/xcm/xcm-builder/src/weight.rs b/xcm/xcm-builder/src/weight.rs index b627db9c750f..e2096afcaa30 100644 --- a/xcm/xcm-builder/src/weight.rs +++ b/xcm/xcm-builder/src/weight.rs @@ -128,7 +128,7 @@ impl, R: TakeRevenue> Drop for FixedRateOfConcrete } } -/// Weight trader which uses the TransactionPayment pallet to set the right price for weight and then +/// Weight trader which uses the `TransactionPayment` pallet to set the right price for weight and then /// places any weight bought into the right account. pub struct UsingComponents< WeightToFee: WeightToFeePolynomial, diff --git a/xcm/xcm-executor/src/assets.rs b/xcm/xcm-executor/src/assets.rs index 69228112fbf7..c0d35052482b 100644 --- a/xcm/xcm-executor/src/assets.rs +++ b/xcm/xcm-executor/src/assets.rs @@ -26,7 +26,7 @@ pub enum AssetId { } impl AssetId { - /// Prepend a MultiLocation to a concrete asset, giving it a new root location. + /// Prepend a `MultiLocation` to a concrete asset, giving it a new root location. pub fn prepend_location(&mut self, prepend: &MultiLocation) -> Result<(), ()> { if let AssetId::Concrete(ref mut l) = self { l.prepend_with(prepend.clone()).map_err(|_| ())?; diff --git a/xcm/xcm-executor/src/config.rs b/xcm/xcm-executor/src/config.rs index 7fa571177fb8..1ab78ecb7eca 100644 --- a/xcm/xcm-executor/src/config.rs +++ b/xcm/xcm-executor/src/config.rs @@ -22,7 +22,7 @@ use crate::traits::{ OnResponse, }; -/// The trait to parametrize the `XcmExecutor`. +/// The trait to parameterize the `XcmExecutor`. pub trait Config { /// The outer call dispatch type. type Call: Parameter + Dispatchable + GetDispatchInfo; diff --git a/xcm/xcm-executor/src/traits/conversion.rs b/xcm/xcm-executor/src/traits/conversion.rs index 971b4b9e0e98..19b2de1a0076 100644 --- a/xcm/xcm-executor/src/traits/conversion.rs +++ b/xcm/xcm-executor/src/traits/conversion.rs @@ -19,7 +19,7 @@ use parity_scale_codec::{Encode, Decode}; use xcm::v0::{MultiLocation, OriginKind}; /// Generic third-party conversion trait. Use this when you don't want to force the user to use default -/// impls of `From` and `Into` for the types you wish to convert between. +/// implementations of `From` and `Into` for the types you wish to convert between. /// /// One of `convert`/`convert_ref` and `reverse`/`reverse_ref` MUST be implemented. If possible, implement /// `convert_ref`, since this will never result in a clone. Use `convert` when you definitely need to consume @@ -118,7 +118,7 @@ impl Convert, T> for Decoded { fn reverse_ref(value: impl Borrow) -> Result, ()> { Ok(value.borrow().encode()) } } -/// A convertor trait for origin types. +/// A converter `trait` for origin types. /// /// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits. Else, the `Err(_)` /// of the last tuple item is returned. Each intermediate `Err(_)` might return a different `origin` of type `Origin` diff --git a/xcm/xcm-executor/src/traits/should_execute.rs b/xcm/xcm-executor/src/traits/should_execute.rs index 19c8ef4a9c10..da64e4418504 100644 --- a/xcm/xcm-executor/src/traits/should_execute.rs +++ b/xcm/xcm-executor/src/traits/should_execute.rs @@ -31,7 +31,7 @@ pub trait ShouldExecute { /// - `message`: The message itself. /// - `shallow_weight`: The weight of the non-negotiable execution of the message. This does not include any /// embedded XCMs sat behind mechanisms like `BuyExecution` which would need to answer for their own weight. 
- /// - `weight_credit`: The pre-established amount of weight that the system has determined this message may utilise + /// - `weight_credit`: The pre-established amount of weight that the system has determined this message may utilize /// in its execution. Typically non-zero only because of prior fee payment, but could in principle be due to other /// factors. fn should_execute( diff --git a/xcm/xcm-executor/src/traits/transact_asset.rs b/xcm/xcm-executor/src/traits/transact_asset.rs index 8c04bc57ae21..7988422e5e24 100644 --- a/xcm/xcm-executor/src/traits/transact_asset.rs +++ b/xcm/xcm-executor/src/traits/transact_asset.rs @@ -44,7 +44,7 @@ pub trait TransactAsset { /// not be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains /// where the asset is not native then it will generally just be a no-op. /// - /// When composed as a tuple, all type-items are called. It is up to the implementor that there exists no + /// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no /// value for `_what` which can cause side-effects for more than one of the type-items. fn check_in(_origin: &MultiLocation, _what: &MultiAsset) {} @@ -56,7 +56,7 @@ pub trait TransactAsset { /// be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains where /// the asset is not native then it will generally just be a no-op. /// - /// When composed as a tuple, all type-items are called. It is up to the implementor that there exists no + /// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no /// value for `_what` which can cause side-effects for more than one of the type-items. fn check_out(_origin: &MultiLocation, _what: &MultiAsset) {} diff --git a/xcm/xcm-executor/src/traits/weight.rs b/xcm/xcm-executor/src/traits/weight.rs index abfd9ee07f14..dc9589803af5 100644 --- a/xcm/xcm-executor/src/traits/weight.rs +++ b/xcm/xcm-executor/src/traits/weight.rs @@ -41,7 +41,7 @@ pub trait WeightBounds { /// /// This is guaranteed equal to the eventual sum of all `shallow` XCM messages that get executed through /// any internal effects. Inner XCM messages may be executed by: - /// - Order::BuyExecution + /// - `Order::BuyExecution` fn deep(message: &mut Xcm) -> Result; /// Return the total weight for executing `message`.
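As a final illustration of the `weight_credit` parameter documented above: a barrier in the spirit of that contract admits a message only while enough prepaid weight credit remains, deducting the message's shallow weight as it does so. This is a self-contained, simplified sketch with its own trimmed-down trait, not the actual `xcm_executor::traits::ShouldExecute` signature.

```rust
/// Trimmed-down stand-in for the real barrier trait, for illustration only.
pub trait SimpleBarrier {
	fn should_execute(shallow_weight: u64, weight_credit: &mut u64) -> Result<(), ()>;
}

/// Allow execution only while enough prepaid weight credit remains,
/// consuming the credit as messages are admitted.
pub struct TakeCredit;
impl SimpleBarrier for TakeCredit {
	fn should_execute(shallow_weight: u64, weight_credit: &mut u64) -> Result<(), ()> {
		*weight_credit = weight_credit.checked_sub(shallow_weight).ok_or(())?;
		Ok(())
	}
}

fn main() {
	let mut credit = 1_000_000u64;
	assert!(TakeCredit::should_execute(400_000, &mut credit).is_ok());
	assert_eq!(credit, 600_000);
	// A message heavier than the remaining credit is rejected and the credit is untouched.
	assert!(TakeCredit::should_execute(700_000, &mut credit).is_err());
	assert_eq!(credit, 600_000);
}
```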