diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b2716b018d6c0..f6f714ea52b10 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -209,18 +209,6 @@ default: AWS_SECRET_ACCESS_KEY: vault: cicd/gitlab/$CI_PROJECT_PATH/AWS_SECRET_ACCESS_KEY@kv file: false - AWX_TOKEN: - vault: cicd/gitlab/$CI_PROJECT_PATH/AWX_TOKEN@kv - file: false - CRATES_TOKEN: - vault: cicd/gitlab/$CI_PROJECT_PATH/CRATES_TOKEN@kv - file: false - DOCKER_CHAOS_TOKEN: - vault: cicd/gitlab/$CI_PROJECT_PATH/DOCKER_CHAOS_TOKEN@kv - file: false - DOCKER_CHAOS_USER: - vault: cicd/gitlab/$CI_PROJECT_PATH/DOCKER_CHAOS_USER@kv - file: false GITHUB_EMAIL: vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_EMAIL@kv file: false @@ -239,22 +227,13 @@ default: MATRIX_ROOM_ID: vault: cicd/gitlab/$CI_PROJECT_PATH/MATRIX_ROOM_ID@kv file: false - PIPELINE_TOKEN: - vault: cicd/gitlab/$CI_PROJECT_PATH/PIPELINE_TOKEN@kv - file: false - VALIDATOR_KEYS: - vault: cicd/gitlab/$CI_PROJECT_PATH/VALIDATOR_KEYS@kv - file: false - VALIDATOR_KEYS_CHAOS: - vault: cicd/gitlab/$CI_PROJECT_PATH/VALIDATOR_KEYS_CHAOS@kv - file: false + #### stage: .pre skip-if-draft: image: paritytech/tools:latest <<: *kubernetes-env - <<: *vault-secrets stage: .pre rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs @@ -270,7 +249,6 @@ check-runtime: stage: check image: paritytech/tools:latest <<: *kubernetes-env - <<: *vault-secrets rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs variables: @@ -285,7 +263,6 @@ check-signed-tag: stage: check image: paritytech/tools:latest <<: *kubernetes-env - <<: *vault-secrets rules: - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 @@ -548,7 +525,6 @@ cargo-check-macos: stage: build <<: *docker-env <<: *test-refs-no-trigger-prs-only - <<: *vault-secrets script: - git clone --depth=1 diff --git a/Cargo.lock b/Cargo.lock index 837ca8732f0cd..fb84a29f0329b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4506,7 +4506,7 @@ dependencies = [ [[package]] name = "node-template" -version = "3.0.0" +version = "4.0.0-dev" dependencies = [ "clap 3.0.12", "frame-benchmarking", @@ -4543,7 +4543,7 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "3.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-executive", @@ -5817,7 +5817,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "3.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", diff --git a/bin/node-template/README.md b/bin/node-template/README.md index bb4df52f41a8f..a04f6a12ed73a 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -72,20 +72,20 @@ Start the development chain with detailed logging: ```bash RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev ``` + > Development chain means that the state of our chain will be in a tmp folder while the nodes are -> running. Also, **alice** account will be authority and sudo account as declared in the [genesis -> state](https://github.com/substrate-developer-hub/substrate-node-template/blob/main/node/src/chain_spec.rs#L49). -> At the same time the following accounts will be prefunded: +> running. Also, **alice** account will be authority and sudo account as declared in the +> [genesis state](https://github.com/substrate-developer-hub/substrate-node-template/blob/main/node/src/chain_spec.rs#L49). 
+> At the same time the following accounts will be pre-funded: > - Alice > - Bob > - Alice//stash > - Bob//stash -In case of being interested in maintaining the chain' state futher in time a base path other than -a temporary directory must be added so the db can be stored in the provided folder. We could use -this folder to store different chain databases, as a different folder will be created per different - chain that is ran. The following commands shows how to use a newly created folder as our db base - path. +In case of being interested in maintaining the chain's state between runs a base path must be added +so the db can be stored in the provided folder instead of a temporary one. We could use this folder +to store different chain databases, as a different folder will be created per different chain that +is run. The following commands show how to use a newly created folder as our db base path. ```bash // Create a folder to use as the db base path @@ -103,6 +103,7 @@ $ ls ./my-chain-state/chains/dev db keystore network ``` + ### Connect with Polkadot-JS Apps Front-end Once the node template is running locally, you can connect it with **Polkadot-JS Apps** front-end diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md index 4b96da1146b8e..ea133ca847af7 100644 --- a/bin/node-template/docs/rust-setup.md +++ b/bin/node-template/docs/rust-setup.md @@ -2,32 +2,21 @@ title: Installation --- -This page will guide you through the steps needed to prepare a computer for development with the -Substrate Node Template. Since Substrate is built with -[the Rust programming language](https://www.rust-lang.org/), the first thing you will need to do is -prepare the computer for Rust development - these steps will vary based on the computer's operating -system. Once Rust is configured, you will use its toolchains to interact with Rust projects; the -commands for Rust's toolchains will be the same for all supported, Unix-based operating systems. 
+This guide is for reference only, please check the latest information on getting started with Substrate +[here](https://docs.substrate.io/v3/getting-started/installation/). -## Unix-Based Operating Systems +This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. +Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first +thing you will need to do is prepare the computer for Rust development - these steps will vary based +on the computer's operating system. Once Rust is configured, you will use its toolchains to interact +with Rust projects; the commands for Rust's toolchains will be the same for all supported, +Unix-based operating systems. -Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples -in the Substrate [Tutorials](https://docs.substrate.io/tutorials/v3) and -[How-to Guides](https://docs.substrate.io/how-to-guides/v3) use Unix-style terminals to demonstrate -how to interact with Substrate from the command line. - -### macOS +## Build dependencies -Open the Terminal application and execute the following commands: - -```bash -# Install Homebrew if necessary https://brew.sh/ -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" - -# Make sure Homebrew is up-to-date, install openssl and cmake -brew update -brew install openssl cmake -``` +Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples +in the [Substrate Docs](https://docs.substrate.io) use Unix-style terminals to demonstrate how to +interact with Substrate from the command line. 
### Ubuntu/Debian @@ -36,7 +25,7 @@ Use a terminal shell to execute the following commands: ```bash sudo apt update # May prompt for location information -sudo apt install -y cmake pkg-config libssl-dev git build-essential clang libclang-dev curl +sudo apt install -y git clang curl libssl-dev llvm libudev-dev ``` ### Arch Linux @@ -44,39 +33,193 @@ sudo apt install -y cmake pkg-config libssl-dev git build-essential clang libcla Run these commands from a terminal: ```bash -pacman -Syu --needed --noconfirm cmake gcc openssl-1.0 pkgconf git clang -export OPENSSL_LIB_DIR="/usr/lib/openssl-1.0" -export OPENSSL_INCLUDE_DIR="/usr/include/openssl-1.0" +pacman -Syu --needed --noconfirm curl git clang ``` -### Fedora/RHEL/CentOS +### Fedora -Use a terminal to run the following commands: +Run these commands from a terminal: ```bash -# Update sudo dnf update -# Install packages -sudo dnf install cmake pkgconfig rocksdb rocksdb-devel llvm git libcurl libcurl-devel curl-devel clang +sudo dnf install clang curl git openssl-devel +``` + +### OpenSUSE + +Run these commands from a terminal: + +```bash +sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel +``` + +### macOS + +> **Apple M1 ARM** +> If you have an Apple M1 ARM system on a chip, make sure that you have Apple Rosetta 2 +> installed through `softwareupdate --install-rosetta`. This is only needed to run the +> `protoc` tool during the build. The build itself and the target binaries would remain native. + +Open the Terminal application and execute the following commands: + +```bash +# Install Homebrew if necessary https://brew.sh/ +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" + +# Make sure Homebrew is up-to-date, install openssl +brew update +brew install openssl ``` -## Rust Developer Environment +### Windows + +**_PLEASE NOTE:_** Native development of Substrate is _not_ very well supported! 
It is _highly_ +recommended to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) +(WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). +Please refer to the separate +[guide for native Windows development](https://docs.substrate.io/v3/getting-started/windows-users/). -This project uses [`rustup`](https://rustup.rs/) to help manage the Rust toolchain. First install -and configure `rustup`: +## Rust developer environment + +This guide uses an installer and the `rustup` tool to manage the Rust toolchain. +First install and configure `rustup`: ```bash # Install -curl https://sh.rustup.rs -sSf | sh +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # Configure source ~/.cargo/env ``` -Finally, configure the Rust toolchain: +Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target: ```bash rustup default stable +rustup update +rustup update nightly +rustup target add wasm32-unknown-unknown --toolchain nightly +``` + +## Test your set-up + +Now the best way to ensure that you have successfully prepared a computer for Substrate +development is to follow the steps in [our first Substrate tutorial](https://docs.substrate.io/tutorials/v3/create-your-first-substrate-chain/). + +## Troubleshooting Substrate builds + +Sometimes you can't get the Substrate node template +to compile out of the box. Here are some tips to help you work through that. 
+ +### Rust configuration check + +To see what Rust toolchain you are presently using, run: + +```bash +rustup show +``` + +This will show something like this (Ubuntu example) output: + +```text +Default host: x86_64-unknown-linux-gnu +rustup home: /home/user/.rustup + +installed toolchains +-------------------- + +stable-x86_64-unknown-linux-gnu (default) +nightly-2020-10-06-x86_64-unknown-linux-gnu +nightly-x86_64-unknown-linux-gnu + +installed targets for active toolchain +-------------------------------------- + +wasm32-unknown-unknown +x86_64-unknown-linux-gnu + +active toolchain +---------------- + +stable-x86_64-unknown-linux-gnu (default) +rustc 1.50.0 (cb75ad5db 2021-02-10) +``` + +As you can see above, the default toolchain is stable, and the +`nightly-x86_64-unknown-linux-gnu` toolchain as well as its `wasm32-unknown-unknown` target is installed. +You also see that `nightly-2020-10-06-x86_64-unknown-linux-gnu` is installed, but is not used unless explicitly defined as illustrated in the [specify your nightly version](#specifying-nightly-version) +section. + +### WebAssembly compilation + +Substrate uses [WebAssembly](https://webassembly.org) (Wasm) to produce portable blockchain +runtimes. You will need to configure your Rust compiler to use +[`nightly` builds](https://doc.rust-lang.org/book/appendix-07-nightly-rust.html) to allow you to +compile Substrate runtime code to the Wasm target. + +> There are upstream issues in Rust that need to be resolved before all of Substrate can use the stable Rust toolchain. +> [This is our tracking issue](https://github.com/paritytech/substrate/issues/1252) if you're curious as to why and how this will be resolved. + +#### Latest nightly for Substrate `master` + +Developers who are building Substrate _itself_ should always use the latest bug-free versions of +Rust stable and nightly. 
This is because the Substrate codebase follows the tip of Rust nightly, +which means that changes in Substrate often depend on upstream changes in the Rust nightly compiler. +To ensure your Rust compiler is always up to date, you should run: + +```bash +rustup update rustup update nightly -rustup update stable rustup target add wasm32-unknown-unknown --toolchain nightly ``` + +> NOTE: It may be necessary to occasionally rerun `rustup update` if a change in the upstream Substrate +> codebase depends on a new feature of the Rust compiler. When you do this, both your nightly +> and stable toolchains will be pulled to the most recent release, and for nightly, it is +> generally _not_ expected to compile WASM without error (although it very often does). +> Be sure to [specify your nightly version](#specifying-nightly-version) if you get WASM build errors +> from `rustup` and [downgrade nightly as needed](#downgrading-rust-nightly). + +#### Rust nightly toolchain + +If you want to guarantee that your build works on your computer as you update Rust and other +dependencies, you should use a specific Rust nightly version that is known to be +compatible with the version of Substrate they are using; this version will vary from project to +project and different projects may use different mechanisms to communicate this version to +developers. For instance, the Polkadot client specifies this information in its +[release notes](https://github.com/paritytech/polkadot/releases). 
+ +```bash +# Specify the specific nightly toolchain in the date below: +rustup install nightly- +``` + +#### Wasm toolchain + +Now, configure the nightly version to work with the Wasm compilation target: + +```bash +rustup target add wasm32-unknown-unknown --toolchain nightly- +``` + +### Specifying nightly version + +Use the `WASM_BUILD_TOOLCHAIN` environment variable to specify the Rust nightly version a Substrate +project should use for Wasm compilation: + +```bash +WASM_BUILD_TOOLCHAIN=nightly- cargo build --release +``` + +> Note that this only builds _the runtime_ with the specified nightly. The rest of project will be +> compiled with **your default toolchain**, i.e. the latest installed stable toolchain. + +### Downgrading Rust nightly + +If your computer is configured to use the latest Rust nightly and you would like to downgrade to a +specific nightly version, follow these steps: + +```bash +rustup uninstall nightly +rustup install nightly- +rustup target add wasm32-unknown-unknown --toolchain nightly- +``` diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index c2b80e2fa08c5..a275e9ad819e3 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template" -version = "3.0.0" +version = "4.0.0-dev" description = "A fresh FRAME-based Substrate node, ready for hacking." 
authors = ["Substrate DevHub "] homepage = "https://substrate.io/" @@ -52,7 +52,8 @@ pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } -node-template-runtime = { version = "3.0.0", path = "../runtime" } +# Local Dependencies +node-template-runtime = { version = "4.0.0-dev", path = "../runtime" } [build-dependencies] substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 9702501a3e0ea..4dc4fb3c6e528 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-template" -version = "3.0.0" +version = "4.0.0-dev" description = "FRAME pallet template for defining custom runtime logic." authors = ["Substrate DevHub "] homepage = "https://substrate.io/" diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index b757029f2581d..eb8fb9eb52a87 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "node-template-runtime" -version = "3.0.0" -description = "A fresh FRAME-based Substrate runtime, ready for hacking." +version = "4.0.0-dev" +description = "A fresh FRAME-based Substrate node, ready for hacking." authors = ["Substrate DevHub "] homepage = "https://substrate.io/" edition = "2021" @@ -27,10 +27,10 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. 
pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "4.0.0-dev"} +sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/consensus/aura" } sp-core = { version = "4.1.0-dev", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "4.0.0-dev"} +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } sp-runtime = { version = "4.1.0-dev", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } @@ -47,7 +47,8 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } hex-literal = { version = "0.3.4", optional = true } -pallet-template = { version = "3.0.0", default-features = false, path = "../pallets/template" } +# Local Dependencies +pallet-template = { version = "4.0.0-dev", default-features = false, path = "../pallets/template" } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 
8f00dd8f64b4c..e70adc6aa7238 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -23,6 +23,7 @@ #![recursion_limit = "256"] use codec::{Decode, Encode, MaxEncodedLen}; +use frame_election_provider_support::onchain; use frame_support::{ construct_runtime, parameter_types, traits::{ @@ -528,12 +529,6 @@ parameter_types! { pub OffchainRepeat: BlockNumber = 5; } -use frame_election_provider_support::onchain; -impl onchain::Config for Runtime { - type Accuracy = Perbill; - type DataProvider = Staking; -} - pub struct StakingBenchmarkingConfig; impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig { type MaxNominators = ConstU32<1000>; @@ -649,6 +644,11 @@ impl frame_support::pallet_prelude::Get::DataProvider; +} + impl pallet_election_provider_multi_phase::Config for Runtime { type Event = Event; type Currency = Balances; @@ -671,6 +671,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type DataProvider = Staking; type Solution = NposSolution16; type Fallback = pallet_election_provider_multi_phase::NoFallback; + type GovernanceFallback = + frame_election_provider_support::onchain::OnChainSequentialPhragmen; type Solver = frame_election_provider_support::SequentialPhragmen< AccountId, pallet_election_provider_multi_phase::SolutionAccuracyOf, diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 004c0574fcbe8..bb2579e4a420e 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -70,14 +70,26 @@ pub struct ImportSummary { pub tree_route: Option>, } -/// Import operation wrapper +/// Finalization operation summary. +/// +/// Contains information about the block that just got finalized, +/// including tree heads that became stale at the moment of finalization. +pub struct FinalizeSummary { + /// Blocks that were finalized. + /// The last entry is the one that has been explicitly finalized. + pub finalized: Vec, + /// Heads that became stale during this finalization operation. 
+ pub stale_heads: Vec, +} + +/// Import operation wrapper. pub struct ClientImportOperation> { /// DB Operation. pub op: B::BlockImportOperation, /// Summary of imported block. pub notify_imported: Option>, - /// A list of hashes of blocks that got finalized. - pub notify_finalized: Vec, + /// Summary of finalized block. + pub notify_finalized: Option>, } /// Helper function to apply auxiliary data insertion into an operation. diff --git a/client/api/src/client.rs b/client/api/src/client.rs index b6a5fbfad47a5..9bb212099565b 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -273,10 +273,14 @@ pub struct BlockImportNotification { /// Summary of a finalized block. #[derive(Clone, Debug)] pub struct FinalityNotification { - /// Imported block header hash. + /// Finalized block header hash. pub hash: Block::Hash, - /// Imported block header. + /// Finalized block header. pub header: Block::Header, + /// Path from the old finalized to new finalized parent (implicitly finalized blocks). + pub tree_route: Arc>, + /// Stale branches heads. + pub stale_heads: Arc>, } impl TryFrom> for ChainEvent { @@ -293,6 +297,6 @@ impl TryFrom> for ChainEvent { impl From> for ChainEvent { fn from(n: FinalityNotification) -> Self { - Self::Finalized { hash: n.hash } + Self::Finalized { hash: n.hash, tree_route: n.tree_route } } } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index a22a68d2944fa..2412a7cdf83f0 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -164,6 +164,10 @@ struct ClientSpec { boot_nodes: Vec, telemetry_endpoints: Option, protocol_id: Option, + /// Arbitrary string. Nodes will only synchronize with other nodes that have the same value + /// in their `fork_id`. This can be used in order to segregate nodes in cases when multiple + /// chains have the same genesis hash. 
+ #[serde(default = "Default::default", skip_serializing_if = "Option::is_none")] fork_id: Option, properties: Option, #[serde(flatten)] diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index 680094a74143e..224fbd1a1e01a 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -173,9 +173,7 @@ impl ExtraRequests { } if best_finalized_number > self.best_seen_finalized_number { - // normally we'll receive finality notifications for every block => finalize would be - // enough but if many blocks are finalized at once, some notifications may be omitted - // => let's use finalize_with_ancestors here + // we receive finality notification only for the finalized branch head. match self.tree.finalize_with_ancestors( best_finalized_hash, best_finalized_number, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index d9e27ce575110..b2345f3701b0a 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -976,14 +976,10 @@ where peer.network.service().announce_block(notification.hash, None); } - // We poll `finality_notification_stream`, but we only take the last event. - let mut last = None; - while let Poll::Ready(Some(item)) = + // We poll `finality_notification_stream`. 
+ while let Poll::Ready(Some(notification)) = peer.finality_notification_stream.as_mut().poll_next(cx) { - last = Some(item); - } - if let Some(notification) = last { peer.network.on_block_finalized(notification.hash, notification.header); } } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 7673a7b4c5387..e8ca5343aa0d2 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -30,8 +30,8 @@ use rand::Rng; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, RecordProof}; use sc_client_api::{ backend::{ - self, apply_aux, BlockImportOperation, ClientImportOperation, Finalizer, ImportSummary, - LockImportRun, NewBlockState, StorageProvider, + self, apply_aux, BlockImportOperation, ClientImportOperation, FinalizeSummary, Finalizer, + ImportSummary, LockImportRun, NewBlockState, StorageProvider, }, client::{ BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo, @@ -274,7 +274,7 @@ where let mut op = ClientImportOperation { op: self.backend.begin_operation()?, notify_imported: None, - notify_finalized: Vec::new(), + notify_finalized: None, }; let r = f(&mut op)?; @@ -622,18 +622,6 @@ where None }, }; - // Ensure parent chain is finalized to maintain invariant that - // finality is called sequentially. This will also send finality - // notifications for top 250 newly finalized blocks. - if finalized && parent_exists { - self.apply_finality_with_block_hash( - operation, - parent_hash, - None, - info.best_hash, - make_notifications, - )?; - } operation.op.update_cache(new_cache); storage_changes @@ -641,6 +629,18 @@ where None => None, }; + // Ensure parent chain is finalized to maintain invariant that finality is called + // sequentially. 
+ if finalized && parent_exists { + self.apply_finality_with_block_hash( + operation, + parent_hash, + None, + info.best_hash, + make_notifications, + )?; + } + let is_new_best = !gap_block && (finalized || match fork_choice { @@ -683,11 +683,36 @@ where operation.op.insert_aux(aux)?; - // we only notify when we are already synced to the tip of the chain + // We only notify when we are already synced to the tip of the chain // or if this import triggers a re-org if make_notifications || tree_route.is_some() { if finalized { - operation.notify_finalized.push(hash); + let mut summary = match operation.notify_finalized.take() { + Some(summary) => summary, + None => FinalizeSummary { finalized: Vec::new(), stale_heads: Vec::new() }, + }; + summary.finalized.push(hash); + if parent_exists { + // Add to the stale list all heads that are branching from parent besides our + // current `head`. + for head in self + .backend + .blockchain() + .leaves()? + .into_iter() + .filter(|h| *h != parent_hash) + { + let route_from_parent = sp_blockchain::tree_route( + self.backend.blockchain(), + parent_hash, + head, + )?; + if route_from_parent.retracted().is_empty() { + summary.stale_heads.push(head); + } + } + } + operation.notify_finalized = Some(summary); } operation.notify_imported = Some(ImportSummary { @@ -831,58 +856,82 @@ where operation.op.mark_finalized(BlockId::Hash(block), justification)?; if notify { - // sometimes when syncing, tons of blocks can be finalized at once. - // we'll send notifications spuriously in that case. - const MAX_TO_NOTIFY: usize = 256; - let enacted = route_from_finalized.enacted(); - let start = enacted.len() - std::cmp::min(enacted.len(), MAX_TO_NOTIFY); - for finalized in &enacted[start..] { - operation.notify_finalized.push(finalized.hash); + let finalized = + route_from_finalized.enacted().iter().map(|elem| elem.hash).collect::>(); + + let last_finalized_number = self + .backend + .blockchain() + .number(last_finalized)? 
+ .expect("Finalized block expected to be onchain; qed"); + let mut stale_heads = Vec::new(); + for head in self.backend.blockchain().leaves()? { + let route_from_finalized = + sp_blockchain::tree_route(self.backend.blockchain(), block, head)?; + let retracted = route_from_finalized.retracted(); + let pivot = route_from_finalized.common_block(); + // It is not guaranteed that `backend.blockchain().leaves()` doesn't return + // heads that were in a stale state before this finalization and thus already + // included in previous notifications. We want to skip such heads. + // Given the "route" from the currently finalized block to the head under + // analysis, the condition for it to be added to the new stale heads list is: + // `!retracted.is_empty() && last_finalized_number <= pivot.number` + // 1. "route" has some "retractions". + // 2. previously finalized block number is not greater than the "route" pivot: + // - if `last_finalized_number <= pivot.number` then this is a new stale head; + // - else the stale head was already included by some previous finalization. + if !retracted.is_empty() && last_finalized_number <= pivot.number { + stale_heads.push(head); + } } + operation.notify_finalized = Some(FinalizeSummary { finalized, stale_heads }); } Ok(()) } - fn notify_finalized(&self, notify_finalized: Vec) -> sp_blockchain::Result<()> { + fn notify_finalized( + &self, + notify_finalized: Option>, + ) -> sp_blockchain::Result<()> { let mut sinks = self.finality_notification_sinks.lock(); - if notify_finalized.is_empty() { - // cleanup any closed finality notification sinks - // since we won't be running the loop below which - // would also remove any closed sinks. 
- sinks.retain(|sink| !sink.is_closed()); - - return Ok(()) - } + let mut notify_finalized = match notify_finalized { + Some(notify_finalized) => notify_finalized, + None => { + // Cleanup any closed finality notification sinks + // since we won't be running the loop below which + // would also remove any closed sinks. + sinks.retain(|sink| !sink.is_closed()); + return Ok(()) + }, + }; - // We assume the list is sorted and only want to inform the - // telemetry once about the finalized block. - if let Some(last) = notify_finalized.last() { - let header = self.header(&BlockId::Hash(*last))?.expect( - "Header already known to exist in DB because it is indicated in the tree route; \ - qed", - ); + let last = notify_finalized.finalized.pop().expect( + "At least one finalized block shall exist within a valid finalization summary; qed", + ); - telemetry!( - self.telemetry; - SUBSTRATE_INFO; - "notify.finalized"; - "height" => format!("{}", header.number()), - "best" => ?last, - ); - } + let header = self.header(&BlockId::Hash(last))?.expect( + "Header already known to exist in DB because it is indicated in the tree route; \ + qed", + ); - for finalized_hash in notify_finalized { - let header = self.header(&BlockId::Hash(finalized_hash))?.expect( - "Header already known to exist in DB because it is indicated in the tree route; \ - qed", - ); + telemetry!( + self.telemetry; + SUBSTRATE_INFO; + "notify.finalized"; + "height" => format!("{}", header.number()), + "best" => ?last, + ); - let notification = FinalityNotification { header, hash: finalized_hash }; + let notification = FinalityNotification { + hash: last, + header, + tree_route: Arc::new(notify_finalized.finalized), + stale_heads: Arc::new(notify_finalized.stale_heads), + }; - sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); - } + sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); Ok(()) } @@ -901,7 +950,6 @@ where // temporary leak of closed/discarded notification sinks 
(e.g. // from consensus code). self.import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()) }, }; diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index c6d9ea0a665b3..d1de80d6abad0 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -34,10 +34,10 @@ mod client; mod metrics; mod task_manager; -use std::{collections::HashMap, net::SocketAddr, pin::Pin, task::Poll}; +use std::{collections::HashMap, net::SocketAddr}; use codec::{Decode, Encode}; -use futures::{channel::mpsc, stream, FutureExt, Stream, StreamExt}; +use futures::{channel::mpsc, FutureExt, StreamExt}; use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; @@ -147,26 +147,7 @@ async fn build_network_future< let starting_block = client.info().best_number; // Stream of finalized blocks reported by the client. - let mut finality_notification_stream = { - let mut finality_notification_stream = client.finality_notification_stream().fuse(); - - // We tweak the `Stream` in order to merge together multiple items if they happen to be - // ready. This way, we only get the latest finalized block. - stream::poll_fn(move |cx| { - let mut last = None; - while let Poll::Ready(Some(item)) = - Pin::new(&mut finality_notification_stream).poll_next(cx) - { - last = Some(item); - } - if let Some(last) = last { - Poll::Ready(Some(last)) - } else { - Poll::Pending - } - }) - .fuse() - }; + let mut finality_notification_stream = client.finality_notification_stream().fuse(); loop { futures::select! 
{ diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 535edfadaf29d..2b0ea460c4dd3 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -20,7 +20,9 @@ use futures::executor::block_on; use hex_literal::hex; use parity_scale_codec::{Decode, Encode, Joiner}; use sc_block_builder::BlockBuilderProvider; -use sc_client_api::{in_mem, BlockBackend, BlockchainEvents, StorageProvider}; +use sc_client_api::{ + in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, +}; use sc_client_db::{ Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode, TransactionStorageMode, }; @@ -165,6 +167,24 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, + finalized: &[Hash], + stale_heads: &[Hash], +) { + match notifications.try_next() { + Ok(Some(notif)) => { + let stale_heads_expected: HashSet<_> = stale_heads.iter().collect(); + let stale_heads: HashSet<_> = notif.stale_heads.iter().collect(); + assert_eq!(notif.tree_route.as_ref(), &finalized[..finalized.len() - 1]); + assert_eq!(notif.hash, *finalized.last().unwrap()); + assert_eq!(stale_heads, stale_heads_expected); + }, + Ok(None) => panic!("unexpected notification result, client send channel was closed"), + Err(_) => assert!(finalized.is_empty()), + } +} + #[test] fn construct_genesis_should_work_with_native() { let mut storage = GenesisConfig::new( @@ -822,8 +842,12 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { #[test] fn import_with_justification() { + // block tree: + // G -> A1 -> A2 -> A3 let mut client = substrate_test_runtime_client::new(); + let mut finality_notifications = client.finality_notification_stream(); + // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); @@ -855,6 +879,10 @@ fn import_with_justification() { 
assert_eq!(client.justifications(&BlockId::Hash(a1.hash())).unwrap(), None); assert_eq!(client.justifications(&BlockId::Hash(a2.hash())).unwrap(), None); + + finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); + finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); + assert!(finality_notifications.try_next().is_err()); } #[test] @@ -864,6 +892,9 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { // G -> A1 -> A2 // \ // -> B1 + + let mut finality_notifications = client.finality_notification_stream(); + let a1 = client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap() @@ -902,6 +933,9 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { assert_eq!(client.chain_info().best_hash, b1.hash()); assert_eq!(client.chain_info().finalized_hash, b1.hash()); + + finality_notification_check(&mut finality_notifications, &[b1.hash()], &[a2.hash()]); + assert!(finality_notifications.try_next().is_err()); } #[test] @@ -911,6 +945,9 @@ fn finalizing_diverged_block_should_trigger_reorg() { // G -> A1 -> A2 // \ // -> B1 -> B2 + + let mut finality_notifications = client.finality_notification_stream(); + let a1 = client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap() @@ -975,6 +1012,113 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); assert_eq!(client.chain_info().best_hash, b3.hash()); + + finality_notification_check(&mut finality_notifications, &[b1.hash()], &[a2.hash()]); + assert!(finality_notifications.try_next().is_err()); +} + +#[test] +fn finality_notifications_content() { + let (mut client, _select_chain) = TestClientBuilder::new().build_with_longest_chain(); + + // -> D3 -> D4 + // G -> A1 -> A2 -> A3 + // -> B1 -> B2 + // -> C1 + + let mut finality_notifications = client.finality_notification_stream(); + + let a1 = client + .new_block_at(&BlockId::Number(0), 
Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); + + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); + + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); + + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let b1 = b1.build().unwrap().block; + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); + + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); + + let mut c1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + // needed to make sure B1 gets a different hash from A1 + c1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 2, + nonce: 0, + }) + .unwrap(); + let c1 = c1.build().unwrap().block; + block_on(client.import(BlockOrigin::Own, c1.clone())).unwrap(); + + let mut d3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap(); + // needed to make sure D3 gets a different hash from A3 + d3.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 2, + nonce: 0, + }) + .unwrap(); + let d3 = d3.build().unwrap().block; + block_on(client.import(BlockOrigin::Own, d3.clone())).unwrap(); + + let d4 = client + 
.new_block_at(&BlockId::Hash(d3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + // Postpone import to test behavior of import of finalized block. + + ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); + + // Import and finalize D4 + block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); + + finality_notification_check( + &mut finality_notifications, + &[a1.hash(), a2.hash()], + &[c1.hash(), b2.hash()], + ); + finality_notification_check(&mut finality_notifications, &[d3.hash(), d4.hash()], &[a3.hash()]); + assert!(finality_notifications.try_next().is_err()); } #[test] @@ -1069,6 +1213,8 @@ fn doesnt_import_blocks_that_revert_finality() { let mut client = TestClientBuilder::with_backend(backend).build(); + let mut finality_notifications = client.finality_notification_stream(); + // -> C1 // / // G -> A1 -> A2 @@ -1150,6 +1296,9 @@ fn doesnt_import_blocks_that_revert_finality() { ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); assert_eq!(import_err.to_string(), expected_err.to_string()); + + finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[b2.hash()]); + assert!(finality_notifications.try_next().is_err()); } #[test] diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index c8698e1a2bdd3..b74b061c78831 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -279,7 +279,7 @@ impl ReadyTransactions for std::iter::Empty { /// Events that the transaction pool listens for. pub enum ChainEvent { - /// New best block have been added to the chain + /// New best block have been added to the chain. NewBestBlock { /// Hash of the block. hash: B::Hash, @@ -290,8 +290,10 @@ pub enum ChainEvent { }, /// An existing block has been finalized. Finalized { - /// Hash of just finalized block + /// Hash of just finalized block. 
hash: B::Hash, + /// Path from old finalized to new finalized parent. + tree_route: Arc>, }, } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index b5af2d12d65c9..260d938217ad4 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -709,15 +709,17 @@ where } .boxed() }, - ChainEvent::Finalized { hash } => { + ChainEvent::Finalized { hash, tree_route } => { let pool = self.pool.clone(); async move { - if let Err(e) = pool.validated_pool().on_block_finalized(hash).await { - log::warn!( - target: "txpool", - "Error [{}] occurred while attempting to notify watchers of finalization {}", - e, hash - ) + for hash in tree_route.iter().chain(&[hash]) { + if let Err(e) = pool.validated_pool().on_block_finalized(*hash).await { + log::warn!( + target: "txpool", + "Error [{}] occurred while attempting to notify watchers of finalization {}", + e, hash + ) + } } } .boxed() diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 4aeaf79a61540..21a87f6e006ec 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -387,7 +387,7 @@ fn should_push_watchers_during_maintenance() { let header_hash = header.hash(); block_on(pool.maintain(block_event(header))); - let event = ChainEvent::Finalized { hash: header_hash.clone() }; + let event = ChainEvent::Finalized { hash: header_hash.clone(), tree_route: Arc::new(vec![]) }; block_on(pool.maintain(event)); // then @@ -445,7 +445,7 @@ fn finalization() { let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); - let event = ChainEvent::Finalized { hash: header.hash() }; + let event = ChainEvent::Finalized { hash: header.hash(), tree_route: Arc::new(vec![]) }; block_on(pool.maintain(event)); let mut stream = futures::executor::block_on_stream(watcher); @@ -493,7 +493,7 @@ fn fork_aware_finalization() { b1 = header.hash(); 
block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); - let event = ChainEvent::Finalized { hash: b1 }; + let event = ChainEvent::Finalized { hash: b1, tree_route: Arc::new(vec![]) }; block_on(pool.maintain(event)); } @@ -537,7 +537,7 @@ fn fork_aware_finalization() { block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); - let event = ChainEvent::Finalized { hash: header.hash() }; + let event = ChainEvent::Finalized { hash: header.hash(), tree_route: Arc::new(vec![]) }; block_on(pool.maintain(event)); } @@ -554,7 +554,7 @@ fn fork_aware_finalization() { d1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); - let event = ChainEvent::Finalized { hash: d1 }; + let event = ChainEvent::Finalized { hash: d1, tree_route: Arc::new(vec![]) }; block_on(pool.maintain(event)); } @@ -567,7 +567,7 @@ fn fork_aware_finalization() { let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); - block_on(pool.maintain(ChainEvent::Finalized { hash: e1 })); + block_on(pool.maintain(ChainEvent::Finalized { hash: e1, tree_route: Arc::new(vec![]) })); } for (canon_watcher, h) in canon_watchers { @@ -637,7 +637,7 @@ fn prune_and_retract_tx_at_same_time() { block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); - let event = ChainEvent::Finalized { hash: header.hash() }; + let event = ChainEvent::Finalized { hash: header.hash(), tree_route: Arc::new(vec![]) }; block_on(pool.maintain(event)); header.hash() diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index f1d1d78912c46..2b48373c5b0ea 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -230,7 +230,9 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; -use frame_election_provider_support::{ElectionDataProvider, ElectionProvider}; 
+use frame_election_provider_support::{ + ElectionDataProvider, ElectionProvider, InstantElectionProvider, +}; use frame_support::{ dispatch::DispatchResultWithPostInfo, ensure, @@ -322,6 +324,15 @@ impl ElectionProvider for NoFallback { } } +impl InstantElectionProvider for NoFallback { + fn instant_elect( + _: Option, + _: Option, + ) -> Result, Self::Error> { + Err("NoFallback.") + } +} + /// Current phase of the pallet. #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] pub enum Phase { @@ -555,7 +566,7 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_election_provider_support::NposSolver; + use frame_election_provider_support::{InstantElectionProvider, NposSolver}; use frame_support::{pallet_prelude::*, traits::EstimateCallFee}; use frame_system::pallet_prelude::*; @@ -672,13 +683,23 @@ pub mod pallet { + NposSolution + TypeInfo; - /// Configuration for the fallback + /// Configuration for the fallback. type Fallback: ElectionProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Self::DataProvider, >; + /// Configuration of the governance-only fallback. + /// + /// As a side-note, it is recommend for test-nets to use `type ElectionProvider = + /// OnChainSeqPhragmen<_>` if the test-net is not expected to have thousands of nominators. + type GovernanceFallback: InstantElectionProvider< + AccountId = Self::AccountId, + BlockNumber = Self::BlockNumber, + DataProvider = Self::DataProvider, + >; + /// OCW election solution miner algorithm implementation. type Solver: NposSolver; @@ -1013,6 +1034,37 @@ pub mod pallet { }); Ok(()) } + + /// Trigger the governance fallback. + /// + /// This can only be called when [`Phase::Emergency`] is enabled, as an alternative to + /// calling [`Call::set_emergency_election_result`]. 
+ #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn governance_fallback( + origin: OriginFor, + maybe_max_voters: Option, + maybe_max_targets: Option, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); + + let maybe_max_voters = maybe_max_voters.map(|x| x as usize); + let maybe_max_targets = maybe_max_targets.map(|x| x as usize); + + let supports = + T::GovernanceFallback::instant_elect(maybe_max_voters, maybe_max_targets).map_err( + |e| { + log!(error, "GovernanceFallback failed: {:?}", e); + Error::::FallbackFailed + }, + )?; + + let solution = + ReadySolution { supports, score: [0, 0, 0], compute: ElectionCompute::Fallback }; + + >::put(solution); + Ok(()) + } } #[pallet::event] @@ -1063,6 +1115,8 @@ pub mod pallet { InvalidSubmissionIndex, /// The call is not allowed at this point. CallNotAllowed, + /// The fallback failed + FallbackFailed, } #[pallet::validate_unsigned] diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 1be93c363e321..9ac0ecfef5dce 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -410,6 +410,7 @@ impl crate::Config for Runtime { type WeightInfo = DualMockWeightInfo; type BenchmarkingConfig = TestBenchmarkingConfig; type Fallback = MockFallback; + type GovernanceFallback = NoFallback; type ForceOrigin = frame_system::EnsureRoot; type Solution = TestNposSolution; type VoterSnapshotPerBlock = VoterSnapshotPerBlock; diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 3374e1e97b8be..26efe5107b670 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -319,6 +319,22 @@ pub trait ElectionProvider { fn elect() -> Result, Self::Error>; } +/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure 
an election needs to +/// happen instantly, not asynchronously. +/// +/// The same `DataProvider` is assumed to be used. +/// +/// Consequently, allows for control over the amount of data that is being fetched from the +/// [`ElectionProvider::DataProvider`]. +pub trait InstantElectionProvider: ElectionProvider { + /// Elect a new set of winners, instantly, with the given given limits set on the + /// `DataProvider`. + fn instant_elect( + maybe_max_voters: Option, + maybe_max_targets: Option, + ) -> Result, Self::Error>; +} + /// An election provider to be used only for testing. #[cfg(feature = "std")] pub struct NoElection(sp_std::marker::PhantomData); diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 808b49ba6234d..41245f67fb02c 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -17,7 +17,7 @@ //! An implementation of [`ElectionProvider`] that does an on-chain sequential phragmen. -use crate::{ElectionDataProvider, ElectionProvider}; +use crate::{ElectionDataProvider, ElectionProvider, InstantElectionProvider}; use frame_support::{traits::Get, weights::DispatchClass}; use sp_npos_elections::*; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; @@ -47,8 +47,14 @@ impl From for Error { /// implementation ignores the additional data of the election data provider and gives no insight on /// how much weight was consumed. /// -/// Finally, this implementation does not impose any limits on the number of voters and targets that -/// are provided. +/// Finally, the [`ElectionProvider`] implementation of this type does not impose any limits on the +/// number of voters and targets that are fetched. This could potentially make this unsuitable for +/// execution onchain. On the other hand, the [`InstantElectionProvider`] implementation does limit +/// these inputs. 
+/// +/// It is advisable to use the former ([`ElectionProvider::elect`]) only at genesis, or for testing, +/// the latter [`InstantElectionProvider::instant_elect`] for onchain operations, with thoughtful +/// bounds. pub struct OnChainSequentialPhragmen(PhantomData); /// Configuration trait of [`OnChainSequentialPhragmen`]. @@ -68,16 +74,17 @@ pub trait Config: frame_system::Config { >; } -impl ElectionProvider for OnChainSequentialPhragmen { - type AccountId = T::AccountId; - type BlockNumber = T::BlockNumber; - type Error = Error; - type DataProvider = T::DataProvider; - - fn elect() -> Result, Self::Error> { - let voters = Self::DataProvider::voters(None).map_err(Error::DataProvider)?; - let targets = Self::DataProvider::targets(None).map_err(Error::DataProvider)?; - let desired_targets = Self::DataProvider::desired_targets().map_err(Error::DataProvider)?; +impl OnChainSequentialPhragmen { + fn elect_with( + maybe_max_voters: Option, + maybe_max_targets: Option, + ) -> Result, Error> { + let voters = ::DataProvider::voters(maybe_max_voters) + .map_err(Error::DataProvider)?; + let targets = ::DataProvider::targets(maybe_max_targets) + .map_err(Error::DataProvider)?; + let desired_targets = ::DataProvider::desired_targets() + .map_err(Error::DataProvider)?; let stake_map: BTreeMap = voters .iter() @@ -102,6 +109,26 @@ impl ElectionProvider for OnChainSequentialPhragmen { } } +impl ElectionProvider for OnChainSequentialPhragmen { + type AccountId = T::AccountId; + type BlockNumber = T::BlockNumber; + type Error = Error; + type DataProvider = T::DataProvider; + + fn elect() -> Result, Self::Error> { + Self::elect_with(None, None) + } +} + +impl InstantElectionProvider for OnChainSequentialPhragmen { + fn instant_elect( + maybe_max_voters: Option, + maybe_max_targets: Option, + ) -> Result, Self::Error> { + Self::elect_with(maybe_max_voters, maybe_max_targets) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/utils/frame/remote-externalities/Cargo.toml 
b/utils/frame/remote-externalities/Cargo.toml index 2b634c780613f..343fb9804ffb7 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.8.0", features = ["ws-client", "macros"] } +jsonrpsee = { version = "0.8", features = ["ws-client", "macros"] } env_logger = "0.9" frame-support = { path = "../../../frame/support", optional = true, version = "4.0.0-dev" } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 496a1ff496851..d6dfd1c59d4ee 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -23,13 +23,10 @@ use codec::{Decode, Encode}; use jsonrpsee::{ - core::{ - client::{Client, ClientT}, - Error as RpcError, - }, + core::{client::ClientT, Error as RpcError}, proc_macros::rpc, rpc_params, - ws_client::WsClientBuilder, + ws_client::{WsClient, WsClientBuilder}, }; use log::*; @@ -47,6 +44,7 @@ use sp_runtime::traits::Block as BlockT; use std::{ fs, path::{Path, PathBuf}, + sync::Arc, }; pub mod rpc_api; @@ -126,21 +124,52 @@ impl> From

for SnapshotConfig { } /// Description of the transport protocol (for online execution). -#[derive(Debug)] -pub struct Transport { - uri: String, - client: Option, +#[derive(Debug, Clone)] +pub enum Transport { + /// Use the `URI` to open a new WebSocket connection. + Uri(String), + /// Use existing WebSocket connection. + RemoteClient(Arc), } -impl Clone for Transport { - fn clone(&self) -> Self { - Self { uri: self.uri.clone(), client: None } +impl Transport { + fn as_client(&self) -> Option<&WsClient> { + match self { + Self::RemoteClient(client) => Some(&*client), + _ => None, + } + } + + // Open a new WebSocket connection if it's not connected. + async fn map_uri(&mut self) -> Result<(), &'static str> { + if let Self::Uri(uri) = self { + log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri); + + let ws_client = WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(&uri) + .await + .map_err(|e| { + log::error!(target: LOG_TARGET, "error: {:?}", e); + "failed to build ws client" + })?; + + *self = Self::RemoteClient(Arc::new(ws_client)) + } + + Ok(()) } } impl From for Transport { - fn from(t: String) -> Self { - Self { uri: t, client: None } + fn from(uri: String) -> Self { + Transport::Uri(uri) + } +} + +impl From> for Transport { + fn from(client: Arc) -> Self { + Transport::RemoteClient(client) } } @@ -162,10 +191,9 @@ pub struct OnlineConfig { impl OnlineConfig { /// Return rpc (ws) client. 
- fn rpc_client(&self) -> &Client { + fn rpc_client(&self) -> &WsClient { self.transport - .client - .as_ref() + .as_client() .expect("ws client must have been initialized by now; qed.") } } @@ -173,7 +201,7 @@ impl OnlineConfig { impl Default for OnlineConfig { fn default() -> Self { Self { - transport: Transport { uri: DEFAULT_TARGET.to_owned(), client: None }, + transport: Transport::Uri(DEFAULT_TARGET.to_owned()), at: None, state_snapshot: None, pallets: vec![], @@ -632,19 +660,8 @@ impl Builder { } pub(crate) async fn init_remote_client(&mut self) -> Result<(), &'static str> { - let mut online = self.as_online_mut(); - log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", online.transport.uri); - // First, initialize the ws client. - let ws_client = WsClientBuilder::default() - .max_request_body_size(u32::MAX) - .build(&online.transport.uri) - .await - .map_err(|e| { - log::error!(target: LOG_TARGET, "error: {:?}", e); - "failed to build ws client" - })?; - online.transport.client = Some(ws_client); + self.as_online_mut().transport.map_uri().await?; // Then, if `at` is not set, set it. if self.as_online().at.is_none() { diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs index 53104237fa067..37555de480d4c 100644 --- a/utils/frame/remote-externalities/src/rpc_api.rs +++ b/utils/frame/remote-externalities/src/rpc_api.rs @@ -19,9 +19,9 @@ // TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 use jsonrpsee::{ - core::client::{Client, ClientT}, + core::client::ClientT, rpc_params, - ws_client::WsClientBuilder, + ws_client::{WsClient, WsClientBuilder}, }; use sp_runtime::{ generic::SignedBlock, @@ -73,8 +73,8 @@ where Ok(signed_block.block) } -/// Build a website client that connects to `from`. -async fn build_client>(from: S) -> Result { +/// Build a websocket client that connects to `from`. 
+async fn build_client>(from: S) -> Result { WsClientBuilder::default() .max_request_body_size(u32::MAX) .build(from.as_ref())